From c0c03d2e8f29b5188ea04fb72c7796e027785f24 Mon Sep 17 00:00:00 2001
From: =?UTF-8?q?=E6=9D=8E=E7=A5=96=E5=BD=AC?=
Date: Wed, 5 Mar 2025 14:07:59 +0800
Subject: [PATCH] fix: Fix the issue of returning 502 when using the Ollama
 local service

---
 app/services/llm.py | 15 +++++++++++++++
 requirements.txt    |  1 +
 2 files changed, 16 insertions(+)

diff --git a/app/services/llm.py b/app/services/llm.py
index 2c45ef9..34b3dfb 100644
--- a/app/services/llm.py
+++ b/app/services/llm.py
@@ -7,6 +7,8 @@ import g4f
 from loguru import logger
 from openai import AzureOpenAI, OpenAI
 from openai.types.chat import ChatCompletion
+from ollama import Client as ollamaClient
+from ollama import ChatResponse as ollamaChatResponse
 
 from app.config import config
 
@@ -39,6 +41,19 @@ def _generate_response(prompt: str) -> str:
         base_url = config.app.get("ollama_base_url", "")
         if not base_url:
             base_url = "http://localhost:11434/v1"
+
+
+        client = ollamaClient(host=base_url)
+        response: ollamaChatResponse = client.chat(model=model_name, messages=[
+            {
+                'role': 'user',
+                'content': prompt,
+            },
+        ])
+
+        return response.message.content.strip().split("</think>").pop().strip()
+
+        print(response['message']['content'])
     elif llm_provider == "openai":
         api_key = config.app.get("openai_api_key")
         model_name = config.app.get("openai_model_name")
diff --git a/requirements.txt b/requirements.txt
index b1cfd8d..8c69dce 100644
--- a/requirements.txt
+++ b/requirements.txt
@@ -14,3 +14,4 @@ redis==5.2.0
 python-multipart==0.0.19
 streamlit-authenticator==0.4.1
 pyyaml
+ollama
\ No newline at end of file