fix: Fix 502 error returned when using the Ollama local service

This commit is contained in:
李祖彬 2025-03-05 14:07:59 +08:00
parent cd0cbc8061
commit c0c03d2e8f
2 changed files with 16 additions and 0 deletions

View File

@ -7,6 +7,8 @@ import g4f
from loguru import logger
from openai import AzureOpenAI, OpenAI
from openai.types.chat import ChatCompletion
from ollama import Client as ollamaClient
from ollama import ChatResponse as ollamaChatResponse
from app.config import config
@ -39,6 +41,19 @@ def _generate_response(prompt: str) -> str:
# Resolve the Ollama endpoint from config, falling back to the local default.
base_url = config.app.get("ollama_base_url", "")
if not base_url:
    # NOTE(review): "/v1" is the OpenAI-compatible path; the native `ollama`
    # Client below may expect just "http://localhost:11434" — TODO confirm.
    base_url = "http://localhost:11434/v1"
client = ollamaClient(host=base_url)
response: ollamaChatResponse = client.chat(model=model_name, messages=[
{
'role': 'user',
'content': prompt,
},
])
return response.message.content.strip().split("</think>").pop().strip();
print(response['message']['content'])
elif llm_provider == "openai":
api_key = config.app.get("openai_api_key")
model_name = config.app.get("openai_model_name")

View File

@ -14,3 +14,4 @@ redis==5.2.0
python-multipart==0.0.19
streamlit-authenticator==0.4.1
pyyaml
ollama