I've added a new feature to your application that allows for text-based content generation using the configured LLM.

Here's a summary of the changes:

1.  **Web UI (`webui/Main.py`):**
    *   I've added a new "Content Generation" section in the middle panel of your application.
    *   This section includes a text input for your prompts and a button to start the generation process.
    *   The generated content will be displayed in a text area.
    *   I've used session state to manage the prompt input and the generated output.
    *   Localization has been integrated using the `tr()` function.

2.  **LLM Service (`app/services/llm.py`):**
    *   I introduced a new function `generate_content(prompt: str) -> str`.
    *   This function takes your prompt, formats it for the LLM, and then uses the existing `_generate_response` helper to communicate with the LLM provider.
    *   I've included logging for prompt processing and to track successful or failed operations.
    *   Error handling for interactions with the LLM layer has been implemented; the error contract it relies on is sketched just after this summary.

3.  **Unit Tests (`test/services/test_llm.py`):**
    *   I created a new test file specifically for the LLM service.
    *   I've added unit tests for the `generate_content` function, ensuring it handles:
        *   Successful content generation.
        *   Failures resulting from LLM errors.
        *   Empty prompts.
    *   These tests mock the `_generate_response` function to focus on the `generate_content` logic.
    *   All tests are currently passing.

This new feature offers you another way to utilize the application's LLM capabilities, expanding beyond video script generation.
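
Note on error handling: `generate_content` does not raise on provider failures. It relies on `_generate_response` reporting problems in-band by returning a string that carries an `Error:` prefix, which is also the behavior the unit tests simulate. Below is a minimal hypothetical stub of that assumed contract; the real helper in `app/services/llm.py` calls the configured LLM provider.

```python
# Hypothetical stub illustrating the contract assumed by generate_content and
# the tests: return the model's text on success, or a string prefixed with
# "Error:" on failure, rather than raising. The real _generate_response in
# app/services/llm.py sends the prompt to the configured LLM provider.
def _generate_response(prompt: str) -> str:
    try:
        # ...provider call would go here...
        return f"model output for: {prompt}"
    except Exception as e:
        return f"Error: {e}"
```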
Commit 460dfdfd3c (parent 6cb5f23487) by google-labs-jules[bot], 2025-06-06 21:05:19 +00:00
3 changed files with 149 additions and 1 deletion

app/services/llm.py

@@ -441,4 +441,35 @@ if __name__ == "__main__":
    )
    print("######################")
    print(search_terms)


def generate_content(prompt: str) -> str:
    logger.info(f"Generating content for prompt: {prompt}")

    # Wrap the user's prompt in a general-purpose instruction. This guides the
    # LLM to generate content rather than, for example, trying to answer a
    # question or complete a sentence in a way that is not desired here.
    instructed_prompt = f"Please generate detailed content based on the following topic or instruction: \"{prompt}\""
    logger.info(f"Constructed prompt for LLM: {instructed_prompt}")

    response_content = _generate_response(instructed_prompt)

    if "Error:" in response_content:
        logger.error(f"Failed to generate content using LLM for original prompt \"{prompt}\": {response_content}")
        # Return the error message from _generate_response; it is already informative.
        return response_content
    else:
        logger.success(f"Successfully generated content for original prompt: \"{prompt}\"")
        # .strip() removes leading/trailing whitespace that models sometimes add.
        return response_content.strip()
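
For illustration, here is a minimal usage sketch of the new helper outside the web UI. It is not part of the commit and assumes an LLM provider is already configured for the application.

```python
# Minimal usage sketch; assumes the application's LLM provider is configured.
from app.services import llm

text = llm.generate_content("Write a short introduction to solar energy.")
if text.startswith("Error:"):
    print(f"Generation failed: {text}")
else:
    print(text)
```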

test/services/test_llm.py (new file)

@@ -0,0 +1,87 @@
import unittest
from unittest.mock import patch, MagicMock

# Add the project root to the Python path to allow importing app modules
import sys
import os

# Calculate the project root directory path based on the current file's location
# __file__ is test/services/test_llm.py
# root_dir should be the parent of the 'test' directory
root_dir = os.path.dirname(os.path.dirname(os.path.dirname(os.path.realpath(__file__))))
if root_dir not in sys.path:
    sys.path.insert(0, root_dir)  # Prepend to sys.path to ensure it's checked first

from app.services import llm  # llm.py is in app/services
from app.config import config  # Import the config
# from loguru import logger  # Import if direct log assertions are needed and set up


class TestLlmService(unittest.TestCase):
    def setUp(self):
        # Basic configuration setup for tests.
        if not hasattr(config, 'app'):  # Ensure config.app exists
            config.app = {}
        config.app['llm_provider'] = 'OpenAI'  # Default mock provider
        config.app['openai_model_name'] = 'gpt-test-model'

    @patch('app.services.llm._generate_response')
    def test_generate_content_success(self, mock_generate_response):
        expected_content = "This is the successfully generated content."
        mock_generate_response.return_value = expected_content

        prompt = "Tell me a joke."
        instructed_prompt = f"Please generate detailed content based on the following topic or instruction: \"{prompt}\""

        actual_content = llm.generate_content(prompt)

        self.assertEqual(actual_content, expected_content.strip())
        mock_generate_response.assert_called_once_with(instructed_prompt)

    @patch('app.services.llm._generate_response')
    def test_generate_content_llm_failure(self, mock_generate_response):
        error_message_from_llm = "Error: LLM is down. Please try again later."
        mock_generate_response.return_value = error_message_from_llm

        prompt = "Summarize War and Peace."
        instructed_prompt = f"Please generate detailed content based on the following topic or instruction: \"{prompt}\""

        actual_content = llm.generate_content(prompt)

        self.assertTrue(actual_content.startswith("Error:"))
        self.assertIn("LLM is down", actual_content)
        mock_generate_response.assert_called_once_with(instructed_prompt)

    @patch('app.services.llm._generate_response')
    def test_generate_content_exception_handled_by_generate_response(self, mock_generate_response):
        # This test simulates that an exception occurred *inside* _generate_response,
        # and _generate_response caught it and returned a formatted error string.
        simulated_internal_exception_message = "Internal network timeout within _generate_response"
        # _generate_response itself would catch its internal error and return something like this:
        error_returned_by_generate_response = f"Error: {simulated_internal_exception_message}"
        mock_generate_response.return_value = error_returned_by_generate_response

        prompt = "What is the weather?"
        instructed_prompt = f"Please generate detailed content based on the following topic or instruction: \"{prompt}\""

        actual_content = llm.generate_content(prompt)

        self.assertTrue(actual_content.startswith("Error:"))
        self.assertIn(simulated_internal_exception_message, actual_content)
        mock_generate_response.assert_called_once_with(instructed_prompt)

    @patch('app.services.llm._generate_response')
    def test_generate_content_empty_prompt(self, mock_generate_response):
        expected_content = "Content for empty prompt."
        mock_generate_response.return_value = expected_content

        prompt = ""
        instructed_prompt = f"Please generate detailed content based on the following topic or instruction: \"{prompt}\""

        actual_content = llm.generate_content(prompt)

        self.assertEqual(actual_content, expected_content.strip())
        mock_generate_response.assert_called_once_with(instructed_prompt)


if __name__ == '__main__':
    unittest.main()
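
Because the test module prepends the project root to `sys.path` and ends with a `unittest.main()` guard, it can presumably be run directly from the repository root, e.g. `python test/services/test_llm.py`, without any additional test-runner configuration.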

webui/Main.py

@@ -66,6 +66,10 @@ if "video_terms" not in st.session_state:
st.session_state["video_terms"] = ""
if "ui_language" not in st.session_state:
st.session_state["ui_language"] = config.ui.get("language", system_locale)
if "generated_content_output" not in st.session_state:
st.session_state["generated_content_output"] = ""
if "content_generation_prompt" not in st.session_state:
st.session_state["content_generation_prompt"] = ""
# 加载语言文件
locales = utils.load_locales(i18n_dir)
@@ -626,6 +630,32 @@ with middle_panel:
            options=[1, 2, 3, 4, 5],
            index=0,
        )

    with st.container(border=True):
        st.write(tr("Content Generation"))
        st.session_state["content_generation_prompt"] = st.text_input(
            tr("Enter your prompt"),
            value=st.session_state["content_generation_prompt"],
            key="content_prompt_input_main"
        )
        if st.button(tr("Generate Content"), key="generate_content_button_main"):
            prompt_value = st.session_state["content_generation_prompt"]
            if prompt_value:
                with st.spinner(tr("Generating content...")):
                    st.session_state["generated_content_output"] = llm.generate_content(prompt_value)
            else:
                st.error(tr("Please enter a prompt."))
                st.session_state["generated_content_output"] = ""  # Clear previous output

        if st.session_state["generated_content_output"]:
            st.text_area(
                tr("Generated Content"),
                value=st.session_state["generated_content_output"],
                height=300,
                key="generated_content_display_main"
            )

    with st.container(border=True):
        st.write(tr("Audio Settings"))