diff --git a/app/services/llm.py b/app/services/llm.py
index 6c954a8..d5deeb4 100644
--- a/app/services/llm.py
+++ b/app/services/llm.py
@@ -441,4 +441,23 @@ if __name__ == "__main__":
     )
     print("######################")
     print(search_terms)
-    
\ No newline at end of file
+
+
+def generate_content(prompt: str) -> str:
+    logger.info(f"Generating content for prompt: {prompt}")
+
+    # Wrap the user's prompt in an explicit instruction so the LLM generates content
+    # rather than, for example, answering a question or completing a sentence.
+    instructed_prompt = f"Please generate detailed content based on the following topic or instruction: \"{prompt}\""
+    logger.info(f"Constructed prompt for LLM: {instructed_prompt}")
+
+    response_content = _generate_response(instructed_prompt)
+
+    if "Error:" in response_content:
+        logger.error(f"Failed to generate content using LLM for original prompt \"{prompt}\": {response_content}")
+        # The error message from _generate_response is already informative, so return it as-is.
+        return response_content
+    else:
+        logger.success(f"Successfully generated content for original prompt: \"{prompt}\"")
+        # Strip leading/trailing whitespace that models sometimes add.
+        return response_content.strip()
\ No newline at end of file
diff --git a/test/services/test_llm.py b/test/services/test_llm.py
new file mode 100644
index 0000000..fa753db
--- /dev/null
+++ b/test/services/test_llm.py
@@ -0,0 +1,87 @@
+import unittest
+from unittest.mock import patch, MagicMock
+
+# Add the project root to the Python path to allow importing app modules
+import sys
+import os
+
+# Calculate the project root directory path based on the current file's location
+# __file__ is test/services/test_llm.py
+# root_dir should be the parent of 'test' directory
+root_dir = os.path.dirname(os.path.dirname(os.path.dirname(os.path.realpath(__file__))))
+if root_dir not in sys.path:
+    sys.path.insert(0, root_dir)  # Prepend to sys.path to ensure it's checked first
+
+from app.services import llm  # llm.py is in app/services
+from app.config import config  # Import the config
+# from loguru import logger  # Import if direct log assertions are needed and set up
+
+class TestLlmService(unittest.TestCase):
+
+    def setUp(self):
+        # Basic configuration setup for tests.
+        if not hasattr(config, 'app'):  # Ensure config.app exists
+            config.app = {}
+        config.app['llm_provider'] = 'OpenAI'  # Default mock provider
+        config.app['openai_model_name'] = 'gpt-test-model'
+
+    @patch('app.services.llm._generate_response')
+    def test_generate_content_success(self, mock_generate_response):
+        expected_content = "This is the successfully generated content."
+        mock_generate_response.return_value = expected_content
+
+        prompt = "Tell me a joke."
+ instructed_prompt = f"Please generate detailed content based on the following topic or instruction: \"{prompt}\"" + + actual_content = llm.generate_content(prompt) + + self.assertEqual(actual_content, expected_content.strip()) + mock_generate_response.assert_called_once_with(instructed_prompt) + + @patch('app.services.llm._generate_response') + def test_generate_content_llm_failure(self, mock_generate_response): + error_message_from_llm = "Error: LLM is down. Please try again later." + mock_generate_response.return_value = error_message_from_llm + + prompt = "Summarize War and Peace." + instructed_prompt = f"Please generate detailed content based on the following topic or instruction: \"{prompt}\"" + + actual_content = llm.generate_content(prompt) + + self.assertTrue(actual_content.startswith("Error:")) + self.assertIn("LLM is down", actual_content) + mock_generate_response.assert_called_once_with(instructed_prompt) + + @patch('app.services.llm._generate_response') + def test_generate_content_exception_handled_by_generate_response(self, mock_generate_response): + # This test simulates that an exception occurred *inside* _generate_response, + # and _generate_response caught it and returned a formatted error string. + simulated_internal_exception_message = "Internal network timeout within _generate_response" + # _generate_response itself would catch its internal error and return something like this: + error_returned_by_generate_response = f"Error: {simulated_internal_exception_message}" + mock_generate_response.return_value = error_returned_by_generate_response + + prompt = "What is the weather?" + instructed_prompt = f"Please generate detailed content based on the following topic or instruction: \"{prompt}\"" + + actual_content = llm.generate_content(prompt) + + self.assertTrue(actual_content.startswith("Error:")) + self.assertIn(simulated_internal_exception_message, actual_content) + mock_generate_response.assert_called_once_with(instructed_prompt) + + @patch('app.services.llm._generate_response') + def test_generate_content_empty_prompt(self, mock_generate_response): + expected_content = "Content for empty prompt." 
+        mock_generate_response.return_value = expected_content
+
+        prompt = ""
+        instructed_prompt = f"Please generate detailed content based on the following topic or instruction: \"{prompt}\""
+
+        actual_content = llm.generate_content(prompt)
+
+        self.assertEqual(actual_content, expected_content.strip())
+        mock_generate_response.assert_called_once_with(instructed_prompt)
+
+if __name__ == '__main__':
+    unittest.main()
diff --git a/webui/Main.py b/webui/Main.py
index 1b55abe..40840b9 100644
--- a/webui/Main.py
+++ b/webui/Main.py
@@ -66,6 +66,10 @@ if "video_terms" not in st.session_state:
     st.session_state["video_terms"] = ""
 if "ui_language" not in st.session_state:
     st.session_state["ui_language"] = config.ui.get("language", system_locale)
+if "generated_content_output" not in st.session_state:
+    st.session_state["generated_content_output"] = ""
+if "content_generation_prompt" not in st.session_state:
+    st.session_state["content_generation_prompt"] = ""
 
 # 加载语言文件
 locales = utils.load_locales(i18n_dir)
@@ -626,6 +630,32 @@ with middle_panel:
             options=[1, 2, 3, 4, 5],
             index=0,
         )
+
+    with st.container(border=True):
+        st.write(tr("Content Generation"))
+        st.session_state["content_generation_prompt"] = st.text_input(
+            tr("Enter your prompt"),
+            value=st.session_state["content_generation_prompt"],
+            key="content_prompt_input_main"
+        )
+
+        if st.button(tr("Generate Content"), key="generate_content_button_main"):
+            prompt_value = st.session_state["content_generation_prompt"]
+            if prompt_value:
+                with st.spinner(tr("Generating content...")):
+                    st.session_state["generated_content_output"] = llm.generate_content(prompt_value)
+            else:
+                st.error(tr("Please enter a prompt."))
+                st.session_state["generated_content_output"] = ""  # Clear previous output
+
+        if st.session_state["generated_content_output"]:
+            st.text_area(
+                tr("Generated Content"),
+                value=st.session_state["generated_content_output"],
+                height=300,
+                key="generated_content_display_main"
+            )
+
 
     with st.container(border=True):
         st.write(tr("Audio Settings"))
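
Usage sketch (illustrative, not part of the patch above). The new test module bootstraps sys.path itself, so it can be run directly as "python test/services/test_llm.py" from the repository root. The snippet below shows one way to exercise generate_content without a live LLM by patching _generate_response, the same approach the tests use; the prompt string and the mocked return value are made up for illustration, and it assumes the code in this diff is importable from the repository root.

    # Minimal sketch: call generate_content with _generate_response mocked out.
    # Run from the repository root so the `app` package is importable.
    from unittest.mock import patch

    from app.services import llm

    with patch("app.services.llm._generate_response", return_value="  Drafted content.  "):
        result = llm.generate_content("Write a short intro about solar energy.")

    # Successful responses are stripped of surrounding whitespace before being returned.
    assert result == "Drafted content."
    print(result)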