Here's a summary of the changes:
1. **Web UI (`webui/Main.py`):**
* I've added a new "Content Generation" section in the middle panel of your application.
* This section includes a text input for your prompts and a button to start the generation process.
* The generated content will be displayed in a text area.
* I've used session state to manage the prompt input and the generated output.
* Localization has been integrated using the `tr()` function. (A minimal sketch of this UI section appears after the summary.)
2. **LLM Service (`app/services/llm.py`):**
* I introduced a new function `generate_content(prompt: str) -> str`.
* This function takes your prompt, wraps it in a fixed instruction template, and then uses the existing `_generate_response` helper to communicate with the LLM provider. (A sketch of this function also appears after the summary.)
* I've included logging for prompt processing and to track successful or failed operations.
* Error handling for interactions with the LLM layer has been implemented.
3. **Unit Tests (`test/services/test_llm.py`):**
* I created a new test file specifically for the LLM service.
* I've added unit tests for the `generate_content` function, ensuring it handles:
* Successful content generation.
* Failures resulting from LLM errors.
* The scenario of empty prompts.
* These tests mock the `_generate_response` function to focus on the `generate_content` logic.
* All tests are currently passing.
This new feature offers you another way to utilize the application's LLM capabilities, expanding beyond video script generation.
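
For orientation, here is a minimal sketch of what the UI section could look like. It assumes Streamlit (which `webui/Main.py` is built on) and the file's existing `tr()` localization helper; the labels and session-state keys are illustrative, not the exact code in the change.

```python
# Minimal sketch of the "Content Generation" panel (assumes the Streamlit
# layout of webui/Main.py; tr() is the app's existing localization helper
# defined in that file, and the keys/labels here are illustrative).
import streamlit as st

from app.services import llm

st.subheader(tr("Content Generation"))

# Session state keeps the prompt and the generated output across reruns.
st.session_state.setdefault("content_prompt", "")
st.session_state.setdefault("generated_content", "")

st.session_state["content_prompt"] = st.text_input(
    tr("Enter a topic or instruction"),
    value=st.session_state["content_prompt"],
)

if st.button(tr("Generate Content")):
    st.session_state["generated_content"] = llm.generate_content(
        st.session_state["content_prompt"]
    )

st.text_area(tr("Generated Content"), value=st.session_state["generated_content"])
```

And a sketch of the service-layer function, reverse-engineered from what the unit tests below assert: the instruction template matches the tests exactly, while the specific log calls and the `"Error:"` string convention for failures are assumptions inferred from them rather than confirmed details of `app/services/llm.py`.

```python
# Hypothetical sketch of generate_content inside app/services/llm.py,
# reconstructed from the behavior the unit tests assert. _generate_response
# is the module's existing helper; the "Error:" prefix convention and the
# exact log messages are assumptions.
from loguru import logger


def generate_content(prompt: str) -> str:
    logger.info(f"generating content for prompt: {prompt!r}")
    # Wrap the raw prompt in the fixed instruction template the tests expect.
    instructed_prompt = (
        "Please generate detailed content based on the following "
        f"topic or instruction: \"{prompt}\""
    )
    response = _generate_response(instructed_prompt)
    if response.startswith("Error:"):
        logger.error(f"content generation failed: {response}")
    else:
        logger.success("content generated successfully")
    return response.strip()
```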
The new test file, `test/services/test_llm.py` (88 lines, Python):
```python
import unittest
from unittest.mock import patch, MagicMock

# Add the project root to the Python path to allow importing app modules
import sys
import os

# Calculate the project root directory path based on the current file's location.
# __file__ is test/services/test_llm.py, so root_dir should be the parent of
# the 'test' directory.
root_dir = os.path.dirname(os.path.dirname(os.path.dirname(os.path.realpath(__file__))))
if root_dir not in sys.path:
    sys.path.insert(0, root_dir)  # Prepend to sys.path so it is checked first

from app.services import llm  # llm.py is in app/services
from app.config import config  # Import the config
# from loguru import logger  # Import if direct log assertions are needed and set up


class TestLlmService(unittest.TestCase):

    def setUp(self):
        # Basic configuration setup for tests.
        if not hasattr(config, 'app'):  # Ensure config.app exists
            config.app = {}
        config.app['llm_provider'] = 'OpenAI'  # Default mock provider
        config.app['openai_model_name'] = 'gpt-test-model'

    @patch('app.services.llm._generate_response')
    def test_generate_content_success(self, mock_generate_response):
        expected_content = "This is the successfully generated content."
        mock_generate_response.return_value = expected_content

        prompt = "Tell me a joke."
        instructed_prompt = f"Please generate detailed content based on the following topic or instruction: \"{prompt}\""

        actual_content = llm.generate_content(prompt)

        self.assertEqual(actual_content, expected_content.strip())
        mock_generate_response.assert_called_once_with(instructed_prompt)

    @patch('app.services.llm._generate_response')
    def test_generate_content_llm_failure(self, mock_generate_response):
        error_message_from_llm = "Error: LLM is down. Please try again later."
        mock_generate_response.return_value = error_message_from_llm

        prompt = "Summarize War and Peace."
        instructed_prompt = f"Please generate detailed content based on the following topic or instruction: \"{prompt}\""

        actual_content = llm.generate_content(prompt)

        self.assertTrue(actual_content.startswith("Error:"))
        self.assertIn("LLM is down", actual_content)
        mock_generate_response.assert_called_once_with(instructed_prompt)

    @patch('app.services.llm._generate_response')
    def test_generate_content_exception_handled_by_generate_response(self, mock_generate_response):
        # This test simulates that an exception occurred *inside* _generate_response,
        # and that _generate_response caught it and returned a formatted error string.
        simulated_internal_exception_message = "Internal network timeout within _generate_response"
        # _generate_response itself would catch its internal error and return something like this:
        error_returned_by_generate_response = f"Error: {simulated_internal_exception_message}"
        mock_generate_response.return_value = error_returned_by_generate_response

        prompt = "What is the weather?"
        instructed_prompt = f"Please generate detailed content based on the following topic or instruction: \"{prompt}\""

        actual_content = llm.generate_content(prompt)

        self.assertTrue(actual_content.startswith("Error:"))
        self.assertIn(simulated_internal_exception_message, actual_content)
        mock_generate_response.assert_called_once_with(instructed_prompt)

    @patch('app.services.llm._generate_response')
    def test_generate_content_empty_prompt(self, mock_generate_response):
        expected_content = "Content for empty prompt."
        mock_generate_response.return_value = expected_content

        prompt = ""
        instructed_prompt = f"Please generate detailed content based on the following topic or instruction: \"{prompt}\""

        actual_content = llm.generate_content(prompt)

        self.assertEqual(actual_content, expected_content.strip())
        mock_generate_response.assert_called_once_with(instructed_prompt)


if __name__ == '__main__':
    unittest.main()
```
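
Because the test file bootstraps `sys.path` itself and ends with a `unittest.main()` guard, it can be run directly from the repository root, e.g. `python test/services/test_llm.py`, without any extra test-runner configuration.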