mirror of https://github.com/harry0703/MoneyPrinterTurbo.git
synced 2026-02-21 16:37:21 +08:00
Merge branch 'main' of github.com:elf-mouse/MoneyPrinterTurbo
commit 1dbfcfadab

README.md (23 changed lines)
@@ -242,8 +242,8 @@ python main.py

Two subtitle generation methods are currently supported:

-- edge: faster generation and better performance, with no particular hardware requirements, but the quality may be unstable
-- whisper: slower generation and worse performance, with some hardware requirements, but the quality is more reliable.
+- **edge**: `fast` generation and better performance, with no particular hardware requirements, but the quality may be unstable
+- **whisper**: `slow` generation and worse performance, with some hardware requirements, but the quality is `more reliable`.

You can switch between them by changing `subtitle_provider` in the `config.toml` configuration file
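For reference, switching providers is a one-line change. A minimal sketch of the relevant `config.toml` entry; the `[app]` section name is an assumption based on the project's example config, not something shown in this diff:

```toml
[app]  # section name is an assumption, not shown in this diff
# "edge" or "whisper"; leaving the value empty disables subtitle generation
subtitle_provider = "edge"
```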
@@ -253,6 +253,25 @@ python main.py

1. In whisper mode, a model file (roughly 3GB) has to be downloaded from HuggingFace, so make sure your network connection is good
2. If the value is left empty, no subtitles are generated.

> Since HuggingFace cannot be reached from mainland China, you can download the `whisper-large-v3` model file via the links below

Download links:
- Baidu Netdisk: https://pan.baidu.com/s/11h3Q6tsDtjQKTjUu3sc5cA?pwd=xjs9
- Quark Netdisk: https://pan.quark.cn/s/3ee3d991d64b

After downloading, extract the model and place the whole directory under `.\MoneyPrinterTurbo\models`,
so that the final file path looks like this: `.\MoneyPrinterTurbo\models\whisper-large-v3`

```
MoneyPrinterTurbo
├─models
│  └─whisper-large-v3
│          config.json
│          model.bin
│          preprocessor_config.json
│          tokenizer.json
│          vocabulary.json
```
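With the directory in place, the model can be loaded by local path so that no HuggingFace download is attempted. A minimal sketch, assuming the `faster-whisper` package is the consumer of this directory (the `model.bin`/CTranslate2 layout above matches its format); paths and parameters are illustrative:

```python
# Minimal sketch, assuming faster-whisper consumes this model directory.
from faster_whisper import WhisperModel

# A local path skips the HuggingFace download entirely.
model = WhisperModel("./models/whisper-large-v3", device="cpu", compute_type="int8")

# Transcribe a media file and print timestamped segments.
segments, _info = model.transcribe("video.mp4")
for segment in segments:
    print(f"[{segment.start:.2f}s -> {segment.end:.2f}s] {segment.text}")
```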
## Background Music 🎵

The background music used in videos is located in the project's `resource/songs` directory.
@@ -76,13 +76,27 @@ def _generate_response(prompt: str) -> str:

    if llm_provider == "qwen":
        import dashscope
        from dashscope.api_entities.dashscope_response import GenerationResponse
        dashscope.api_key = api_key
        response = dashscope.Generation.call(
            model=model_name,
            messages=[{"role": "user", "content": prompt}]
        )
-        content = response["output"]["text"]
-        return content.replace("\n", "")
+        if response:
+            if isinstance(response, GenerationResponse):
+                status_code = response.status_code
+                if status_code != 200:
+                    raise Exception(
+                        f"[{llm_provider}] returned an error response: \"{response}\"")
+
+                content = response["output"]["text"]
+                return content.replace("\n", "")
+            else:
+                raise Exception(
+                    f"[{llm_provider}] returned an invalid response: \"{response}\"")
+        else:
+            raise Exception(
+                f"[{llm_provider}] returned an empty response")

    if llm_provider == "gemini":
        import google.generativeai as genai
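The validation order the patch introduces (truthiness, then type, then status code, then field access) can be exercised on its own. A minimal sketch with a hypothetical stub standing in for dashscope's `GenerationResponse`; only the two attributes the patched branch relies on are modeled:

```python
# Hypothetical stub for dashscope's GenerationResponse: dict-style access
# to ["output"]["text"] plus a status_code attribute, as used in the diff.
class FakeGenerationResponse(dict):
    def __init__(self, status_code: int, text: str):
        super().__init__(output={"text": text})
        self.status_code = status_code

def extract_text(response, provider: str = "qwen") -> str:
    # Same checks, in the same order, as the patched qwen branch above.
    if not response:
        raise Exception(f"[{provider}] returned an empty response")
    if not isinstance(response, FakeGenerationResponse):
        raise Exception(f"[{provider}] returned an invalid response: \"{response}\"")
    if response.status_code != 200:
        raise Exception(f"[{provider}] returned an error response: \"{response}\"")
    return response["output"]["text"].replace("\n", "")

print(extract_text(FakeGenerationResponse(200, "hello\nworld")))  # -> helloworld
```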
@@ -130,13 +144,13 @@ def _generate_response(prompt: str) -> str:

    if llm_provider == "cloudflare":
        import requests
        response = requests.post(
-            f"https://api.cloudflare.com/client/v4/accounts/{account_id}/ai/run/{model_name}",
+            f"https://api.cloudflare.com/client/v4/accounts/{account_id}/ai/run/{model_name}",
            headers={"Authorization": f"Bearer {api_key}"},
            json={
-                "messages": [
-                    {"role": "system", "content": "You are a friendly assistant"},
-                    {"role": "user", "content": prompt}
-                ]
+                "messages": [
+                    {"role": "system", "content": "You are a friendly assistant"},
+                    {"role": "user", "content": prompt}
+                ]
            }
        )
        result = response.json()
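The hunk ends at `result = response.json()`, before the generated text is extracted. For context, Cloudflare's Workers AI REST endpoint wraps text-generation output in a `result` object; a hedged sketch of what the continuation presumably looks like (this is an assumption, not part of the diff shown above):

```python
# Hypothetical continuation, not shown in this diff. Workers AI responses
# follow the shape {"success": bool, "errors": [...], "result": {"response": "..."}}.
result = response.json()
if not result.get("success"):
    raise Exception(f"[{llm_provider}] returned an error response: \"{result}\"")
content = result["result"]["response"]
return content.replace("\n", "")
```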