Added a content review API
parent 18d60cae93
commit fa5f4f5a3c
@@ -155,7 +155,7 @@ class TweetService:
         system_prompt, user_prompt = self.prompt_builder.build_judge_prompt(topic, content)

         # Review the content
-        judged_data = await self.content_judger.judge_content(content, topic)
+        judged_data = await self.content_judger.judge_content_with_prompt(content, topic, system_prompt, user_prompt)
         judge_success = judged_data.get('judge_success', False)

         # Generate the request ID
@@ -213,7 +213,8 @@ class TweetService:
                 continue

             try:
-                judged_data = await self.content_judger.judge_content(content, topic)
+                system_prompt, user_prompt = self.prompt_builder.build_judge_prompt(topic, content)
+                judged_data = await self.content_judger.judge_content_with_prompt(content, topic, system_prompt, user_prompt)
                 judged_contents[topic_index] = judged_data
             except Exception as e:
                 logger.critical(f"Unexpected error while reviewing content for topic {topic_index}: {e}", exc_info=True)
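The change above moves prompt construction into the service: build_judge_prompt produces the system/user prompt pair, and judge_content_with_prompt consumes it instead of building prompts internally. A minimal sketch of that call pattern; the service instance, the topic dict and the draft JSON below are placeholders, not values from the repository:

    # Illustrative only: `service`, `topic` and `draft_json` are assumed placeholders.
    topic = {"index": 1, "title": "Example topic"}
    draft_json = '{"title": "Draft", "content": "Draft body", "tag": ["demo"]}'

    system_prompt, user_prompt = service.prompt_builder.build_judge_prompt(topic, draft_json)
    judged_data = await service.content_judger.judge_content_with_prompt(
        draft_json, topic, system_prompt, user_prompt
    )
    if not judged_data.get("judge_success", False):
        logger.warning(f"Review failed: {judged_data.get('error')}")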
BIN  poster/__pycache__/text_generator.cpython-312.pyc  (new file, binary file not shown)
@@ -7,7 +7,7 @@

 import logging
 import json
-from typing import Dict, Any
+from typing import Dict, Any, Tuple, Optional

 from core.ai import AIAgent
 from core.config import ConfigManager, GenerateTopicConfig, GenerateContentConfig
@@ -85,3 +85,58 @@ class ContentGenerator:
         else:
             logger.error(f"Failed to parse content JSON for {topic_index}")
             return {"error": "JSONDecodeError", "raw_content": raw_result}
+
+    async def generate_content_with_prompt(self, topic: Dict[str, Any], system_prompt: str, user_prompt: str) -> Dict[str, Any]:
+        """
+        Generate content using pre-built prompts.
+
+        Args:
+            topic: Topic information dict
+            system_prompt: The pre-built system prompt
+            user_prompt: The pre-built user prompt
+
+        Returns:
+            A dict containing the generated content
+        """
+        topic_index = topic.get('index', 'N/A')
+        logger.info(f"Generating content for topic {topic_index} using pre-built prompts...")
+
+        # Save the prompts for debugging
+        output_dir = self.output_manager.get_topic_dir(topic_index)
+        self.output_manager.save_text(system_prompt, "content_system_prompt.txt", subdir=output_dir.name)
+        self.output_manager.save_text(user_prompt, "content_user_prompt.txt", subdir=output_dir.name)
+
+        # Collect model parameters
+        model_params = {}
+        if hasattr(self.content_config, 'model') and isinstance(self.content_config.model, dict):
+            model_params = {
+                'temperature': self.content_config.model.get('temperature'),
+                'top_p': self.content_config.model.get('top_p'),
+                'presence_penalty': self.content_config.model.get('presence_penalty')
+            }
+            # Drop None values
+            model_params = {k: v for k, v in model_params.items() if v is not None}
+
+        # Call the AI
+        try:
+            raw_result, _, _, _ = await self.ai_agent.generate_text(
+                system_prompt=system_prompt,
+                user_prompt=user_prompt,
+                use_stream=True,
+                stage="content generation",
+                **model_params
+            )
+            self.output_manager.save_text(raw_result, "content_raw_response.txt", subdir=output_dir.name)
+        except Exception as e:
+            logger.critical(f"AI call failed while generating content for topic {topic_index}: {e}", exc_info=True)
+            return {"error": str(e)}
+
+        # Parse and save the result
+        content_data = process_llm_json_text(raw_result)
+        if content_data:
+            self.output_manager.save_json(content_data, "article.json", subdir=output_dir.name)
+            logger.info(f"Successfully generated and saved content for topic {topic_index}.")
+            return content_data
+        else:
+            logger.error(f"Failed to parse content JSON for {topic_index}")
+            return {"error": "JSONDecodeError", "raw_content": raw_result}
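generate_content_with_prompt mirrors the existing generation flow but takes the prompts as arguments, so a caller can build, log or customise them before the AI call; filtering None out of model_params lets partially filled model configs fall back to the AI agent's defaults. A hedged usage sketch; the prompt strings and the content_generator instance are placeholders, not the project's real templates:

    # Illustrative only: content_generator and the prompt texts are assumed.
    topic = {"index": 3, "title": "Example topic"}
    system_prompt = "You are a copywriter. Reply with a JSON object holding title, content and tag."
    user_prompt = f"Write a short post about: {topic['title']}"

    article = await content_generator.generate_content_with_prompt(topic, system_prompt, user_prompt)
    if "error" in article:
        logger.error(f"Generation failed: {article.get('error')}")
    else:
        logger.info(f"Got article: {article.get('title')}")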
@@ -128,3 +128,85 @@ class ContentJudger:
         else:
             logger.warning(f"Review response JSON is malformed or missing keys")
             return {"judge_success": False, "error": "Invalid JSON response", "raw_response": raw_result}
+
+    async def judge_content_with_prompt(self, generated_content: Union[str, Dict[str, Any]], topic: Dict[str, Any], system_prompt: str, user_prompt: str) -> Dict[str, Any]:
+        """
+        Review the generated content using pre-built prompts.
+
+        Args:
+            generated_content: The raw generated content (JSON string or dict)
+            topic: The original topic dict the content belongs to
+            system_prompt: The system prompt
+            user_prompt: The user prompt
+
+        Returns:
+            A dict containing the review result
+        """
+        logger.info("Starting review of the generated content with pre-built prompts...")
+
+        # Get the topic index, used when saving files
+        topic_index = topic.get('index', 'unknown')
+        topic_dir = f"topic_{topic_index}"
+
+        # Extract tags from the original content
+        original_tag = []
+        original_content = process_llm_json_text(generated_content)
+        if original_content and isinstance(original_content, dict) and "tag" in original_content:
+            original_tag = original_content.get("tag", [])
+            logger.info(f"Extracted tags from the original content: {original_tag}")
+        else:
+            logger.warning("Failed to extract tags from the original content")
+
+        # Save the prompts
+        if self.output_manager:
+            self.output_manager.save_text(system_prompt, f"{topic_dir}/judger_system_prompt.txt")
+            self.output_manager.save_text(user_prompt, f"{topic_dir}/judger_user_prompt.txt")
+
+        # Collect model parameters
+        model_params = {}
+        if hasattr(self.content_config, 'judger_model') and isinstance(self.content_config.judger_model, dict):
+            model_params = {
+                'temperature': self.content_config.judger_model.get('temperature'),
+                'top_p': self.content_config.judger_model.get('top_p'),
+                'presence_penalty': self.content_config.judger_model.get('presence_penalty')
+            }
+            # Drop None values
+            model_params = {k: v for k, v in model_params.items() if v is not None}
+
+        # 2. Call the AI to review the content
+        try:
+            raw_result, _, _, _ = await self.ai_agent.generate_text(
+                system_prompt=system_prompt,
+                user_prompt=user_prompt,
+                use_stream=True,
+                stage="content review",
+                **model_params
+            )
+
+            # Save the raw response
+            if self.output_manager:
+                self.output_manager.save_text(raw_result, f"{topic_dir}/judger_raw_response.txt")
+
+        except Exception as e:
+            logger.critical(f"AI call failed during content review: {e}", exc_info=True)
+            return {"judge_success": False, "error": str(e)}
+
+        # 3. Parse the result
+        judged_data = process_llm_json_text(raw_result)
+        if judged_data and isinstance(judged_data, dict) and "title" in judged_data and "content" in judged_data:
+            judged_data["judge_success"] = True
+
+            # Use the tags from the original content directly
+            if original_tag:
+                judged_data["tag"] = original_tag
+            # If the original content has no tags, fall back to the default tags
+            logger.info(f"Content review completed successfully, using tags: {judged_data.get('tag', [])}")
+
+            # Save the reviewed content
+            if self.output_manager:
+                self.output_manager.save_json(judged_data, f"{topic_dir}/article_judged.json")
+
+            return judged_data
+        else:
+            logger.warning(f"Review response JSON is malformed or missing keys")
+            return {"judge_success": False, "error": "Invalid JSON response", "raw_response": raw_result}
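judge_content_with_prompt keeps the tags of the original draft (when process_llm_json_text can extract them) and reports the outcome through the judge_success flag, so callers decide whether to publish the reviewed text or keep the draft. A minimal sketch of consuming that contract; the judger instance and the input variables are placeholders:

    # Illustrative only: judger, draft_json, topic and the prompts are assumed to exist.
    judged = await judger.judge_content_with_prompt(draft_json, topic, system_prompt, user_prompt)
    if judged.get("judge_success"):
        final_title = judged["title"]
        final_body = judged["content"]
        final_tags = judged.get("tag", [])   # copied from the original draft when available
    else:
        # Keep the unreviewed draft and log the raw response for debugging
        logger.warning(f"Review failed: {judged.get('error')}, raw: {judged.get('raw_response', '')[:200]}")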
@@ -6,7 +6,7 @@
 """

 import logging
-from typing import Dict, Any, List, Optional
+from typing import Dict, Any, List, Optional, Tuple

 from core.ai import AIAgent
 from core.config import ConfigManager, GenerateTopicConfig
@@ -91,4 +91,58 @@ class TopicGenerator:

         return topics
+
+    async def generate_topics_with_prompt(self, system_prompt: str, user_prompt: str) -> Optional[List[Dict[str, Any]]]:
+        """
+        Generate topics using pre-built prompts.
+
+        Args:
+            system_prompt: The pre-built system prompt
+            user_prompt: The pre-built user prompt
+
+        Returns:
+            The list of generated topics, or None on failure
+        """
+        logger.info("Starting the topic generation flow with pre-built prompts...")
+
+        # Save the prompts for debugging
+        self.output_manager.save_text(system_prompt, "topic_system_prompt.txt")
+        self.output_manager.save_text(user_prompt, "topic_user_prompt.txt")
+
+        # Collect model parameters
+        model_params = {}
+        if hasattr(self.config, 'model') and isinstance(self.config.model, dict):
+            model_params = {
+                'temperature': self.config.model.get('temperature'),
+                'top_p': self.config.model.get('top_p'),
+                'presence_penalty': self.config.model.get('presence_penalty')
+            }
+            # Drop None values
+            model_params = {k: v for k, v in model_params.items() if v is not None}
+
+        # Call the AI to generate
+        try:
+            raw_result, _, _, _ = await self.ai_agent.generate_text(
+                system_prompt=system_prompt,
+                user_prompt=user_prompt,
+                use_stream=True,
+                stage="topic generation",
+                **model_params
+            )
+            self.output_manager.save_text(raw_result, "topics_raw_response.txt")
+        except Exception as e:
+            logger.critical(f"AI call failed, unable to generate topics: {e}", exc_info=True)
+            return None
+
+        # Parse the result
+        topics = self.parser.parse(raw_result)
+        if not topics:
+            logger.error("No valid topics could be parsed from the AI response")
+            return None
+
+        # Save the result
+        self.output_manager.save_json(topics, "topics.json")
+        logger.info(f"Successfully generated and saved {len(topics)} topics")
+
+        return topics
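generate_topics_with_prompt returns None on any failure, so a caller can short-circuit before entering the per-topic generation loop. A hedged end-to-end sketch; build_topic_prompt and build_content_prompt are hypothetical helper names standing in for however the caller assembles the prompts, and the generator instances are assumed:

    # Hypothetical wiring: prompt_builder, topic_generator and content_generator are assumed instances.
    system_prompt, user_prompt = prompt_builder.build_topic_prompt()        # hypothetical helper
    topics = await topic_generator.generate_topics_with_prompt(system_prompt, user_prompt)
    if topics is None:
        raise RuntimeError("Topic generation failed, aborting the pipeline")

    for topic in topics:
        c_sys, c_user = prompt_builder.build_content_prompt(topic)          # hypothetical helper
        article = await content_generator.generate_content_with_prompt(topic, c_sys, c_user)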