TravelContentCreator/tweet/content_generator.py
2025-07-11 16:54:07 +08:00

142 lines
5.9 KiB
Python

#!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""
内容生成模块
"""
import logging
import json
from typing import Dict, Any, Tuple, Optional
from core.ai import AIAgent
from core.config import ConfigManager, GenerateTopicConfig, GenerateContentConfig
from utils.prompts import ContentPromptBuilder
from utils.file_io import OutputManager, process_llm_json_text
logger = logging.getLogger(__name__)
class ContentGenerator:
    """Generates article content for a single topic via the AI agent.

    Builds (or accepts pre-built) system/user prompts, calls the AI agent,
    persists prompts and raw responses for debugging, and parses the model's
    JSON output into a content dict.
    """

    def __init__(self, ai_agent: AIAgent, config_manager: ConfigManager, output_manager: OutputManager):
        self.ai_agent = ai_agent
        self.config_manager = config_manager
        self.topic_config = config_manager.get_config('topic_gen', GenerateTopicConfig)
        self.content_config = config_manager.get_config('content_gen', GenerateContentConfig)
        self.output_manager = output_manager
        self.prompt_builder = ContentPromptBuilder(config_manager)

    def _get_model_params(self) -> Dict[str, Any]:
        """Collect sampling parameters from the content-gen config.

        Returns:
            Dict with any of 'temperature', 'top_p', 'presence_penalty' that
            are set (non-None) in ``content_config.model``; empty dict when
            the config has no ``model`` mapping.
        """
        params: Dict[str, Any] = {}
        if hasattr(self.content_config, 'model') and isinstance(self.content_config.model, dict):
            for key in ('temperature', 'top_p', 'presence_penalty'):
                value = self.content_config.model.get(key)
                # Drop unset values so the AI agent's own defaults apply.
                if value is not None:
                    params[key] = value
        return params

    async def generate_content_for_topic(self, topic: Dict[str, Any]) -> Dict[str, Any]:
        """
        Generate content for a single topic.

        Args:
            topic: Topic info dict; its 'index' key (if present) selects the
                output subdirectory.

        Returns:
            Parsed content dict on success, or a dict with an "error" key on
            AI-call failure or JSON-parse failure.
        """
        topic_index = topic.get('index', 'N/A')
        logger.info(f"开始为选题 {topic_index} 生成内容...")
        # Build the system and user prompts from the template builder, then
        # run the shared generation pipeline (avoids duplicating it here).
        system_prompt = self.prompt_builder.get_system_prompt()
        user_prompt = self.prompt_builder.build_user_prompt(topic=topic)
        return await self.generate_content_with_prompt(topic, system_prompt, user_prompt)

    async def generate_content_with_prompt(self, topic: Dict[str, Any], system_prompt: str, user_prompt: str) -> Dict[str, Any]:
        """
        Generate content for a topic using pre-built prompts.

        Args:
            topic: Topic info dict; its 'index' key (if present) selects the
                output subdirectory.
            system_prompt: Fully constructed system prompt.
            user_prompt: Fully constructed user prompt.

        Returns:
            Parsed content dict on success, or a dict with an "error" key on
            AI-call failure or JSON-parse failure (the latter also carries
            "raw_content" with the unparsed model output).
        """
        topic_index = topic.get('index', 'N/A')
        logger.info(f"使用预构建提示词为选题 {topic_index} 生成内容...")
        # Persist the prompts alongside the topic's output for debugging.
        output_dir = self.output_manager.get_topic_dir(topic_index)
        self.output_manager.save_text(system_prompt, "content_system_prompt.txt", subdir=output_dir.name)
        self.output_manager.save_text(user_prompt, "content_user_prompt.txt", subdir=output_dir.name)
        model_params = self._get_model_params()
        # Call the AI agent; keep the raw response on disk even when parsing
        # fails later, so failures can be diagnosed.
        try:
            raw_result, _, _, _ = await self.ai_agent.generate_text(
                system_prompt=system_prompt,
                user_prompt=user_prompt,
                use_stream=True,
                stage="内容生成",
                **model_params
            )
            self.output_manager.save_text(raw_result, "content_raw_response.txt", subdir=output_dir.name)
        except Exception as e:
            logger.critical(f"为选题 {topic_index} 生成内容时AI调用失败: {e}", exc_info=True)
            return {"error": str(e)}
        # Parse the model output as JSON and persist the structured result.
        content_data = process_llm_json_text(raw_result)
        if content_data:
            self.output_manager.save_json(content_data, "article.json", subdir=output_dir.name)
            logger.info(f"成功为选题 {topic_index} 生成并保存内容。")
            return content_data
        logger.error(f"解析内容JSON失败 for {topic_index}")
        return {"error": "JSONDecodeError", "raw_content": raw_result}