{
"file_path": "tweet/content_generator.py",
"file_size": 5483,
"line_count": 141,
"functions": [
{
"name": "__init__",
"line_start": 23,
"line_end": 29,
"args": [
{
"name": "self"
},
{
"name": "ai_agent",
"type_hint": "AIAgent"
},
{
"name": "config_manager",
"type_hint": "ConfigManager"
},
{
"name": "output_manager",
"type_hint": "OutputManager"
}
],
"return_type": null,
"docstring": "",
"is_async": false,
"decorators": [],
"code": " def __init__(self, ai_agent: AIAgent, config_manager: ConfigManager, output_manager: OutputManager):\n self.ai_agent = ai_agent\n self.config_manager = config_manager\n self.topic_config = config_manager.get_config('topic_gen', GenerateTopicConfig)\n self.content_config = config_manager.get_config('content_gen', GenerateContentConfig)\n self.output_manager = output_manager\n self.prompt_builder = ContentPromptBuilder(config_manager)",
"code_hash": "88e6d8e8fb5faaf9bf4848e2de5c7170"
}
],
"classes": [
{
"name": "ContentGenerator",
"line_start": 20,
"line_end": 142,
"bases": [],
"methods": [
{
"name": "__init__",
"line_start": 23,
"line_end": 29,
"args": [
{
"name": "self"
},
{
"name": "ai_agent",
"type_hint": "AIAgent"
},
{
"name": "config_manager",
"type_hint": "ConfigManager"
},
{
"name": "output_manager",
"type_hint": "OutputManager"
}
],
"return_type": null,
"docstring": "",
"is_async": false,
"decorators": [],
"code": " def __init__(self, ai_agent: AIAgent, config_manager: ConfigManager, output_manager: OutputManager):\n self.ai_agent = ai_agent\n self.config_manager = config_manager\n self.topic_config = config_manager.get_config('topic_gen', GenerateTopicConfig)\n self.content_config = config_manager.get_config('content_gen', GenerateContentConfig)\n self.output_manager = output_manager\n self.prompt_builder = ContentPromptBuilder(config_manager)",
"code_hash": "88e6d8e8fb5faaf9bf4848e2de5c7170"
},
{
"name": "generate_content_for_topic",
"line_start": 31,
"line_end": 87,
"args": [
{
"name": "self"
},
{
"name": "topic",
"type_hint": "Dict[str, Any]"
}
],
"return_type": "Dict[str, Any]",
"docstring": "为单个选题生成内容\n\nArgs:\n topic: 选题信息字典\n\nReturns:\n 包含生成内容的字典",
"is_async": true,
"decorators": [],
"code": " async def generate_content_for_topic(self, topic: Dict[str, Any]) -> Dict[str, Any]:\n \"\"\"\n 为单个选题生成内容\n\n Args:\n topic: 选题信息字典\n\n Returns:\n 包含生成内容的字典\n \"\"\"\n topic_index = topic.get('index', 'N/A')\n logger.info(f\"开始为选题 {topic_index} 生成内容...\")\n\n # 1. 构建提示\n # 使用模板构建器分别获取系统和用户提示\n system_prompt = self.prompt_builder.get_system_prompt()\n user_prompt = self.prompt_builder.build_user_prompt(topic=topic)\n \n # 保存提示以供调试\n output_dir = self.output_manager.get_topic_dir(topic_index)\n self.output_manager.save_text(system_prompt, \"content_system_prompt.txt\", subdir=output_dir.name)\n self.output_manager.save_text(user_prompt, \"content_user_prompt.txt\", subdir=output_dir.name)\n\n # 获取模型参数\n model_params = {}\n if hasattr(self.content_config, 'model') and isinstance(self.content_config.model, dict):\n model_params = {\n 'temperature': self.content_config.model.get('temperature'),\n 'top_p': self.content_config.model.get('top_p'),\n 'presence_penalty': self.content_config.model.get('presence_penalty')\n }\n # 移除None值\n model_params = {k: v for k, v in model_params.items() if v is not None}\n\n # 2. 调用AI\n try:\n raw_result, _, _, _ = await self.ai_agent.generate_text(\n system_prompt=system_prompt,\n user_prompt=user_prompt,\n use_stream=True,\n stage=\"内容生成\",\n **model_params\n )\n self.output_manager.save_text(raw_result, \"content_raw_response.txt\", subdir=output_dir.name)\n except Exception as e:\n logger.critical(f\"为选题 {topic_index} 生成内容时AI调用失败: {e}\", exc_info=True)\n return {\"error\": str(e)}\n\n # 3. 解析和保存结果\n content_data = process_llm_json_text(raw_result)\n if content_data:\n self.output_manager.save_json(content_data, \"article.json\", subdir=output_dir.name)\n logger.info(f\"成功为选题 {topic_index} 生成并保存内容。\")\n return content_data\n else:\n logger.error(f\"解析内容JSON失败 for {topic_index}\")\n return {\"error\": \"JSONDecodeError\", \"raw_content\": raw_result}",
"code_hash": "8ccf599fb9a56453241a2b5a6ecc803c"
},
{
"name": "generate_content_with_prompt",
"line_start": 89,
"line_end": 142,
"args": [
{
"name": "self"
},
{
"name": "topic",
"type_hint": "Dict[str, Any]"
},
{
"name": "system_prompt",
"type_hint": "str"
},
{
"name": "user_prompt",
"type_hint": "str"
}
],
"return_type": "Dict[str, Any]",
"docstring": "使用已构建的提示词生成内容\n\nArgs:\n topic: 选题信息字典\n system_prompt: 已构建好的系统提示词\n user_prompt: 已构建好的用户提示词\n\nReturns:\n 包含生成内容的字典",
"is_async": true,
"decorators": [],
"code": " async def generate_content_with_prompt(self, topic: Dict[str, Any], system_prompt: str, user_prompt: str) -> Dict[str, Any]:\n \"\"\"\n 使用已构建的提示词生成内容\n\n Args:\n topic: 选题信息字典\n system_prompt: 已构建好的系统提示词\n user_prompt: 已构建好的用户提示词\n\n Returns:\n 包含生成内容的字典\n \"\"\"\n topic_index = topic.get('index', 'N/A')\n logger.info(f\"使用预构建提示词为选题 {topic_index} 生成内容...\")\n \n # 保存提示以供调试\n output_dir = self.output_manager.get_topic_dir(topic_index)\n self.output_manager.save_text(system_prompt, \"content_system_prompt.txt\", subdir=output_dir.name)\n self.output_manager.save_text(user_prompt, \"content_user_prompt.txt\", subdir=output_dir.name)\n\n # 获取模型参数\n model_params = {}\n if hasattr(self.content_config, 'model') and isinstance(self.content_config.model, dict):\n model_params = {\n 'temperature': self.content_config.model.get('temperature'),\n 'top_p': self.content_config.model.get('top_p'),\n 'presence_penalty': self.content_config.model.get('presence_penalty')\n }\n # 移除None值\n model_params = {k: v for k, v in model_params.items() if v is not None}\n\n # 调用AI\n try:\n raw_result, _, _, _ = await self.ai_agent.generate_text(\n system_prompt=system_prompt,\n user_prompt=user_prompt,\n use_stream=True,\n stage=\"内容生成\",\n **model_params\n )\n self.output_manager.save_text(raw_result, \"content_raw_response.txt\", subdir=output_dir.name)\n except Exception as e:\n logger.critical(f\"为选题 {topic_index} 生成内容时AI调用失败: {e}\", exc_info=True)\n return {\"error\": str(e)}\n\n # 解析和保存结果\n content_data = process_llm_json_text(raw_result)\n if content_data:\n self.output_manager.save_json(content_data, \"article.json\", subdir=output_dir.name)\n logger.info(f\"成功为选题 {topic_index} 生成并保存内容。\")\n return content_data\n else:\n logger.error(f\"解析内容JSON失败 for {topic_index}\")\n return {\"error\": \"JSONDecodeError\", \"raw_content\": raw_result} ",
"code_hash": "2d7576b90b1b5c5f717424569945ab93"
}
],
"docstring": "负责为单个选题生成内容",
"decorators": [],
"code": "class ContentGenerator:\n \"\"\"负责为单个选题生成内容\"\"\"\n\n def __init__(self, ai_agent: AIAgent, config_manager: ConfigManager, output_manager: OutputManager):\n self.ai_agent = ai_agent\n self.config_manager = config_manager\n self.topic_config = config_manager.get_config('topic_gen', GenerateTopicConfig)\n self.content_config = config_manager.get_config('content_gen', GenerateContentConfig)\n self.output_manager = output_manager\n self.prompt_builder = ContentPromptBuilder(config_manager)\n\n async def generate_content_for_topic(self, topic: Dict[str, Any]) -> Dict[str, Any]:\n \"\"\"\n 为单个选题生成内容\n\n Args:\n topic: 选题信息字典\n\n Returns:\n 包含生成内容的字典\n \"\"\"\n topic_index = topic.get('index', 'N/A')\n logger.info(f\"开始为选题 {topic_index} 生成内容...\")\n\n # 1. 构建提示\n # 使用模板构建器分别获取系统和用户提示\n system_prompt = self.prompt_builder.get_system_prompt()\n user_prompt = self.prompt_builder.build_user_prompt(topic=topic)\n \n # 保存提示以供调试\n output_dir = self.output_manager.get_topic_dir(topic_index)\n self.output_manager.save_text(system_prompt, \"content_system_prompt.txt\", subdir=output_dir.name)\n self.output_manager.save_text(user_prompt, \"content_user_prompt.txt\", subdir=output_dir.name)\n\n # 获取模型参数\n model_params = {}\n if hasattr(self.content_config, 'model') and isinstance(self.content_config.model, dict):\n model_params = {\n 'temperature': self.content_config.model.get('temperature'),\n 'top_p': self.content_config.model.get('top_p'),\n 'presence_penalty': self.content_config.model.get('presence_penalty')\n }\n # 移除None值\n model_params = {k: v for k, v in model_params.items() if v is not None}\n\n # 2. 调用AI\n try:\n raw_result, _, _, _ = await self.ai_agent.generate_text(\n system_prompt=system_prompt,\n user_prompt=user_prompt,\n use_stream=True,\n stage=\"内容生成\",\n **model_params\n )\n self.output_manager.save_text(raw_result, \"content_raw_response.txt\", subdir=output_dir.name)\n except Exception as e:\n logger.critical(f\"为选题 {topic_index} 生成内容时AI调用失败: {e}\", exc_info=True)\n return {\"error\": str(e)}\n\n # 3. 
解析和保存结果\n content_data = process_llm_json_text(raw_result)\n if content_data:\n self.output_manager.save_json(content_data, \"article.json\", subdir=output_dir.name)\n logger.info(f\"成功为选题 {topic_index} 生成并保存内容。\")\n return content_data\n else:\n logger.error(f\"解析内容JSON失败 for {topic_index}\")\n return {\"error\": \"JSONDecodeError\", \"raw_content\": raw_result}\n \n async def generate_content_with_prompt(self, topic: Dict[str, Any], system_prompt: str, user_prompt: str) -> Dict[str, Any]:\n \"\"\"\n 使用已构建的提示词生成内容\n\n Args:\n topic: 选题信息字典\n system_prompt: 已构建好的系统提示词\n user_prompt: 已构建好的用户提示词\n\n Returns:\n 包含生成内容的字典\n \"\"\"\n topic_index = topic.get('index', 'N/A')\n logger.info(f\"使用预构建提示词为选题 {topic_index} 生成内容...\")\n \n # 保存提示以供调试\n output_dir = self.output_manager.get_topic_dir(topic_index)\n self.output_manager.save_text(system_prompt, \"content_system_prompt.txt\", subdir=output_dir.name)\n self.output_manager.save_text(user_prompt, \"content_user_prompt.txt\", subdir=output_dir.name)\n\n # 获取模型参数\n model_params = {}\n if hasattr(self.content_config, 'model') and isinstance(self.content_config.model, dict):\n model_params = {\n 'temperature': self.content_config.model.get('temperature'),\n 'top_p': self.content_config.model.get('top_p'),\n 'presence_penalty': self.content_config.model.get('presence_penalty')\n }\n # 移除None值\n model_params = {k: v for k, v in model_params.items() if v is not None}\n\n # 调用AI\n try:\n raw_result, _, _, _ = await self.ai_agent.generate_text(\n system_prompt=system_prompt,\n user_prompt=user_prompt,\n use_stream=True,\n stage=\"内容生成\",\n **model_params\n )\n self.output_manager.save_text(raw_result, \"content_raw_response.txt\", subdir=output_dir.name)\n except Exception as e:\n logger.critical(f\"为选题 {topic_index} 生成内容时AI调用失败: {e}\", exc_info=True)\n return {\"error\": str(e)}\n\n # 解析和保存结果\n content_data = process_llm_json_text(raw_result)\n if content_data:\n self.output_manager.save_json(content_data, \"article.json\", subdir=output_dir.name)\n logger.info(f\"成功为选题 {topic_index} 生成并保存内容。\")\n return content_data\n else:\n logger.error(f\"解析内容JSON失败 for {topic_index}\")\n return {\"error\": \"JSONDecodeError\", \"raw_content\": raw_result} ",
"code_hash": "9205f62520279562109ec37a309391ac"
}
],
"imports": [
{
"type": "import",
"modules": [
"logging"
],
"aliases": []
},
{
"type": "import",
"modules": [
"json"
],
"aliases": []
},
{
"type": "from_import",
"module": "typing",
"names": [
"Dict",
"Any",
"Tuple",
"Optional"
],
"aliases": [],
"level": 0
},
{
"type": "from_import",
"module": "core.ai",
"names": [
"AIAgent"
],
"aliases": [],
"level": 0
},
{
"type": "from_import",
"module": "core.config",
"names": [
"ConfigManager",
"GenerateTopicConfig",
"GenerateContentConfig"
],
"aliases": [],
"level": 0
},
{
"type": "from_import",
"module": "utils.prompts",
"names": [
"ContentPromptBuilder"
],
"aliases": [],
"level": 0
},
{
"type": "from_import",
"module": "utils.file_io",
"names": [
"OutputManager",
"process_llm_json_text"
],
"aliases": [],
"level": 0
}
],
"constants": [],
"docstring": "内容生成模块",
"content_hash": "1f92561652f12d1021bee41f76512a83"
}