{
"file_path": "tweet/topic_generator.py",
"file_size": 4849,
"line_count": 148,
"functions": [
{
"name": "__init__",
"line_start": 26,
"line_end": 40,
"args": [
{
"name": "self"
},
{
"name": "ai_agent",
"type_hint": "AIAgent"
},
{
"name": "config_manager",
"type_hint": "ConfigManager"
},
{
"name": "output_manager",
"type_hint": "OutputManager"
}
],
"return_type": null,
"docstring": "初始化选题生成器\n\nArgs:\n ai_agent: AI代理\n config_manager: 配置管理器\n output_manager: 输出管理器",
"is_async": false,
"decorators": [],
"code": " def __init__(self, ai_agent: AIAgent, config_manager: ConfigManager, output_manager: OutputManager):\n \"\"\"\n 初始化选题生成器\n\n Args:\n ai_agent: AI代理\n config_manager: 配置管理器\n output_manager: 输出管理器\n \"\"\"\n self.ai_agent = ai_agent\n self.config_manager = config_manager\n self.config = config_manager.get_config('topic_gen', GenerateTopicConfig)\n self.output_manager = output_manager\n self.prompt_builder = TopicPromptBuilder(config_manager)\n self.parser = TopicParser()",
"code_hash": "8de8e887e9258010481e9e668326b0d6"
}
],
"classes": [
{
"name": "TopicGenerator",
"line_start": 20,
"line_end": 146,
"bases": [],
"methods": [
{
"name": "__init__",
"line_start": 26,
"line_end": 40,
"args": [
{
"name": "self"
},
{
"name": "ai_agent",
"type_hint": "AIAgent"
},
{
"name": "config_manager",
"type_hint": "ConfigManager"
},
{
"name": "output_manager",
"type_hint": "OutputManager"
}
],
"return_type": null,
"docstring": "初始化选题生成器\n\nArgs:\n ai_agent: AI代理\n config_manager: 配置管理器\n output_manager: 输出管理器",
"is_async": false,
"decorators": [],
"code": " def __init__(self, ai_agent: AIAgent, config_manager: ConfigManager, output_manager: OutputManager):\n \"\"\"\n 初始化选题生成器\n\n Args:\n ai_agent: AI代理\n config_manager: 配置管理器\n output_manager: 输出管理器\n \"\"\"\n self.ai_agent = ai_agent\n self.config_manager = config_manager\n self.config = config_manager.get_config('topic_gen', GenerateTopicConfig)\n self.output_manager = output_manager\n self.prompt_builder = TopicPromptBuilder(config_manager)\n self.parser = TopicParser()",
"code_hash": "8de8e887e9258010481e9e668326b0d6"
},
{
"name": "generate_topics",
"line_start": 42,
"line_end": 92,
"args": [
{
"name": "self"
}
],
"return_type": "Optional[List[Dict[str, Any]]]",
"docstring": "执行完整的选题生成流程:构建提示 -> 调用AI -> 解析结果 -> 保存产物",
"is_async": true,
"decorators": [],
"code": " async def generate_topics(self) -> Optional[List[Dict[str, Any]]]:\n \"\"\"\n 执行完整的选题生成流程:构建提示 -> 调用AI -> 解析结果 -> 保存产物\n \"\"\"\n logger.info(\"开始执行选题生成流程...\")\n\n # 1. 构建提示\n system_prompt = self.prompt_builder.get_system_prompt()\n user_prompt = self.prompt_builder.build_user_prompt(\n numTopics=self.config.topic.num,\n month=self.config.topic.date\n )\n self.output_manager.save_text(system_prompt, \"topic_system_prompt.txt\")\n self.output_manager.save_text(user_prompt, \"topic_user_prompt.txt\")\n \n # 获取模型参数\n model_params = {}\n if hasattr(self.config, 'model') and isinstance(self.config.model, dict):\n model_params = {\n 'temperature': self.config.model.get('temperature'),\n 'top_p': self.config.model.get('top_p'),\n 'presence_penalty': self.config.model.get('presence_penalty')\n }\n # 移除None值\n model_params = {k: v for k, v in model_params.items() if v is not None}\n \n # 2. 调用AI生成\n try:\n raw_result, _, _, _ = await self.ai_agent.generate_text(\n system_prompt=system_prompt,\n user_prompt=user_prompt,\n use_stream=True, # 选题生成通常不需要流式输出\n stage=\"选题生成\",\n **model_params\n )\n self.output_manager.save_text(raw_result, \"topics_raw_response.txt\")\n except Exception as e:\n logger.critical(f\"AI调用失败无法生成选题: {e}\", exc_info=True)\n return None\n\n # 3. 解析结果\n topics = self.parser.parse(raw_result)\n if not topics:\n logger.error(\"未能从AI响应中解析出任何有效选题\")\n return None\n \n # 4. 保存结果\n self.output_manager.save_json(topics, \"topics.json\")\n logger.info(f\"成功生成并保存 {len(topics)} 个选题\")\n \n return topics",
"code_hash": "8bfc34aad972e35422d8ee4b9ab9bd9b"
},
{
"name": "generate_topics_with_prompt",
"line_start": 94,
"line_end": 146,
"args": [
{
"name": "self"
},
{
"name": "system_prompt",
"type_hint": "str"
},
{
"name": "user_prompt",
"type_hint": "str"
}
],
"return_type": "Optional[List[Dict[str, Any]]]",
"docstring": "使用预构建的提示词生成选题\n\nArgs:\n system_prompt: 已构建好的系统提示词\n user_prompt: 已构建好的用户提示词\n \nReturns:\n 生成的选题列表如果失败则返回None",
"is_async": true,
"decorators": [],
"code": " async def generate_topics_with_prompt(self, system_prompt: str, user_prompt: str) -> Optional[List[Dict[str, Any]]]:\n \"\"\"\n 使用预构建的提示词生成选题\n \n Args:\n system_prompt: 已构建好的系统提示词\n user_prompt: 已构建好的用户提示词\n \n Returns:\n 生成的选题列表如果失败则返回None\n \"\"\"\n logger.info(\"使用预构建提示词开始执行选题生成流程...\")\n \n # 保存提示以供调试\n self.output_manager.save_text(system_prompt, \"topic_system_prompt.txt\")\n self.output_manager.save_text(user_prompt, \"topic_user_prompt.txt\")\n \n # 获取模型参数\n model_params = {}\n if hasattr(self.config, 'model') and isinstance(self.config.model, dict):\n model_params = {\n 'temperature': self.config.model.get('temperature'),\n 'top_p': self.config.model.get('top_p'),\n 'presence_penalty': self.config.model.get('presence_penalty')\n }\n # 移除None值\n model_params = {k: v for k, v in model_params.items() if v is not None}\n \n # 调用AI生成\n try:\n raw_result, _, _, _ = await self.ai_agent.generate_text(\n system_prompt=system_prompt,\n user_prompt=user_prompt,\n use_stream=True,\n stage=\"选题生成\",\n **model_params\n )\n self.output_manager.save_text(raw_result, \"topics_raw_response.txt\")\n except Exception as e:\n logger.critical(f\"AI调用失败无法生成选题: {e}\", exc_info=True)\n return None\n \n # 解析结果\n topics = self.parser.parse(raw_result)\n if not topics:\n logger.error(\"未能从AI响应中解析出任何有效选题\")\n return None\n \n # 保存结果\n self.output_manager.save_json(topics, \"topics.json\")\n logger.info(f\"成功生成并保存 {len(topics)} 个选题\")\n \n return topics",
"code_hash": "6768f97a84285f1a10cfd586d1a925af"
}
],
"docstring": "选题生成器\n负责生成旅游相关的选题",
"decorators": [],
"code": "class TopicGenerator:\n \"\"\"\n 选题生成器\n 负责生成旅游相关的选题\n \"\"\"\n\n def __init__(self, ai_agent: AIAgent, config_manager: ConfigManager, output_manager: OutputManager):\n \"\"\"\n 初始化选题生成器\n\n Args:\n ai_agent: AI代理\n config_manager: 配置管理器\n output_manager: 输出管理器\n \"\"\"\n self.ai_agent = ai_agent\n self.config_manager = config_manager\n self.config = config_manager.get_config('topic_gen', GenerateTopicConfig)\n self.output_manager = output_manager\n self.prompt_builder = TopicPromptBuilder(config_manager)\n self.parser = TopicParser()\n\n async def generate_topics(self) -> Optional[List[Dict[str, Any]]]:\n \"\"\"\n 执行完整的选题生成流程:构建提示 -> 调用AI -> 解析结果 -> 保存产物\n \"\"\"\n logger.info(\"开始执行选题生成流程...\")\n\n # 1. 构建提示\n system_prompt = self.prompt_builder.get_system_prompt()\n user_prompt = self.prompt_builder.build_user_prompt(\n numTopics=self.config.topic.num,\n month=self.config.topic.date\n )\n self.output_manager.save_text(system_prompt, \"topic_system_prompt.txt\")\n self.output_manager.save_text(user_prompt, \"topic_user_prompt.txt\")\n \n # 获取模型参数\n model_params = {}\n if hasattr(self.config, 'model') and isinstance(self.config.model, dict):\n model_params = {\n 'temperature': self.config.model.get('temperature'),\n 'top_p': self.config.model.get('top_p'),\n 'presence_penalty': self.config.model.get('presence_penalty')\n }\n # 移除None值\n model_params = {k: v for k, v in model_params.items() if v is not None}\n \n # 2. 调用AI生成\n try:\n raw_result, _, _, _ = await self.ai_agent.generate_text(\n system_prompt=system_prompt,\n user_prompt=user_prompt,\n use_stream=True, # 选题生成通常不需要流式输出\n stage=\"选题生成\",\n **model_params\n )\n self.output_manager.save_text(raw_result, \"topics_raw_response.txt\")\n except Exception as e:\n logger.critical(f\"AI调用失败无法生成选题: {e}\", exc_info=True)\n return None\n\n # 3. 解析结果\n topics = self.parser.parse(raw_result)\n if not topics:\n logger.error(\"未能从AI响应中解析出任何有效选题\")\n return None\n \n # 4. 
保存结果\n self.output_manager.save_json(topics, \"topics.json\")\n logger.info(f\"成功生成并保存 {len(topics)} 个选题\")\n \n return topics\n \n async def generate_topics_with_prompt(self, system_prompt: str, user_prompt: str) -> Optional[List[Dict[str, Any]]]:\n \"\"\"\n 使用预构建的提示词生成选题\n \n Args:\n system_prompt: 已构建好的系统提示词\n user_prompt: 已构建好的用户提示词\n \n Returns:\n 生成的选题列表如果失败则返回None\n \"\"\"\n logger.info(\"使用预构建提示词开始执行选题生成流程...\")\n \n # 保存提示以供调试\n self.output_manager.save_text(system_prompt, \"topic_system_prompt.txt\")\n self.output_manager.save_text(user_prompt, \"topic_user_prompt.txt\")\n \n # \n model_params = {}\n if hasattr(self.config, 'model') and isinstance(self.config.model, dict):\n model_params = {\n 'temperature': self.config.model.get('temperature'),\n 'top_p': self.config.model.get('top_p'),\n 'presence_penalty': self.config.model.get('presence_penalty
"code_hash": "d9772082007b323b17a00ad5360872d6"
}
],
"imports": [
{
"type": "import",
"modules": [
"logging"
],
"aliases": []
},
{
"type": "from_import",
"module": "typing",
"names": [
"Dict",
"Any",
"List",
"Optional",
"Tuple"
],
"aliases": [],
"level": 0
},
{
"type": "from_import",
"module": "core.ai",
"names": [
"AIAgent"
],
"aliases": [],
"level": 0
},
{
"type": "from_import",
"module": "core.config",
"names": [
"ConfigManager",
"GenerateTopicConfig"
],
"aliases": [],
"level": 0
},
{
"type": "from_import",
"module": "utils.prompts",
"names": [
"TopicPromptBuilder"
],
"aliases": [],
"level": 0
},
{
"type": "from_import",
"module": "utils.file_io",
"names": [
"OutputManager",
"process_llm_json_text"
],
"aliases": [],
"level": 0
},
{
"type": "from_import",
"module": "topic_parser",
"names": [
"TopicParser"
],
"aliases": [],
"level": 1
}
],
"constants": [],
"docstring": "选题生成模块",
"content_hash": "a72f82d4171671649a5ed42732953412"
}