#!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""Manual test harness for the topic -> content -> poster generation pipeline.

Runs each pipeline step once (with config overridden to a single topic and
a single variant) and logs the intermediate results.
"""
import os
import sys
import json
import time
from datetime import datetime
import logging

# Add project root to the Python path so `core`/`utils` imports resolve
# when this script is executed directly.
project_root = os.path.dirname(os.path.dirname(os.path.abspath(__file__)))
sys.path.insert(0, project_root)

from core.ai_agent import AI_Agent
from utils.prompt_manager import PromptManager
from utils.tweet_generator import (
    run_topic_generation_pipeline,
    generate_content_for_topic,
    generate_posters_for_topic
)


def load_config(config_path="/root/autodl-tmp/TravelContentCreator/poster_gen_config.json"):
    """Load the JSON pipeline configuration.

    Args:
        config_path: Absolute path to the JSON config file.

    Returns:
        dict: The parsed configuration.

    Exits the process (status 1) if the file is missing or unparsable.
    """
    if not os.path.exists(config_path):
        logging.error("Error: Config file '%s' not found.", config_path)
        sys.exit(1)
    try:
        with open(config_path, 'r', encoding='utf-8') as f:
            config = json.load(f)
        logging.info("Configuration loaded successfully.")
        return config
    except Exception as e:
        logging.error("Error loading configuration: %s", e)
        sys.exit(1)


def main_test():
    """Exercise topic, content, and poster generation once each, logging results."""
    logging.basicConfig(
        level=logging.INFO,
        format='%(asctime)s - %(levelname)s - [%(filename)s:%(lineno)d] - %(message)s',
        datefmt='%Y-%m-%d %H:%M:%S'
    )
    logging.info("--- Starting Pipeline Step Test ---")
    config = load_config()

    # --- Override config for faster testing ---
    config['num'] = 1       # Generate only 1 topic
    config['variants'] = 1  # Generate only 1 content/poster variant
    logging.info("Config overridden for testing: num=%s, variants=%s",
                 config['num'], config['variants'])

    run_id = None
    tweet_topic_record = None
    ai_agent_content = None  # Separate agent instance for content/poster
    try:
        # --- Step 1: Test Topic Generation ---
        logging.info("\n--- Testing Topic Generation ---")
        # run_id is generated inside the pipeline if not passed
        run_id, tweet_topic_record = run_topic_generation_pipeline(config)
        if not run_id or not tweet_topic_record or not tweet_topic_record.topics_list:
            logging.error("Topic generation failed or produced no topics. Exiting test.")
            return
        logging.info("Topic generation successful.\nRun ID: %s", run_id)
        logging.info("Generated %d topic(s).", len(tweet_topic_record.topics_list))
        test_topic = tweet_topic_record.topics_list[0]  # First topic for testing
        # NOTE: the payload must be passed through a %s placeholder, otherwise
        # logging raises a formatting TypeError and drops the message.
        logging.info("Test Topic Data: %s",
                     json.dumps(test_topic, ensure_ascii=False, indent=2))

        # --- Step 2: Test Content Generation (for the first topic) ---
        logging.info("\n--- Testing Content Generation ---")
        # Initialize resources needed for content generation
        prompt_manager = PromptManager(config)
        logging.info("Initializing AI Agent for content...")
        request_timeout = config.get("request_timeout", 30)
        max_retries = config.get("max_retries", 3)
        ai_agent_content = AI_Agent(
            config["api_url"],
            config["model"],
            config["api_key"],
            timeout=request_timeout,
            max_retries=max_retries
        )
        base_output_dir = config["output_dir"]
        topic_index = 1  # Testing the first topic (1-based index)
        tweet_content_list = generate_content_for_topic(
            ai_agent_content,
            prompt_manager,
            config,
            test_topic,
            base_output_dir,
            run_id,
            topic_index
        )
        if not tweet_content_list:
            logging.error("Content generation failed or produced no content. Exiting test.")
            return
        logging.info("Content generation successful.\nGenerated %d variant(s).",
                     len(tweet_content_list))
        logging.info("Generated Content Data (first variant): %s",
                     json.dumps(tweet_content_list[0], ensure_ascii=False, indent=2))

        # --- Step 3: Test Poster Generation (for the first topic/content) ---
        logging.info("\n--- Testing Poster Generation ---")
        # Poster generation uses its own internal ContentGenerator and
        # PosterGenerator instances; we just need to call the function.
        success = generate_posters_for_topic(
            config,
            test_topic,
            tweet_content_list,  # Pass the list generated above
            base_output_dir,
            run_id,
            topic_index
        )
        if success:
            logging.info("Poster generation function executed (check output directory for results).")
        else:
            logging.warning("Poster generation function reported failure or skipped execution.")

    except Exception:
        logging.info("\n--- An error occurred during testing ---")
        # logging.exception records the full traceback, not just the message.
        logging.exception("Error during pipeline test")
    finally:
        # Clean up the content generation AI agent if it was created
        if ai_agent_content:
            logging.info("\nClosing content generation AI Agent...")
            ai_agent_content.close()
        logging.info("\n--- Test Finished ---")


if __name__ == "__main__":
    main_test()