#!/usr/bin/env python3
# -*- coding: utf-8 -*-
import os
import sys
import json
import time
import logging
import traceback

# Add project root to the Python path to allow importing modules from core and utils
project_root = os.path.dirname(os.path.dirname(os.path.abspath(__file__)))
sys.path.insert(0, project_root)

from core.ai_agent import AI_Agent


def load_config(config_path="../poster_gen_config.json"):
    """Loads configuration from a JSON file relative to this script."""
    # Resolve relative paths against this script's directory so the script
    # works regardless of the current working directory.
    if not os.path.isabs(config_path):
        config_path = os.path.join(os.path.dirname(os.path.abspath(__file__)), config_path)

    if not os.path.exists(config_path):
        logging.error(f"Error: Configuration file '{config_path}' not found.")
        logging.error("Make sure you have copied 'example_config.json' to 'poster_gen_config.json' in the project root.")
        sys.exit(1)
    try:
        with open(config_path, 'r', encoding='utf-8') as f:
            config = json.load(f)
        # Basic validation can be added here if needed
        logging.info(f"Configuration loaded successfully from {config_path}")
        return config
    except Exception as e:
        logging.error(f"Error loading configuration from '{config_path}': {e}")
        sys.exit(1)


def main():
    logging.basicConfig(
        level=logging.INFO,
        format='%(asctime)s - %(levelname)s - [%(filename)s:%(lineno)d] - %(message)s',
        datefmt='%Y-%m-%d %H:%M:%S'
    )

    logging.info("--- Testing AI Agent Streaming ---")

    # 1. Load configuration
    config = load_config()

    # 2. Define example prompts (replace with your desired test prompts)
    test_system_prompt = "You are a helpful assistant. Respond concisely."
    test_user_prompt = "Tell me a short story about a traveling robot."
    # You can optionally specify a folder with reference files
    test_file_folder = None  # Or e.g., "../resource/Object"

    # Get generation parameters from config or use defaults
    temperature = config.get("content_temperature", 0.7)  # Using content params as example
    top_p = config.get("content_top_p", 0.9)
    presence_penalty = config.get("content_presence_penalty", 1.0)

    # 3. Initialize AI Agent
    ai_agent = None
    try:
        request_timeout = config.get("request_timeout", 30)
        max_retries = config.get("max_retries", 3)
        ai_agent = AI_Agent(
            config["api_url"],
            config["model"],
            config["api_key"],
            timeout=request_timeout,
            max_retries=max_retries
        )
        logging.info("AI Agent initialized.")

        # 4. Call work_stream and process the generator
        logging.info("\n--- Starting stream generation ---")
        start_time = time.time()

        stream_generator = ai_agent.work_stream(
            test_system_prompt,
            test_user_prompt,
            test_file_folder,
            temperature,
            top_p,
            presence_penalty
        )

        full_response_streamed = ""
        try:
            for chunk in stream_generator:
                print(chunk, end="", flush=True)  # Print each chunk as it arrives
                full_response_streamed += chunk
        except Exception as e:
            logging.error(f"\nError while iterating through stream generator: {e}")

        end_time = time.time()
        logging.info(f"\n--- Stream finished in {end_time - start_time:.2f} seconds ---")
        # Optionally print the assembled response
        # print(f"Full response received via stream:\n{full_response_streamed}")

    except Exception as e:
        logging.error(f"\nAn error occurred: {e}")
        traceback.print_exc()

    finally:
        # 5. Close the agent
        if ai_agent:
            logging.info("\nClosing AI Agent...")
            ai_agent.close()
            logging.info("AI Agent closed.")


if __name__ == "__main__":
    main()