Added a test script for AI_Agent streaming output

This commit is contained in:
jinye_huang 2025-04-22 16:30:33 +08:00
parent 59312ed23a
commit e3d466353d

examples/test_stream.py Normal file

@@ -0,0 +1,102 @@
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
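"""
Smoke test for AI_Agent streaming output: load the project configuration,
send a sample prompt through AI_Agent.work_stream(), and print each chunk
as it arrives.
"""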
import os
import sys
import json
import time

# Add project root to the Python path to allow importing modules from core and utils
project_root = os.path.dirname(os.path.dirname(os.path.abspath(__file__)))
sys.path.insert(0, project_root)

from core.ai_agent import AI_Agent


def load_config(config_path=os.path.join(project_root, "poster_gen_config.json")):
    """Loads configuration from the JSON config file in the project root."""
    if not os.path.exists(config_path):
        print(f"Error: Configuration file '{config_path}' not found.")
        print("Make sure you have copied 'example_config.json' to 'poster_gen_config.json' in the project root.")
        sys.exit(1)
    try:
        with open(config_path, 'r', encoding='utf-8') as f:
            config = json.load(f)
        # Basic validation can be added here if needed
        print(f"Configuration loaded successfully from {config_path}")
        return config
    except Exception as e:
        print(f"Error loading configuration from '{config_path}': {e}")
        sys.exit(1)


def main():
    print("--- Testing AI Agent Streaming ---")

    # 1. Load configuration
    config = load_config()

    # 2. Define example prompts (replace with your desired test prompts)
    test_system_prompt = "You are a helpful assistant. Respond concisely."
    test_user_prompt = "Tell me a short story about a traveling robot."
    # You can optionally specify a folder with reference files
    test_file_folder = None  # Or e.g., "../resource/Object"

    # Get generation parameters from config or use defaults
    temperature = config.get("content_temperature", 0.7)  # Using content params as example
    top_p = config.get("content_top_p", 0.9)
    presence_penalty = config.get("content_presence_penalty", 1.0)

    # 3. Initialize AI Agent
    ai_agent = None
    try:
        request_timeout = config.get("request_timeout", 30)
        max_retries = config.get("max_retries", 3)
        ai_agent = AI_Agent(
            config["api_url"],
            config["model"],
            config["api_key"],
            timeout=request_timeout,
            max_retries=max_retries
        )
        print("AI Agent initialized.")

        # 4. Call work_stream and process the generator
        print("\n--- Starting stream generation ---")
        start_time = time.time()
        stream_generator = ai_agent.work_stream(
            test_system_prompt,
            test_user_prompt,
            test_file_folder,
            temperature,
            top_p,
            presence_penalty
        )

        full_response_streamed = ""
        try:
            for chunk in stream_generator:
                print(chunk, end="", flush=True)  # Print each chunk as it arrives
                full_response_streamed += chunk
        except Exception as e:
            print(f"\nError while iterating through stream generator: {e}")

        end_time = time.time()
        print(f"\n--- Stream finished in {end_time - start_time:.2f} seconds ---")
        # print(f"Full response received via stream:\n{full_response_streamed}")  # Optionally print the assembled response

    except Exception as e:
        print(f"\nAn error occurred: {e}")
        import traceback
        traceback.print_exc()
    finally:
        # 5. Close the agent
        if ai_agent:
            print("\nClosing AI Agent...")
            ai_agent.close()
            print("AI Agent closed.")


if __name__ == "__main__":
    main()