#!/usr/bin/env python3

import sys
import traceback

from praisonaiagents import Agent, MCP
def test_agent_direct():
    """Exercise the plain ``gpt-4o-mini`` model string (agent.py direct-call path).

    Builds an Agent with the sequential-thinking MCP tool server and runs a
    single prompt through it.

    Returns:
        tuple: ``(True, result)`` when the agent ran to completion, or
        ``(False, error_message)`` if creation or execution raised.
    """
    print("=" * 50)
    print("TESTING: gpt-4o-mini (agent.py direct calls)")
    print("=" * 50)

    try:
        agent = Agent(
            instructions="""You are a helpful assistant that can break down complex problems.
            Use the available tools when relevant to perform step-by-step analysis.""",
            llm="gpt-4o-mini",
            tools=MCP("npx -y @modelcontextprotocol/server-sequential-thinking"),
        )

        print("✅ Agent created successfully")
        # getattr with defaults: these attributes may not exist on every Agent build.
        print(f"✅ Agent LLM: {getattr(agent, 'llm', 'Not set')}")
        print(f"✅ Agent using custom LLM: {getattr(agent, '_using_custom_llm', False)}")

        result = agent.start("What are 3 steps to make coffee?")
        print("✅ Agent execution completed successfully")
        return True, result

    except Exception as e:
        # Debug script: intentionally broad catch so the comparison in
        # __main__ can report which path failed instead of crashing.
        print(f"❌ Error in agent direct: {e}")
        traceback.print_exc()
        return False, str(e)
def test_llm_class():
    """Exercise the ``openai/gpt-4o-mini`` provider-prefixed string (llm.py LiteLLM path).

    Mirrors :func:`test_agent_direct` but routes through the LLM class by
    using the ``provider/model`` format.

    Returns:
        tuple: ``(True, result)`` when the agent ran to completion, or
        ``(False, error_message)`` if creation or execution raised.
    """
    print("\n" + "=" * 50)
    print("TESTING: openai/gpt-4o-mini (llm.py LiteLLM)")
    print("=" * 50)

    try:
        agent = Agent(
            instructions="""You are a helpful assistant that can break down complex problems.
            Use the available tools when relevant to perform step-by-step analysis.""",
            llm="openai/gpt-4o-mini",
            tools=MCP("npx -y @modelcontextprotocol/server-sequential-thinking"),
        )

        print("✅ Agent created successfully")
        # getattr with defaults: these attributes may not exist on every Agent build.
        print(f"✅ Agent LLM instance: {getattr(agent, 'llm_instance', 'Not set')}")
        print(f"✅ Agent using custom LLM: {getattr(agent, '_using_custom_llm', False)}")

        result = agent.start("What are 3 steps to make coffee?")
        print("✅ Agent execution completed successfully")
        return True, result

    except Exception as e:
        # Debug script: intentionally broad catch so the comparison in
        # __main__ can report which path failed instead of crashing.
        print(f"❌ Error in llm class: {e}")
        traceback.print_exc()
        return False, str(e)
if __name__ == "__main__":
    # Run both model-string formats and compare which code path works.
    print("🔍 DEBUGGING: Comparing both LLM approaches\n")

    success1, result1 = test_agent_direct()
    success2, result2 = test_llm_class()

    print("\n" + "=" * 50)
    print("FINAL RESULTS")
    print("=" * 50)

    if success1:
        print("✅ gpt-4o-mini (agent.py) - SUCCESS")
    else:
        print("❌ gpt-4o-mini (agent.py) - FAILED")
        print(f"   Error: {result1}")

    if success2:
        print("✅ openai/gpt-4o-mini (llm.py) - SUCCESS")
    else:
        print("❌ openai/gpt-4o-mini (llm.py) - FAILED")
        print(f"   Error: {result2}")

    # Verdict: the elif chain makes the redundant "and not successX"
    # checks unnecessary — each branch is only reached when the earlier
    # combined condition failed.
    if success1 and success2:
        print("\n🎉 BOTH FORMATS WORK CORRECTLY!")
        print("📝 The issue mentioned might be resolved or was a different problem.")
    elif success1:
        print("\n⚠️ CONFIRMED: LLM class path has issues")
        print("📝 Need to debug the LLM class implementation")
    elif success2:
        print("\n⚠️ CONFIRMED: Agent direct path has issues")
        print("📝 Need to debug the agent direct implementation")
    else:
        print("\n💥 BOTH PATHS FAILED - Something is fundamentally wrong")