
Commit ec18d4a

Merge pull request #604 from MervinPraison/develop
Update PraisonAI version to 2.2.28 across Dockerfiles and related files
2 parents 3588f1c + 7b2e2b6 · commit ec18d4a

18 files changed: +5251 −4876 lines

docker/Dockerfile

Lines changed: 1 addition & 1 deletion
```diff
@@ -16,7 +16,7 @@ RUN mkdir -p /root/.praison
 # Install Python packages (using latest versions)
 RUN pip install --no-cache-dir \
     flask \
-    "praisonai>=2.2.27" \
+    "praisonai>=2.2.28" \
     "praisonai[api]" \
     gunicorn \
     markdown
```

docker/Dockerfile.chat

Lines changed: 1 addition & 1 deletion
```diff
@@ -16,7 +16,7 @@ RUN mkdir -p /root/.praison
 # Install Python packages (using latest versions)
 RUN pip install --no-cache-dir \
     praisonai_tools \
-    "praisonai>=2.2.27" \
+    "praisonai>=2.2.28" \
     "praisonai[chat]" \
     "embedchain[github,youtube]"
```

docker/Dockerfile.dev

Lines changed: 1 addition & 1 deletion
```diff
@@ -20,7 +20,7 @@ RUN mkdir -p /root/.praison
 # Install Python packages (using latest versions)
 RUN pip install --no-cache-dir \
     praisonai_tools \
-    "praisonai>=2.2.27" \
+    "praisonai>=2.2.28" \
     "praisonai[ui]" \
     "praisonai[chat]" \
     "praisonai[realtime]" \
```

docker/Dockerfile.ui

Lines changed: 1 addition & 1 deletion
```diff
@@ -16,7 +16,7 @@ RUN mkdir -p /root/.praison
 # Install Python packages (using latest versions)
 RUN pip install --no-cache-dir \
     praisonai_tools \
-    "praisonai>=2.2.27" \
+    "praisonai>=2.2.28" \
     "praisonai[ui]" \
     "praisonai[crewai]"
```

docker/README.md

Lines changed: 2 additions & 2 deletions
````diff
@@ -121,7 +121,7 @@ healthcheck:
 ## 📦 Package Versions
 
 All Docker images use consistent, up-to-date versions:
-- PraisonAI: `>=2.2.27`
+- PraisonAI: `>=2.2.28`
 - PraisonAI Agents: `>=0.0.92`
 - Python: `3.11-slim`
 
@@ -218,7 +218,7 @@ docker-compose up -d
 ### Version Pinning
 To use specific versions, update the Dockerfile:
 ```dockerfile
-RUN pip install "praisonai==2.2.27" "praisonaiagents==0.0.92"
+RUN pip install "praisonai==2.2.28" "praisonaiagents==0.0.92"
 ```
 
 ## 🌐 Production Deployment
````
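After rebuilding an image against the bumped requirement, a quick runtime check can confirm that the installed package actually meets the new `>=2.2.28` floor. The snippet below is an illustration only, not part of this commit, and it assumes the release uses a plain `X.Y.Z` version string:

```python
# Illustrative only (not part of this commit): verify the installed PraisonAI
# version inside a rebuilt image satisfies the new ">=2.2.28" requirement.
from importlib.metadata import version

installed = version("praisonai")
# Assumes a plain X.Y.Z version; pre-release suffixes would need extra parsing.
major, minor, patch = (int(part) for part in installed.split(".")[:3])
assert (major, minor, patch) >= (2, 2, 28), f"praisonai {installed} is older than 2.2.28"
print(f"praisonai {installed} satisfies the >=2.2.28 requirement")
```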
Lines changed: 96 additions & 0 deletions
@@ -0,0 +1,96 @@
```python
#!/usr/bin/env python3

import sys
import traceback
from praisonaiagents import Agent, MCP

def test_agent_direct():
    """Test gpt-4o-mini (agent.py path)"""
    print("=" * 50)
    print("TESTING: gpt-4o-mini (agent.py direct calls)")
    print("=" * 50)

    try:
        agent = Agent(
            instructions="""You are a helpful assistant that can break down complex problems.
            Use the available tools when relevant to perform step-by-step analysis.""",
            llm="gpt-4o-mini",
            tools=MCP("npx -y @modelcontextprotocol/server-sequential-thinking")
        )

        print("✅ Agent created successfully")
        print(f"✅ Agent LLM: {getattr(agent, 'llm', 'Not set')}")
        print(f"✅ Agent using custom LLM: {getattr(agent, '_using_custom_llm', False)}")

        result = agent.start("What are 3 steps to make coffee?")
        print("✅ Agent execution completed successfully")
        return True, result

    except Exception as e:
        print(f"❌ Error in agent direct: {e}")
        traceback.print_exc()
        return False, str(e)

def test_llm_class():
    """Test openai/gpt-4o-mini (llm.py path)"""
    print("\n" + "=" * 50)
    print("TESTING: openai/gpt-4o-mini (llm.py LiteLLM)")
    print("=" * 50)

    try:
        agent = Agent(
            instructions="""You are a helpful assistant that can break down complex problems.
            Use the available tools when relevant to perform step-by-step analysis.""",
            llm="openai/gpt-4o-mini",
            tools=MCP("npx -y @modelcontextprotocol/server-sequential-thinking")
        )

        print("✅ Agent created successfully")
        print(f"✅ Agent LLM instance: {getattr(agent, 'llm_instance', 'Not set')}")
        print(f"✅ Agent using custom LLM: {getattr(agent, '_using_custom_llm', False)}")

        result = agent.start("What are 3 steps to make coffee?")
        print("✅ Agent execution completed successfully")
        return True, result

    except Exception as e:
        print(f"❌ Error in llm class: {e}")
        traceback.print_exc()
        return False, str(e)

if __name__ == "__main__":
    print("🔍 DEBUGGING: Comparing both LLM approaches\n")

    # Test agent direct
    success1, result1 = test_agent_direct()

    # Test LLM class
    success2, result2 = test_llm_class()

    print("\n" + "=" * 50)
    print("FINAL RESULTS")
    print("=" * 50)

    if success1:
        print("✅ gpt-4o-mini (agent.py) - SUCCESS")
    else:
        print("❌ gpt-4o-mini (agent.py) - FAILED")
        print(f"   Error: {result1}")

    if success2:
        print("✅ openai/gpt-4o-mini (llm.py) - SUCCESS")
    else:
        print("❌ openai/gpt-4o-mini (llm.py) - FAILED")
        print(f"   Error: {result2}")

    if success1 and success2:
        print("\n🎉 BOTH FORMATS WORK CORRECTLY!")
        print("📝 The issue mentioned might be resolved or was a different problem.")
    elif success1 and not success2:
        print("\n⚠️ CONFIRMED: LLM class path has issues")
        print("📝 Need to debug the LLM class implementation")
    elif success2 and not success1:
        print("\n⚠️ CONFIRMED: Agent direct path has issues")
        print("📝 Need to debug the agent direct implementation")
    else:
        print("\n💥 BOTH PATHS FAILED - Something is fundamentally wrong")
```
