Skip to content

Commit 73982c0

Browse files
committed
adding tests
1 parent f3039c5 commit 73982c0

12 files changed

+266
-12
lines changed

.gitignore

+1
Original file line numberDiff line numberDiff line change
@@ -24,6 +24,7 @@ wheels/
2424
venv/
2525
env/
2626
ENV/
27+
.venv/
2728

2829
# IDE
2930
.idea/

README.md

+23-9
Original file line numberDiff line numberDiff line change
@@ -4,16 +4,27 @@
44
[![License: MIT](https://img.shields.io/badge/License-MIT-yellow.svg)](https://opensource.org/licenses/MIT)
55
[![Python Versions](https://img.shields.io/pypi/pyversions/asktheapi-team-builder.svg)](https://pypi.org/project/asktheapi-team-builder/)
66

7-
A high-level Python library for easily building and managing autonomous agents networks that solve complex tasks using APIs defined with OpenAPI standard format. This package provides a clean, type-safe interface for creating, configuring, and running teams of agents that can work together to solve complex tasks.
7+
A high-level Python library for building and managing networks of autonomous agents that collaborate to solve complex tasks. It’s designed to work seamlessly with APIs defined using the OpenAPI standard. The library provides a clean, type-safe interface for creating, configuring, and running teams of agents, making it easy to orchestrate multi-agent workflows with minimal boilerplate.
88

99
## Features
1010

11-
- 🚀 Easy creation of agent networks with custom tools and capabilities based on openAPI specification
12-
- 🤝 Team building with automatic coordination through a planning agent
13-
- 📡 Support for streaming agent interactions
14-
- 🔧 Built-in HTTP client for tool implementation
15-
- ✨ Pydantic models for type safety and validation
16-
- 🎯 Clean, intuitive API design
11+
- 🚀 Effortless Agent Network Creation
12+
Quickly build agent networks with custom tools and capabilities based on OpenAPI specifications.
13+
14+
- 🤝 Team-Based Collaboration
15+
Easily define agent teams with automatic coordination handled by a built-in planning agent.
16+
17+
- 📡 Streaming Interactions
18+
Stream agent communication in real-time for more dynamic and responsive workflows.
19+
20+
- 🔧 Built-in HTTP Client
21+
Simplify tool implementation with an integrated HTTP client ready to call external APIs.
22+
23+
- ✨ Type Safety with Pydantic
24+
Leverage Pydantic models for robust data validation and clear type definitions.
25+
26+
- 🎯 Clean and Intuitive API
27+
Designed for developers—minimal boilerplate, maximum clarity.
1728

1829
## Installation
1930

@@ -53,8 +64,10 @@ async def create_agents_from_spec():
5364
agents.append(agent_result)
5465

5566
return agents
67+
```
5668

57-
# 3. Build and run a team
69+
# 2. Build and run a team
70+
```python
5871
async def run_agent_team(agents: List[Agent], query: str):
5972
# Initialize team builder
6073
team_builder = TeamBuilder(
@@ -77,8 +90,9 @@ async def run_agent_team(agents: List[Agent], query: str):
7790
async for event in team_builder.run_team(team, messages, stream=True):
7891
if isinstance(event, ChatMessage):
7992
print(f"{event.source}: {event.content}")
80-
93+
```
8194
# Example usage
95+
```python
8296
async def main():
8397
# Create agents from spec
8498
api_agents = await create_agents_from_spec()

pyproject.toml

+1-1
Original file line numberDiff line numberDiff line change
@@ -8,7 +8,7 @@ package-dir = {"" = "src"}
88

99
[project]
1010
name = "asktheapi-team-builder"
11-
version = "0.2.1"
11+
version = "0.2.2"
1212
authors = [
1313
{ name = "Alex Albala" },
1414
]

pytest.ini

+7
Original file line numberDiff line numberDiff line change
@@ -0,0 +1,7 @@
1+
[pytest]
2+
asyncio_mode = auto
3+
testpaths = tests
4+
python_files = test_*.py
5+
python_classes = Test*
6+
python_functions = test_*
7+
addopts = -v --tb=short

setup.cfg

+7
Original file line numberDiff line numberDiff line change
@@ -0,0 +1,7 @@
1+
[metadata]
2+
name = asktheapi-team-builder
3+
version = 0.2.2
4+
5+
[options]
6+
packages = find:
7+

src/asktheapi_team_builder/prompts/__init__.py

Whitespace-only changes.

src/asktheapi_team_builder/services/__init__.py

Whitespace-only changes.
Original file line numberDiff line numberDiff line change
@@ -1,4 +1,4 @@
1-
from services.open_ai_service import OpenAIService
1+
from asktheapi_team_builder.services.open_ai_service import OpenAIService
22

33

44
class LLMService():
@@ -7,4 +7,5 @@ def __init__(self, openai_service: OpenAIService, llm_headers: dict = {}):
77
self.llm_headers = llm_headers
88

99
async def chat_completion(self, model, messages, stream):
10-
return await self.openai_service.completion_with_headers(model, messages, stream, self.llm_headers)
10+
return await self.openai_service.completion_with_headers(
11+
model=model, messages=messages, stream=stream, headers=self.llm_headers)

tests/conftest.py

+76
Original file line numberDiff line numberDiff line change
@@ -0,0 +1,76 @@
1+
import os
2+
from autogen_core import FunctionCall
3+
from autogen_core.models import FunctionExecutionResult
4+
import pytest
5+
from unittest.mock import AsyncMock, MagicMock, patch
6+
from asktheapi_team_builder.services.open_ai_service import OpenAIService
7+
from asktheapi_team_builder.services.llm_service import LLMService
8+
from asktheapi_team_builder.services.agent_evaluator_service import AgentEvaluatorService, AgentDTO, AgentToolDTO, TaskResult, TextMessage, ToolCallExecutionEvent, ToolCallRequestEvent
9+
10+
@pytest.fixture(autouse=True)
def setenvvar(monkeypatch):
    """Run every test against an isolated environment with mock OpenAI credentials.

    ``patch.dict(..., clear=True)`` empties ``os.environ`` for the duration of
    the test; the two variables the services read are then re-added via
    ``monkeypatch`` so they are restored automatically afterwards.
    """
    with patch.dict(os.environ, clear=True):
        monkeypatch.setenv("OPENAI_API_KEY", "mock-api-key")
        monkeypatch.setenv("OPENAI_BASE_URL", "mock-base-url")
        yield
20+
21+
@pytest.fixture
def mock_openai_service():
    """AsyncMock of OpenAIService whose completion mimics a real chat response.

    The mocked message ``content`` is a plain JSON string — not a list — so it
    matches how completions are stubbed elsewhere in the suite and would
    survive a ``json.loads`` on the content.
    """
    service = AsyncMock(spec=OpenAIService)
    service.completion_with_headers.return_value = MagicMock(
        choices=[MagicMock(message=MagicMock(content='{"evaluation": []}'))]
    )
    return service
28+
29+
@pytest.fixture
def mock_llm_service():
    """Fully-mocked LLMService; each test configures its own return values."""
    mocked_llm = AsyncMock(spec=LLMService)
    return mocked_llm
32+
33+
@pytest.fixture
def agent_evaluator_service(mock_llm_service):
    """A real AgentEvaluatorService with its LLM dependency replaced by a mock."""
    evaluator = AgentEvaluatorService()
    # Inject the mock after construction; tests drive chat_completion through it.
    evaluator.llm_service = mock_llm_service
    return evaluator
38+
39+
@pytest.fixture
def sample_agent_tool():
    """Minimal AgentToolDTO used as the single tool of the sample agent."""
    tool_fields = {
        "id": "tool1",
        "name": "test_tool",
        "description": "A test tool",
        "method": "GET",
        "path": "/test",
        "jsonschema": {},
        "auto_updated": False,
    }
    return AgentToolDTO(**tool_fields)
50+
51+
@pytest.fixture
def sample_agent(sample_agent_tool):
    """AgentDTO named ``test_agent`` carrying the sample tool fixture."""
    agent_fields = {
        "id": "agent1",
        "name": "test_agent",
        "system_prompt": "You are a test agent",
        "description": "A test agent",
        "base_url": "http://test.com",
        "tools": [sample_agent_tool],
        "apispec_id": "spec1",
        "auto_updated": False,
    }
    return AgentDTO(**agent_fields)
63+
64+
@pytest.fixture
def sample_task_result():
    """TaskResult holding a text message plus a matching tool call/execution pair.

    The execution event reuses the request's ``call1`` id with
    ``is_error=False``, i.e. an error-free transcript by default.
    """
    text = TextMessage(content="Test message", source="test_agent")
    request = ToolCallRequestEvent(
        content=[FunctionCall(name="test_function", arguments="{}", id="call1")],
        source="test_agent",
    )
    execution = ToolCallExecutionEvent(
        content=[
            FunctionExecutionResult(
                call_id="call1", is_error=False, content="Success", name="test_function"
            )
        ],
        source="test_agent",
    )
    return TaskResult(messages=[text, request, execution])
Original file line numberDiff line numberDiff line change
@@ -0,0 +1,62 @@
1+
import pytest
2+
from asktheapi_team_builder.services.agent_evaluator_service import AgentEvaluatorService
3+
from asktheapi_team_builder.types import TextMessage, ToolCallExecutionEvent
4+
from autogen_core.models import FunctionExecutionResult
5+
from unittest.mock import AsyncMock, MagicMock
6+
7+
@pytest.mark.asyncio
async def test_needs_evaluation_task_result_no_errors(agent_evaluator_service, sample_task_result):
    """A transcript without any error content should not require evaluation."""
    needs_eval = agent_evaluator_service.needs_evaluation_task_result(sample_task_result)

    assert needs_eval is False
14+
15+
@pytest.mark.asyncio
async def test_needs_evaluation_task_result_with_text_error(agent_evaluator_service, sample_task_result):
    """Appending an error-mentioning text message should trigger evaluation."""
    sample_task_result.messages.append(
        TextMessage(content="An error occurred", source="test_agent")
    )

    needs_eval = agent_evaluator_service.needs_evaluation_task_result(sample_task_result)

    assert needs_eval is True
26+
27+
@pytest.mark.asyncio
async def test_needs_evaluation_task_result_with_tool_error(agent_evaluator_service, sample_task_result):
    """A failed tool execution (``is_error=True``) should trigger evaluation."""
    failed_execution = ToolCallExecutionEvent(
        content=[
            FunctionExecutionResult(
                call_id="call1", is_error=True, content="Error ocurred", name="test_function"
            )
        ],
        source="test_agent",
    )
    sample_task_result.messages.append(failed_execution)

    needs_eval = agent_evaluator_service.needs_evaluation_task_result(sample_task_result)

    assert needs_eval is True
40+
41+
@pytest.mark.asyncio
async def test_evaluate_task_result(agent_evaluator_service, sample_agent, sample_task_result, mock_llm_service):
    """evaluate_task_result should invoke the LLM exactly once with the agent in the prompt."""
    # Arrange: the awaited result of chat_completion is a plain response object,
    # so stub it with MagicMock (AsyncMock here would wrongly imply the result
    # itself is awaitable), matching the other completion stubs in the suite.
    mock_llm_service.chat_completion.return_value = MagicMock(
        choices=[MagicMock(message=MagicMock(content='{"evaluation": []}'))]
    )

    # Act
    result = await agent_evaluator_service.evaluate_task_result([sample_agent], sample_task_result)

    # Assert
    assert result is not None
    mock_llm_service.chat_completion.assert_called_once()
    # The agent under evaluation must appear in the prompt sent to the LLM.
    assert "test_agent" in str(mock_llm_service.chat_completion.call_args)
55+
56+
@pytest.mark.asyncio
async def test_evaluate_task_result_error_handling(agent_evaluator_service, sample_agent, sample_task_result, mock_llm_service):
    """An LLM failure must not propagate out of evaluate_task_result.

    The bare ``await`` (no ``pytest.raises``) is the assertion that the
    exception is handled internally; we additionally check the failing call
    was actually attempted so the test cannot pass vacuously.
    """
    # Arrange
    mock_llm_service.chat_completion.side_effect = Exception("Evaluation error")

    # Act & Assert: must not raise.
    await agent_evaluator_service.evaluate_task_result([sample_agent], sample_task_result)

    # The failing LLM call was actually made.
    mock_llm_service.chat_completion.assert_called_once()

tests/services/test_llm_service.py

+44
Original file line numberDiff line numberDiff line change
@@ -0,0 +1,44 @@
1+
import pytest
2+
from asktheapi_team_builder.services.llm_service import LLMService
3+
from unittest.mock import AsyncMock
4+
5+
@pytest.fixture
def llm_service(mock_openai_service):
    """LLMService wired to the shared mocked OpenAI service, with default headers."""
    service_under_test = LLMService(mock_openai_service)
    return service_under_test
8+
9+
@pytest.mark.asyncio
async def test_chat_completion(llm_service, mock_openai_service):
    """chat_completion forwards model/messages/stream plus the empty default headers."""
    # Arrange
    chat_messages = [{"role": "user", "content": "test"}]

    # Act
    result = await llm_service.chat_completion("gpt-4", chat_messages, False)

    # Assert: everything is forwarded as keyword arguments, headers default to {}.
    mock_openai_service.completion_with_headers.assert_called_once_with(
        model="gpt-4",
        messages=chat_messages,
        stream=False,
        headers={},
    )
    assert result is not None
27+
28+
@pytest.mark.asyncio
async def test_chat_completion_with_custom_headers():
    """Headers supplied at construction time are forwarded on every completion call."""
    # Arrange
    custom_headers = {"custom-header": "value"}
    openai_mock = AsyncMock()
    service = LLMService(openai_mock, custom_headers)

    # Act
    await service.chat_completion("gpt-4", [], False)

    # Assert
    openai_mock.completion_with_headers.assert_called_once_with(
        model="gpt-4",
        messages=[],
        stream=False,
        headers=custom_headers,
    )
+42
Original file line numberDiff line numberDiff line change
@@ -0,0 +1,42 @@
1+
from openai import AsyncOpenAI
2+
import pytest
3+
from unittest.mock import MagicMock, patch, AsyncMock
4+
from asktheapi_team_builder.services.open_ai_service import OpenAIService
5+
6+
@pytest.fixture()
def openai_service():
    """OpenAIService whose chat-completions client is mocked out.

    A fresh AsyncOpenAI client is assigned (credentials come from the mock env
    vars set in conftest), then only ``chat.completions`` is replaced with an
    AsyncMock — that is the sole surface these tests exercise.
    """
    service = OpenAIService()
    service.client = AsyncOpenAI()
    service.client.chat.completions = AsyncMock()
    return service
12+
13+
@pytest.mark.asyncio
async def test_completion_with_headers(openai_service):
    """completion_with_headers passes extra headers and forces JSON responses."""
    # Arrange
    request_messages = [{"role": "user", "content": "test"}]
    request_headers = {"test-header": "value"}

    # Act
    result = await openai_service.completion_with_headers(
        "gpt-4", request_messages, False, request_headers
    )

    # Assert: headers ride along as extra_headers and the response format is
    # pinned to JSON objects.
    create_mock = openai_service.client.chat.completions.create
    create_mock.assert_called_once_with(
        model="gpt-4",
        messages=request_messages,
        stream=False,
        extra_headers=request_headers,
        response_format={'type': 'json_object'}
    )
    assert result is not None
33+
34+
@pytest.mark.asyncio
async def test_completion_with_headers_error_handling(openai_service):
    """Client errors propagate unchanged out of completion_with_headers."""
    # Arrange
    openai_service.client.chat.completions.create.side_effect = Exception("API Error")

    # Act & Assert: the exception surfaces to the caller with its message intact.
    with pytest.raises(Exception) as exc_info:
        await openai_service.completion_with_headers("gpt-4", [], False, {})
    assert str(exc_info.value) == "API Error"

0 commit comments

Comments
 (0)