# Copyright (c) Microsoft. All rights reserved.

import asyncio
from typing import Annotated

from semantic_kernel.agents import AzureResponsesAgent
from semantic_kernel.functions import kernel_function

"""
The following sample demonstrates how to create an Azure OpenAI Responses Agent.
The sample shows how to have the agent answer questions about the sample menu
with streaming responses.

The interaction with the agent is via the `invoke_stream` method, which sends a
user input to the agent and streams the response back chunk by chunk. The
conversation history is maintained by the agent service, i.e. the responses are
automatically associated with the thread. Therefore, client code does not need
to maintain the conversation history.
"""


# Define a sample plugin for the sample
class MenuPlugin:
    """A sample Menu Plugin used for the concept sample."""

    @kernel_function(description="Provides a list of specials from the menu.")
    def get_specials(self) -> Annotated[str, "Returns the specials from the menu."]:
        return """
        Special Soup: Clam Chowder
        Special Salad: Cobb Salad
        Special Drink: Chai Tea
        """

    @kernel_function(description="Provides the price of the requested menu item.")
    def get_item_price(
        self, menu_item: Annotated[str, "The name of the menu item."]
    ) -> Annotated[str, "Returns the price of the menu item."]:
        return "$9.99"


# Simulate a conversation with the agent
USER_INPUTS = [
    "Hello",
    "What is the special soup?",
    "What is the special drink?",
    "How much is it?",
    "Thank you",
]


async def main():
    # 1. Create the client using Azure OpenAI resources and configuration
    client, model = AzureResponsesAgent.setup_resources()

    # 2. Create a Semantic Kernel agent for the OpenAI Responses API
    agent = AzureResponsesAgent(
        ai_model_id=model,
        client=client,
        instructions="Answer questions about the menu.",
        name="Host",
        plugins=[MenuPlugin()],
    )

    # 3. Create a thread for the agent
    # If no thread is provided, a new thread will be
    # created and returned with the initial response
    thread = None

    for user_input in USER_INPUTS:
        print(f"# User: '{user_input}'")
        first_chunk = True
        # 4. Invoke the agent for the current message and print the response
        async for response in agent.invoke_stream(messages=user_input, thread=thread):
            thread = response.thread
            if first_chunk:
                print(f"# {response.name}: ", end="", flush=True)
                first_chunk = False
            print(response.content, end="", flush=True)
        print()
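
    # 5. (Optional) Clean up the thread once the conversation is finished.
    # Assumption: the thread object returned by the service exposes an async
    # `delete()` method, as other Semantic Kernel agent samples show.
    if thread:
        await thread.delete()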
"""
Sample Output:
# AuthorRole.USER: 'Hello'
# Host: Hi there! How can I assist you with the menu today?
# AuthorRole.USER: 'What is the special soup?'
# Host: The special soup is Clam Chowder.
# AuthorRole.USER: 'What is the special drink?'
# Host: The special drink is Chai Tea.
# AuthorRole.USER: 'How much is that?'
# Host: The Chai Tea is $9.99. Would you like to know more about the menu?
# AuthorRole.USER: 'Thank you'
# Host: You're welcome! If you have any questions about the menu or need assistance, feel free to ask.
"""


if __name__ == "__main__":
    asyncio.run(main())
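
# For comparison, a non-streaming version of the same exchange could look
# roughly like this sketch (assuming `get_response` accepts the same
# `messages`/`thread` arguments, as Semantic Kernel's non-streaming Responses
# Agent samples show):
#
#     response = await agent.get_response(messages=user_input, thread=thread)
#     print(f"# {response.name}: {response.content}")
#     thread = response.thread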