
Commit 0039161

First import
committed
1 parent bc54be6 commit 0039161

File tree

1 file changed: +230 -0 lines changed

main.py (+230)
@@ -0,0 +1,230 @@
import json
import os
import uuid

import openai
from langchain.embeddings import HuggingFaceEmbeddings
from langchain.vectorstores import Chroma

# Models are configurable via environment variables; "functions" is the
# default alias for the function-calling model.
FUNCTIONS_MODEL = os.environ.get("FUNCTIONS_MODEL", "functions")
LLM_MODEL = os.environ.get("LLM_MODEL", "gpt-4")

# Sentence-transformers embeddings used to index and retrieve memories.
embeddings = HuggingFaceEmbeddings(model_name="all-MiniLM-L6-v2")

# Chroma vector store persisted under ./db, acting as the agent's long-term memory.
chroma_client = Chroma(collection_name="memories", persist_directory="db", embedding_function=embeddings)
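
# NOTE: this code targets the pre-1.0 `openai` Python SDK (openai.ChatCompletion).
# If the models above are served by a local OpenAI-compatible endpoint rather
# than OpenAI itself, openai.api_base and openai.api_key would need to be set
# accordingly; neither is configured in this commit.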

def needs_to_do_action(user_input):
    """Ask the functions model which action, if any, the request calls for."""
    messages = [
        {"role": "user",
         "content": f"""Transcript of AI assistant responding to user requests. Replies with the action to perform, including the reasoning, and a confidence score from 0 to 100.

Request: {user_input}
Function call: """
         }
    ]
    functions = [
        {
            "name": "intent",
            "description": "Decide to do an action.",
            "parameters": {
                "type": "object",
                "properties": {
                    "confidence": {
                        "type": "number",
                        "description": "confidence of the action"
                    },
                    "action": {
                        "type": "string",
                        "enum": ["save_memory", "search_memory", "reply"],
                        "description": "user intent"
                    },
                    "reasoning": {
                        "type": "string",
                        "description": "reasoning behind the intent"
                    },
                },
                "required": ["action"]
            }
        },
    ]
    response = openai.ChatCompletion.create(
        model=FUNCTIONS_MODEL,
        messages=messages,
        functions=functions,
        max_tokens=200,
        stop=None,
        temperature=0.5,
        # Force the model to call the "intent" function instead of replying freely.
        function_call={"name": "intent"},
    )
    response_message = response["choices"][0]["message"]
    if response_message.get("function_call"):
        function_name = response_message["function_call"]["name"]
        # The function arguments come back as a JSON-encoded string.
        function_parameters = response_message["function_call"]["arguments"]
        res = json.loads(function_parameters)
        print(">>> function name: " + function_name)
        print(">>> function parameters: " + function_parameters)
        return res["action"]
    return "reply"

### Agent capabilities
def save(memory):
    """Store a piece of text in the vector store, tagged with a random id."""
    print(">>> saving to memories: ")
    print(memory)
    chroma_client.add_texts([memory], [{"id": str(uuid.uuid4())}])
    chroma_client.persist()
    return "saved to memory"

def search(query):
    """Return the stored memories most similar to the query."""
    res = chroma_client.similarity_search(query)
    print(">>> query: ")
    print(query)
    print(">>> retrieved memories: ")
    print(res)
    return res

def process_functions(user_input, action=""):
    """Execute the requested capability, then ask the LLM to phrase a reply."""
    messages = [
        {"role": "user",
         "content": f"""Transcript of AI assistant responding to user requests.

Request: {user_input}
Function call: """
         }
    ]
    response = get_completion(messages, action=action)
    response_message = response["choices"][0]["message"]
    if response_message.get("function_call"):
        function_name = response_message["function_call"]["name"]
        function_parameters = response_message["function_call"]["arguments"]

        available_functions = {
            "save_memory": save,
            "search_memory": search,
        }

        function_to_call = available_functions[function_name]
        # The arguments arrive as a JSON-encoded string; decode them and pass
        # the single expected value ("string" or "query") to the capability.
        function_args = json.loads(function_parameters)
        function_result = function_to_call(*function_args.values())
        # Rebuild the conversation: the user request, the assistant's function
        # call, and the function result, then let the LLM produce the reply.
        messages = [
            {
                "role": "user",
                "content": user_input,
            }
        ]
        messages.append(
            {
                "role": "assistant",
                "content": None,
                "function_call": {"name": function_name, "arguments": function_parameters},
            }
        )
        messages.append(
            {
                "role": "function",
                "name": function_name,
                "content": f'{{"result": {str(function_result)}}}'
            }
        )
        response = openai.ChatCompletion.create(
            model=LLM_MODEL,
            messages=messages,
            max_tokens=200,
            stop=None,
            temperature=0.5,
        )
        messages.append(
            {
                "role": "assistant",
                "content": response.choices[0].message["content"],
            }
        )
    return messages

def get_completion(messages, action=""):
    """Call the functions model; optionally force a specific function call."""
    function_call = "auto"
    if action != "":
        function_call = {"name": action}
    print("==> function_call: ")
    print(function_call)
    functions = [
        {
            "name": "save_memory",
            "description": "Save or store information into memory.",
            "parameters": {
                "type": "object",
                "properties": {
                    "string": {
                        "type": "string",
                        "description": "information to save"
                    },
                },
                "required": ["string"]
            }
        },
        {
            "name": "search_memory",
            "description": "Search in memory",
            "parameters": {
                "type": "object",
                "properties": {
                    "query": {
                        "type": "string",
                        "description": "the query used to search for information"
                    },
                },
                "required": ["query"]
            }
        },
    ]
    response = openai.ChatCompletion.create(
        model=FUNCTIONS_MODEL,
        messages=messages,
        functions=functions,
        max_tokens=200,
        stop=None,
        # Low temperature keeps the generated function arguments deterministic.
        temperature=0.1,
        function_call=function_call
    )

    return response

conversation_history = []
while True:
    user_input = input("> ")
    action = needs_to_do_action(user_input)
    if action != "reply":
        print("==> needs to do action: " + action)
        responses = process_functions(user_input, action=action)
        # add the responses to the conversation history by extending the list
        conversation_history.extend(responses)
        # print the latest response from the conversation history
        print(conversation_history[-1])
    else:
        print("==> no action needed")
        # construct the message and add it to the conversation history
        message = {"role": "user", "content": user_input}
        conversation_history.append(message)

        # get the response from the model
        response = openai.ChatCompletion.create(
            model=LLM_MODEL,
            messages=conversation_history,
            max_tokens=200,
            stop=None,
            temperature=0.5,
        )
        # add the response to the conversation history by appending it to the list
        conversation_history.append({"role": "assistant", "content": response.choices[0].message["content"]})
        # print the latest response from the conversation history
        print(conversation_history[-1])
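
For reference, the parsing in needs_to_do_action and process_functions relies on the function-calling response shape of the pre-1.0 openai Python SDK. A minimal sketch of the structure the code expects back from the API; the argument values shown here are illustrative, not part of this commit:

    response = {
        "choices": [
            {
                "message": {
                    "role": "assistant",
                    "content": None,
                    "function_call": {
                        # name of the function schema the model chose to call
                        "name": "intent",
                        # arguments is a JSON-encoded string, hence json.loads() above
                        "arguments": '{"action": "save_memory", "confidence": 90, "reasoning": "the user asked to store a fact"}',
                    },
                }
            }
        ]
    }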
