import os
- from autogen import AssistantAgent
+ from openai import OpenAI
import logging
import utils
- logger = logging.getLogger(__name__)
-
"""
About models:
- Gemma 2 does not support a system role
-
- config_list:
- - {"price": [prompt_price_per_1k, completion_token_price_per_1k]}
-
- Todo:
- - Investigate why the 'system message' does not work well with xinference + Gemma 2 + AutoGen
"""
+
LLM_MODEL_NAME = os.environ.get("LLM_MODEL_NAME") or "google/gemma-2-27b-it"
- config_list_local = [
-     # set prices, otherwise there will be warnings
-     {"model": LLM_MODEL_NAME, "base_url": os.environ.get("OLLAMA_BASE_URL") + "/v1", "tags": ["gemma", "local"], "price": [0, 0]},
- ]

- llm_config = {"config_list": config_list_local}
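+ # Works against any OpenAI-compatible endpoint (e.g. Ollama or xinference);
+ # local servers generally ignore the API key, but the client requires a non-empty string.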
+ llm_client = OpenAI(
+     base_url=os.environ.get("OPENAI_BASE_URL"),
+     api_key="token",
+ )
+
+ def get_llm_reply(prompt):
+     completion = llm_client.chat.completions.create(
+         model=LLM_MODEL_NAME,
+         messages=[
+             {"role": "user", "content": prompt}
+         ]
+     )
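+     # message.content may be None on an empty choice; callers below assume a plain-text reply.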
+     return completion.choices[0].message.content

"""
Get list of statements from input.
@@ -33,21 +34,14 @@ def get_statements(input):
Extract key facts from the given content.
Provide a list of the facts in array format as response only.'''

-     statement_extract_agent = AssistantAgent(
-         name="statement_extract_agent",
-         system_message='',
-         llm_config=llm_config,
-         human_input_mode="NEVER",
-     )
-
-     content = f'''{system_message}
+     prompt = f'''{system_message}
```
Content:
{input}
```'''

-     reply = statement_extract_agent.generate_reply(messages=[{"content": content, "role": "user"}])
-     logger.debug(f"get_statements LLM reply: {reply}")
+     reply = get_llm_reply(prompt)
+     logging.debug(f"get_statements LLM reply: {reply}")
    return utils.llm2list(reply)

"""
@@ -59,19 +53,12 @@ def get_search_keywords(statement):
Generate search keyword used for fact check on the given statement.
Include only the keyword in your response.'''

-     search_keywords_agent = AssistantAgent(
-         name="search_keywords_agent",
-         system_message='',
-         llm_config=llm_config,
-         human_input_mode="NEVER",
-     )
-
-     content = f'''{system_message}
+     prompt = f'''{system_message}
```
Statement:
{statement}
```'''
-     reply = search_keywords_agent.generate_reply(messages=[{"content": content, "role": "user"}])
+     reply = get_llm_reply(prompt)
    return reply.strip()

def get_verdict(statement, contexts):
@@ -85,14 +72,7 @@ def get_verdict(statement, contexts):
Be thorough in your explanations, avoiding any duplication of information.
Provide the response as JSON with the structure:{verdict, reason}'''

-     fact_check_agent = AssistantAgent(
-         name="fact_check_agent",
-         system_message='',
-         llm_config=llm_config,
-         human_input_mode="NEVER",
-     )
-
-     content = f'''{system_message}
+     prompt = f'''{system_message}
```
Statement:
{statement}
@@ -103,13 +83,13 @@ def get_verdict(statement, contexts):
        _text = node.get('text')
        if not _text:
            continue
-         content = f"""{content}
+         prompt = f"""{prompt}
```
Context {ind + 1}:
{_text}
```"""

-     reply = fact_check_agent.generate_reply(messages=[{"content": content, "role": "user"}])
+     reply = get_llm_reply(prompt)
    verdict = utils.llm2json(reply)
    if verdict:
        verdict['statement'] = statement