[Eval,Arch] Update GPTQ eval and add headless_mode for Controller #2994
@@ -1,6 +1,6 @@
"""Overview:
This code implements the evaluation of agents on the GPQA Benchmark with Open Book setting.
- The benchmark consists of 448 high-quality and extremely difficult multiple-choice questions in the domains of biology, physics, and chemistry. The questions are intentionally designed to be "Google-proof," meaning that even highly skilled non-expert validators achieve only 34% accuracy despite unrestricted access to the web.
- The benchmark consists of 448 high-quality and extremely difficult multiple-choice questions in the domains of biology, physics, and chemistry. The questions are intentionally designed to be "Google-proof," meaning that even highly skilled non-experts validators achieve only 34% accuracy despite unrestricted access to the web.
- Even experts in the corresponding domains achieve only 65% accuracy.
- State-of-the-art AI systems achieve only 39% accuracy on this challenging dataset.
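Since the eval code below keys off specific instance fields (`question`, `choices`, `correct_solution`), an illustrative shape of one preprocessed record may help. The literal values here are invented; only the field names come from the diff:

```python
# Illustrative GPQA instance after preprocessing (values are invented; field names
# mirror the ones the eval script reads: question, choices, correct_solution, etc.).
example_instance = {
    'task_id': 0,
    'instance_id': 0,
    'question': 'Which quantum number determines the shape of an atomic orbital?',
    'choices': [
        'Principal quantum number n',
        'Azimuthal quantum number l',
        'Magnetic quantum number m_l',
        'Spin quantum number m_s',
    ],
    'correct_solution': 'B',  # letter corresponding to the correct choice
}
```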
@@ -22,6 +22,7 @@
import pathlib
import random
import re
from typing import Callable

import pandas as pd
from datasets import load_dataset

@@ -30,7 +31,6 @@
    EvalMetadata,
    codeact_user_response,
    make_metadata,
    monologue_user_response,
    prepare_dataset,
    run_evaluation,
)
@@ -40,52 +40,78 @@
from opendevin.core.logger import get_console_handler
from opendevin.core.logger import opendevin_logger as logger
from opendevin.core.main import run_agent_controller
from opendevin.events.action import Action, AgentFinishAction, MessageAction
from opendevin.events.observation import Observation
from opendevin.llm.llm import LLM

AGENT_CLS_TO_FAKE_USER_RESPONSE_FN = {
    'CodeActAgent': codeact_user_response,
    'MonologueAgent': monologue_user_response,
}

def gpqa_codeact_user_response(
    state: State,
    encapsulate_solution: bool = False,
    try_parse: Callable[[Action], str] | None = None,
) -> str:
    msg = (
        'Please continue working on the task on whatever approach you think is suitable.\n'
        'Feel free to use all tools for calculations and solving the problem, and web-search for finding relevant facts during the process if needed\n'
        'If you have finished reporting the answer in the expected format, (and only once that is done), please run the following command to submit: <execute_bash> exit </execute_bash>.\n'
        'Again you are being told a million times to first report the answer in the requested format (see again below for reference) before exiting. DO NOT EXIT WITHOUT REPORTING THE ANSWER FIRST.\n'
        'That is, when you have decided on the answer report in the following format:\n'
        '<<FINAL_ANSWER||\n'
        '<insert correct answer here, must be one of A, B, C, D> (Please dont use any additional characters. Just the letter of the correct answer (A/B/C/D).)\n'
        '||FINAL_ANSWER>>\n'
        '<execute_bash> exit </execute_bash>\n'
        'IMPORTANT: YOU SHOULD NEVER ASK FOR HUMAN HELP TO SOLVE THIS TASK.\n'
    )

    return msg


AGENT_CLS_TO_FAKE_USER_RESPONSE_FN = {'CodeActAgent': codeact_user_response}

AGENT_CLS_TO_INST_SUFFIX = {
    'CodeActAgent': '\n\n SUPER IMPORTANT: When you think you have solved the question, first report it back to the user in the requested format. Only once that is done, in the next turn, please run the following command: <execute_bash> exit </execute_bash>.\n'
}
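For context, the `AGENT_CLS_TO_FAKE_USER_RESPONSE_FN` mapping is what lets the benchmark run with no human in the loop: whenever the agent addresses the user, the harness replies with the canned nudge registered for that agent class. A minimal, self-contained sketch of that dispatch, with illustrative names (`reply_for` and the abridged message are not OpenDevin APIs):

```python
# Minimal sketch of fake-user-response dispatch in a headless eval loop.
from typing import Any, Callable

def gpqa_reply(state: Any) -> str:
    # Abridged stand-in for gpqa_codeact_user_response above.
    return (
        'Please continue working on the task.\n'
        'Report the answer as <<FINAL_ANSWER|| A/B/C/D ||FINAL_ANSWER>> before running exit.'
    )

RESPONSE_FNS: dict[str, Callable[[Any], str]] = {'CodeActAgent': gpqa_reply}

def reply_for(agent_cls_name: str, state: Any) -> str | None:
    """Return the canned user reply for this agent class, or None if unsupported."""
    fn = RESPONSE_FNS.get(agent_cls_name)
    return fn(state) if fn else None

print(reply_for('CodeActAgent', state=None))
```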
def parse_final_answer(final_answer: str) -> str:
def parse_final_answer(final_answer: str | None) -> str | None:
    """Parse the final answer from the final message generated by the agent
    to extract the final answer. The final answer is usually enclosed in the format:
    <<FINAL_ANSWER||
    <insert correct answer here>
    ||FINAL_ANSWER>>
    """
    # to do this first extract the part enclosed in the format <<FINAL_ANSWER|| ... ||FINAL_ANSWER>>
    pattern = re.compile(r'<<FINAL_ANSWER\|\|(.*?)\|\|FINAL_ANSWER>>', re.DOTALL)
    match = pattern.search(final_answer)

    if match:
        return match.group(1).strip()
    else:
        return 'No final answer found in the provided string.'
    # and then strip it, remove any leading/trailing spaces line breaks etc.
    answer = match.group(1).strip()
    # finally capitalize it
    answer = answer.upper()
    # and then return A, B, C, D depending on whether the answer A, B, C, D is found in the final answer
    for letter in ['A', 'B', 'C', 'D']:
        if letter in answer:
            return letter
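To see what the rewritten parser accepts, here is a standalone re-implementation of the same regex-plus-letter-scan logic, with an explicit guard for the no-match case added for clarity; it can be run without the rest of the harness:

```python
import re

def _parse(final_answer: str) -> str | None:
    """Standalone copy of the new parsing logic, for illustration only."""
    match = re.search(r'<<FINAL_ANSWER\|\|(.*?)\|\|FINAL_ANSWER>>', final_answer, re.DOTALL)
    if not match:
        return None
    answer = match.group(1).strip().upper()
    # The scan returns the first of A/B/C/D found in the extracted text,
    # so the agent is expected to report only the bare letter.
    for letter in ['A', 'B', 'C', 'D']:
        if letter in answer:
            return letter
    return None

assert _parse('<<FINAL_ANSWER||\nB\n||FINAL_ANSWER>>') == 'B'
assert _parse('<<FINAL_ANSWER|| C ||FINAL_ANSWER>>') == 'C'
assert _parse('no structured answer here') is None
```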
def compare_answers(predicted_answer, ground_truth):
def compare_answers(model_output: str | None, ground_truth: str):
    """Compare the predicted answer with the ground truth answer"""
    try:
        # parse the final answer from model output
        predicted_answer = parse_final_answer(model_output)
    except Exception as e:
        # Log the exception
        logger.error(f'An error occurred: {e}\n defaulting to random guess ...')
        # choose a random answer if the model output is not in the correct format
        predicted_answer = random.choice(['A', 'B', 'C', 'D'])

    logger.info('#############################################')
    logger.info(f'Predicted answer: {predicted_answer}')
    logger.info(f'Ground truth answer: {ground_truth}')
    logger.info('#############################################')
    return predicted_answer == ground_truth


def get_test_result(model_output, ground_truth):
    """Implements the evaluation logic for GPQA
    Checks if the output of a given instance is correct (as per the ground truth)
    """
    # parse the final answer from model output
    predicted_answer = parse_final_answer(model_output)

    # check if the model output matches the ground truth
    result = compare_answers(predicted_answer, ground_truth)

    return result
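With the new signature, compare_answers owns the parsing step, so scoring degrades gracefully: only a parsing failure triggers the random guess. A small illustration under the same assumptions, reusing the `_parse` sketch above (logging omitted; `_compare` is not a function in the PR):

```python
import random

def _compare(model_output: str | None, ground_truth: str) -> bool:
    """Illustrative version of compare_answers, reusing the _parse sketch above."""
    try:
        predicted = _parse(model_output)  # raises TypeError on a None transcript
    except Exception:
        predicted = random.choice(['A', 'B', 'C', 'D'])  # fall back to a random guess
    return predicted == ground_truth

assert _compare('<<FINAL_ANSWER|| D ||FINAL_ANSWER>>', 'D') is True
assert _compare('<<FINAL_ANSWER|| D ||FINAL_ANSWER>>', 'A') is False
# A None transcript raises inside _parse, so the random fallback is used and
# the comparison may pass or fail by chance.
print(_compare(None, 'A'))
```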
def convert_instance_dict(instance):
    """Used for preprocessing the hf dataset into a format that can be used by the agent.
    Reads and extracts relevant information from the dataset instance.

@@ -165,27 +191,37 @@ def process_instance(
# ======= Run the agent on the instance =======
# Prepare instruction for the agent using suggested format in gpqa codebase
instruction = f"""
What is the correct answer to this question:\n
{instance['question']}\n

Choices:\n
(A) {instance['choices'][0]}\n
(B) {instance['choices'][1]}\n
(C) {instance['choices'][2]}\n
(D) {instance['choices'][3]}\n
\n\n

MOST IMPORTANT: Format your response as follows:
<<FINAL_ANSWER||
<insert correct answer here, must be one of A, B, C, D> (Please dont use any additional characters. Just the letter of the correct answer (A/B/C/D).)
||FINAL_ANSWER>>

Additional Instructions:
- You should ONLY interact with the environment provided to you AND NEVER ASK FOR HUMAN HELP.
"""
Additional Instructions:
- Do not try to solve the question in a single step. Break it down into smaller steps.
- You should ONLY interact with the environment provided to you AND NEVER ASK FOR HUMAN HELP.

# NOTE: You can actually set slightly different instruction for different agents
instruction += AGENT_CLS_TO_INST_SUFFIX[agent.__class__.__name__]
- SUPER IMPORTANT: When you have reported the answer to the user in the requested format, (and only once that is done) in the next turn, please run the following command: <execute_bash> exit </execute_bash>.
- Again you are being told a million times to first report the answer in the requested format (see again below for reference) before exiting. DO NOT EXIT WITHOUT REPORTING THE ANSWER FIRST.
That is, when you have decided on the answer report in the following format:

<<FINAL_ANSWER||
<insert correct answer here, must be one of A, B, C, D> (Please dont use any additional characters. Just the letter of the correct answer (A/B/C/D).)
||FINAL_ANSWER>>
<execute_bash> exit </execute_bash>

Again do not quit without reporting the answer first.
Ok now its time to start solving the question. Good luck!
"""

Review discussion on this hunk:
- This is duplicated with the codeact user response above, could we deduplicate?
- Because these were the code we used to run experiments in our paper, maybe we should leave them as is for reproducibility and improve in future iterations?
- We could, but because the strings are the same, wouldn't deduplicating be functionally equivalent?

# Here's how you can run the agent (similar to the `main` function) and get the final task state
state: State | None = asyncio.run(
@@ -196,18 +232,70 @@ def process_instance(
        fake_user_response_fn=AGENT_CLS_TO_FAKE_USER_RESPONSE_FN.get(
            agent.__class__.__name__
        ),
        sid=instance.instance_id,
        sid=f'gptq_{str(instance.instance_id)}',
        headless_mode=True,
    )
)
assert state is not None, 'State should not be None.'

# ======= Attempt to evaluate the agent's edits =======
# get the final message from the state history (default to empty if not found)
final_message = state.history.get_last_agent_message()

question_choices = {
    'A': instance['choices'][0],
    'B': instance['choices'][1],
    'C': instance['choices'][2],
    'D': instance['choices'][3],
}
# get the final message from the state history (default to empty if not found)
found_answers = {
    'A': False,
    'B': False,
    'C': False,
    'D': False,
}
for event in state.history.get_events(reverse=True):
    if (
        isinstance(event, AgentFinishAction)
        and event.source != 'user'
        and '<<FINAL_ANSWER||' in event.thought
    ):
        final_message = event.thought
        break
    elif (
        isinstance(event, MessageAction)
        and event.source != 'user'
        and '<<FINAL_ANSWER||' in event.content
    ):
        final_message = event.content
        break
    elif isinstance(event, Observation):
        for option, option_text in question_choices.items():
            if option_text in event.content:
                found_answers[option] = True
    else:
        final_message = None

found_options = [option for option, found in found_answers.items() if found]
logger.info('#############################################')
logger.info(f'Final message generated by the agent: {final_message}')

test_result = get_test_result(final_message, instance.correct_solution)
logger.info('#############################################')

# check if the model output matches the ground truth
test_result = compare_answers(final_message, instance.correct_solution)
if final_message is None and len(found_options) > 0:
    _selected = random.choice(found_options)
    # if the final message is None, then the agent did not report the answer in the correct format
    # so we randomly select one of the found options and compare it with the correct solution
    test_result = _selected == instance.correct_solution
    logger.info('#############################################')
    logger.info('Agent did not report the answer in the correct format.')
    logger.info(f'Found options: {found_options}')
    logger.info(f'Selected option: {_selected}')
    logger.info('#############################################')

logger.info('#############################################')
logger.info(f'Test result: {test_result}')
logger.info('#############################################')

# If you are working on some simpler benchmark that only evaluates the final model output (e.g., in a MessageAction)
# You can simply get the LAST `MessageAction` from the returned `state.history` and parse it for evaluation.
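The new scoring fallback deserves a plain statement: if the agent never produced a well-formed `<<FINAL_ANSWER|| ... ||FINAL_ANSWER>>` block, the evaluator checks which answer choices appeared verbatim in observations and scores a random pick among them, falling back to a blind guess when nothing was found. A self-contained sketch of that decision rule (`score_instance` is an illustrative helper, not a function in the PR; it reuses the `_parse` sketch above):

```python
import random

def score_instance(final_message: str | None,
                   found_answers: dict[str, bool],
                   correct_solution: str) -> bool:
    """Illustrative scoring rule mirroring the fallback logic in the diff."""
    if final_message is not None:
        # normal path: parse the structured answer (in the real code, a parsing
        # failure here would fall back to a random guess inside compare_answers)
        return _parse(final_message) == correct_solution
    found_options = [opt for opt, found in found_answers.items() if found]
    if found_options:
        # no structured answer: guess uniformly among options the agent surfaced
        return random.choice(found_options) == correct_solution
    # nothing usable in the transcript: blind guess among all four choices
    return random.choice(['A', 'B', 'C', 'D']) == correct_solution

print(score_instance('<<FINAL_ANSWER|| B ||FINAL_ANSWER>>',
                     {'A': False, 'B': False, 'C': False, 'D': False}, 'B'))  # True
print(score_instance(None, {'A': True, 'B': False, 'C': True, 'D': False}, 'C'))  # True in ~50% of runs
```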
@@ -216,21 +304,20 @@ def process_instance(

metrics = state.metrics.get() if state.metrics else None

# history is now available as a stream of events, rather than list of pairs of (Action, Observation)
# for compatibility with the existing output format, we can remake the pairs here
# remove when it becomes unnecessary
histories = state.history.compatibility_for_eval_history_pairs()

# Save the output
output = {
    'task_id': instance.task_id,
    'instance_id': instance.instance_id,
    'instruction': instruction,
    'metadata': metadata.model_dump(),
    'history': histories,
    'history': state.history.compatibility_for_eval_history_pairs(),
    'metrics': metrics,
    'error': state.last_error if state and state.last_error else None,
    'test_result': test_result,
    'test_result': {
        'result': test_result,
        'found_answers': found_answers,
        'last_message': final_message,
    },
}

except Exception:
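With this change each saved record carries not just a boolean but the evidence behind it. An illustrative, made-up `test_result` entry for a single instance:

```python
# Illustrative (made-up) test_result entry for one instance after this change.
example_test_result = {
    'result': True,
    'found_answers': {'A': False, 'B': True, 'C': False, 'D': False},
    'last_message': '<<FINAL_ANSWER||\nB\n||FINAL_ANSWER>>',
}
```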
@@ -269,9 +356,14 @@ def process_instance(
gpqa_dataset['task_id'] = gpqa_dataset.index
# gpqa_dataset = dataset['train'].to_pandas().sort_values(by='id').reset_index(drop=True)

if args.agent_cls != 'CodeActAgent':
    raise ValueError(
        f'Agent class {args.agent_cls} not supported for GPQA evaluation.'
    )

metadata = make_metadata(
    llm_config=llm_config,
    dataset_name='gpqa',
    dataset_name=args.data_split,
    agent_class=args.agent_cls,
    max_iterations=args.max_iterations,
    eval_note=args.eval_note,
Changes to the AgentController (second file in the diff):

@@ -68,6 +68,7 @@ def __init__(
    max_budget_per_task: float | None = MAX_BUDGET_PER_TASK,
    initial_state: State | None = None,
    is_delegate: bool = False,
    headless_mode: bool = False,
):
    """Initializes a new instance of the AgentController class.

@@ -79,10 +80,12 @@ def __init__(
        max_budget_per_task: The maximum budget (in USD) allowed per task, beyond which the agent will stop.
        initial_state: The initial state of the controller.
        is_delegate: Whether this controller is a delegate.
        headless_mode: Whether the agent is run in headless mode.
    """
    self._step_lock = asyncio.Lock()
    self.id = sid
    self.agent = agent
    self.headless_mode = headless_mode

    # subscribe to the event stream
    self.event_stream = event_stream
@@ -291,7 +294,16 @@ async def _step(self):
logger.debug(f'[Agent Controller {self.id}] Delegate step done')
assert self.delegate is not None
delegate_state = self.delegate.get_agent_state()
if delegate_state == AgentState.ERROR:
logger.debug(
    f'[Agent Controller {self.id}] Delegate state: {delegate_state}'
)
if delegate_state == AgentState.ERROR or (
    self.headless_mode and delegate_state == AgentState.PAUSED
):
    # consider PAUSED state as an error if running in headless mode
    # (since user cannot resume on the web interface so agent will hang forever)
    # otherwise, PAUSED state is fine

    # close the delegate upon error
    await self.delegate.close()
    self.delegate = None

Review discussion on this hunk:
- I suggest we change …
- Good idea! pushed the changes in the latest commits.
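The behavioural change is small but easy to state as a pure predicate: in headless mode a PAUSED delegate is treated like an errored one, because there is no UI from which a user could resume it. A standalone sketch of that rule (the enum subset and helper are illustrative, not the controller's actual structure):

```python
from enum import Enum

class AgentState(str, Enum):
    # illustrative subset of states
    RUNNING = 'running'
    PAUSED = 'paused'
    ERROR = 'error'

def should_close_delegate(delegate_state: AgentState, headless_mode: bool) -> bool:
    """Mirror of the new condition: ERROR always closes the delegate;
    PAUSED closes it only when running headless (no UI to resume from)."""
    return delegate_state == AgentState.ERROR or (
        headless_mode and delegate_state == AgentState.PAUSED
    )

assert should_close_delegate(AgentState.ERROR, headless_mode=False)
assert should_close_delegate(AgentState.PAUSED, headless_mode=True)
assert not should_close_delegate(AgentState.PAUSED, headless_mode=False)
```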