[Eval,Arch] Update GPTQ eval and add headless_mode for Controller #2994

Merged
merged 11 commits on Jul 20, 2024
2 changes: 1 addition & 1 deletion agenthub/codeact_agent/action_parser.py
@@ -98,7 +98,7 @@ def parse(self, action_str: str) -> Action:
# a command was found
command_group = self.bash_command.group(1).strip()
if command_group.strip() == 'exit':
return AgentFinishAction()
return AgentFinishAction(thought=thought)
return CmdRunAction(command=command_group, thought=thought)


1 change: 1 addition & 0 deletions evaluation/EDA/run_infer.py
@@ -128,6 +128,7 @@ def process_instance(
agent.__class__.__name__
],
sid=instance['text'].strip(),
headless_mode=True,
)
)
# ======= Attempt to evaluate the agent's edits =======
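
Across the evaluation scripts touched by this PR, the change is the same one-line addition: pass `headless_mode=True` to `run_agent_controller`. Below is a minimal sketch of the shared call pattern, assuming the controller signature shown in these hunks; names such as `agent`, `instruction`, `metadata`, `FAKE_RESPONSES`, and `instance_id` are placeholders for each script's local values.

```python
import asyncio

from opendevin.core.main import run_agent_controller

# Sketch only: `agent`, `instruction`, `metadata`, `FAKE_RESPONSES`, and
# `instance_id` come from the surrounding eval script.
state = asyncio.run(
    run_agent_controller(
        agent,
        instruction,
        max_iterations=metadata.max_iterations,
        fake_user_response_fn=FAKE_RESPONSES[agent.__class__.__name__],
        sid=instance_id,
        headless_mode=True,  # new in this PR: run the controller non-interactively
    )
)
```
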
1 change: 1 addition & 0 deletions evaluation/agent_bench/run_infer.py
@@ -119,6 +119,7 @@ def process_instance(
fake_user_response_fn=FAKE_RESPONSES[agent.__class__.__name__],
sandbox=sandbox,
sid=inst_id,
headless_mode=True,
)
)

1 change: 1 addition & 0 deletions evaluation/biocoder/run_infer.py
@@ -176,6 +176,7 @@ def process_instance(
],
sandbox=sandbox,
sid=sid,
headless_mode=True,
)
)

1 change: 1 addition & 0 deletions evaluation/bird/run_infer.py
@@ -222,6 +222,7 @@ def execute_sql(db_path, sql):
agent.__class__.__name__
],
sid=sid,
headless_mode=True,
)
)

1 change: 1 addition & 0 deletions evaluation/browsing_delegation/run_infer.py
@@ -71,6 +71,7 @@ def process_instance(
instruction,
max_iterations=metadata.max_iterations,
sid=env_id,
headless_mode=True,
)
)

1 change: 1 addition & 0 deletions evaluation/gaia/run_infer.py
@@ -125,6 +125,7 @@ def process_instance(
agent.__class__.__name__
],
sid=instance['task_id'],
headless_mode=True,
)
)
# ======= Attempt to evaluate the agent's edits =======
1 change: 1 addition & 0 deletions evaluation/gorilla/run_infer.py
@@ -122,6 +122,7 @@ def process_instance(agent, question_id, question, metadata, reset_logger: bool
agent.__class__.__name__
),
sid=question_id,
headless_mode=True,
)
)
# ======= Attempt to evaluate the agent's edits =======
12 changes: 1 addition & 11 deletions evaluation/gpqa/README.md
@@ -15,10 +15,6 @@ Further references:
- https://paperswithcode.com/dataset/gpqa
- https://github.com/idavidrein/gpqa

## TODOs
- [ ] Add support for other agents (currently only tested on `CodeActAgent`)
- [ ] Complete full benchmark evaluation
- [ ] Fix intermittent `BrowserException: Failed to start browser environment` error

## Setup Environment

@@ -27,17 +23,11 @@ Please follow [this document](https://github.com/OpenDevin/OpenDevin/blob/main/D

## Configure OpenDevin and your LLM

Create a `config.toml` file if it does not exist at the root of the workspace.
Create a `config.toml` file (you can copy from `config.template.toml`) if it does not exist at the root of the workspace.

Add the following configurations:

```toml
[core]
max_iterations = 100
cache_dir = "/tmp/cache"
ssh_hostname = "localhost"
enable_auto_lint = true

# TODO: Change these to the model you want to evaluate
[llm.eval_gpt4_1106_preview]
model = "gpt-4-1106-preview"
200 changes: 146 additions & 54 deletions evaluation/gpqa/run_infer.py
@@ -22,6 +22,7 @@
import pathlib
import random
import re
from typing import Callable

import pandas as pd
from datasets import load_dataset
@@ -30,7 +31,6 @@
EvalMetadata,
codeact_user_response,
make_metadata,
monologue_user_response,
prepare_dataset,
run_evaluation,
)
@@ -40,52 +40,82 @@
from opendevin.core.logger import get_console_handler
from opendevin.core.logger import opendevin_logger as logger
from opendevin.core.main import run_agent_controller
from opendevin.events.action import Action, AgentFinishAction, MessageAction
from opendevin.events.observation import Observation
from opendevin.llm.llm import LLM

AGENT_CLS_TO_FAKE_USER_RESPONSE_FN = {
'CodeActAgent': codeact_user_response,
'MonologueAgent': monologue_user_response,
}
ACTION_FORMAT = """
<<FINAL_ANSWER||
<insert correct answer here, must be one of A, B, C, D> (Please dont use any additional characters. Just the letter of the correct answer (A/B/C/D).)
||FINAL_ANSWER>>
""".strip()
Collaborator (Author): @neubig is this similar to what you have in mind for deduplication?

Contributor: AFAIK these parts are also duplicated?

        'Again you are being told a million times to first report the answer in the requested format (see again below for reference) before exiting. DO NOT EXIT WITHOUT REPORTING THE ANSWER FIRST.\n'
        'That is, when you have decided on the answer report in the following format:\n'

But this is not super-important, please don't let me block you.
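
One way this duplication could be removed (a hypothetical sketch, not part of this PR) is to hoist the repeated reminder text into a module-level constant next to `ACTION_FORMAT` and interpolate it both in `gpqa_codeact_user_response` and in the task instruction:

```python
# Hypothetical refactor sketch (not in this PR): share the repeated reminder text.
EXIT_REMINDER = (
    'Again you are being told a million times to first report the answer in the requested format '
    '(see again below for reference) before exiting. DO NOT EXIT WITHOUT REPORTING THE ANSWER FIRST.\n'
    'That is, when you have decided on the answer report in the following format:\n'
    f'{ACTION_FORMAT}\n'
    '<execute_bash> exit </execute_bash>\n'
)
```

Both the fake user response and the instruction string could then reference `EXIT_REMINDER` instead of repeating the lines.
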



def gpqa_codeact_user_response(
state: State,
encapsulate_solution: bool = False,
try_parse: Callable[[Action], str] | None = None,
) -> str:
msg = (
'Please continue working on the task on whatever approach you think is suitable.\n'
'Feel free to use all tools for calculations and solving the problem, and web-search for finding relevant facts during the process if needed\n'
'If you have finished reporting the answer in the expected format, (and only once that is done), please run the following command to submit: <execute_bash> exit </execute_bash>.\n'
'Again you are being told a million times to first report the answer in the requested format (see again below for reference) before exiting. DO NOT EXIT WITHOUT REPORTING THE ANSWER FIRST.\n'
'That is, when you have decided on the answer report in the following format:\n'
f'{ACTION_FORMAT}\n'
'<execute_bash> exit </execute_bash>\n'
'IMPORTANT: YOU SHOULD NEVER ASK FOR HUMAN HELP TO SOLVE THIS TASK.\n'
)

return msg


AGENT_CLS_TO_FAKE_USER_RESPONSE_FN = {'CodeActAgent': codeact_user_response}

AGENT_CLS_TO_INST_SUFFIX = {
'CodeActAgent': '\n\n SUPER IMPORTANT: When you think you have solved the question, first report it back to the user in the requested format. Only once that is done, in the next turn, please run the following command: <execute_bash> exit </execute_bash>.\n'
}


def parse_final_answer(final_answer: str) -> str:
def parse_final_answer(final_answer: str | None) -> str | None:
"""Parse the final answer from the final message generated by the agent
to extract the final answer. The final answer is usually enclosed in the format:
<<FINAL_ANSWER||
<insert correct answer here>
||FINAL_ANSWER>>
"""
# to do this first extract the part enclosed in the format <<FINAL_ANSWER|| ... ||FINAL_ANSWER>>
pattern = re.compile(r'<<FINAL_ANSWER\|\|(.*?)\|\|FINAL_ANSWER>>', re.DOTALL)
match = pattern.search(final_answer)

if match:
return match.group(1).strip()
else:
return 'No final answer found in the provided string.'
# and then strip it, remove any leading/trailing spaces line breaks etc.
answer = match.group(1).strip()
# finally capitalize it
answer = answer.upper()
# and then return A, B, C, D depending on whether the answer A, B, C, D is found in the final answer
for letter in ['A', 'B', 'C', 'D']:
if letter in answer:
return letter


def compare_answers(predicted_answer, ground_truth):
def compare_answers(model_output: str | None, ground_truth: str):
"""Compare the predicted answer with the ground truth answer"""
try:
# parse the final answer from model output
predicted_answer = parse_final_answer(model_output)
except Exception as e:
# Log the exception
logger.error(f'An error occurred: {e}\n defaulting to random guess ...')
# choose a random answer if the model output is not in the correct format
predicted_answer = random.choice(['A', 'B', 'C', 'D'])

logger.info('#############################################')
logger.info(f'Predicted answer: {predicted_answer}')
logger.info(f'Ground truth answer: {ground_truth}')
logger.info('#############################################')
return predicted_answer == ground_truth


def get_test_result(model_output, ground_truth):
"""Implements the evaluation logic for GPQA
Checks if the output of a given instance is correct (as per the ground truth)
"""
# parse the final answer from model output
predicted_answer = parse_final_answer(model_output)

# check if the model output matches the ground truth
result = compare_answers(predicted_answer, ground_truth)

return result


def convert_instance_dict(instance):
"""Used for preprocessing the hf dataset into a format that can be used by the agent.
Reads and extracts relevant information from the dataset instance.
@@ -165,27 +195,33 @@ def process_instance(
# ======= Run the agent on the instance =======
# Prepare instruction for the agent using suggested format in gpqa codebase
instruction = f"""
What is the correct answer to this question:\n
{instance['question']}\n
What is the correct answer to this question:\n
{instance['question']}\n

Choices:\n
(A) {instance['choices'][0]}\n
(B) {instance['choices'][1]}\n
(C) {instance['choices'][2]}\n
(D) {instance['choices'][3]}\n
\n\n
Choices:\n
(A) {instance['choices'][0]}\n
(B) {instance['choices'][1]}\n
(C) {instance['choices'][2]}\n
(D) {instance['choices'][3]}\n
\n\n

MOST IMPORTANT: Format your response as follows:
<<FINAL_ANSWER||
<insert correct answer here, must be one of A, B, C, D> (Please dont use any additional characters. Just the letter of the correct answer (A/B/C/D).)
||FINAL_ANSWER>>
MOST IMPORTANT: Format your response as follows:
{ACTION_FORMAT}

Additional Instructions:
- You should ONLY interact with the environment provided to you AND NEVER ASK FOR HUMAN HELP.
"""
Additional Instructions:
- Do not try to solve the question in a single step. Break it down into smaller steps.
- You should ONLY interact with the environment provided to you AND NEVER ASK FOR HUMAN HELP.

# NOTE: You can actually set slightly different instruction for different agents
instruction += AGENT_CLS_TO_INST_SUFFIX[agent.__class__.__name__]
- SUPER IMPORTANT: When you have reported the answer to the user in the requested format, (and only once that is done) in the next turn, please run the following command: <execute_bash> exit </execute_bash>.
- Again you are being told a million times to first report the answer in the requested format (see again below for reference) before exiting. DO NOT EXIT WITHOUT REPORTING THE ANSWER FIRST.
That is, when you have decided on the answer report in the following format:

{ACTION_FORMAT}
<execute_bash> exit </execute_bash>

Again do not quit without reporting the answer first.
Ok now its time to start solving the question. Good luck!
"""

# Here's how you can run the agent (similar to the `main` function) and get the final task state
state: State | None = asyncio.run(
@@ -196,18 +232,70 @@ def process_instance(
fake_user_response_fn=AGENT_CLS_TO_FAKE_USER_RESPONSE_FN.get(
agent.__class__.__name__
),
sid=instance.instance_id,
sid=f'gptq_{str(instance.instance_id)}',
headless_mode=True,
)
)
assert state is not None, 'State should not be None.'

# ======= Attempt to evaluate the agent's edits =======
# get the final message from the state history (default to empty if not found)
final_message = state.history.get_last_agent_message()

question_choices = {
'A': instance['choices'][0],
'B': instance['choices'][1],
'C': instance['choices'][2],
'D': instance['choices'][3],
}
# get the final message from the state history (default to empty if not found)
found_answers = {
'A': False,
'B': False,
'C': False,
'D': False,
}
for event in state.history.get_events(reverse=True):
if (
isinstance(event, AgentFinishAction)
and event.source != 'user'
and '<<FINAL_ANSWER||' in event.thought
):
final_message = event.thought
break
elif (
isinstance(event, MessageAction)
and event.source != 'user'
and '<<FINAL_ANSWER||' in event.content
):
final_message = event.content
break
elif isinstance(event, Observation):
for option, option_text in question_choices.items():
if option_text in event.content:
found_answers[option] = True
else:
final_message = None

found_options = [option for option, found in found_answers.items() if found]
logger.info('#############################################')
logger.info(f'Final message generated by the agent: {final_message}')

test_result = get_test_result(final_message, instance.correct_solution)
logger.info('#############################################')

# check if the model output matches the ground truth
test_result = compare_answers(final_message, instance.correct_solution)
if final_message is None and len(found_options) > 0:
_selected = random.choice(found_options)
# if the final message is None, then the agent did not report the answer in the correct format
# so we randomly select one of the found options and compare it with the correct solution
test_result = _selected == instance.correct_solution
logger.info('#############################################')
logger.info('Agent did not report the answer in the correct format.')
logger.info(f'Found options: {found_options}')
logger.info(f'Selected option: {_selected}')
logger.info('#############################################')

logger.info('#############################################')
logger.info(f'Test result: {test_result}')
logger.info('#############################################')

# If you are working on some simpler benchmark that only evaluates the final model output (e.g., in a MessageAction)
# You can simply get the LAST `MessageAction` from the returned `state.history` and parse it for evaluation.
@@ -216,21 +304,20 @@ def process_instance(

metrics = state.metrics.get() if state.metrics else None

# history is now available as a stream of events, rather than list of pairs of (Action, Observation)
# for compatibility with the existing output format, we can remake the pairs here
# remove when it becomes unnecessary
histories = state.history.compatibility_for_eval_history_pairs()

# Save the output
output = {
'task_id': instance.task_id,
'instance_id': instance.instance_id,
'instruction': instruction,
'metadata': metadata.model_dump(),
'history': histories,
'history': state.history.compatibility_for_eval_history_pairs(),
'metrics': metrics,
'error': state.last_error if state and state.last_error else None,
'test_result': test_result,
'test_result': {
'result': test_result,
'found_answers': found_answers,
'last_message': final_message,
},
}

except Exception:
@@ -269,9 +356,14 @@ def process_instance(
gpqa_dataset['task_id'] = gpqa_dataset.index
# gpqa_dataset = dataset['train'].to_pandas().sort_values(by='id').reset_index(drop=True)

if args.agent_cls != 'CodeActAgent':
raise ValueError(
f'Agent class {args.agent_cls} not supported for GPQA evaluation.'
)

metadata = make_metadata(
llm_config=llm_config,
dataset_name='gpqa',
dataset_name=args.data_split,
agent_class=args.agent_cls,
max_iterations=args.max_iterations,
eval_note=args.eval_note,
Empty file modified: evaluation/gpqa/scripts/run_infer.sh (mode 100644 → 100755)
1 change: 1 addition & 0 deletions evaluation/humanevalfix/run_infer.py
@@ -188,6 +188,7 @@ def process_instance(
agent.__class__.__name__
),
sid=sid,
headless_mode=True,
)
)
