This repository has been archived by the owner on Oct 17, 2024. It is now read-only.

Commit

format w/ ruff
StableFluffy committed Jun 28, 2024
1 parent f3f087c commit a61d06f
Showing 4 changed files with 172 additions and 90 deletions.
113 changes: 71 additions & 42 deletions evaluator.py
@@ -1,54 +1,64 @@
-from typing import Dict, Union
 import argparse
-import re
 import json
+import re
 import time
-from datetime import datetime
-from threading import Lock
 from concurrent.futures import ThreadPoolExecutor
+from datetime import datetime
 from pathlib import Path
+from threading import Lock
+from typing import Dict, Union

 import pandas as pd
 from openai import OpenAI

 from templates import JUDGE_TEMPLATE

+
 # Constants
 TIME_START = datetime.now().strftime("%Y%m%d_%H%M%S")
 LOCK = Lock()

+
 def get_args():
     parser = argparse.ArgumentParser()
-    parser.add_argument('-o', '--model-output-dir', help='Model Output Directory', required=True)
-    parser.add_argument('-k', '--openai-api-key', help='OpenAI API Key', required=True)
-    parser.add_argument('-j', '--judge-model', help='Judge Model', default='gpt-4-1106-preview')
-    parser.add_argument('-t', '--threads', help='Thread count', default=42, type=int)
+    parser.add_argument(
+        "-o", "--model-output-dir", help="Model Output Directory", required=True
+    )
+    parser.add_argument("-k", "--openai-api-key", help="OpenAI API Key", required=True)
+    parser.add_argument(
+        "-j", "--judge-model", help="Judge Model", default="gpt-4-1106-preview"
+    )
+    parser.add_argument("-t", "--threads", help="Thread count", default=42, type=int)
     return parser.parse_args()

+
 def create_azure_client(api_key: str):
-    return OpenAI(
-        api_key=api_key
-    )
+    return OpenAI(api_key=api_key)

+
-def create_answers(client, model_output, judge_model, is_multi_turn: bool = False, i=0) -> Dict[str, Union[str, float]]:
-    model_questions = model_output['questions']
-    model_outputs = model_output['outputs']
-    model_references = model_output['references']
+def create_answers(
+    client, model_output, judge_model, is_multi_turn: bool = False, i=0
+) -> Dict[str, Union[str, float]]:
+    model_questions = model_output["questions"]
+    model_outputs = model_output["outputs"]
+    model_references = model_output["references"]

     prompt = (
         f"아래의 내용을 주어진 평가 기준들을 충실히 반영하여 평가해라. 특히 모델 답변이 언어 요구사항을 준수하는지 반드시 확인해야 한다.\n\n"
         f"**Question**\n{model_questions[0]}"
     )

     if model_references and model_references[0]:
         prompt += f"\n\n**Additional Reference**\n{model_references[0]}"

     prompt += f"\n\n**Model's Response**\n{model_outputs[0]}"

     if is_multi_turn:
         prompt += f"\n\n**Follow-up Question.**\n{model_questions[1]}"
         if model_references and model_references[1]:
             prompt += f"\n\n**Additional Reference**\n{model_references[1]}"
         prompt += f"\n\n**Model's Response**\n{model_outputs[1]}"

     prompt += "\n\n[[대화 종료. 평가 시작.]]"

     try:
@@ -57,24 +67,34 @@ def create_answers(client, model_output, judge_model, is_multi_turn: bool = Fals
             temperature=0.0,
             n=1,
             messages=[
-                {"role": "system", "content": JUDGE_TEMPLATE['multi_turn' if is_multi_turn else 'single_turn']},
-                {"role": "user", "content": prompt}
-            ]
+                {
+                    "role": "system",
+                    "content": JUDGE_TEMPLATE[
+                        "multi_turn" if is_multi_turn else "single_turn"
+                    ],
+                },
+                {"role": "user", "content": prompt},
+            ],
         )

         content = response.choices[0].message.content
-        judge_message_match = re.search(r"평가:(.*?)점수:", content.replace("*", ''), re.DOTALL)
-        judge_message = judge_message_match.group(1).strip() if judge_message_match else "No judge message found"
-        judge_score_match = re.search(r"점수:\s*(\d+(\.\d+)?)", content.replace("*", ''))
+        judge_message_match = re.search(
+            r"평가:(.*?)점수:", content.replace("*", ""), re.DOTALL
+        )
+        judge_message = (
+            judge_message_match.group(1).strip()
+            if judge_message_match
+            else "No judge message found"
+        )
+        judge_score_match = re.search(
+            r"점수:\s*(\d+(\.\d+)?)", content.replace("*", "")
+        )
         if judge_score_match:
             judge_score = float(judge_score_match.group(1))
         else:
             raise ValueError("No score found in response")

-        return {
-            'judge_message': judge_message,
-            'judge_score': judge_score
-        }
+        return {"judge_message": judge_message, "judge_score": judge_score}

     except Exception as e:
         print("Error. Retrying after 20 sec", e)
@@ -84,26 +104,30 @@ def create_answers(client, model_output, judge_model, is_multi_turn: bool = Fals
         if i > 3:
             print("Impossible prompt, aborting..!")
             return {
-                'judge_message': "Impossible to judge due to repetition.",
-                'judge_score': 0.0
+                "judge_message": "Impossible to judge due to repetition.",
+                "judge_score": 0.0,
             }
         i += 1
         return create_answers(client, model_output, judge_model, is_multi_turn, i)

+
 def process_item(client, row, judge_model, output_file):
     query_single = create_answers(client, row, judge_model)
     query_multi = create_answers(client, row, judge_model, is_multi_turn=True)

-    row['query_single'] = query_single
-    row['query_multi'] = query_multi
+    row["query_single"] = query_single
+    row["query_multi"] = query_multi
     row = row.to_dict()

     with LOCK:
-        with output_file.open('a', encoding='utf-8-sig') as f:
+        with output_file.open("a", encoding="utf-8-sig") as f:
             f.write(json.dumps(row, ensure_ascii=False))
-            f.write('\n')
+            f.write("\n")

-def process_file(client, file_path: Path, output_dir: Path, judge_model, threads: int, args):
+
+def process_file(
+    client, file_path: Path, output_dir: Path, judge_model, threads: int, args
+):
     print(f"- 현재 Processing : {file_path}")
     df_model_outputs = pd.read_json(file_path, lines=True)

@@ -114,26 +138,31 @@ def process_file(client, file_path: Path, output_dir: Path, judge_model, threads
         for row in df_model_outputs.iterrows():
             executor.submit(process_item, client, row[1], judge_model, output_file)

+
 def is_hidden(filepath: Path) -> bool:
-    return any(part.startswith('.') for part in filepath.parts)
+    return any(part.startswith(".") for part in filepath.parts)

+
 def main():
     args = get_args()
     client = create_azure_client(args.openai_api_key)

     input_dir = Path(args.model_output_dir)
-    output_dir = Path('./evaluated')
+    output_dir = Path("./evaluated")

     # Filter out hidden files
-    json_files = [file for file in input_dir.rglob('*.jsonl') if not is_hidden(file)]
+    json_files = [file for file in input_dir.rglob("*.jsonl") if not is_hidden(file)]

     for file_path in json_files:
         output_file_path = output_dir / file_path.relative_to(input_dir)
         if output_file_path.exists():
             print(f"이미 평가 완료.. : {file_path}")
             continue
-        process_file(client, file_path, output_dir, args.judge_model, args.threads, args)
-        time.sleep(20) # to handle ratelimit!
+        process_file(
+            client, file_path, output_dir, args.judge_model, args.threads, args
+        )
+        time.sleep(20)  # to handle ratelimit!

+
 if __name__ == "__main__":
-    main()
+    main()
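
Note on the score parsing above: the judge reply is expected to follow a "평가: ... 점수: N" (evaluation ... score) layout, and the .replace("*", "") calls strip any Markdown bold the judge model adds around those labels. A minimal sketch with a made-up reply (the real text comes from the judge model) behaves like this:

import re

# A hypothetical judge reply in the expected "평가: ... 점수: N" format.
content = "**평가:** 모델 답변이 언어 요구사항을 준수하며 근거가 충분하다.\n**점수:** 9"

cleaned = content.replace("*", "")  # drop Markdown bold markers
judge_message = re.search(r"평가:(.*?)점수:", cleaned, re.DOTALL).group(1).strip()
judge_score = float(re.search(r"점수:\s*(\d+(\.\d+)?)", cleaned).group(1))

print(judge_message)  # -> 모델 답변이 언어 요구사항을 준수하며 근거가 충분하다.
print(judge_score)    # -> 9.0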
106 changes: 73 additions & 33 deletions generator.py
@@ -1,73 +1,113 @@
 import argparse
-import pandas as pd
 import os

+import pandas as pd
+
 from templates import PROMPT_STRATEGY

+
 # Use aphrodite-engine or vLLM
 try:
     from aphrodite import LLM, SamplingParams
+
     print("- Using aphrodite-engine")
+
 except ImportError:
     from vllm import LLM, SamplingParams
+
     print("- Using vLLM")

 parser = argparse.ArgumentParser()
-parser.add_argument('-g' ,'--gpu_devices', help=' : CUDA_VISIBLE_DEVICES', default='0')
-parser.add_argument('-m', '--model', help=' : Model to evaluate', default='yanolja/EEVE-Korean-Instruct-2.8B-v1.0')
-parser.add_argument('-ml', '--model_len', help=' : Maximum Model Length', default=4096, type=int)
+parser.add_argument("-g", "--gpu_devices", help=" : CUDA_VISIBLE_DEVICES", default="0")
+parser.add_argument(
+    "-m",
+    "--model",
+    help=" : Model to evaluate",
+    default="yanolja/EEVE-Korean-Instruct-2.8B-v1.0",
+)
+parser.add_argument(
+    "-ml", "--model_len", help=" : Maximum Model Length", default=4096, type=int
+)
 args = parser.parse_args()

 print(f"Args - {args}")

 os.environ["CUDA_VISIBLE_DEVICES"] = args.gpu_devices
-gpu_counts = len(args.gpu_devices.split(','))
+gpu_counts = len(args.gpu_devices.split(","))

 llm = LLM(
     model=args.model,
     tensor_parallel_size=gpu_counts,
     max_model_len=args.model_len,
     gpu_memory_utilization=0.8,
-    trust_remote_code=True # !
-)
+    trust_remote_code=True,  # !
+)

 sampling_params = SamplingParams(
     temperature=0,
     skip_special_tokens=True,
     max_tokens=args.model_len,
-    stop=[
-        '<|endoftext|>',
-        '[INST]',
-        '[/INST]',
-        '<|im_end|>',
-        '<|end|>',
-        '<|eot_id|>'
-    ]
-)
+    stop=["<|endoftext|>", "[INST]", "[/INST]", "<|im_end|>", "<|end|>", "<|eot_id|>"],
+)

 df_questions = pd.read_json(
-    'questions.jsonl',
-    orient='records',
-    encoding="utf-8-sig",
-    lines=True
-)
+    "questions.jsonl", orient="records", encoding="utf-8-sig", lines=True
+)

 if not os.path.exists("./generated/" + args.model):
     os.makedirs("./generated/" + args.model)

 for strategy_name, prompts in PROMPT_STRATEGY.items():
-
     def format_single_turn_question(question):
-        return llm.llm_engine.tokenizer.tokenizer.apply_chat_template(prompts + [{"role": "user", "content": question[0]}], tokenize=False, add_generation_prompt=True)
-
-    single_turn_questions = df_questions['questions'].map(format_single_turn_question)
+        return llm.llm_engine.tokenizer.tokenizer.apply_chat_template(
+            prompts + [{"role": "user", "content": question[0]}],
+            tokenize=False,
+            add_generation_prompt=True,
+        )
+
+    single_turn_questions = df_questions["questions"].map(format_single_turn_question)
     print(single_turn_questions.iloc[0])
-    single_turn_outputs = [output.outputs[0].text.strip() for output in llm.generate(single_turn_questions, sampling_params)]
+
+    single_turn_outputs = [
+        output.outputs[0].text.strip()
+        for output in llm.generate(single_turn_questions, sampling_params)
+    ]

     def format_double_turn_question(question, single_turn_output):
-        return llm.llm_engine.tokenizer.tokenizer.apply_chat_template(prompts + [{"role": "user", "content": question[0]}, {"role": "assistant", "content": single_turn_output}, {"role": "user", "content": question[1]}], tokenize=False, add_generation_prompt=True)
-
-    multi_turn_questions = df_questions[['questions', 'id']].apply(lambda x: format_double_turn_question(x['questions'], single_turn_outputs[x['id']-1]), axis=1)
-    multi_turn_outputs = [output.outputs[0].text.strip() for output in llm.generate(multi_turn_questions, sampling_params)]
-
-    df_output = pd.DataFrame({'id': df_questions['id'], 'category': df_questions['category'], 'questions': df_questions['questions'], 'outputs': list(zip(single_turn_outputs, multi_turn_outputs)), "references": df_questions['references']})
-    df_output.to_json('./generated/' + os.path.join(args.model, f'{strategy_name}.jsonl'), orient='records', lines=True, force_ascii=False)
+        return llm.llm_engine.tokenizer.tokenizer.apply_chat_template(
+            prompts
+            + [
+                {"role": "user", "content": question[0]},
+                {"role": "assistant", "content": single_turn_output},
+                {"role": "user", "content": question[1]},
+            ],
+            tokenize=False,
+            add_generation_prompt=True,
+        )
+
+    multi_turn_questions = df_questions[["questions", "id"]].apply(
+        lambda x: format_double_turn_question(
+            x["questions"], single_turn_outputs[x["id"] - 1]
+        ),
+        axis=1,
+    )
+    multi_turn_outputs = [
+        output.outputs[0].text.strip()
+        for output in llm.generate(multi_turn_questions, sampling_params)
+    ]
+
+    df_output = pd.DataFrame(
+        {
+            "id": df_questions["id"],
+            "category": df_questions["category"],
+            "questions": df_questions["questions"],
+            "outputs": list(zip(single_turn_outputs, multi_turn_outputs)),
+            "references": df_questions["references"],
+        }
+    )
+    df_output.to_json(
+        "./generated/" + os.path.join(args.model, f"{strategy_name}.jsonl"),
+        orient="records",
+        lines=True,
+        force_ascii=False,
+    )
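
One detail worth noting in the multi-turn pass above: single_turn_outputs is indexed with x["id"] - 1, so the script assumes the id column in questions.jsonl is 1-based and aligned with row order. A small sketch with dummy data (the values are illustrative, not from the repository) shows the pairing:

import pandas as pd

# Dummy stand-ins for df_questions and the first-pass generations.
df_questions = pd.DataFrame(
    {
        "id": [1, 2, 3],
        "questions": [["q1-a", "q1-b"], ["q2-a", "q2-b"], ["q3-a", "q3-b"]],
    }
)
single_turn_outputs = ["answer to q1-a", "answer to q2-a", "answer to q3-a"]

# Mirrors the .apply(...) call: id 1 pairs with single_turn_outputs[0], and so on.
paired = df_questions[["questions", "id"]].apply(
    lambda x: (x["questions"][1], single_turn_outputs[x["id"] - 1]), axis=1
)
print(paired.tolist())
# [('q1-b', 'answer to q1-a'), ('q2-b', 'answer to q2-a'), ('q3-b', 'answer to q3-a')]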
3 changes: 1 addition & 2 deletions pyproject.toml
@@ -8,5 +8,4 @@ ignore = ["C408", "C901", "E501", "E731", "E741", "W605"]
 select = ["C", "E", "F", "I", "W"]

 [tool.ruff.lint.isort]
-lines-after-imports = 2
-
+lines-after-imports = 2
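
The select list already enables Ruff's isort rules ("I"), which is what re-sorts the import blocks in evaluator.py and generator.py above; the quote style and line-wrapping changes come from the formatter itself. The commit does not record the exact invocation, but something along the lines of "ruff check --fix ." followed by "ruff format ." would produce this kind of result.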