Skip to content

Commit

Permalink
Default max generation length to be 8192 for alpaca eval.
Browse files Browse the repository at this point in the history
  • Loading branch information
yizhongw committed Oct 22, 2023
1 parent 4977175 commit 409ebc1
Showing 1 changed file with 9 additions and 3 deletions.
12 changes: 9 additions & 3 deletions eval/alpaca_farm/run_eval.py
Original file line number Diff line number Diff line change
Expand Up @@ -34,7 +34,7 @@ def main(args):
)
sampling_params = vllm.SamplingParams(
temperature=0, # greedy decoding
max_tokens=2048,
max_tokens=args.max_new_tokens,
)
outputs = model.generate(prompts, sampling_params)
outputs = [it.outputs[0].text for it in outputs]
Expand All @@ -50,7 +50,7 @@ def main(args):
model=model,
tokenizer=tokenizer,
prompts=prompts,
max_new_tokens=2048,
max_new_tokens=args.max_new_tokens,
do_sample=False,
temperature=0,
batch_size=args.eval_batch_size if args.eval_batch_size else 1,
Expand All @@ -63,7 +63,7 @@ def main(args):
instances=[{"id": str(i), "prompt": prompt} for i, prompt in enumerate(prompts)],
batch_size=args.eval_batch_size if args.eval_batch_size else 10,
output_path=openai_query_cache_path,
max_tokens=2048,
max_tokens=args.max_new_tokens,
temperature=0,
reuse_existing_outputs=True,
)
Expand Down Expand Up @@ -136,6 +136,12 @@ def main(args):
default=None,
help="If specified, we will use the OpenAI API to generate the predictions.",
)
# Cap on generated tokens per response; passed to vLLM SamplingParams
# (max_tokens), generate_completions (max_new_tokens), and the OpenAI
# query path (max_tokens) so all three backends share one limit.
parser.add_argument(
    "--max_new_tokens",
    type=int,
    default=8192,
    help="Maximum number of new tokens to generate.",
)
parser.add_argument(
"--eval_batch_size",
type=int,
Expand Down

0 comments on commit 409ebc1

Please sign in to comment.