
Commit 8a457af
Attempt to fix the tokenizer error
zRzRzRzRzRzRzR committed Mar 31, 2024
1 parent af9be9e commit 8a457af
Showing 1 changed file with 1 addition and 3 deletions.
4 changes: 1 addition & 3 deletions finetune_demo/finetune_hf.py
@@ -76,7 +76,6 @@ def prediction_step(
        ignore_keys=None,
        **gen_kwargs,
    ) -> tuple[Optional[float], Optional[torch.Tensor], Optional[torch.Tensor]]:
-
        if self.args.predict_with_generate:
            output_ids = inputs.pop('output_ids')
        input_ids = inputs['input_ids']
@@ -87,7 +86,6 @@ def prediction_step(
        if self.args.predict_with_generate:
            labels = output_ids
        return loss, generated_tokens, labels
-
    # For P-Tuning a new save_model function is fine for the prefix_encoder model
    # but may cause problems for the whole model loading
@@ -525,7 +523,7 @@ def main(
        ),
        train_dataset=train_dataset,
        eval_dataset=val_dataset.select(list(range(50))),
-       tokenizer=tokenizer,
+       # tokenizer=tokenizer,  # commented out to avoid the tokenizer error
        compute_metrics=functools.partial(compute_metrics, tokenizer=tokenizer),
    )
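
The fix drops the tokenizer= argument from the trainer constructor but leaves the tokenizer bound into compute_metrics through functools.partial, so metric computation can still decode predictions. Below is a minimal, runnable sketch of that binding pattern; StubTokenizer and the metric body are hypothetical stand-ins for illustration, not code from this repository.

import functools

# Hypothetical stand-in for the real tokenizer (an assumption for this
# sketch); it only needs a batch_decode-like method here.
class StubTokenizer:
    def batch_decode(self, batch):
        return [' '.join(str(i) for i in ids) for ids in batch]

def compute_metrics(eval_preds, tokenizer):
    # The tokenizer arrives pre-bound via functools.partial, so the trainer
    # can call compute_metrics(eval_preds) without holding a tokenizer itself.
    preds, labels = eval_preds
    decoded = tokenizer.batch_decode(preds)
    return {'decoded_count': len(decoded)}

metrics_fn = functools.partial(compute_metrics, tokenizer=StubTokenizer())
print(metrics_fn(([[1, 2, 3]], [[4, 5, 6]])))  # -> {'decoded_count': 1}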

