fix unsloth resume training (modelscope#2668)
tastelikefeet authored Dec 16, 2024
1 parent 4288d42 commit 0e08211
Showing 2 changed files with 6 additions and 3 deletions.
7 changes: 5 additions & 2 deletions swift/llm/train/tuner.py
@@ -335,8 +335,11 @@ def prepare_model(cls, args: TrainArguments, model, template=None, train_dataset
             apply_liger(args.model_type)

         if args.is_adapter:
-            # Fix the name of the layer in xcomposer that contains Plora.
-            model.requires_grad_(False)
+            if args.tuner_backend != 'unsloth':
+                # Fix the name of the layer in xcomposer that contains Plora.
+                # Unsloth prepares and loads lora outside this function when
+                # resume_from_checkpoint, so do not disable grad here
+                model.requires_grad_(False)
             if args.resume_from_checkpoint:
                 if args.train_type in extra_tuners:
                     tuner: Tuner = extra_tuners[args.train_type]
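For reference, a minimal sketch of the guard introduced above, using a hypothetical helper name (maybe_freeze_base_model) rather than the real prepare_model body: ordinary adapter backends freeze the base model before the tuner is attached, while the Unsloth backend is skipped because it prepares and loads the LoRA weights itself when resume_from_checkpoint is set.

# Illustrative sketch only; the helper name is hypothetical, the logic mirrors the diff above.
def maybe_freeze_base_model(model, tuner_backend: str) -> None:
    if tuner_backend != 'unsloth':
        # Regular adapter backends: freeze base-model grads before attaching LoRA.
        model.requires_grad_(False)
    # Unsloth: leave grads untouched; it prepares/loads the adapter outside
    # this function when resuming from a checkpoint.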
2 changes: 1 addition & 1 deletion swift/tuners/peft.py
@@ -42,7 +42,7 @@ class LoraConfig(peft.LoraConfig):
     lora_dtype: Optional[str] = field(
         default=None, metadata={'help': 'The lora dtype, default None means following the original layer\'s dtype'})

-    lorap_lr_ratio: Optional[float] = field(default=2.0**4, metadata={'help': 'The lr ratio of lora_B in lora+'})
+    lorap_lr_ratio: Optional[float] = field(default=None, metadata={'help': 'The lr ratio of lora_B in lora+'})

     lorap_emb_lr: float = field(default=1e-6, metadata={'help': 'The lr for embedding in lora+'})

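The default change above makes the LoRA+ learning-rate ratio opt-in: with lorap_lr_ratio left as None, lora_B no longer receives the 2**4 multiplier unless the caller asks for it. A minimal usage sketch, assuming this LoraConfig is exported from swift.tuners (it is defined in swift/tuners/peft.py):

# Import path assumed from the file location in this diff.
from swift.tuners import LoraConfig

# Plain LoRA: lorap_lr_ratio now defaults to None, so no LoRA+ lr scaling is applied.
plain = LoraConfig(r=8, lora_alpha=32, target_modules=['q_proj', 'v_proj'])

# Restoring the previous behavior requires passing the ratio explicitly.
lora_plus = LoraConfig(r=8, lora_alpha=32, target_modules=['q_proj', 'v_proj'], lorap_lr_ratio=2.0**4)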
