It seems there is a precision error when I run examples/train.py. Could you help me with this? I tried changing the Llama setting "torch_dtype": "float16" to "torch_dtype": "float32" (roughly as sketched below), but that did not help and only produced a new error.
Sorry to bother you, I am new to LLMs. Is some setting in the LoRA config incompatible with the Llama config?
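For reference, this is roughly the change I tried (a minimal sketch on my side; the placeholder checkpoint path and the exact place where AutomaTikZ reads this setting are assumptions):

```python
import torch
from transformers import LlamaForCausalLM

# What I tried: load the base model in float32 instead of float16.
# "path/to/llama" is a placeholder for the checkpoint I pass to examples/train.py.
model = LlamaForCausalLM.from_pretrained(
    "path/to/llama",
    torch_dtype=torch.float32,  # previously torch.float16
)
```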
warnings.warn(
Traceback (most recent call last):
File "/home/liuzhe/AutomaTikZ/examples/train.py", line 141, in <module>
model, tokenizer = getattr(train, name).train(
File "/home/liuzhe/AutomaTikZ/automatikz/train/llama.py", line 292, in train
trainer.train(resume_from_checkpoint=last_checkpoint)
File "/home/liuzhe/anaconda3/envs/auto310/lib/python3.10/site-packages/transformers/trainer.py", line 1555, in train
return inner_training_loop(
File "/home/liuzhe/anaconda3/envs/auto310/lib/python3.10/site-packages/transformers/trainer.py", line 1860, in _inner_training_loop
tr_loss_step = self.training_step(model, inputs)
File "/home/liuzhe/anaconda3/envs/auto310/lib/python3.10/site-packages/transformers/trainer.py", line 2725, in training_step
loss = self.compute_loss(model, inputs)
File "/home/liuzhe/anaconda3/envs/auto310/lib/python3.10/site-packages/transformers/trainer.py", line 2748, in compute_loss
outputs = model(**inputs)
File "/home/liuzhe/anaconda3/envs/auto310/lib/python3.10/site-packages/torch/nn/modules/module.py", line 1518, in _wrapped_call_impl
return self._call_impl(*args, **kwargs)
File "/home/liuzhe/anaconda3/envs/auto310/lib/python3.10/site-packages/torch/nn/modules/module.py", line 1527, in _call_impl
return forward_call(*args, **kwargs)
File "/home/liuzhe/anaconda3/envs/auto310/lib/python3.10/site-packages/torch/nn/parallel/data_parallel.py", line 185, in forward
outputs = self.parallel_apply(replicas, inputs, module_kwargs)
File "/home/liuzhe/anaconda3/envs/auto310/lib/python3.10/site-packages/torch/nn/parallel/data_parallel.py", line 200, in parallel_apply
return parallel_apply(replicas, inputs, kwargs, self.device_ids[:len(replicas)])
File "/home/liuzhe/anaconda3/envs/auto310/lib/python3.10/site-packages/torch/nn/parallel/parallel_apply.py", line 110, in parallel_apply
output.reraise()
File "/home/liuzhe/anaconda3/envs/auto310/lib/python3.10/site-packages/torch/_utils.py", line 694, in reraise
raise exception
RuntimeError: Caught RuntimeError in replica 0 on device 0.
Original Traceback (most recent call last):
File "/home/liuzhe/anaconda3/envs/auto310/lib/python3.10/site-packages/torch/nn/parallel/parallel_apply.py", line 85, in _worker
output = module(*input, **kwargs)
File "/home/liuzhe/anaconda3/envs/auto310/lib/python3.10/site-packages/torch/nn/modules/module.py", line 1518, in _wrapped_call_impl
return self._call_impl(*args, **kwargs)
File "/home/liuzhe/anaconda3/envs/auto310/lib/python3.10/site-packages/torch/nn/modules/module.py", line 1527, in _call_impl
return forward_call(*args, **kwargs)
File "/home/liuzhe/anaconda3/envs/auto310/lib/python3.10/site-packages/peft/peft_model.py", line 1003, in forward
return self.base_model(
File "/home/liuzhe/anaconda3/envs/auto310/lib/python3.10/site-packages/torch/nn/modules/module.py", line 1518, in _wrapped_call_impl
return self._call_impl(*args, **kwargs)
File "/home/liuzhe/anaconda3/envs/auto310/lib/python3.10/site-packages/torch/nn/modules/module.py", line 1527, in _call_impl
return forward_call(*args, **kwargs)
File "/home/liuzhe/anaconda3/envs/auto310/lib/python3.10/site-packages/peft/tuners/tuners_utils.py", line 106, in forward
return self.model.forward(*args, **kwargs)
File "/home/liuzhe/anaconda3/envs/auto310/lib/python3.10/site-packages/transformers/models/llama/modeling_llama.py", line 1034, in forward
outputs = self.model(
File "/home/liuzhe/anaconda3/envs/auto310/lib/python3.10/site-packages/torch/nn/modules/module.py", line 1518, in _wrapped_call_impl
return self._call_impl(*args, **kwargs)
File "/home/liuzhe/anaconda3/envs/auto310/lib/python3.10/site-packages/torch/nn/modules/module.py", line 1527, in _call_impl
return forward_call(*args, **kwargs)
File "/home/liuzhe/anaconda3/envs/auto310/lib/python3.10/site-packages/transformers/models/llama/modeling_llama.py", line 912, in forward
layer_outputs = self._gradient_checkpointing_func(
File "/home/liuzhe/anaconda3/envs/auto310/lib/python3.10/site-packages/torch/_compile.py", line 24, in inner
return torch._dynamo.disable(fn, recursive)(*args, **kwargs)
File "/home/liuzhe/anaconda3/envs/auto310/lib/python3.10/site-packages/torch/_dynamo/eval_frame.py", line 328, in _fn
return fn(*args, **kwargs)
File "/home/liuzhe/anaconda3/envs/auto310/lib/python3.10/site-packages/torch/_dynamo/external_utils.py", line 17, in inner
return fn(*args, **kwargs)
File "/home/liuzhe/anaconda3/envs/auto310/lib/python3.10/site-packages/torch/utils/checkpoint.py", line 451, in checkpoint
return CheckpointFunction.apply(function, preserve, *args)
File "/home/liuzhe/anaconda3/envs/auto310/lib/python3.10/site-packages/torch/autograd/function.py", line 539, in apply
return super().apply(*args, **kwargs) # type: ignore[misc]
File "/home/liuzhe/anaconda3/envs/auto310/lib/python3.10/site-packages/torch/utils/checkpoint.py", line 230, in forward
outputs = run_function(*args)
File "/home/liuzhe/anaconda3/envs/auto310/lib/python3.10/site-packages/torch/nn/modules/module.py", line 1518, in _wrapped_call_impl
return self._call_impl(*args, **kwargs)
File "/home/liuzhe/anaconda3/envs/auto310/lib/python3.10/site-packages/torch/nn/modules/module.py", line 1527, in _call_impl
return forward_call(*args, **kwargs)
File "/home/liuzhe/anaconda3/envs/auto310/lib/python3.10/site-packages/transformers/models/llama/modeling_llama.py", line 672, in forward
hidden_states, self_attn_weights, present_key_value = self.self_attn(
File "/home/liuzhe/anaconda3/envs/auto310/lib/python3.10/site-packages/torch/nn/modules/module.py", line 1518, in _wrapped_call_impl
return self._call_impl(*args, **kwargs)
File "/home/liuzhe/anaconda3/envs/auto310/lib/python3.10/site-packages/torch/nn/modules/module.py", line 1527, in _call_impl
return forward_call(*args, **kwargs)
File "/home/liuzhe/anaconda3/envs/auto310/lib/python3.10/site-packages/transformers/models/llama/modeling_llama.py", line 366, in forward
query_states = self.q_proj(hidden_states)
File "/home/liuzhe/anaconda3/envs/auto310/lib/python3.10/site-packages/torch/nn/modules/module.py", line 1518, in _wrapped_call_impl
return self._call_impl(*args, **kwargs)
File "/home/liuzhe/anaconda3/envs/auto310/lib/python3.10/site-packages/torch/nn/modules/module.py", line 1527, in _call_impl
return forward_call(*args, **kwargs)
File "/home/liuzhe/anaconda3/envs/auto310/lib/python3.10/site-packages/peft/tuners/lora/layer.py", line 308, in forward
result = self._linear(x)
File "/home/liuzhe/anaconda3/envs/auto310/lib/python3.10/site-packages/peft/tuners/lora/layer.py", line 296, in _linear
return F.linear(input, transpose(self.weight, self.fan_in_fan_out), bias=self.bias)
RuntimeError: expected mat1 and mat2 to have the same dtype, but got: float != c10::Half
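From the last frame, F.linear receives one float32 operand and one float16 (c10::Half) operand, i.e. the activations and the q_proj weight end up in different dtypes. Below is a minimal sketch of the two workarounds I am considering; `model` and `batch` are placeholders, not the repo's actual variable names, so please correct me if the intended fix is different:

```python
import torch

# Workaround 1: cast the whole LoRA-wrapped model to one dtype,
# so every weight matches the float32 activations.
def cast_all_float32(model):
    return model.to(torch.float32)

# Workaround 2: keep the float16 weights and let autocast downcast the
# float32 activations at each matmul (with the HF Trainer this would
# correspond to fp16=True in TrainingArguments). A manual forward pass
# under autocast would look like:
def forward_with_autocast(model, batch):
    with torch.autocast(device_type="cuda", dtype=torch.float16):
        return model(**batch)
```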