Correct AutoConfig call docstrings (huggingface#10822)
Sebelino authored Mar 22, 2021
1 parent 8fb4671 commit 2c66842
Showing 3 changed files with 27 additions and 27 deletions.
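Context for the change: AutoConfig does not define a from_json_file classmethod (that helper lives on PretrainedConfig and its concrete subclasses such as BertConfig), so the docstring examples would fail with an AttributeError as written. AutoConfig.from_pretrained also accepts a path to a configuration JSON file, which makes it a drop-in replacement. A minimal sketch of the corrected pattern, with illustrative local paths (the ./tf_model/ files are assumptions, not files shipped with the repository):

>>> from transformers import AutoConfig, AutoModel
>>> # from_pretrained accepts a model identifier, a directory, or (as here) a path to a config JSON file
>>> config = AutoConfig.from_pretrained('./tf_model/bert_tf_model_config.json')
>>> # from_tf=True marks the path as a TF checkpoint index to be converted to PyTorch weights on load (slower)
>>> model = AutoModel.from_pretrained('./tf_model/bert_tf_checkpoint.ckpt.index', from_tf=True, config=config)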
8 changes: 4 additions & 4 deletions hubconf.py
@@ -78,7 +78,7 @@ def model(*args, **kwargs):
model = torch.hub.load('huggingface/transformers', 'model', 'bert-base-uncased', output_attentions=True) # Update configuration during loading
assert model.config.output_attentions == True
# Loading from a TF checkpoint file instead of a PyTorch model (slower)
-config = AutoConfig.from_json_file('./tf_model/bert_tf_model_config.json')
+config = AutoConfig.from_pretrained('./tf_model/bert_tf_model_config.json')
model = torch.hub.load('huggingface/transformers', 'model', './tf_model/bert_tf_checkpoint.ckpt.index', from_tf=True, config=config)
"""
@@ -97,7 +97,7 @@ def modelWithLMHead(*args, **kwargs):
model = torch.hub.load('huggingface/transformers', 'modelWithLMHead', 'bert-base-uncased', output_attentions=True) # Update configuration during loading
assert model.config.output_attentions == True
# Loading from a TF checkpoint file instead of a PyTorch model (slower)
-config = AutoConfig.from_json_file('./tf_model/bert_tf_model_config.json')
+config = AutoConfig.from_pretrained('./tf_model/bert_tf_model_config.json')
model = torch.hub.load('huggingface/transformers', 'modelWithLMHead', './tf_model/bert_tf_checkpoint.ckpt.index', from_tf=True, config=config)
"""
@@ -115,7 +115,7 @@ def modelForSequenceClassification(*args, **kwargs):
model = torch.hub.load('huggingface/transformers', 'modelForSequenceClassification', 'bert-base-uncased', output_attentions=True) # Update configuration during loading
assert model.config.output_attentions == True
# Loading from a TF checkpoint file instead of a PyTorch model (slower)
-config = AutoConfig.from_json_file('./tf_model/bert_tf_model_config.json')
+config = AutoConfig.from_pretrained('./tf_model/bert_tf_model_config.json')
model = torch.hub.load('huggingface/transformers', 'modelForSequenceClassification', './tf_model/bert_tf_checkpoint.ckpt.index', from_tf=True, config=config)
"""
@@ -134,7 +134,7 @@ def modelForQuestionAnswering(*args, **kwargs):
model = torch.hub.load('huggingface/transformers', 'modelForQuestionAnswering', 'bert-base-uncased', output_attentions=True) # Update configuration during loading
assert model.config.output_attentions == True
# Loading from a TF checkpoint file instead of a PyTorch model (slower)
-config = AutoConfig.from_json_file('./tf_model/bert_tf_model_config.json')
+config = AutoConfig.from_pretrained('./tf_model/bert_tf_model_config.json')
model = torch.hub.load('huggingface/transformers', 'modelForQuestionAnswering', './tf_model/bert_tf_checkpoint.ckpt.index', from_tf=True, config=config)
"""
24 changes: 12 additions & 12 deletions src/transformers/models/auto/modeling_auto.py
@@ -801,7 +801,7 @@ def from_pretrained(cls, pretrained_model_name_or_path, *model_args, **kwargs):
True
>>> # Loading from a TF checkpoint file instead of a PyTorch model (slower)
->>> config = AutoConfig.from_json_file('./tf_model/bert_tf_model_config.json')
+>>> config = AutoConfig.from_pretrained('./tf_model/bert_tf_model_config.json')
>>> model = AutoModel.from_pretrained('./tf_model/bert_tf_checkpoint.ckpt.index', from_tf=True, config=config)
"""
config = kwargs.pop("config", None)
@@ -895,7 +895,7 @@ def from_pretrained(cls, pretrained_model_name_or_path, *model_args, **kwargs):
True
>>> # Loading from a TF checkpoint file instead of a PyTorch model (slower)
->>> config = AutoConfig.from_json_file('./tf_model/bert_tf_model_config.json')
+>>> config = AutoConfig.from_pretrained('./tf_model/bert_tf_model_config.json')
>>> model = AutoModelForPreTraining.from_pretrained('./tf_model/bert_tf_checkpoint.ckpt.index', from_tf=True, config=config)
"""
config = kwargs.pop("config", None)
@@ -1000,7 +1000,7 @@ def from_pretrained(cls, pretrained_model_name_or_path, *model_args, **kwargs):
True
>>> # Loading from a TF checkpoint file instead of a PyTorch model (slower)
->>> config = AutoConfig.from_json_file('./tf_model/bert_tf_model_config.json')
+>>> config = AutoConfig.from_pretrained('./tf_model/bert_tf_model_config.json')
>>> model = AutoModelWithLMHead.from_pretrained('./tf_model/bert_tf_checkpoint.ckpt.index', from_tf=True, config=config)
"""
warnings.warn(
@@ -1099,7 +1099,7 @@ def from_pretrained(cls, pretrained_model_name_or_path, *model_args, **kwargs):
True
>>> # Loading from a TF checkpoint file instead of a PyTorch model (slower)
->>> config = AutoConfig.from_json_file('./tf_model/gpt2_tf_model_config.json')
+>>> config = AutoConfig.from_pretrained('./tf_model/gpt2_tf_model_config.json')
>>> model = AutoModelForCausalLM.from_pretrained('./tf_model/gpt2_tf_checkpoint.ckpt.index', from_tf=True, config=config)
"""
config = kwargs.pop("config", None)
@@ -1192,7 +1192,7 @@ def from_pretrained(cls, pretrained_model_name_or_path, *model_args, **kwargs):
True
>>> # Loading from a TF checkpoint file instead of a PyTorch model (slower)
->>> config = AutoConfig.from_json_file('./tf_model/bert_tf_model_config.json')
+>>> config = AutoConfig.from_pretrained('./tf_model/bert_tf_model_config.json')
>>> model = AutoModelForMaskedLM.from_pretrained('./tf_model/bert_tf_checkpoint.ckpt.index', from_tf=True, config=config)
"""
config = kwargs.pop("config", None)
@@ -1288,7 +1288,7 @@ def from_pretrained(cls, pretrained_model_name_or_path, *model_args, **kwargs):
True
>>> # Loading from a TF checkpoint file instead of a PyTorch model (slower)
->>> config = AutoConfig.from_json_file('./tf_model/t5_tf_model_config.json')
+>>> config = AutoConfig.from_pretrained('./tf_model/t5_tf_model_config.json')
>>> model = AutoModelForSeq2SeqLM.from_pretrained('./tf_model/t5_tf_checkpoint.ckpt.index', from_tf=True, config=config)
"""
config = kwargs.pop("config", None)
@@ -1386,7 +1386,7 @@ def from_pretrained(cls, pretrained_model_name_or_path, *model_args, **kwargs):
True
>>> # Loading from a TF checkpoint file instead of a PyTorch model (slower)
->>> config = AutoConfig.from_json_file('./tf_model/bert_tf_model_config.json')
+>>> config = AutoConfig.from_pretrained('./tf_model/bert_tf_model_config.json')
>>> model = AutoModelForSequenceClassification.from_pretrained('./tf_model/bert_tf_checkpoint.ckpt.index', from_tf=True, config=config)
"""
config = kwargs.pop("config", None)
@@ -1483,7 +1483,7 @@ def from_pretrained(cls, pretrained_model_name_or_path, *model_args, **kwargs):
True
>>> # Loading from a TF checkpoint file instead of a PyTorch model (slower)
->>> config = AutoConfig.from_json_file('./tf_model/bert_tf_model_config.json')
+>>> config = AutoConfig.from_pretrained('./tf_model/bert_tf_model_config.json')
>>> model = AutoModelForQuestionAnswering.from_pretrained('./tf_model/bert_tf_checkpoint.ckpt.index', from_tf=True, config=config)
"""
config = kwargs.pop("config", None)
@@ -1583,7 +1583,7 @@ def from_pretrained(cls, pretrained_model_name_or_path, *model_args, **kwargs):
True
>>> # Loading from a TF checkpoint file instead of a PyTorch model (slower)
->>> config = AutoConfig.from_json_file('./tf_model/tapas_tf_checkpoint.json')
+>>> config = AutoConfig.from_pretrained('./tf_model/tapas_tf_checkpoint.json')
>>> model = AutoModelForQuestionAnswering.from_pretrained('./tf_model/tapas_tf_checkpoint.ckpt.index', from_tf=True, config=config)
"""
config = kwargs.pop("config", None)
@@ -1681,7 +1681,7 @@ def from_pretrained(cls, pretrained_model_name_or_path, *model_args, **kwargs):
True
>>> # Loading from a TF checkpoint file instead of a PyTorch model (slower)
->>> config = AutoConfig.from_json_file('./tf_model/bert_tf_model_config.json')
+>>> config = AutoConfig.from_pretrained('./tf_model/bert_tf_model_config.json')
>>> model = AutoModelForTokenClassification.from_pretrained('./tf_model/bert_tf_checkpoint.ckpt.index', from_tf=True, config=config)
"""
config = kwargs.pop("config", None)
@@ -1781,7 +1781,7 @@ def from_pretrained(cls, pretrained_model_name_or_path, *model_args, **kwargs):
True
>>> # Loading from a TF checkpoint file instead of a PyTorch model (slower)
->>> config = AutoConfig.from_json_file('./tf_model/bert_tf_model_config.json')
+>>> config = AutoConfig.from_pretrained('./tf_model/bert_tf_model_config.json')
>>> model = AutoModelForMultipleChoice.from_pretrained('./tf_model/bert_tf_checkpoint.ckpt.index', from_tf=True, config=config)
"""
config = kwargs.pop("config", None)
@@ -1881,7 +1881,7 @@ def from_pretrained(cls, pretrained_model_name_or_path, *model_args, **kwargs):
True
>>> # Loading from a TF checkpoint file instead of a PyTorch model (slower)
->>> config = AutoConfig.from_json_file('./tf_model/bert_tf_model_config.json')
+>>> config = AutoConfig.from_pretrained('./tf_model/bert_tf_model_config.json')
>>> model = AutoModelForNextSentencePrediction.from_pretrained('./tf_model/bert_tf_checkpoint.ckpt.index', from_tf=True, config=config)
"""
config = kwargs.pop("config", None)
22 changes: 11 additions & 11 deletions src/transformers/models/auto/modeling_tf_auto.py
@@ -605,7 +605,7 @@ def from_pretrained(cls, pretrained_model_name_or_path, *model_args, **kwargs):
True
>>> # Loading from a PyTorch checkpoint file instead of a TensorFlow model (slower)
->>> config = AutoConfig.from_json_file('./pt_model/bert_pt_model_config.json')
+>>> config = AutoConfig.from_pretrained('./pt_model/bert_pt_model_config.json')
>>> model = TFAutoModel.from_pretrained('./pt_model/bert_pytorch_model.bin', from_pt=True, config=config)
"""
config = kwargs.pop("config", None)
@@ -699,7 +699,7 @@ def from_pretrained(cls, pretrained_model_name_or_path, *model_args, **kwargs):
True
>>> # Loading from a PyTorch checkpoint file instead of a TensorFlow model (slower)
->>> config = AutoConfig.from_json_file('./pt_model/bert_pt_model_config.json')
+>>> config = AutoConfig.from_pretrained('./pt_model/bert_pt_model_config.json')
>>> model = TFAutoModelForPreTraining.from_pretrained('./pt_model/bert_pytorch_model.bin', from_pt=True, config=config)
"""
config = kwargs.pop("config", None)
@@ -804,7 +804,7 @@ def from_pretrained(cls, pretrained_model_name_or_path, *model_args, **kwargs):
True
>>> # Loading from a PyTorch checkpoint file instead of a TensorFlow model (slower)
->>> config = AutoConfig.from_json_file('./pt_model/bert_pt_model_config.json')
+>>> config = AutoConfig.from_pretrained('./pt_model/bert_pt_model_config.json')
>>> model = TFAutoModelWithLMHead.from_pretrained('./pt_model/bert_pytorch_model.bin', from_pt=True, config=config)
"""
warnings.warn(
@@ -904,7 +904,7 @@ def from_pretrained(cls, pretrained_model_name_or_path, *model_args, **kwargs):
True
>>> # Loading from a PyTorch checkpoint file instead of a TensorFlow model (slower)
->>> config = AutoConfig.from_json_file('./pt_model/gpt2_pt_model_config.json')
+>>> config = AutoConfig.from_pretrained('./pt_model/gpt2_pt_model_config.json')
>>> model = TFAutoModelForCausalLM.from_pretrained('./pt_model/gpt2_pytorch_model.bin', from_pt=True, config=config)
"""
config = kwargs.pop("config", None)
@@ -997,7 +997,7 @@ def from_pretrained(cls, pretrained_model_name_or_path, *model_args, **kwargs):
True
>>> # Loading from a PyTorch checkpoint file instead of a TensorFlow model (slower)
->>> config = AutoConfig.from_json_file('./pt_model/bert_pt_model_config.json')
+>>> config = AutoConfig.from_pretrained('./pt_model/bert_pt_model_config.json')
>>> model = TFAutoModelForMaskedLM.from_pretrained('./pt_model/bert_pytorch_model.bin', from_pt=True, config=config)
"""
config = kwargs.pop("config", None)
@@ -1093,7 +1093,7 @@ def from_pretrained(cls, pretrained_model_name_or_path, *model_args, **kwargs):
True
>>> # Loading from a PyTorch checkpoint file instead of a TensorFlow model (slower)
->>> config = AutoConfig.from_json_file('./pt_model/t5_pt_model_config.json')
+>>> config = AutoConfig.from_pretrained('./pt_model/t5_pt_model_config.json')
>>> model = TFAutoModelForSeq2SeqLM.from_pretrained('./pt_model/t5_pytorch_model.bin', from_pt=True, config=config)
"""
config = kwargs.pop("config", None)
@@ -1191,7 +1191,7 @@ def from_pretrained(cls, pretrained_model_name_or_path, *model_args, **kwargs):
True
>>> # Loading from a PyTorch checkpoint file instead of a TensorFlow model (slower)
->>> config = AutoConfig.from_json_file('./pt_model/bert_pt_model_config.json')
+>>> config = AutoConfig.from_pretrained('./pt_model/bert_pt_model_config.json')
>>> model = TFAutoModelForSequenceClassification.from_pretrained('./pt_model/bert_pytorch_model.bin', from_pt=True, config=config)
"""
config = kwargs.pop("config", None)
@@ -1288,7 +1288,7 @@ def from_pretrained(cls, pretrained_model_name_or_path, *model_args, **kwargs):
True
>>> # Loading from a PyTorch checkpoint file instead of a TensorFlow model (slower)
->>> config = AutoConfig.from_json_file('./pt_model/bert_pt_model_config.json')
+>>> config = AutoConfig.from_pretrained('./pt_model/bert_pt_model_config.json')
>>> model = TFAutoModelForQuestionAnswering.from_pretrained('./pt_model/bert_pytorch_model.bin', from_pt=True, config=config)
"""
config = kwargs.pop("config", None)
@@ -1384,7 +1384,7 @@ def from_pretrained(cls, pretrained_model_name_or_path, *model_args, **kwargs):
True
>>> # Loading from a PyTorch checkpoint file instead of a TensorFlow model (slower)
->>> config = AutoConfig.from_json_file('./pt_model/bert_pt_model_config.json')
+>>> config = AutoConfig.from_pretrained('./pt_model/bert_pt_model_config.json')
>>> model = TFAutoModelForTokenClassification.from_pretrained('./pt_model/bert_pytorch_model.bin', from_pt=True, config=config)
"""
config = kwargs.pop("config", None)
@@ -1482,7 +1482,7 @@ def from_pretrained(cls, pretrained_model_name_or_path, *model_args, **kwargs):
True
>>> # Loading from a PyTorch checkpoint file instead of a TensorFlow model (slower)
->>> config = AutoConfig.from_json_file('./pt_model/bert_pt_model_config.json')
+>>> config = AutoConfig.from_pretrained('./pt_model/bert_pt_model_config.json')
>>> model = TFAutoModelForMultipleChoice.from_pretrained('./pt_model/bert_pytorch_model.bin', from_pt=True, config=config)
"""
config = kwargs.pop("config", None)
@@ -1580,7 +1580,7 @@ def from_pretrained(cls, pretrained_model_name_or_path, *model_args, **kwargs):
True
>>> # Loading from a PyTorch checkpoint file instead of a TensorFlow model (slower)
->>> config = AutoConfig.from_json_file('./pt_model/bert_pt_model_config.json')
+>>> config = AutoConfig.from_pretrained('./pt_model/bert_pt_model_config.json')
>>> model = TFAutoModelForNextSentencePrediction.from_pretrained('./pt_model/bert_pytorch_model.bin', from_pt=True, config=config)
"""
config = kwargs.pop("config", None)
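The TensorFlow auto classes get the mirror-image correction: the config is still built with AutoConfig.from_pretrained, and PyTorch weights are converted with from_pt=True. A sketch of that direction, again with illustrative local paths:

>>> from transformers import AutoConfig, TFAutoModel
>>> config = AutoConfig.from_pretrained('./pt_model/bert_pt_model_config.json')
>>> # from_pt=True converts the PyTorch state dict to TensorFlow weights on load (slower)
>>> model = TFAutoModel.from_pretrained('./pt_model/bert_pytorch_model.bin', from_pt=True, config=config)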
