Enforce target version for black.
This should stabilize formatting.
aaugustin authored and julien-c committed Jan 5, 2020
1 parent f01b3e6 commit 0ffc8ea
Showing 19 changed files with 21 additions and 21 deletions.
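Pinning the target matters because, without an explicit --target-version, black infers the target Python versions from each file's syntax, and the trailing comma after an unpacked **kwargs argument (legal only on Python 3.5 and later) may or may not be emitted depending on that inference. With --target-version py35 the choice is deterministic, which is what the repeated **kwargs -> **kwargs, changes below reflect. A minimal before/after sketch, mirroring the from_pretrained call sites touched in this commit:

# What black emits when a pre-3.5 target is in play
# (a trailing comma after **kwargs would be a SyntaxError there):
model = cls.from_pretrained(
    pretrained_model_name_or_path,
    force_download=force_download,
    resume_download=resume_download,
    **kwargs
)

# What black emits with --target-version py35 or newer,
# where the magic trailing comma after **kwargs is allowed:
model = cls.from_pretrained(
    pretrained_model_name_or_path,
    force_download=force_download,
    resume_download=resume_download,
    **kwargs,
)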
2 changes: 1 addition & 1 deletion .circleci/config.yml
@@ -101,7 +101,7 @@ jobs:
# we need a version of isort with https://github.com/timothycrosley/isort/pull/1000
- run: sudo pip install git+git://github.com/timothycrosley/isort.git@e63ae06ec7d70b06df9e528357650281a3d3ec22#egg=isort
- run: sudo pip install .[tf,torch,quality]
- - run: black --check --line-length 119 examples templates tests src utils
+ - run: black --check --line-length 119 --target-version py35 examples templates tests src utils
- run: isort --check-only --recursive examples templates tests src utils
- run: flake8 examples templates tests src utils
check_repository_consistency:
4 changes: 2 additions & 2 deletions Makefile
@@ -3,14 +3,14 @@
# Check that source code meets quality standards

quality:
- black --check --line-length 119 examples templates tests src utils
+ black --check --line-length 119 --target-version py35 examples templates tests src utils
isort --check-only --recursive examples templates tests src utils
flake8 examples templates tests src utils

# Format source code automatically

style:
- black --line-length 119 examples templates tests src utils
+ black --line-length 119 --target-version py35 examples templates tests src utils
isort --recursive examples templates tests src utils

# Run tests for the library
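For reference, the same check can be driven from Python rather than make. A rough sketch, assuming black's Python API exposes format_str, FileMode, and TargetVersion as in the 19.x releases (these names are assumptions about black, not part of this commit):

import black

# A call long enough that black has to explode it across lines (line length 119).
SOURCE = (
    "model = cls.from_pretrained(pretrained_model_name_or_path, "
    "force_download=force_download, resume_download=resume_download, **kwargs)\n"
)

# Pinned target: the trailing comma after **kwargs is emitted deterministically.
pinned = black.FileMode(target_versions={black.TargetVersion.PY35}, line_length=119)
# No explicit target: black infers the target from the snippet's own syntax.
inferred = black.FileMode(line_length=119)

print(black.format_str(SOURCE, mode=pinned))    # call ends with "**kwargs,"
print(black.format_str(SOURCE, mode=inferred))  # may end with "**kwargs" instead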
2 changes: 1 addition & 1 deletion src/transformers/modeling_encoder_decoder.py
@@ -325,7 +325,7 @@ def from_pretrained(cls, pretrained_model_name_or_path, *args, **kwargs):
encoder_pretrained_model_name_or_path=pretrained_model_name_or_path,
decoder_pretrained_model_name_or_path=pretrained_model_name_or_path,
*args,
- **kwargs
+ **kwargs,
)

return model
2 changes: 1 addition & 1 deletion src/transformers/modeling_tf_utils.py
@@ -250,7 +250,7 @@ def from_pretrained(cls, pretrained_model_name_or_path, *model_args, **kwargs):
return_unused_kwargs=True,
force_download=force_download,
resume_download=resume_download,
- **kwargs
+ **kwargs,
)
else:
model_kwargs = kwargs
2 changes: 1 addition & 1 deletion src/transformers/modeling_utils.py
@@ -355,7 +355,7 @@ def from_pretrained(cls, pretrained_model_name_or_path, *model_args, **kwargs):
force_download=force_download,
resume_download=resume_download,
proxies=proxies,
- **kwargs
+ **kwargs,
)
else:
model_kwargs = kwargs
2 changes: 1 addition & 1 deletion src/transformers/pipelines.py
@@ -643,7 +643,7 @@ def __init__(
framework=framework,
args_parser=QuestionAnsweringArgumentHandler(),
device=device,
- **kwargs
+ **kwargs,
)

@staticmethod
2 changes: 1 addition & 1 deletion src/transformers/tokenization_albert.py
@@ -87,7 +87,7 @@ def __init__(
pad_token=pad_token,
cls_token=cls_token,
mask_token=mask_token,
- **kwargs
+ **kwargs,
)

self.max_len_single_sentence = self.max_len - 2 # take into account special tokens
4 changes: 2 additions & 2 deletions src/transformers/tokenization_bert.py
@@ -169,7 +169,7 @@ def __init__(
pad_token=pad_token,
cls_token=cls_token,
mask_token=mask_token,
- **kwargs
+ **kwargs,
)
self.max_len_single_sentence = self.max_len - 2 # take into account special tokens
self.max_len_sentences_pair = self.max_len - 3 # take into account special tokens
@@ -560,7 +560,7 @@ def __init__(
pad_token=pad_token,
cls_token=cls_token,
mask_token=mask_token,
- **kwargs
+ **kwargs,
)

self._tokenizer = tk.Tokenizer(tk.models.WordPiece.from_files(vocab_file, unk_token=unk_token))
2 changes: 1 addition & 1 deletion src/transformers/tokenization_bert_japanese.py
@@ -113,7 +113,7 @@ def __init__(
pad_token=pad_token,
cls_token=cls_token,
mask_token=mask_token,
- **kwargs
+ **kwargs,
)
self.max_len_single_sentence = self.max_len - 2 # take into account special tokens
self.max_len_sentences_pair = self.max_len - 3 # take into account special tokens
2 changes: 1 addition & 1 deletion src/transformers/tokenization_camembert.py
@@ -76,7 +76,7 @@ def __init__(
pad_token=pad_token,
mask_token=mask_token,
additional_special_tokens=additional_special_tokens,
- **kwargs
+ **kwargs,
)
self.max_len_single_sentence = self.max_len - 2 # take into account special tokens
self.max_len_sentences_pair = self.max_len - 4 # take into account special tokens
2 changes: 1 addition & 1 deletion src/transformers/tokenization_roberta.py
@@ -95,7 +95,7 @@ def __init__(
cls_token=cls_token,
pad_token=pad_token,
mask_token=mask_token,
- **kwargs
+ **kwargs,
)
self.max_len_single_sentence = self.max_len - 2 # take into account special tokens
self.max_len_sentences_pair = self.max_len - 4 # take into account special tokens
2 changes: 1 addition & 1 deletion src/transformers/tokenization_t5.py
@@ -96,7 +96,7 @@ def __init__(
unk_token=unk_token,
pad_token=pad_token,
additional_special_tokens=additional_special_tokens,
- **kwargs
+ **kwargs,
)

try:
2 changes: 1 addition & 1 deletion src/transformers/tokenization_utils.py
@@ -817,7 +817,7 @@ def encode(
truncation_strategy=truncation_strategy,
pad_to_max_length=pad_to_max_length,
return_tensors=return_tensors,
- **kwargs
+ **kwargs,
)

return encoded_inputs["input_ids"]
2 changes: 1 addition & 1 deletion src/transformers/tokenization_xlm.py
@@ -586,7 +586,7 @@ def __init__(
cls_token=cls_token,
mask_token=mask_token,
additional_special_tokens=additional_special_tokens,
- **kwargs
+ **kwargs,
)

self.max_len_single_sentence = self.max_len - 2 # take into account special tokens
2 changes: 1 addition & 1 deletion src/transformers/tokenization_xlm_roberta.py
@@ -83,7 +83,7 @@ def __init__(
cls_token=cls_token,
pad_token=pad_token,
mask_token=mask_token,
- **kwargs
+ **kwargs,
)
self.max_len_single_sentence = self.max_len - 2 # take into account special tokens
self.max_len_sentences_pair = self.max_len - 4 # take into account special tokens
2 changes: 1 addition & 1 deletion src/transformers/tokenization_xlnet.py
@@ -86,7 +86,7 @@ def __init__(
cls_token=cls_token,
mask_token=mask_token,
additional_special_tokens=additional_special_tokens,
- **kwargs
+ **kwargs,
)

self.max_len_single_sentence = self.max_len - 2 # take into account special tokens
2 changes: 1 addition & 1 deletion templates/adding_a_new_model/tokenization_xxx.py
@@ -115,7 +115,7 @@ def __init__(
pad_token=pad_token,
cls_token=cls_token,
mask_token=mask_token,
- **kwargs
+ **kwargs,
)
self.max_len_single_sentence = self.max_len - 2 # take into account special tokens
self.max_len_sentences_pair = self.max_len - 3 # take into account special tokens
2 changes: 1 addition & 1 deletion tests/test_tokenization_bert.py
@@ -84,7 +84,7 @@ def test_rust_and_python_full_tokenizers(self):
tokenizer = self.get_tokenizer()
rust_tokenizer = self.get_rust_tokenizer(add_special_tokens=False)

- sequence = u"UNwant\u00E9d,running"
+ sequence = "UNwant\u00E9d,running"

tokens = tokenizer.tokenize(sequence)
rust_tokens = rust_tokenizer.tokenize(sequence)
2 changes: 1 addition & 1 deletion tests/test_tokenization_gpt2.py
@@ -96,7 +96,7 @@ def test_rust_and_python_full_tokenizers(self):
tokenizer = self.get_tokenizer()
rust_tokenizer = self.get_rust_tokenizer(add_special_tokens=False, add_prefix_space=True)

- sequence = u"lower newer"
+ sequence = "lower newer"

# Testing tokenization
tokens = tokenizer.tokenize(sequence, add_prefix_space=True)
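The two test changes above follow from the same flag: once every target version is Python 3, black also drops the redundant u prefix from string literals, because u"..." and "..." denote the same str on Python 3 (on Python 2 the prefix was meaningful, so black leaves it alone when 2.7 could be a target). A quick check, plain Python 3:

# The u prefix is accepted on Python 3 for backwards compatibility but has no effect.
assert u"UNwant\u00E9d,running" == "UNwant\u00E9d,running"
assert u"lower newer" == "lower newer"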