Commit
Make more test models smaller (huggingface#25005)
* Make more test models tiny

* Make more test models tiny

* More models

* More models
sgugger authored Jul 24, 2023
1 parent 8f1f0bf commit 42571f6
Showing 22 changed files with 149 additions and 137 deletions.
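Most of the files below delete the same temporary stub: an @unittest.skip on test_model_is_small, the common test that fails when a tester builds a model big enough to slow CI down. With the configs shrunk, the skips can go. As a rough sketch of the kind of guard involved (the exact parameter budget used by the common test suite is an assumption here, not taken from this page):

    import torch.nn as nn

    def assert_model_is_small(model: nn.Module, max_params: int = 1_000_000) -> None:
        # Tester models exist to exercise code paths, not to be realistic;
        # anything near real-checkpoint size makes the whole suite slow.
        num_params = sum(p.numel() for p in model.parameters())
        if num_params >= max_params:
            raise AssertionError(f"{num_params:,} parameters; expected < {max_params:,}")
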
6 changes: 1 addition & 5 deletions tests/models/ctrl/test_modeling_ctrl.py
@@ -133,7 +133,7 @@ def get_config(self):
n_embd=self.hidden_size,
n_layer=self.num_hidden_layers,
n_head=self.num_attention_heads,
- # intermediate_size=self.intermediate_size,
+ dff=self.intermediate_size,
# hidden_act=self.hidden_act,
# hidden_dropout_prob=self.hidden_dropout_prob,
# attention_probs_dropout_prob=self.attention_probs_dropout_prob,
@@ -243,10 +243,6 @@ def test_ctrl_lm_head_model(self):
config_and_inputs = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_lm_head_model(*config_and_inputs)

- @unittest.skip("Will be fixed soon by reducing the size of the model used for common tests.")
- def test_model_is_small(self):
-     pass

@slow
def test_model_from_pretrained(self):
for model_name in CTRL_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
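The fix here is a naming one: CTRLConfig calls its feed-forward width dff, so the previously commented-out intermediate_size kwarg never shrank anything, leaving dff at its 8192 default. A minimal sketch of the shrunken config (tester values outside this hunk, such as vocab_size, are assumptions):

    from transformers import CTRLConfig, CTRLLMHeadModel

    config = CTRLConfig(
        vocab_size=99,  # assumed tiny-tester value, not visible in this hunk
        n_embd=32,      # hidden_size
        n_layer=2,      # num_hidden_layers
        n_head=4,       # num_attention_heads
        dff=37,         # the intermediate size, under CTRL's own parameter name
    )
    model = CTRLLMHeadModel(config)
    print(sum(p.numel() for p in model.parameters()))  # well under a million
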
2 changes: 1 addition & 1 deletion tests/models/ctrl/test_modeling_tf_ctrl.py
@@ -95,7 +95,7 @@ def prepare_config_and_inputs(self):
n_embd=self.hidden_size,
n_layer=self.num_hidden_layers,
n_head=self.num_attention_heads,
- # intermediate_size=self.intermediate_size,
+ dff=self.intermediate_size,
# hidden_act=self.hidden_act,
# hidden_dropout_prob=self.hidden_dropout_prob,
# attention_probs_dropout_prob=self.attention_probs_dropout_prob,
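The TensorFlow tester gets the same one-line fix; CTRLConfig is shared across frameworks, so the identical dff kwarg drives the TF model's feed-forward width. A sketch under the same assumed tester values as above:

    from transformers import CTRLConfig, TFCTRLModel

    config = CTRLConfig(vocab_size=99, n_embd=32, n_layer=2, n_head=4, dff=37)
    model = TFCTRLModel(config)  # same config class, TF implementation
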
8 changes: 2 additions & 6 deletions tests/models/cvt/test_modeling_cvt.py
@@ -55,8 +55,8 @@ def __init__(
batch_size=13,
image_size=64,
num_channels=3,
- embed_dim=[16, 48, 96],
- num_heads=[1, 3, 6],
+ embed_dim=[16, 32, 48],
+ num_heads=[1, 2, 3],
depth=[1, 2, 10],
patch_sizes=[7, 3, 3],
patch_stride=[4, 2, 2],
@@ -247,10 +247,6 @@ def test_for_image_classification(self):
config_and_inputs = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_image_classification(*config_and_inputs)

- @unittest.skip("Will be fixed soon by reducing the size of the model used for common tests.")
- def test_model_is_small(self):
-     pass

@slow
def test_model_from_pretrained(self):
for model_name in CVT_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
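Both CvT testers shrink the per-stage widths while preserving the attention constraint that each stage's embed_dim divide evenly by its head count: the per-head width stays 16 at every stage, before (16/1, 48/3, 96/6) and after (16/1, 32/2, 48/3). A minimal sketch of that invariant, with the remaining stage settings left at the tester's values:

    from transformers import CvtConfig, CvtModel

    embed_dim = [16, 32, 48]
    num_heads = [1, 2, 3]
    # Per-stage widths have to divide evenly by per-stage head counts.
    assert all(d % h == 0 for d, h in zip(embed_dim, num_heads))

    config = CvtConfig(embed_dim=embed_dim, num_heads=num_heads, depth=[1, 2, 10])
    model = CvtModel(config)  # other settings left at config defaults
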
4 changes: 2 additions & 2 deletions tests/models/cvt/test_modeling_tf_cvt.py
@@ -45,8 +45,8 @@ def __init__(
batch_size=13,
image_size=64,
num_channels=3,
- embed_dim=[16, 48, 96],
- num_heads=[1, 3, 6],
+ embed_dim=[16, 32, 48],
+ num_heads=[1, 2, 3],
depth=[1, 2, 10],
patch_sizes=[7, 3, 3],
patch_stride=[4, 2, 2],
19 changes: 13 additions & 6 deletions tests/models/deta/test_modeling_deta.py
@@ -19,7 +19,7 @@
import math
import unittest

- from transformers import DetaConfig, is_torch_available, is_torchvision_available, is_vision_available
+ from transformers import DetaConfig, ResNetConfig, is_torch_available, is_torchvision_available, is_vision_available
from transformers.file_utils import cached_property
from transformers.testing_utils import require_torchvision, require_vision, slow, torch_device

@@ -49,7 +49,7 @@ def __init__(
batch_size=8,
is_training=True,
use_labels=True,
- hidden_size=256,
+ hidden_size=32,
num_hidden_layers=2,
num_attention_heads=8,
intermediate_size=4,
@@ -118,6 +118,16 @@ def prepare_config_and_inputs(self):
return config, pixel_values, pixel_mask, labels

def get_config(self):
+ resnet_config = ResNetConfig(
+     num_channels=3,
+     embeddings_size=10,
+     hidden_sizes=[10, 20, 30, 40],
+     depths=[1, 1, 2, 1],
+     hidden_act="relu",
+     num_labels=3,
+     out_features=["stage2", "stage3", "stage4"],
+     out_indices=[2, 3, 4],
+ )
return DetaConfig(
d_model=self.hidden_size,
encoder_layers=self.num_hidden_layers,
@@ -134,6 +144,7 @@ def get_config(self):
encoder_n_points=self.encoder_n_points,
decoder_n_points=self.decoder_n_points,
two_stage=self.two_stage,
+ backbone_config=resnet_config,
)

def prepare_config_and_inputs_for_common(self):
@@ -423,10 +434,6 @@ def test_forward_signature(self):
def test_tied_model_weights_key_ignore(self):
pass

- @unittest.skip("Will be fixed soon by reducing the size of the model used for common tests.")
- def test_model_is_small(self):
-     pass

def test_initialization(self):
config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()

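The DETA tester previously inherited DetaConfig's default backbone, a full-sized ResNet far larger than the tests need; passing an explicit tiny ResNetConfig is what actually shrinks the model. A sketch of the same pattern (kwargs mirror the hunk; everything else is left at its defaults):

    from transformers import DetaConfig, ResNetConfig

    resnet_config = ResNetConfig(
        num_channels=3,
        embeddings_size=10,
        hidden_sizes=[10, 20, 30, 40],   # tiny stage widths
        depths=[1, 1, 2, 1],
        out_features=["stage2", "stage3", "stage4"],
        out_indices=[2, 3, 4],           # DETA consumes the last three stages
    )
    config = DetaConfig(d_model=32, backbone_config=resnet_config)
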
8 changes: 4 additions & 4 deletions tests/models/dpt/test_modeling_dpt.py
@@ -62,6 +62,7 @@ def __init__(
attention_probs_dropout_prob=0.1,
initializer_range=0.02,
num_labels=3,
+ neck_hidden_sizes=[16, 16, 32, 32],
is_hybrid=False,
scope=None,
):
@@ -84,6 +85,7 @@ def __init__(
self.num_labels = num_labels
self.scope = scope
self.is_hybrid = is_hybrid
+ self.neck_hidden_sizes = neck_hidden_sizes
# sequence length of DPT = num_patches + 1 (we add 1 for the [CLS] token)
num_patches = (image_size // patch_size) ** 2
self.seq_length = num_patches + 1
@@ -105,6 +107,7 @@ def get_config(self):
patch_size=self.patch_size,
num_channels=self.num_channels,
hidden_size=self.hidden_size,
+ fusion_hidden_size=self.hidden_size,
num_hidden_layers=self.num_hidden_layers,
backbone_out_indices=self.backbone_out_indices,
num_attention_heads=self.num_attention_heads,
@@ -115,6 +118,7 @@ def get_config(self):
is_decoder=False,
initializer_range=self.initializer_range,
is_hybrid=self.is_hybrid,
+ neck_hidden_sizes=self.neck_hidden_sizes,
)

def create_and_check_model(self, config, pixel_values, labels):
@@ -275,10 +279,6 @@ def test_initialization(self):
msg=f"Parameter {name} of model {model_class} seems not properly initialized",
)

- @unittest.skip("Will be fixed soon by reducing the size of the model used for common tests.")
- def test_model_is_small(self):
-     pass

@slow
def test_model_from_pretrained(self):
for model_name in DPT_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
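DPT's decoder has width knobs that do not follow hidden_size: neck_hidden_sizes defaults to [96, 192, 384, 768] and fusion_hidden_size to 256, so a tiny encoder alone was not enough. A sketch with the hunk's overrides (values not shown in the hunk are assumptions):

    from transformers import DPTConfig, DPTForDepthEstimation

    config = DPTConfig(
        hidden_size=32,
        num_hidden_layers=4,                # assumed tester value
        num_attention_heads=4,              # assumed tester value
        backbone_out_indices=[0, 1, 2, 3],  # assumed; the default indexes a 12-layer encoder
        fusion_hidden_size=32,              # tied to hidden_size, as in the hunk
        neck_hidden_sizes=[16, 16, 32, 32],
    )
    model = DPTForDepthEstimation(config)
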
12 changes: 6 additions & 6 deletions tests/models/dpt/test_modeling_dpt_hybrid.py
@@ -62,7 +62,8 @@ def __init__(
attention_probs_dropout_prob=0.1,
initializer_range=0.02,
num_labels=3,
- backbone_featmap_shape=[1, 384, 24, 24],
+ backbone_featmap_shape=[1, 32, 24, 24],
+ neck_hidden_sizes=[16, 16, 32, 32],
is_hybrid=True,
scope=None,
):
@@ -86,6 +87,7 @@ def __init__(
self.backbone_featmap_shape = backbone_featmap_shape
self.scope = scope
self.is_hybrid = is_hybrid
+ self.neck_hidden_sizes = neck_hidden_sizes
# sequence length of DPT = num_patches + 1 (we add 1 for the [CLS] token)
num_patches = (image_size // patch_size) ** 2
self.seq_length = num_patches + 1
@@ -108,7 +110,7 @@ def get_config(self):
"depths": [3, 4, 9],
"out_features": ["stage1", "stage2", "stage3"],
"embedding_dynamic_padding": True,
"hidden_sizes": [96, 192, 384, 768],
"hidden_sizes": [16, 16, 32, 32],
"num_groups": 2,
}

@@ -117,6 +119,7 @@ def get_config(self):
patch_size=self.patch_size,
num_channels=self.num_channels,
hidden_size=self.hidden_size,
+ fusion_hidden_size=self.hidden_size,
num_hidden_layers=self.num_hidden_layers,
backbone_out_indices=self.backbone_out_indices,
num_attention_heads=self.num_attention_heads,
@@ -129,6 +132,7 @@ def get_config(self):
is_hybrid=self.is_hybrid,
backbone_config=backbone_config,
backbone_featmap_shape=self.backbone_featmap_shape,
+ neck_hidden_sizes=self.neck_hidden_sizes,
)

def create_and_check_model(self, config, pixel_values, labels):
@@ -289,10 +293,6 @@ def test_initialization(self):
msg=f"Parameter {name} of model {model_class} seems not properly initialized",
)

- @unittest.skip("Will be fixed soon by reducing the size of the model used for common tests.")
- def test_model_is_small(self):
-     pass

@slow
def test_model_from_pretrained(self):
for model_name in DPT_PRETRAINED_MODEL_ARCHIVE_LIST[1:]:
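In the hybrid tester two values have to move together: the channel entry of backbone_featmap_shape and the width of the deepest BiT stage the model taps. The before/after pairing in this diff (384 channels alongside hidden_sizes [96, 192, 384, 768]; 32 channels alongside [16, 16, 32, 32]) suggests exactly that coupling. A sketch of the consistency check with the hunk's values (reading stage3's width as hidden_sizes[2] is an inference from that pairing):

    backbone_config = {
        "depths": [3, 4, 9],
        "out_features": ["stage1", "stage2", "stage3"],
        "hidden_sizes": [16, 16, 32, 32],
        "num_groups": 2,
        # ...remaining keys unchanged in the tester
    }
    backbone_featmap_shape = [1, 32, 24, 24]
    # stage3 is the deepest tapped stage; hidden_sizes[2] is its width.
    assert backbone_featmap_shape[1] == backbone_config["hidden_sizes"][2]
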
6 changes: 1 addition & 5 deletions tests/models/efficientnet/test_modeling_efficientnet.py
@@ -49,7 +49,7 @@ def __init__(
num_channels=3,
kernel_sizes=[3, 3, 5],
in_channels=[32, 16, 24],
- out_channels=[16, 24, 40],
+ out_channels=[16, 24, 20],
strides=[1, 1, 2],
num_block_repeats=[1, 1, 2],
expand_ratios=[1, 6, 6],
@@ -223,10 +223,6 @@ def test_for_image_classification(self):
config_and_inputs = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_image_classification(*config_and_inputs)

- @unittest.skip("Will be fixed soon by reducing the size of the model used for common tests.")
- def test_model_is_small(self):
-     pass

@slow
def test_model_from_pretrained(self):
for model_name in EFFICIENTNET_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
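Only the final block-group width changes (40 to 20). The tester's hand-off between groups, where each group's out_channels reappears as the next group's in_channels (16 and 24), is untouched, so the shrink leaves the stack consistent. A sketch mirroring the tester's values (model instantiation assumes the remaining EfficientNetConfig defaults, which the tester itself relies on):

    from transformers import EfficientNetConfig, EfficientNetModel

    config = EfficientNetConfig(
        kernel_sizes=[3, 3, 5],
        in_channels=[32, 16, 24],
        out_channels=[16, 24, 20],  # only the final width shrinks
        strides=[1, 1, 2],
        num_block_repeats=[1, 1, 2],
        expand_ratios=[1, 6, 6],
    )
    model = EfficientNetModel(config)
    # the hand-off still holds: out_channels[i] == in_channels[i + 1]
    assert config.out_channels[:-1] == config.in_channels[1:]
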
30 changes: 22 additions & 8 deletions tests/models/encodec/test_modeling_encodec.py
@@ -77,16 +77,25 @@ def __init__(
batch_size=12,
num_channels=2,
is_training=False,
- num_hidden_layers=4,
intermediate_size=40,
+ hidden_size=32,
+ num_filters=8,
+ num_residual_layers=1,
+ upsampling_ratios=[8, 4],
+ num_lstm_layers=1,
+ codebook_size=64,
):
self.parent = parent
self.batch_size = batch_size
self.num_channels = num_channels
self.is_training = is_training

- self.num_hidden_layers = num_hidden_layers
self.intermediate_size = intermediate_size
+ self.hidden_size = hidden_size
+ self.num_filters = num_filters
+ self.num_residual_layers = num_residual_layers
+ self.upsampling_ratios = upsampling_ratios
+ self.num_lstm_layers = num_lstm_layers
+ self.codebook_size = codebook_size

def prepare_config_and_inputs(self):
input_values = floats_tensor([self.batch_size, self.num_channels, self.intermediate_size], scale=1.0)
@@ -99,7 +108,16 @@ def prepare_config_and_inputs_for_common(self):
return config, inputs_dict

def get_config(self):
- return EncodecConfig(audio_channels=self.num_channels, chunk_in_sec=None)
+ return EncodecConfig(
+     audio_channels=self.num_channels,
+     chunk_in_sec=None,
+     hidden_size=self.hidden_size,
+     num_filters=self.num_filters,
+     num_residual_layers=self.num_residual_layers,
+     upsampling_ratios=self.upsampling_ratios,
+     num_lstm_layers=self.num_lstm_layers,
+     codebook_size=self.codebook_size,
+ )

def create_and_check_model_forward(self, config, inputs_dict):
model = EncodecModel(config=config).to(torch_device).eval()
@@ -397,10 +415,6 @@ def test_initialization(self):
msg=f"Parameter {name} of model {model_class} seems not properly initialized",
)

- @unittest.skip("Will be fixed soon by reducing the size of the model used for common tests.")
- def test_model_is_small(self):
-     pass

def test_identity_shortcut(self):
config, inputs_dict = self.model_tester.prepare_config_and_inputs()
config.use_conv_shortcut = False
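The EnCodec tester had been building an essentially default-sized codec, overriding only the channel count; the new kwargs thread the tiny values all the way into EncodecConfig. A sketch (chunking left at its default; only kwargs visible in the hunk are set):

    from transformers import EncodecConfig, EncodecModel

    config = EncodecConfig(
        audio_channels=2,
        hidden_size=32,
        num_filters=8,
        num_residual_layers=1,
        upsampling_ratios=[8, 4],  # their product (32) is the codec's overall hop length
        num_lstm_layers=1,
        codebook_size=64,
    )
    model = EncodecModel(config)
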
4 changes: 0 additions & 4 deletions tests/models/esm/test_modeling_esm.py
@@ -279,10 +279,6 @@ def test_resize_embeddings_untied(self):
def test_resize_tokens_embeddings(self):
pass

- @unittest.skip("Will be fixed soon by reducing the size of the model used for common tests.")
- def test_model_is_small(self):
-     pass


@require_torch
class EsmModelIntegrationTest(TestCasePlus):
32 changes: 25 additions & 7 deletions tests/models/esm/test_modeling_esmfold.py
@@ -100,6 +100,28 @@ def prepare_config_and_inputs(self):
return config, input_ids, input_mask, sequence_labels, token_labels, choice_labels

def get_config(self):
+ esmfold_config = {
+     "trunk": {
+         "num_blocks": 2,
+         "sequence_state_dim": 64,
+         "pairwise_state_dim": 16,
+         "sequence_head_width": 4,
+         "pairwise_head_width": 4,
+         "position_bins": 4,
+         "chunk_size": 16,
+         "structure_module": {
+             "ipa_dim": 16,
+             "num_angles": 7,
+             "num_blocks": 2,
+             "num_heads_ipa": 4,
+             "pairwise_dim": 16,
+             "resnet_dim": 16,
+             "sequence_dim": 48,
+         },
+     },
+     "fp16_esm": False,
+     "lddt_head_hid_dim": 16,
+ }
config = EsmConfig(
vocab_size=33,
hidden_size=self.hidden_size,
@@ -114,7 +136,7 @@ def get_config(self):
type_vocab_size=self.type_vocab_size,
initializer_range=self.initializer_range,
is_folding_model=True,
esmfold_config={"trunk": {"num_blocks": 2}, "fp16_esm": False},
esmfold_config=esmfold_config,
)
return config

@@ -126,8 +148,8 @@ def create_and_check_model(self, config, input_ids, input_mask, sequence_labels,
result = model(input_ids)
result = model(input_ids)

- self.parent.assertEqual(result.positions.shape, (8, self.batch_size, self.seq_length, 14, 3))
- self.parent.assertEqual(result.angles.shape, (8, self.batch_size, self.seq_length, 7, 2))
+ self.parent.assertEqual(result.positions.shape, (2, self.batch_size, self.seq_length, 14, 3))
+ self.parent.assertEqual(result.angles.shape, (2, self.batch_size, self.seq_length, 7, 2))

def prepare_config_and_inputs_for_common(self):
config_and_inputs = self.prepare_config_and_inputs()
@@ -243,10 +265,6 @@ def test_torchscript_simple(self):
def test_multi_gpu_data_parallel_forward(self):
pass

- @unittest.skip("Will be fixed soon by reducing the size of the model used for common tests.")
- def test_model_is_small(self):
-     pass


@require_torch
class EsmModelIntegrationTest(TestCasePlus):
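The paired edits here explain each other: the tester now passes a fully shrunken esmfold_config, and the shape assertions change their leading dimension from 8 to 2 to match, since the folding head stacks one positions/angles tensor per structure-module block (reading the leading dimension as that block count is an inference from these paired edits, not something the commit states). A sketch of the check with the magic number factored out:

    def check_folding_shapes(result, esmfold_config, batch_size, seq_length):
        # One stacked output per structure-module iteration; 14 atom positions
        # and 7 torsion angles per residue are fixed by the protein representation.
        num_blocks = esmfold_config["trunk"]["structure_module"]["num_blocks"]
        assert result.positions.shape == (num_blocks, batch_size, seq_length, 14, 3)
        assert result.angles.shape == (num_blocks, batch_size, seq_length, 7, 2)
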