Skip to content

Commit

Permalink
Separate tests by torchvision dependency and model size
Browse files Browse the repository at this point in the history
  • Loading branch information
TylerYep committed Jan 11, 2022
1 parent 051fa20 commit 55edaaf
Show file tree
Hide file tree
Showing 3 changed files with 123 additions and 120 deletions.
File renamed without changes.
122 changes: 2 additions & 120 deletions tests/torchinfo_test.py
Original file line number Diff line number Diff line change
@@ -1,13 +1,10 @@
from typing import Any

import pytest
import torch
import torchvision # type: ignore[import]
from torch import nn
from torch.nn.utils import prune

from tests.conftest import verify_output_str
from tests.fixtures.genotype import GenotypeNetwork # type: ignore[attr-defined]
from tests.fixtures.models import (
AutoEncoder,
ContainerModule,
Expand All @@ -31,7 +28,6 @@
SiameseNets,
SingleInputNet,
)
from tests.fixtures.tmva_net import TMVANet # type: ignore[attr-defined]
from torchinfo import ColumnSettings, summary
from torchinfo.enums import Verbosity

Expand Down Expand Up @@ -141,33 +137,6 @@ def test_single_input_batch_dim() -> None:
)


def test_frozen_layers() -> None:
model = torchvision.models.resnet18()
for ind, param in enumerate(model.parameters()):
if ind < 30:
param.requires_grad = False

summary(
model,
input_size=(1, 3, 64, 64),
depth=3,
col_names=("output_size", "num_params", "kernel_size", "mult_adds"),
)


def test_resnet18_depth_consistency() -> None:
model = torchvision.models.resnet18()

for depth in range(1, 3):
summary(model, (1, 3, 64, 64), depth=depth, cache_forward_pass=True)


def test_resnet152() -> None:
model = torchvision.models.resnet152()

summary(model, (1, 3, 224, 224), depth=3)


def test_pruning() -> None:
model = SingleInputNet()
for module in model.modules():
Expand Down Expand Up @@ -287,18 +256,6 @@ def test_recursive() -> None:
assert results.total_mult_adds == 173709312


def test_resnet() -> None:
# According to https://arxiv.org/abs/1605.07146,
# resnet50 has ~25.6 M trainable params.
model = torchvision.models.resnet50()
results = summary(model, input_size=(2, 3, 224, 224))

assert results.total_params == 25557032 # close to 25.6e6
assert results.total_mult_adds == sum(
layer.macs for layer in results.summary_list if layer.is_leaf_layer
)


def test_siamese_net() -> None:
metrics = summary(SiameseNets(), input_size=[(1, 1, 88, 88), (1, 1, 88, 88)])

Expand All @@ -313,16 +270,6 @@ def test_empty_module() -> None:
summary(EmptyModule())


@pytest.mark.skip
def test_fasterrcnn() -> None:
model = torchvision.models.detection.fasterrcnn_resnet50_fpn(
pretrained_backbone=False
)
results = summary(model, input_size=(1, 3, 112, 112))

assert results.total_params == 41755286


def test_device() -> None:
device = torch.device("cuda" if torch.cuda.is_available() else "cpu")
model = SingleInputNet()
Expand Down Expand Up @@ -351,7 +298,7 @@ def test_pack_padded() -> None:
]).long()
# fmt: on

summary(PackPaddedLSTM(), input_data=x, lengths=y, device="cpu")
summary(PackPaddedLSTM(), input_data=x, lengths=y)


def test_module_dict() -> None:
Expand Down Expand Up @@ -417,7 +364,7 @@ def test_namedtuple() -> None:
model = NamedTuple()
input_size = [(2, 1, 28, 28), (2, 1, 28, 28)]
named_tuple = model.Point(*input_size)
summary(model, input_size=input_size, z=named_tuple, device="cpu")
summary(model, input_size=input_size, z=named_tuple)


def test_return_dict() -> None:
Expand All @@ -432,61 +379,11 @@ def test_containers() -> None:
summary(ContainerModule(), input_size=(5,))


def test_eval_order_doesnt_matter() -> None:
input_size = (1, 3, 224, 224)
input_tensor = torch.ones(input_size)

model1 = torchvision.models.resnet18(pretrained=True)
model1.eval()
summary(model1, input_size=input_size, device="cpu")
with torch.inference_mode(): # type: ignore[no-untyped-call]
output1 = model1(input_tensor)

model2 = torchvision.models.resnet18(pretrained=True)
summary(model2, input_size=input_size, device="cpu")
model2.eval()
with torch.inference_mode(): # type: ignore[no-untyped-call]
output2 = model2(input_tensor)

assert torch.all(torch.eq(output1, output2))


def test_autoencoder() -> None:
model = AutoEncoder()
summary(model, input_size=(1, 3, 64, 64))


def test_genotype() -> None:
model = GenotypeNetwork()

x = summary(model, (2, 3, 32, 32), depth=3, cache_forward_pass=True)
y = summary(model, (2, 3, 32, 32), depth=7, cache_forward_pass=True)

assert x.total_params == y.total_params, (x, y)


def test_tmva_net_column_totals() -> None:
for depth in (1, 3, 5):
results = summary(
TMVANet(n_classes=4, n_frames=5),
input_data=[
torch.randn(1, 1, 5, 256, 64),
torch.randn(1, 1, 5, 256, 256),
torch.randn(1, 1, 5, 256, 64),
],
col_names=["output_size", "num_params", "mult_adds"],
depth=depth,
cache_forward_pass=True,
)

assert results.total_params == sum(
layer.num_params for layer in results.summary_list if layer.is_leaf_layer
)
assert results.total_mult_adds == sum(
layer.macs for layer in results.summary_list if layer.is_leaf_layer
)


def test_reusing_activation_layers() -> None:
act = nn.LeakyReLU(inplace=True)
model1 = nn.Sequential(act, nn.Identity(), act, nn.Identity(), act) # type: ignore[no-untyped-call] # noqa
Expand All @@ -511,21 +408,6 @@ def test_mixed_trainable_parameters() -> None:
assert result.total_params == 20


def test_ascii_only() -> None:
result = summary(
torchvision.models.resnet18(),
depth=3,
input_size=(1, 3, 64, 64),
row_settings=["ascii_only"],
)

assert str(result).encode("ascii").decode("ascii")


def test_google() -> None:
summary(torchvision.models.googlenet(), (1, 3, 112, 112), depth=7)


def test_too_many_linear() -> None:
net = ReuseLinear()
summary(net, (2, 10))
Expand Down
121 changes: 121 additions & 0 deletions tests/torchinfo_xl_test.py
Original file line number Diff line number Diff line change
@@ -0,0 +1,121 @@
import pytest
import torch
import torchvision # type: ignore[import]

from tests.fixtures.genotype import GenotypeNetwork # type: ignore[attr-defined]
from tests.fixtures.tmva_net import TMVANet # type: ignore[attr-defined]
from torchinfo import summary


def test_ascii_only() -> None:
    """The rendered summary must contain only ASCII when ascii_only is set."""
    model = torchvision.models.resnet18()
    output = summary(
        model,
        input_size=(1, 3, 64, 64),
        depth=3,
        row_settings=["ascii_only"],
    )

    # encode() raises UnicodeEncodeError if any non-ASCII character slipped in.
    assert str(output).encode("ascii").decode("ascii")


def test_frozen_layers() -> None:
    """summary() should handle a model whose leading parameters are frozen."""
    model = torchvision.models.resnet18()
    # Freeze the first 30 parameter tensors; leave the rest trainable.
    for index, parameter in enumerate(model.parameters()):
        if index < 30:
            parameter.requires_grad = False

    summary(
        model,
        input_size=(1, 3, 64, 64),
        depth=3,
        col_names=("output_size", "num_params", "kernel_size", "mult_adds"),
    )


def test_eval_order_doesnt_matter() -> None:
    """Calling summary() before vs. after .eval() must not change model output."""
    input_size = (1, 3, 224, 224)
    input_tensor = torch.ones(input_size)

    # Variant 1: switch to eval mode first, then summarize.
    model1 = torchvision.models.resnet18(pretrained=True)
    model1.eval()
    summary(model1, input_size=input_size)
    with torch.inference_mode():  # type: ignore[no-untyped-call]
        output1 = model1(input_tensor)

    # Variant 2: summarize first, then switch to eval mode.
    model2 = torchvision.models.resnet18(pretrained=True)
    summary(model2, input_size=input_size)
    model2.eval()
    with torch.inference_mode():  # type: ignore[no-untyped-call]
        output2 = model2(input_tensor)

    # torch.equal checks shape and values elementwise, like all(eq(...)).
    assert torch.equal(output1, output2)


def test_resnet18_depth_consistency() -> None:
    """resnet18 should summarize cleanly at each depth setting."""
    model = torchvision.models.resnet18()

    for depth in (1, 2):  # same depths as range(1, 3)
        summary(model, (1, 3, 64, 64), depth=depth, cache_forward_pass=True)


def test_resnet50() -> None:
    """Total parameter count for resnet50 matches the published figure.

    According to https://arxiv.org/abs/1605.07146,
    resnet50 has ~25.6 M trainable params.
    """
    results = summary(torchvision.models.resnet50(), input_size=(2, 3, 224, 224))

    assert results.total_params == 25557032  # close to 25.6e6
    # The reported mult-adds total must equal the sum over leaf layers.
    leaf_macs = sum(
        layer.macs for layer in results.summary_list if layer.is_leaf_layer
    )
    assert results.total_mult_adds == leaf_macs


def test_resnet152() -> None:
    """Smoke test: summarizing resnet152 at depth 3 should not raise."""
    summary(torchvision.models.resnet152(), (1, 3, 224, 224), depth=3)


@pytest.mark.skip(reason="nondeterministic output")
def test_fasterrcnn() -> None:
    """Check the total parameter count of an untrained Faster R-CNN.

    Currently skipped because the summary output is nondeterministic.
    """
    model = torchvision.models.detection.fasterrcnn_resnet50_fpn(
        pretrained_backbone=False
    )
    stats = summary(model, input_size=(1, 3, 112, 112))

    assert stats.total_params == 41755286


def test_genotype() -> None:
    """Total parameter counts must agree regardless of summary depth."""
    model = GenotypeNetwork()

    shallow = summary(model, (2, 3, 32, 32), depth=3, cache_forward_pass=True)
    deep = summary(model, (2, 3, 32, 32), depth=7, cache_forward_pass=True)

    assert shallow.total_params == deep.total_params, (shallow, deep)


def test_tmva_net_column_totals() -> None:
    """Reported totals must equal the sums over leaf layers at every depth."""
    for depth in (1, 3, 5):
        results = summary(
            TMVANet(n_classes=4, n_frames=5),
            input_data=[
                torch.randn(1, 1, 5, 256, 64),
                torch.randn(1, 1, 5, 256, 256),
                torch.randn(1, 1, 5, 256, 64),
            ],
            col_names=["output_size", "num_params", "mult_adds"],
            depth=depth,
            cache_forward_pass=True,
        )

        # Only leaf layers contribute to the grand totals.
        leaf_layers = [
            layer for layer in results.summary_list if layer.is_leaf_layer
        ]
        assert results.total_params == sum(
            layer.num_params for layer in leaf_layers
        )
        assert results.total_mult_adds == sum(
            layer.macs for layer in leaf_layers
        )


def test_google() -> None:
    """Smoke test: googlenet summarizes at depth 7 without raising."""
    model = torchvision.models.googlenet()
    summary(model, (1, 3, 112, 112), depth=7)

0 comments on commit 55edaaf

Please sign in to comment.