Add automatic_dynamic_shapes test configuration (pytorch#103598)
Signed-off-by: Edward Z. Yang <[email protected]>

Pull Request resolved: pytorch#103598
Approved by: https://github.com/Skylion007
ezyang authored and pytorchmergebot committed Jun 15, 2023
1 parent 480d20c commit ed3a61a
Showing 6 changed files with 49 additions and 10 deletions.
8 changes: 7 additions & 1 deletion test/dynamo/test_aot_autograd.py
@@ -5,7 +5,12 @@
 
 import torch._dynamo
 import torch._dynamo.test_case
-from torch._dynamo.testing import CompileCounter, expectedFailureDynamic, rand_strided
+from torch._dynamo.testing import (
+    CompileCounter,
+    expectedFailureAutomaticDynamic,
+    expectedFailureDynamic,
+    rand_strided,
+)
 from torch.testing._internal.common_utils import compare_equal_outs_and_grads
 
 
@@ -651,6 +656,7 @@ def guard_fail_fn(failure):
         self.assertExpectedInline(failure_reason, """L['c'] is L['d']""")
 
     @expectedFailureDynamic  # https://github.com/pytorch/pytorch/issues/103539
+    @expectedFailureAutomaticDynamic  # as above
     @patch("torch._functorch.config.debug_assert", True)
     def test_multiple_aot_autograd_calls_dupe_args(self):
         # this is just dealing with the fact that
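
The new decorator pairs with expectedFailureDynamic but is consumed by a different generated suite. It is a pure annotation: it tags the function and returns it unchanged, and nothing is skipped or xfailed until the AutomaticDynamicShapes suite reads the tag back. A minimal sketch of that contract (the test function is illustrative):

from torch._dynamo.testing import expectedFailureAutomaticDynamic

@expectedFailureAutomaticDynamic
def test_example():
    pass

# The decorator only sets a marker attribute; the generated suites in
# test/dynamo/test_dynamic_shapes.py convert it into an expected failure.
assert test_example._expected_failure_automatic_dynamic
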
14 changes: 11 additions & 3 deletions test/dynamo/test_dynamic_shapes.py
@@ -29,18 +29,25 @@
 test_classes = {}
 
 
-def make_dynamic_cls(cls):
+def make_dynamic_cls(cls, automatic_dynamic_shapes=False):
     suffix = "_dynamic_shapes"
+    if automatic_dynamic_shapes:
+        suffix = "_automatic_dynamic_shapes"
 
     cls_prefix = "DynamicShapes"
+    if automatic_dynamic_shapes:
+        cls_prefix = "AutomaticDynamicShapes"
 
     test_class = make_test_cls_with_patches(
         cls,
         cls_prefix,
         suffix,
-        (config, "assume_static_by_default", False),
+        (config, "assume_static_by_default", automatic_dynamic_shapes),
+        (config, "automatic_dynamic_shapes", automatic_dynamic_shapes),
         (config, "specialize_int", False),
-        xfail_prop="_expected_failure_dynamic",
+        xfail_prop="_expected_failure_automatic_dynamic"
+        if automatic_dynamic_shapes
+        else "_expected_failure_dynamic",
     )
 
     test_classes[test_class.__name__] = test_class
@@ -62,6 +69,7 @@ def make_dynamic_cls(cls):
 ]
 for test in tests:
     make_dynamic_cls(test)
+    make_dynamic_cls(test, automatic_dynamic_shapes=True)
 
 if __name__ == "__main__":
     from torch._dynamo.test_case import run_tests
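
make_test_cls_with_patches does the real work here; its implementation is not part of this diff, but the pattern is roughly the following. This is a simplified, hypothetical stand-in, not the actual helper: it clones the TestCase, runs each test under the given (module, attr, value) config patches, and turns tests tagged with xfail_prop into expected failures.

import contextlib
import unittest
from unittest.mock import patch


def make_test_cls_with_patches_sketch(cls, cls_prefix, suffix, *patches, xfail_prop=None):
    # Simplified sketch; the real helper in torch._dynamo.testing also
    # handles renaming and other bookkeeping.
    namespace = {}
    for name in dir(cls):
        if not name.startswith("test_"):
            continue
        fn = getattr(cls, name)

        def wrapped(self, _fn=fn):
            # Run the original test body with every config patch active.
            with contextlib.ExitStack() as stack:
                for module, attr, value in patches:
                    stack.enter_context(patch.object(module, attr, value))
                return _fn(self)

        if xfail_prop is not None and getattr(fn, xfail_prop, False):
            wrapped = unittest.expectedFailure(wrapped)
        namespace[name + suffix] = wrapped
    return type(cls_prefix + cls.__name__, (cls,), namespace)

Under this scheme, every base class now yields both a DynamicShapes* and an AutomaticDynamicShapes* suite; the only differences are the assume_static_by_default / automatic_dynamic_shapes values and which failure tag is honored.
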
9 changes: 8 additions & 1 deletion test/dynamo/test_misc.py
@@ -30,6 +30,7 @@
 from torch._dynamo.source import GetItemSource, LocalSource
 from torch._dynamo.testing import (
     CompileCounter,
+    expectedFailureAutomaticDynamic,
     expectedFailureDynamic,
     requires_numpy_pytorch_interop,
     same,
@@ -1971,7 +1972,9 @@ def fn():
         self.assertTrue(same(res, ref_run1))
 
     # NotImplementedError: SymNodeVariable() is not a constant
+    # https://github.com/pytorch/pytorch/issues/103618
     @expectedFailureDynamic
+    @expectedFailureAutomaticDynamic
     def test_slice_input(self):
         cnts = torch._dynamo.testing.CompileCounter()
 
@@ -4429,7 +4432,10 @@ def fn(x, y):
         res = opt_fn(x, y)
         self.assertTrue(same(ref, res))
         if torch._dynamo.config.assume_static_by_default:
-            self.assertExpectedInline(cnt.frame_count, """5""")
+            if torch._dynamo.config.automatic_dynamic_shapes:
+                self.assertExpectedInline(cnt.frame_count, """2""")
+            else:
+                self.assertExpectedInline(cnt.frame_count, """5""")
         else:
             self.assertExpectedInline(cnt.frame_count, """1""")
 
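
The 5-vs-2 branch above is the heart of what the new configuration changes: with assume_static_by_default alone, each new input size recompiles the frame, whereas automatic_dynamic_shapes recompiles once with the varying dimension marked dynamic and then reuses that graph. A hedged sketch of observing the difference (the function and sizes are illustrative; exact counts can vary by build):

import torch
import torch._dynamo
from torch._dynamo.testing import CompileCounter

torch._dynamo.reset()
cnt = CompileCounter()

@torch._dynamo.optimize(cnt)
def double(x):
    return x * 2

with torch._dynamo.config.patch(
    assume_static_by_default=True, automatic_dynamic_shapes=True
):
    for n in (2, 3, 4, 5, 6):
        double(torch.randn(n))

# n=2 compiles a static graph; the size change at n=3 triggers one
# recompile with that dimension dynamic; n=4..6 reuse it: 2 frames,
# where a static-only configuration would compile 5.
print(cnt.frame_count)
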
@@ -5272,6 +5278,7 @@ def fn(a, b):
         fn(torch.rand(2, 3), torch.rand(2, 3))
         fn(torch.rand(2, 3), (1, 2, 3))
 
+    @expectedFailureAutomaticDynamic
     def test_compile_profiler(self):
         class Model(torch.nn.Module):
             def forward(self, input):
18 changes: 13 additions & 5 deletions test/dynamo/test_repros.py
@@ -992,8 +992,12 @@ def test_longformer_chunk(self):
         self.assertTrue(same(opt_fn(input2), correct2))
 
         if torch._dynamo.config.assume_static_by_default:
-            self.assertExpectedInline(cnt.frame_count, """2""")
-            self.assertExpectedInline(cnt.op_count, """4""")
+            if torch._dynamo.config.automatic_dynamic_shapes:
+                self.assertExpectedInline(cnt.frame_count, """2""")
+                self.assertExpectedInline(cnt.op_count, """14""")
+            else:
+                self.assertExpectedInline(cnt.frame_count, """2""")
+                self.assertExpectedInline(cnt.op_count, """4""")
         else:
             self.assertExpectedInline(cnt.frame_count, """2""")
             self.assertExpectedInline(cnt.op_count, """35""")
@@ -1058,6 +1062,7 @@ def fn(input_lengths: torch.Tensor, new_ones_1):
         res = opt_fn(x, y)
         self.assertTrue(same(ref, res))
 
+    # https://github.com/pytorch/pytorch/issues/103620
     @expectedFailureDynamic
     def test_chunk_reformer_ff(self):
         input = torch.randn([1, 4096, 256])
self.assertEqual(cnt.frame_count, 1)
self.assertEqual(cnt.op_count, 1)

@expectedFailureDynamic
def test_create_rand_mask_from_inputs(self):
args = [
torch.randn([1, 64, 64]),
@@ -1156,8 +1160,12 @@ def test_create_rand_mask_from_inputs(self):
         cnt = torch._dynamo.testing.CompileCounter()
         opt_fn = torch._dynamo.optimize_assert(cnt)(fn)
         self.assertTrue(same(opt_fn(*args), correct))
-        self.assertEqual(cnt.frame_count, 1)
-        self.assertEqual(cnt.op_count, 8)
+        if torch._dynamo.config.assume_static_by_default:
+            self.assertExpectedInline(cnt.frame_count, """1""")
+            self.assertExpectedInline(cnt.op_count, """8""")
+        else:
+            self.assertExpectedInline(cnt.frame_count, """1""")
+            self.assertExpectedInline(cnt.op_count, """11""")
 
     def test_rng_state(self):
         def fn():
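
Note the switch from assertEqual to assertExpectedInline for the counts. That is the expecttest idiom used throughout these suites: the expected value lives in the source as an inline string, and rerunning with EXPECTTEST_ACCEPT=1 rewrites the literal in place when counts legitimately shift, instead of hand-editing every assertion. A small illustrative sketch:

import torch._dynamo.test_case


class ExampleTest(torch._dynamo.test_case.TestCase):
    def test_counts(self):
        op_count = 8  # stand-in for cnt.op_count
        # Compared against the inline string literal; with
        # EXPECTTEST_ACCEPT=1 set in the environment, a mismatch updates
        # the """8""" literal in this file rather than failing the test.
        self.assertExpectedInline(op_count, """8""")
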
4 changes: 4 additions & 0 deletions test/inductor/test_cpu_repro.py
@@ -1574,6 +1574,7 @@ def fn(a, b, c, idx):
 
         with config.patch({"cpp.max_horizontal_fusion_size": 0}):
             metrics.reset()
+            torch._dynamo.reset()
             a = torch.randn(size=(4, 16), dtype=torch.bfloat16)
             b = torch.randn(size=(4, 16), dtype=torch.bfloat16)
             c = torch.randn(size=(4, 16), dtype=torch.bfloat16)
@@ -1585,6 +1586,7 @@ def fn(a, b, c, idx):
 
         with config.patch({"cpp.max_horizontal_fusion_size": 1}):
             metrics.reset()
+            torch._dynamo.reset()
             a = torch.randn(size=(4, 32), dtype=torch.bfloat16)
             b = torch.randn(size=(4, 32), dtype=torch.bfloat16)
             c = torch.randn(size=(4, 32), dtype=torch.bfloat16)
@@ -1596,6 +1598,7 @@ def fn(a, b, c, idx):
 
         with config.patch({"cpp.max_horizontal_fusion_size": 2}):
             metrics.reset()
+            torch._dynamo.reset()
             a = torch.randn(size=(4, 64), dtype=torch.bfloat16)
             b = torch.randn(size=(4, 64), dtype=torch.bfloat16)
             c = torch.randn(size=(4, 64), dtype=torch.bfloat16)
@@ -1608,6 +1611,7 @@ def fn(a, b, c, idx):
 
         with config.patch({"cpp.max_horizontal_fusion_size": 3}):
             metrics.reset()
+            torch._dynamo.reset()
             a = torch.randn(size=(4, 128), dtype=torch.bfloat16)
             b = torch.randn(size=(4, 128), dtype=torch.bfloat16)
             c = torch.randn(size=(4, 128), dtype=torch.bfloat16)
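
The added torch._dynamo.reset() calls are what keep this test stable under the new suites: each config.patch block feeds the compiled function inputs of a new width, and without a reset the automatic-dynamic machinery would notice the size change from the previous block and recompile a dynamic variant, skewing the per-block fusion metrics. Resetting drops all cached frames so each block compiles fresh under its own setting. The idiom, sketched with an illustrative function:

import torch
import torch._dynamo
from torch._inductor import config, metrics


def fn(a, b):
    return a + b, a - b  # stand-in for the test's real kernel


for width, fusion_size in ((16, 0), (32, 1), (64, 2), (128, 3)):
    with config.patch({"cpp.max_horizontal_fusion_size": fusion_size}):
        metrics.reset()
        # Forget previously compiled frames so the new inductor config
        # (and the new input width) starts from a clean compile.
        torch._dynamo.reset()
        compiled = torch.compile(fn)
        compiled(torch.randn(4, width), torch.randn(4, width))
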
6 changes: 6 additions & 0 deletions torch/_dynamo/testing.py
@@ -346,6 +346,12 @@ def expectedFailureDynamic(fn):
     return fn
 
 
+# Controls tests generated in test/dynamo/test_dynamic_shapes.py
+def expectedFailureAutomaticDynamic(fn):
+    fn._expected_failure_automatic_dynamic = True
+    return fn
+
+
 # Controls tests generated in test/inductor/test_torchinductor_codegen_dynamic_shapes.py
 def expectedFailureCodegenDynamic(fn):
     fn._expected_failure_codegen_dynamic = True
