From 0a93903e8ea1a6a0ae125b2df8e6c32f70b135c5 Mon Sep 17 00:00:00 2001
From: albanD
Date: Wed, 14 Jun 2017 09:23:23 +0100
Subject: [PATCH] move tests to test_nn

---
 test/test_autograd.py | 89 -------------------------------------------
 test/test_nn.py       | 89 +++++++++++++++++++++++++++++++++++++++++++
 2 files changed, 89 insertions(+), 89 deletions(-)

diff --git a/test/test_autograd.py b/test/test_autograd.py
index b929c16f6d7b59..21ece4d1bd79a0 100644
--- a/test/test_autograd.py
+++ b/test/test_autograd.py
@@ -10,7 +10,6 @@
 from itertools import product
 import torch.nn.functional as F
 from torch.autograd import gradcheck
-from torch.autograd.gradcheck import gradgradcheck
 from torch.autograd.function import once_differentiable
 from common import TestCase, run_tests, skipIfNoLapack
 
@@ -35,16 +34,6 @@ def backward_engine(engine):
         Variable._execution_engine = _prev_engine
 
 
-@contextlib.contextmanager
-def use_cudnn(should_use):
-    orig = torch.backends.cudnn.enabled
-    torch.backends.cudnn.enabled = should_use
-    try:
-        yield
-    finally:
-        torch.backends.cudnn.enabled = orig
-
-
 def graph_desc(fn):
     if fn is None:
         return 'None'
@@ -1274,84 +1263,6 @@ def backward(self, grad_output):
         c.backward(torch.ones(c.size()))
         self.assertEqual(x.grad.data, torch.ones(x.size()))
 
-    def run_conv_double_back_test(self, kern, stride, padding, chan_in, chan_out,
-                                  batch_size, inp_size, dilation, no_weight, use_cuda=False):
-        x = Variable(torch.randn(batch_size, chan_in, inp_size, inp_size), requires_grad=True)
-        weight = Variable(torch.randn(chan_out, chan_in, kern, kern), requires_grad=True)
-        bias = Variable(torch.randn(chan_out), requires_grad=True)
-
-        if use_cuda:
-            x = x.cuda()
-            weight = weight.cuda()
-            bias = bias.cuda()
-
-        if no_weight:
-            # Special case because transpose dilated convolution is not implemented
-            def func(x, bias):
-                # We disable cudnn during forward to avoid finite difference imprecision issues
-                with use_cudnn(False):
-                    out = F.conv2d(x, weight, bias, stride, padding, dilation)
-                return out
-            inputs = (x, bias,)
-        else:
-            def func(x, weight, bias):
-                # We disable cudnn during forward to avoid finite difference imprecision issues
-                with use_cudnn(False):
-                    out = F.conv2d(x, weight, bias, stride, padding, dilation)
-                return out
-            inputs = (x, weight, bias,)
-
-        dummy_out = func(*inputs)
-        grad_y = Variable(torch.randn(dummy_out.size()), requires_grad=True)
-        if use_cuda:
-            grad_y = grad_y.cuda()
-
-        return gradgradcheck(func, inputs, (grad_y,))
-
-    def test_conv_double_backward(self):
-        batch_size = 2
-        for kern, inp_size, dilations in [(3, 6, [1, 2]), (3, 7, [1, 2]), (4, 9, [1, 2]), (4, 10, [1, 2])]:
-            for stride, padding, chan_in, chan_out, dilation in product([1, 2], [0, 2], [1], [2, 3], dilations):
-                no_weight = stride == 2
-                result = self.run_conv_double_back_test(kern, stride,
-                                                        padding, chan_in, chan_out,
-                                                        batch_size, inp_size, dilation,
-                                                        no_weight)
-                self.assertTrue(result,
-                                "Conv double backward test failed with parameters:" +
-                                "\nkern: " + str(kern) +
-                                "\nstride: " + str(stride) +
-                                "\npadding: " + str(padding) +
-                                "\nchan_in: " + str(chan_in) +
-                                "\nchan_out: " + str(chan_out) +
-                                "\nbatch_size: " + str(batch_size) +
-                                "\ninp_size: " + str(inp_size) +
-                                "\ndilation: " + str(dilation))
-
-    def test_error_conv_double_backward(self):
-        batch_size = 2
-
-        # Cannot provide ggW when stride is > 1
-        for kern, inp_size, dilations in [(3, 5, [1, 2]), (3, 7, [1, 2]), (4, 6, [1]), (4, 7, [2])]:
-            for stride, padding, chan_in, chan_out, dilation in product([2], [0, 1, 2], [1, 3], [1, 3], dilations):
-                no_weight = False
-                with self.assertRaises(RuntimeError):
-                    self.run_conv_double_back_test(kern, stride,
-                                                   padding, chan_in, chan_out,
-                                                   batch_size, inp_size, dilation,
-                                                   no_weight)
-
-    @unittest.skipIf(not torch.cuda.is_available(), "CUDA unavailable")
-    def test_conv_double_backward_cuda(self):
-        batch_size = 1
-        for kern, inp_size, dilations in [(3, 5, [1, 2]), (4, 10, [1])]:
-            for stride, padding, chan_in, chan_out, dilation in product([1], [2], [2], [3], dilations):
-                no_weight = stride == 2
-                result = self.run_conv_double_back_test(kern, stride,
-                                                        padding, chan_in, chan_out,
-                                                        batch_size, inp_size, dilation,
-                                                        no_weight, use_cuda=True)
-
 
 def index_variable(shape, max_indices):
     if not isinstance(shape, tuple):
diff --git a/test/test_nn.py b/test/test_nn.py
index 1dcc69e9231bbd..1d60e17c24d5a7 100644
--- a/test/test_nn.py
+++ b/test/test_nn.py
@@ -18,6 +18,7 @@
 import torch.legacy.nn as legacy
 from torch.nn.utils import clip_grad_norm
 from torch.autograd import Variable, gradcheck
+from torch.autograd.gradcheck import gradgradcheck
 from torch.nn import Parameter
 from common_nn import NNTestCase, ModuleTest, CriterionTest, TestBase, \
     module_tests, criterion_tests, TEST_CUDA, TEST_MULTIGPU, TEST_CUDNN, \
@@ -28,6 +29,16 @@
     from scipy import stats
 
 
+@contextlib.contextmanager
+def use_cudnn(should_use):
+    orig = torch.backends.cudnn.enabled
+    torch.backends.cudnn.enabled = should_use
+    try:
+        yield
+    finally:
+        torch.backends.cudnn.enabled = orig
+
+
 def default_tensor_type(type):
     type_str = torch.typename(type)
 
@@ -2317,6 +2328,84 @@ def test_bilinear(self):
         self.assertTrue(gradcheck(lambda x1, x2: F.bilinear(x1, x2, module.weight, module.bias),
                                   (input1_1, input2_1)))
 
+    def run_conv_double_back_test(self, kern, stride, padding, chan_in, chan_out,
+                                  batch_size, inp_size, dilation, no_weight, use_cuda=False):
+        x = Variable(torch.randn(batch_size, chan_in, inp_size, inp_size), requires_grad=True)
+        weight = Variable(torch.randn(chan_out, chan_in, kern, kern), requires_grad=True)
+        bias = Variable(torch.randn(chan_out), requires_grad=True)
+
+        if use_cuda:
+            x = x.cuda()
+            weight = weight.cuda()
+            bias = bias.cuda()
+
+        if no_weight:
+            # Special case because transpose dilated convolution is not implemented
+            def func(x, bias):
+                # We disable cudnn during forward to avoid finite difference imprecision issues
+                with use_cudnn(False):
+                    out = F.conv2d(x, weight, bias, stride, padding, dilation)
+                return out
+            inputs = (x, bias,)
+        else:
+            def func(x, weight, bias):
+                # We disable cudnn during forward to avoid finite difference imprecision issues
+                with use_cudnn(False):
+                    out = F.conv2d(x, weight, bias, stride, padding, dilation)
+                return out
+            inputs = (x, weight, bias,)
+
+        dummy_out = func(*inputs)
+        grad_y = Variable(torch.randn(dummy_out.size()), requires_grad=True)
+        if use_cuda:
+            grad_y = grad_y.cuda()
+
+        return gradgradcheck(func, inputs, (grad_y,))
+
+    def test_conv_double_backward(self):
+        batch_size = 2
+        for kern, inp_size, dilations in [(3, 6, [1, 2]), (3, 7, [1, 2]), (4, 9, [1, 2]), (4, 10, [1, 2])]:
+            for stride, padding, chan_in, chan_out, dilation in product([1, 2], [0, 2], [1], [2, 3], dilations):
+                no_weight = stride == 2
+                result = self.run_conv_double_back_test(kern, stride,
+                                                        padding, chan_in, chan_out,
+                                                        batch_size, inp_size, dilation,
+                                                        no_weight)
+                self.assertTrue(result,
+                                "Conv double backward test failed with parameters:" +
+                                "\nkern: " + str(kern) +
+                                "\nstride: " + str(stride) +
+                                "\npadding: " + str(padding) +
+                                "\nchan_in: " + str(chan_in) +
+                                "\nchan_out: " + str(chan_out) +
+                                "\nbatch_size: " + str(batch_size) +
+                                "\ninp_size: " + str(inp_size) +
+                                "\ndilation: " + str(dilation))
+
+    def test_error_conv_double_backward(self):
+        batch_size = 2
+
+        # Cannot provide ggW when stride is > 1
+        for kern, inp_size, dilations in [(3, 5, [1, 2]), (3, 7, [1, 2]), (4, 6, [1]), (4, 7, [2])]:
+            for stride, padding, chan_in, chan_out, dilation in product([2], [0, 1, 2], [1, 3], [1, 3], dilations):
+                no_weight = False
+                with self.assertRaises(RuntimeError):
+                    self.run_conv_double_back_test(kern, stride,
+                                                   padding, chan_in, chan_out,
+                                                   batch_size, inp_size, dilation,
+                                                   no_weight)
+
+    @unittest.skipIf(not TEST_CUDA, "CUDA unavailable")
+    def test_conv_double_backward_cuda(self):
+        batch_size = 1
+        for kern, inp_size, dilations in [(3, 5, [1, 2]), (4, 10, [1])]:
+            for stride, padding, chan_in, chan_out, dilation in product([1], [2], [2], [3], dilations):
+                no_weight = stride == 2
+                result = self.run_conv_double_back_test(kern, stride,
+                                                        padding, chan_in, chan_out,
+                                                        batch_size, inp_size, dilation,
+                                                        no_weight, use_cuda=True)
+
 
 class TestNNInit(TestCase):
     def setUp(self):