
Commit 0a93903

move tests to test_nn
albanD authored and soumith committed Jun 17, 2017
1 parent bcac55d commit 0a93903
Showing 2 changed files with 89 additions and 89 deletions.
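
The moved tests exercise gradgradcheck, which numerically verifies second derivatives (double backward) of F.conv2d. As background, here is a minimal standalone sketch of that check. It is not part of this commit: the tensor shapes and the fixed stride/padding/dilation values are illustrative assumptions, while the real parameter grid lives in the tests in the diff below.

import torch
import torch.nn.functional as F
from torch.autograd import Variable
from torch.autograd.gradcheck import gradgradcheck

# Illustrative shapes: batch 2, 1 input channel, 2 output channels, 3x3 kernel.
x = Variable(torch.randn(2, 1, 6, 6), requires_grad=True)
weight = Variable(torch.randn(2, 1, 3, 3), requires_grad=True)
bias = Variable(torch.randn(2), requires_grad=True)

def func(x, weight, bias):
    # stride=1, padding=0, dilation=1: the case where double backward is supported
    return F.conv2d(x, weight, bias, 1, 0, 1)

out = func(x, weight, bias)
grad_y = Variable(torch.randn(out.size()), requires_grad=True)

# gradgradcheck differentiates the backward pass a second time and compares the
# result against finite differences; it returns True when they agree.
print(gradgradcheck(func, (x, weight, bias), (grad_y,)))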
test/test_autograd.py: 89 changes (0 additions, 89 deletions)
@@ -10,7 +10,6 @@
from itertools import product
import torch.nn.functional as F
from torch.autograd import gradcheck
from torch.autograd.gradcheck import gradgradcheck
from torch.autograd.function import once_differentiable

from common import TestCase, run_tests, skipIfNoLapack
@@ -35,16 +34,6 @@ def backward_engine(engine):
        Variable._execution_engine = _prev_engine


@contextlib.contextmanager
def use_cudnn(should_use):
    orig = torch.backends.cudnn.enabled
    torch.backends.cudnn.enabled = should_use
    try:
        yield
    finally:
        torch.backends.cudnn.enabled = orig


def graph_desc(fn):
    if fn is None:
        return 'None'
@@ -1274,84 +1263,6 @@ def backward(self, grad_output):
        c.backward(torch.ones(c.size()))
        self.assertEqual(x.grad.data, torch.ones(x.size()))

    def run_conv_double_back_test(self, kern, stride, padding, chan_in, chan_out,
                                  batch_size, inp_size, dilation, no_weight, use_cuda=False):
        x = Variable(torch.randn(batch_size, chan_in, inp_size, inp_size), requires_grad=True)
        weight = Variable(torch.randn(chan_out, chan_in, kern, kern), requires_grad=True)
        bias = Variable(torch.randn(chan_out), requires_grad=True)

        if use_cuda:
            x = x.cuda()
            weight = weight.cuda()
            bias = bias.cuda()

        if no_weight:
            # Special case because transpose dilated convolution is not implemented
            def func(x, bias):
                # We disable cudnn during forward to avoid finite difference imprecision issues
                with use_cudnn(False):
                    out = F.conv2d(x, weight, bias, stride, padding, dilation)
                return out
            inputs = (x, bias,)
        else:
            def func(x, weight, bias):
                # We disable cudnn during forward to avoid finite difference imprecision issues
                with use_cudnn(False):
                    out = F.conv2d(x, weight, bias, stride, padding, dilation)
                return out
            inputs = (x, weight, bias,)

        dummy_out = func(*inputs)
        grad_y = Variable(torch.randn(dummy_out.size()), requires_grad=True)
        if use_cuda:
            grad_y = grad_y.cuda()

        return gradgradcheck(func, inputs, (grad_y,))

    def test_conv_double_backward(self):
        batch_size = 2
        for kern, inp_size, dilations in [(3, 6, [1, 2]), (3, 7, [1, 2]), (4, 9, [1, 2]), (4, 10, [1, 2])]:
            for stride, padding, chan_in, chan_out, dilation in product([1, 2], [0, 2], [1], [2, 3], dilations):
                no_weight = stride == 2
                result = self.run_conv_double_back_test(kern, stride,
                                                        padding, chan_in, chan_out,
                                                        batch_size, inp_size, dilation,
                                                        no_weight)
                self.assertTrue(result,
                                "Conv double backward test failed with parameters:" +
                                "\nkern: " + str(kern) +
                                "\nstride: " + str(stride) +
                                "\npadding: " + str(padding) +
                                "\nchan_in: " + str(chan_in) +
                                "\nchan_out: " + str(chan_out) +
                                "\nbatch_size: " + str(batch_size) +
                                "\ninp_size: " + str(inp_size) +
                                "\ndilation: " + str(dilation))

    def test_error_conv_double_backward(self):
        batch_size = 2

        # Cannot provide ggW when stride is > 1
        for kern, inp_size, dilations in [(3, 5, [1, 2]), (3, 7, [1, 2]), (4, 6, [1]), (4, 7, [2])]:
            for stride, padding, chan_in, chan_out, dilation in product([2], [0, 1, 2], [1, 3], [1, 3], dilations):
                no_weight = False
                with self.assertRaises(RuntimeError):
                    self.run_conv_double_back_test(kern, stride,
                                                   padding, chan_in, chan_out,
                                                   batch_size, inp_size, dilation,
                                                   no_weight)

    @unittest.skipIf(not torch.cuda.is_available(), "CUDA unavailable")
    def test_conv_double_backward_cuda(self):
        batch_size = 1
        for kern, inp_size, dilations in [(3, 5, [1, 2]), (4, 10, [1])]:
            for stride, padding, chan_in, chan_out, dilation in product([1], [2], [2], [3], dilations):
                no_weight = stride == 2
                result = self.run_conv_double_back_test(kern, stride,
                                                        padding, chan_in, chan_out,
                                                        batch_size, inp_size, dilation,
                                                        no_weight, use_cuda=True)


def index_variable(shape, max_indices):
    if not isinstance(shape, tuple):
test/test_nn.py: 89 changes (89 additions, 0 deletions)
@@ -18,6 +18,7 @@
import torch.legacy.nn as legacy
from torch.nn.utils import clip_grad_norm
from torch.autograd import Variable, gradcheck
from torch.autograd.gradcheck import gradgradcheck
from torch.nn import Parameter
from common_nn import NNTestCase, ModuleTest, CriterionTest, TestBase, \
    module_tests, criterion_tests, TEST_CUDA, TEST_MULTIGPU, TEST_CUDNN, \
@@ -28,6 +29,16 @@
    from scipy import stats


@contextlib.contextmanager
def use_cudnn(should_use):
    orig = torch.backends.cudnn.enabled
    torch.backends.cudnn.enabled = should_use
    try:
        yield
    finally:
        torch.backends.cudnn.enabled = orig


def default_tensor_type(type):
    type_str = torch.typename(type)

@@ -2317,6 +2328,84 @@ def test_bilinear(self):

        self.assertTrue(gradcheck(lambda x1, x2: F.bilinear(x1, x2, module.weight, module.bias), (input1_1, input2_1)))

    def run_conv_double_back_test(self, kern, stride, padding, chan_in, chan_out,
                                  batch_size, inp_size, dilation, no_weight, use_cuda=False):
        x = Variable(torch.randn(batch_size, chan_in, inp_size, inp_size), requires_grad=True)
        weight = Variable(torch.randn(chan_out, chan_in, kern, kern), requires_grad=True)
        bias = Variable(torch.randn(chan_out), requires_grad=True)

        if use_cuda:
            x = x.cuda()
            weight = weight.cuda()
            bias = bias.cuda()

        if no_weight:
            # Special case because transpose dilated convolution is not implemented
            def func(x, bias):
                # We disable cudnn during forward to avoid finite difference imprecision issues
                with use_cudnn(False):
                    out = F.conv2d(x, weight, bias, stride, padding, dilation)
                return out
            inputs = (x, bias,)
        else:
            def func(x, weight, bias):
                # We disable cudnn during forward to avoid finite difference imprecision issues
                with use_cudnn(False):
                    out = F.conv2d(x, weight, bias, stride, padding, dilation)
                return out
            inputs = (x, weight, bias,)

        dummy_out = func(*inputs)
        grad_y = Variable(torch.randn(dummy_out.size()), requires_grad=True)
        if use_cuda:
            grad_y = grad_y.cuda()

        return gradgradcheck(func, inputs, (grad_y,))

    def test_conv_double_backward(self):
        batch_size = 2
        for kern, inp_size, dilations in [(3, 6, [1, 2]), (3, 7, [1, 2]), (4, 9, [1, 2]), (4, 10, [1, 2])]:
            for stride, padding, chan_in, chan_out, dilation in product([1, 2], [0, 2], [1], [2, 3], dilations):
                no_weight = stride == 2
                result = self.run_conv_double_back_test(kern, stride,
                                                        padding, chan_in, chan_out,
                                                        batch_size, inp_size, dilation,
                                                        no_weight)
                self.assertTrue(result,
                                "Conv double backward test failed with parameters:" +
                                "\nkern: " + str(kern) +
                                "\nstride: " + str(stride) +
                                "\npadding: " + str(padding) +
                                "\nchan_in: " + str(chan_in) +
                                "\nchan_out: " + str(chan_out) +
                                "\nbatch_size: " + str(batch_size) +
                                "\ninp_size: " + str(inp_size) +
                                "\ndilation: " + str(dilation))

    def test_error_conv_double_backward(self):
        batch_size = 2

        # Cannot provide ggW when stride is > 1
        for kern, inp_size, dilations in [(3, 5, [1, 2]), (3, 7, [1, 2]), (4, 6, [1]), (4, 7, [2])]:
            for stride, padding, chan_in, chan_out, dilation in product([2], [0, 1, 2], [1, 3], [1, 3], dilations):
                no_weight = False
                with self.assertRaises(RuntimeError):
                    self.run_conv_double_back_test(kern, stride,
                                                   padding, chan_in, chan_out,
                                                   batch_size, inp_size, dilation,
                                                   no_weight)

    @unittest.skipIf(not TEST_CUDA, "CUDA unavailable")
    def test_conv_double_backward_cuda(self):
        batch_size = 1
        for kern, inp_size, dilations in [(3, 5, [1, 2]), (4, 10, [1])]:
            for stride, padding, chan_in, chan_out, dilation in product([1], [2], [2], [3], dilations):
                no_weight = stride == 2
                result = self.run_conv_double_back_test(kern, stride,
                                                        padding, chan_in, chan_out,
                                                        batch_size, inp_size, dilation,
                                                        no_weight, use_cuda=True)


class TestNNInit(TestCase):
    def setUp(self):

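For completeness, a standalone sketch (not part of the diff) of the constraint that test_error_conv_double_backward covers: double backward cannot yet produce a gradient with respect to the convolution weight (ggW) when stride is greater than 1, so gradgradcheck is expected to raise a RuntimeError. The tests above also wrap the forward pass in use_cudnn(False), the helper that moved alongside them, to keep cuDNN's algorithms out of the finite-difference comparison. The shapes below are illustrative assumptions.

import torch
import torch.nn.functional as F
from torch.autograd import Variable
from torch.autograd.gradcheck import gradgradcheck

x = Variable(torch.randn(2, 1, 6, 6), requires_grad=True)
weight = Variable(torch.randn(2, 1, 3, 3), requires_grad=True)
bias = Variable(torch.randn(2), requires_grad=True)

def func(x, weight, bias):
    # stride=2 is the unsupported case when weight is among the differentiated inputs
    return F.conv2d(x, weight, bias, 2, 0, 1)

out = func(x, weight, bias)
grad_y = Variable(torch.randn(out.size()), requires_grad=True)

try:
    gradgradcheck(func, (x, weight, bias), (grad_y,))
except RuntimeError as err:
    print("double backward rejected as expected:", err)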