Fixes flake8 --select W605 test warnings (NVIDIA#829)
Signed-off-by: asears <[email protected]>
asears authored May 13, 2020
1 parent e1b7997 commit 9165b27
Showing 8 changed files with 10 additions and 7 deletions.
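For context, flake8's W605 warning flags invalid escape sequences in ordinary (non-raw) string literals, such as the "\:" used to escape colons in the reST link targets below. The following sketch is illustrative only (it is not part of the commit) and shows the two fixes this commit applies: rewording the link target so no escape is needed, or marking the docstring as a raw string.

# Illustration only: the W605 problem and the two fixes used in this commit.

def before():
    """
    .. _Adam\: A Method for Stochastic Optimization:
        https://arxiv.org/abs/1412.6980
    """
    # "\:" is not a valid Python escape sequence, so flake8 reports
    # W605 (invalid escape sequence) and newer Pythons warn at compile time.

def fix_reworded_link():
    """
    .. _Adam - A Method for Stochastic Optimization:
        https://arxiv.org/abs/1412.6980
    """
    # Fix used in the optimizer docstrings: drop the escaped colon from the
    # reST link target, so no backslash remains in the string.

def fix_raw_docstring():
    r"""
    .. _Adam\: A Method for Stochastic Optimization:
        https://arxiv.org/abs/1412.6980
    """
    # Fix used in the reparameterization docstrings: a raw string leaves
    # backslashes untouched, so W605 no longer applies.
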
apex/contrib/optimizers/fused_adam.py (2 changes: 1 addition & 1 deletion)

@@ -29,7 +29,7 @@ class FusedAdam(torch.optim.Optimizer):
use_mt (boolean, optional): use multi tensor apply for lower launch
latency. (default: False)
-.. _Adam\: A Method for Stochastic Optimization:
+.. _Adam - A Method for Stochastic Optimization:
https://arxiv.org/abs/1412.6980
.. _On the Convergence of Adam and Beyond:
https://openreview.net/forum?id=ryQu7f-RZ
apex/contrib/optimizers/fused_lamb.py (2 changes: 1 addition & 1 deletion)

@@ -54,7 +54,7 @@ class FusedLAMB(torch.optim.Optimizer):
max_grad_norm (float, optional): value used to clip global grad norm
(default: 1.0)
-.. _Large Batch Optimization for Deep Learning\: Training BERT in 76 minutes:
+.. _Large Batch Optimization for Deep Learning - Training BERT in 76 minutes:
https://arxiv.org/abs/1904.00962
.. _On the Convergence of Adam and Beyond:
https://openreview.net/forum?id=ryQu7f-RZ
apex/optimizers/fused_adam.py (2 changes: 1 addition & 1 deletion)

@@ -53,7 +53,7 @@ class FusedAdam(torch.optim.Optimizer):
set_grad_none (bool, optional): whether set grad to None when zero_grad()
method is called. (default: True)
-.. _Adam\: A Method for Stochastic Optimization:
+.. _Adam - A Method for Stochastic Optimization:
https://arxiv.org/abs/1412.6980
.. _On the Convergence of Adam and Beyond:
https://openreview.net/forum?id=ryQu7f-RZ
apex/optimizers/fused_lamb.py (2 changes: 1 addition & 1 deletion)

@@ -52,7 +52,7 @@ class FusedLAMB(torch.optim.Optimizer):
max_grad_norm (float, optional): value used to clip global grad norm
(default: 1.0)
-.. _Large Batch Optimization for Deep Learning\: Training BERT in 76 minutes:
+.. _Large Batch Optimization for Deep Learning - Training BERT in 76 minutes:
https://arxiv.org/abs/1904.00962
.. _On the Convergence of Adam and Beyond:
https://openreview.net/forum?id=ryQu7f-RZ
apex/optimizers/fused_novograd.py (2 changes: 1 addition & 1 deletion)

@@ -58,7 +58,7 @@ class FusedNovoGrad(torch.optim.Optimizer):
set_grad_none (bool, optional): whether set grad to None when zero_grad()
method is called. (default: True)
-.. _Jasper\: An End-to-End Convolutional Neural Acoustic Model:
+.. _Jasper - An End-to-End Convolutional Neural Acoustic Model:
https://arxiv.org/abs/1904.03288
.. _On the Convergence of Adam and Beyond:
https://openreview.net/forum?id=ryQu7f-RZ
apex/reparameterization/__init__.py (2 changes: 1 addition & 1 deletion)

@@ -2,7 +2,7 @@
from .reparameterization import Reparameterization

def apply_weight_norm(module, name='', dim=0, hook_child=True):
"""
r"""
Applies weight normalization to a parameter in the given module.
If no parameter is provided, applies weight normalization to all
parameters in model (except 1-d vectors and scalars).
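As a usage note, here is a hedged sketch based only on the signature and docstring shown above; the Linear layer and the explicit argument values are illustrative, not taken from the commit.

import torch.nn as nn
from apex.reparameterization import apply_weight_norm

# With an explicit name, weight normalization is applied to that parameter;
# with the default name='', it is applied to all parameters of the module
# except 1-d vectors and scalars, per the docstring above.
layer = nn.Linear(128, 64)
apply_weight_norm(layer, name='weight', dim=0, hook_child=True)
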
apex/reparameterization/weight_norm.py (2 changes: 1 addition & 1 deletion)

@@ -20,7 +20,7 @@ def _norm(p, dim):
HALF_TYPES = (torch.cuda.HalfTensor, torch.HalfTensor)

class WeightNorm(Reparameterization):
"""
r"""
Weight normalization is a reparameterization that decouples the magnitude
of a weight tensor from its direction. This replaces the parameter specified
by `name` (e.g. "weight") with two parameters: one specifying the magnitude
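For reference, a small sketch of the decomposition this docstring describes (standard weight normalization, not apex's actual implementation): the parameter is rebuilt from a magnitude and a direction. The tensor shape and the dimension over which the norm is taken are assumptions for illustration.

import torch

# w = g * v / ||v||: g carries the magnitude, v the direction.
v = torch.randn(64, 128)                       # direction parameter
g = v.norm(p=2, dim=1, keepdim=True)           # magnitude, initialized to ||v||
w = g * v / v.norm(p=2, dim=1, keepdim=True)   # equals v at initialization
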
requirements_dev.txt (3 changes: 3 additions & 0 deletions)

@@ -0,0 +1,3 @@
+-r requirements.txt
+flake8>=3.7.9
+Sphinx>=3.0.3

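The check named in the commit message (flake8 --select W605) can also be run from Python through flake8's legacy API; this is a hedged sketch, and the file path is just one of the files touched by this commit.

from flake8.api import legacy as flake8

# Restrict the run to the W605 check, as in the commit message.
style_guide = flake8.get_style_guide(select=["W605"])
report = style_guide.check_files(["apex/optimizers/fused_adam.py"])
print(report.total_errors)  # expected to be 0 after this commit
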