Update isort to the latest version (NVIDIA#5895)
Update isort to the latest version

Signed-off-by: Vladimir Bataev <[email protected]>

---------

Signed-off-by: Vladimir Bataev <[email protected]>
artbataev authored Feb 3, 2023
1 parent ddb92e3 · commit 907acde
Showing 37 changed files with 72 additions and 75 deletions.
3 changes: 1 addition & 2 deletions .pre-commit-config.yaml
@@ -32,11 +32,10 @@ repos:
- id: requirements-txt-fixer

- repo: https://github.com/PyCQA/isort
rev: 4.3.21
rev: 5.12.0
hooks:
- id: isort
name: Format imports
args: [ --multi-line=3, --trailing-comma, --force-grid-wrap=0, --use-parentheses, --line-width=119, -rc, -ws ]
exclude: docs/

- repo: https://github.com/psf/black
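
For context: the hook's old args line is deleted outright because isort 5 dropped several isort 4 flags (the `-rc` recursive flag, for example), so the style options presumably now live in the repository's isort configuration rather than in the hook arguments. The deleted args correspond to isort's "vertical hanging indent" style (multi-line mode 3, trailing comma, parentheses, 119-character lines), which is what the hunks below apply. A minimal sketch of that style, using standard-library imports purely to illustrate the shape (the exact name ordering depends on isort's defaults):

    # Short import lists stay on one line (force-grid-wrap=0, line width 119).
    from os.path import basename, dirname, join

    # A statement longer than 119 characters wraps with parentheses, one name per line,
    # a trailing comma, and the closing parenthesis on its own line (multi-line mode 3).
    from concurrent.futures import (
        ALL_COMPLETED,
        FIRST_COMPLETED,
        FIRST_EXCEPTION,
        ProcessPoolExecutor,
        ThreadPoolExecutor,
        as_completed,
        wait,
    )

Re-running the hook over the whole repository (for example with `pre-commit run isort --all-files`, assuming standard pre-commit usage) reproduces the reorderings shown in the remaining files.
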
2 changes: 1 addition & 1 deletion nemo/collections/asr/losses/rnnt.py
@@ -48,7 +48,7 @@
WARP_RNNT_AVAILABLE = False

try:
from nemo.collections.asr.parts.numba.rnnt_loss import RNNTLossNumba, MultiblankRNNTLossNumba
from nemo.collections.asr.parts.numba.rnnt_loss import MultiblankRNNTLossNumba, RNNTLossNumba

NUMBA_RNNT_AVAILABLE = True
except (ImportError, ModuleNotFoundError):
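
This is the pattern repeated across most files in this commit: imports inside an optional-dependency guard are re-sorted while the surrounding try/except scaffolding is untouched. A minimal, runnable sketch of that guard (the backend name is hypothetical, shown only to illustrate the structure isort operates on):

    try:
        # isort keeps the names within a single from-import in sorted order.
        from some_optional_backend.losses import FancyLoss, SimpleLoss  # hypothetical package

        BACKEND_AVAILABLE = True
    except (ImportError, ModuleNotFoundError):
        BACKEND_AVAILABLE = False
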
8 changes: 4 additions & 4 deletions nemo/collections/asr/modules/flashlight_decoder.py
@@ -99,18 +99,18 @@ def __init__(
):

try:
from flashlight.lib.text.dictionary import create_word_dict, load_words
from flashlight.lib.sequence.criterion import get_data_ptr_as_bytes
from flashlight.lib.text.decoder import (
LM,
CriterionType,
LexiconDecoderOptions,
KenLM,
LM,
LexiconDecoder,
LexiconDecoderOptions,
LMState,
SmearingMode,
Trie,
LexiconDecoder,
)
from flashlight.lib.text.dictionary import create_word_dict, load_words
except ModuleNotFoundError:
raise ModuleNotFoundError(
"FlashLightKenLMBeamSearchDecoder requires the installation of flashlight python bindings "
2 changes: 1 addition & 1 deletion nemo/collections/asr/parts/utils/asr_confidence_utils.py
@@ -147,8 +147,8 @@ def get_confidence_aggregation_bank():
if hasattr(math, "prod"):
confidence_aggregation_bank["prod"] = math.prod
else:
from functools import reduce
import operator
from functools import reduce

confidence_aggregation_bank["prod"] = lambda x: reduce(operator.mul, x, 1)
return confidence_aggregation_bank
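
For clarity, the two branches in this function are equivalent: math.prod (available from Python 3.8) and the reduce/operator.mul fallback both compute the product of an iterable. A quick self-contained check:

    import math
    import operator
    from functools import reduce

    values = [2, 3, 4]
    assert math.prod(values) == 24
    assert reduce(operator.mul, values, 1) == 24
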
@@ -146,7 +146,6 @@ def __getitem__(self, idx):
def test_index_blending(cls):
"""Visualize indices of blended dataset"""

import torch
import matplotlib.pyplot as plt

plt.ion()
@@ -611,11 +611,11 @@ def get_dataset(
raise ValueError("Invalid dataset_type: ", dataset_type)

# from nemo.collections.nlp.data.language_modeling.megatron.ict_dataset import ICTDataset
from nemo.collections.nlp.data.language_modeling.megatron.bart_dataset import BARTDataset
from nemo.collections.nlp.data.language_modeling.megatron.bert_dataset import BertDataset
from nemo.collections.nlp.data.language_modeling.megatron.length_distribution_type import LengthDistribution
from nemo.collections.nlp.data.language_modeling.megatron.t5_dataset import T5Dataset
from nemo.collections.nlp.data.language_modeling.megatron.ul2_dataset import UL2Dataset
from nemo.collections.nlp.data.language_modeling.megatron.bart_dataset import BARTDataset
from nemo.collections.nlp.data.language_modeling.megatron.length_distribution_type import LengthDistribution

if dataset_type == DSET_TYPE_ICT:
raise NotImplementedError("ICT dataset is not implemented yet.")
@@ -1119,11 +1119,11 @@ def print_split_stats(name, index):

def build_dataset(index, name):
# from nemo.collections.nlp.data.language_modeling.megatron.ict_dataset import ICTDataset
from nemo.collections.nlp.data.language_modeling.megatron.bart_dataset import BARTDataset
from nemo.collections.nlp.data.language_modeling.megatron.bert_dataset import BertDataset
from nemo.collections.nlp.data.language_modeling.megatron.length_distribution_type import LengthDistribution
from nemo.collections.nlp.data.language_modeling.megatron.t5_dataset import T5Dataset
from nemo.collections.nlp.data.language_modeling.megatron.ul2_dataset import UL2Dataset
from nemo.collections.nlp.data.language_modeling.megatron.bart_dataset import BARTDataset
from nemo.collections.nlp.data.language_modeling.megatron.length_distribution_type import LengthDistribution

dataset = None
if splits[index + 1] > splits[index]:
@@ -34,7 +34,6 @@
from apex.transformer import parallel_state, tensor_parallel
from apex.transformer.enums import AttnMaskType
from apex.transformer.tensor_parallel.layers import set_tensor_model_parallel_attributes
from apex.transformer import tensor_parallel

HAVE_APEX = True
except (ImportError, ModuleNotFoundError):
@@ -26,7 +26,7 @@
)

try:
from apex.transformer import tensor_parallel, parallel_state
from apex.transformer import parallel_state, tensor_parallel
from apex.transformer.enums import AttnMaskType

HAVE_APEX = True
@@ -39,15 +39,14 @@

try:
from apex.transformer import parallel_state

from apex.transformer.pipeline_parallel.schedules.common import build_model
from apex.transformer.pipeline_parallel.schedules.fwd_bwd_no_pipelining import forward_backward_no_pipelining
from apex.transformer.pipeline_parallel.schedules.fwd_bwd_pipelining_without_interleaving import (
forward_backward_pipelining_without_interleaving,
)
from apex.transformer.pipeline_parallel.schedules.fwd_bwd_pipelining_with_interleaving import (
_forward_backward_pipelining_with_interleaving,
)
from apex.transformer.pipeline_parallel.schedules.fwd_bwd_pipelining_without_interleaving import (
forward_backward_pipelining_without_interleaving,
)

HAVE_APEX = True
except (ImportError, ModuleNotFoundError):
@@ -53,13 +53,13 @@
try:
from apex.transformer import parallel_state
from apex.transformer.pipeline_parallel.schedules.common import build_model
from apex.transformer.pipeline_parallel.schedules.fwd_bwd_pipelining_without_interleaving import (
forward_backward_pipelining_without_interleaving,
)
from apex.transformer.pipeline_parallel.schedules.fwd_bwd_no_pipelining import forward_backward_no_pipelining
from apex.transformer.pipeline_parallel.schedules.fwd_bwd_pipelining_with_interleaving import (
_forward_backward_pipelining_with_interleaving,
)
from apex.transformer.pipeline_parallel.schedules.fwd_bwd_no_pipelining import forward_backward_no_pipelining
from apex.transformer.pipeline_parallel.schedules.fwd_bwd_pipelining_without_interleaving import (
forward_backward_pipelining_without_interleaving,
)

HAVE_APEX = True
except (ImportError, ModuleNotFoundError):
@@ -50,14 +50,11 @@

try:
from apex.transformer import parallel_state, tensor_parallel
from apex.transformer.pipeline_parallel.schedules.fwd_bwd_no_pipelining import forward_backward_no_pipelining
from apex.transformer.pipeline_parallel.schedules.fwd_bwd_pipelining_without_interleaving import (
forward_backward_pipelining_without_interleaving,
)
from apex.transformer.pipeline_parallel.schedules.fwd_bwd_no_pipelining import forward_backward_no_pipelining
from apex.transformer.pipeline_parallel.utils import (
_reconfigure_microbatch_calculator,
get_micro_batch_size,
)
from apex.transformer.pipeline_parallel.utils import _reconfigure_microbatch_calculator, get_micro_batch_size

HAVE_APEX = True

@@ -42,16 +42,15 @@
try:
from apex.transformer import parallel_state, tensor_parallel
from apex.transformer.enums import ModelType
from apex.transformer import parallel_state, tensor_parallel
from apex.transformer.pipeline_parallel.schedules.common import build_model
from apex.transformer.pipeline_parallel.schedules.fwd_bwd_no_pipelining import forward_backward_no_pipelining
from apex.transformer.pipeline_parallel.schedules.fwd_bwd_pipelining_without_interleaving import (
forward_backward_pipelining_without_interleaving,
)
from apex.transformer.pipeline_parallel.utils import (
get_num_microbatches,
_reconfigure_microbatch_calculator,
get_micro_batch_size,
get_num_microbatches,
)

HAVE_APEX = True
@@ -60,8 +60,8 @@
from nemo.utils import AppState, logging

try:
from apex.transformer.enums import ModelType
from apex.transformer import parallel_state
from apex.transformer.enums import ModelType

HAVE_APEX = True
except (ImportError, ModuleNotFoundError):
@@ -28,8 +28,8 @@
from nemo.core.classes.mixins import adapter_mixin_strategies

try:
from apex.transformer.tensor_parallel import RowParallelLinear, ColumnParallelLinear
from apex.normalization.fused_layer_norm import MixedFusedLayerNorm
from apex.transformer.tensor_parallel import ColumnParallelLinear, RowParallelLinear

HAVE_APEX = True

@@ -15,8 +15,8 @@


try:
from apex.transformer.layers.layer_norm import FastLayerNorm
from apex.normalization.fused_layer_norm import MixedFusedLayerNorm
from apex.transformer.layers.layer_norm import FastLayerNorm

HAVE_APEX = True
except (ImportError, ModuleNotFoundError):
@@ -16,7 +16,8 @@
from torch import nn

try:
from apex.contrib.layer_norm.layer_norm import _fast_layer_norm, FastLayerNorm as OrigFastLayerNorm
from apex.contrib.layer_norm.layer_norm import FastLayerNorm as OrigFastLayerNorm
from apex.contrib.layer_norm.layer_norm import _fast_layer_norm
from apex.transformer.layers.layer_norm import FastLayerNorm

HAVE_APEX = True
4 changes: 2 additions & 2 deletions nemo/collections/nlp/modules/common/megatron/megatron_init.py
@@ -22,16 +22,16 @@
try:
from apex.transformer import tensor_parallel
from apex.transformer.log_util import set_logging_level
from apex.transformer.microbatches import ConstantNumMicroBatches
from apex.transformer.parallel_state import (
get_pipeline_model_parallel_rank,
set_pipeline_model_parallel_rank,
set_virtual_pipeline_model_parallel_rank,
set_pipeline_model_parallel_split_rank,
set_pipeline_model_parallel_world_size,
set_tensor_model_parallel_rank,
set_tensor_model_parallel_world_size,
set_virtual_pipeline_model_parallel_rank,
)
from apex.transformer.microbatches import ConstantNumMicroBatches
from apex.transformer.pipeline_parallel.utils import setup_microbatch_calculator

HAVE_APEX = True
@@ -26,8 +26,8 @@
)

try:
from apex.transformer.enums import AttnMaskType, ModelType
from apex.normalization import MixedFusedRMSNorm
from apex.transformer.enums import AttnMaskType, ModelType

HAVE_APEX = True
except (ImportError, ModuleNotFoundError):
2 changes: 1 addition & 1 deletion nemo/collections/nlp/modules/common/megatron/mlp.py
@@ -29,9 +29,9 @@
from nemo.core import adapter_mixins

try:
from apex.normalization import MixedFusedRMSNorm
from apex.transformer import parallel_state, tensor_parallel
from apex.transformer.parallel_state import get_tensor_model_parallel_world_size
from apex.normalization import MixedFusedRMSNorm

HAVE_APEX = True

@@ -37,7 +37,7 @@
from nemo.collections.nlp.modules.common.megatron.vocab_parallel_cross_entropy import vocab_parallel_cross_entropy

try:
from apex.transformer import tensor_parallel, parallel_state
from apex.transformer import parallel_state, tensor_parallel
from apex.transformer.enums import AttnMaskType, ModelType

HAVE_APEX = True
4 changes: 2 additions & 2 deletions nemo/collections/nlp/modules/common/megatron/transformer.py
@@ -42,9 +42,9 @@
from nemo.utils import logging

try:
from apex.normalization import MixedFusedRMSNorm
from apex.transformer import parallel_state, tensor_parallel
from apex.transformer.enums import AttnMaskType, AttnType, ModelType
from apex.normalization import MixedFusedRMSNorm

HAVE_APEX = True

@@ -56,8 +56,8 @@
ModelType = AttnMaskType = AttnType = LayerType = ApexGuardDefaults()

try:
from transformer_engine.pytorch import TransformerLayer, fp8_autocast
from transformer_engine.common import recipe
from transformer_engine.pytorch import TransformerLayer, fp8_autocast
from transformer_engine.pytorch.distributed import checkpoint as te_checkpoint

HAVE_TE = True
4 changes: 2 additions & 2 deletions nemo/collections/nlp/modules/common/megatron/utils.py
@@ -20,13 +20,13 @@
import torch

try:
from apex.normalization.fused_layer_norm import FusedLayerNorm # NOQA
from apex.normalization import MixedFusedRMSNorm
from apex.normalization.fused_layer_norm import FusedLayerNorm # NOQA
from apex.transformer import parallel_state, tensor_parallel
from apex.transformer.enums import AttnMaskType
from apex.transformer.layers.layer_norm import FastLayerNorm
from apex.transformer.pipeline_parallel.schedules.common import listify_model
from apex.transformer.tensor_parallel.layers import linear_with_grad_accumulation_and_async_allreduce
from apex.transformer.layers.layer_norm import FastLayerNorm

HAVE_APEX = True
except (ImportError, ModuleNotFoundError):
@@ -14,9 +14,11 @@
import torch

try:
from apex.transformer.parallel_state import get_tensor_model_parallel_group
from apex.transformer.parallel_state import get_tensor_model_parallel_rank
from apex.transformer.parallel_state import get_tensor_model_parallel_world_size
from apex.transformer.parallel_state import (
get_tensor_model_parallel_group,
get_tensor_model_parallel_rank,
get_tensor_model_parallel_world_size,
)
from apex.transformer.tensor_parallel.utils import VocabUtility

HAVE_APEX = True
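
The hunk above also shows isort merging several single-line imports from the same module into one statement, wrapped in parentheses only because the combined apex line would exceed the 119-character limit. A small sketch of the same behaviour with standard-library names:

    # Before isort (duplicate from-imports of the same module):
    #   from os.path import basename
    #   from os.path import dirname
    #   from os.path import join
    # After isort merges the duplicates (parentheses are added only past 119 characters):
    from os.path import basename, dirname, join
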
2 changes: 1 addition & 1 deletion nemo/collections/nlp/modules/common/prompt_encoder.py
@@ -25,7 +25,7 @@
from nemo.core.neural_types import ChannelType, NeuralType

try:
from apex.transformer import tensor_parallel, parallel_state
from apex.transformer import parallel_state, tensor_parallel

HAVE_APEX = True

@@ -21,10 +21,10 @@
from nemo.collections.nlp.modules.common.megatron.utils import get_ltor_masks_and_position_ids

try:
from apex.transformer.pipeline_parallel.schedules.fwd_bwd_no_pipelining import forward_backward_no_pipelining
from apex.transformer.pipeline_parallel.schedules.fwd_bwd_pipelining_without_interleaving import (
forward_backward_pipelining_without_interleaving,
)
from apex.transformer.pipeline_parallel.schedules.fwd_bwd_no_pipelining import forward_backward_no_pipelining

HAVE_APEX = True
except (ImportError, ModuleNotFoundError):
@@ -275,15 +275,15 @@ def post_process(tokens: torch.Tensor, new_tokens: torch.Tensor, context_l


def model_inference_strategy_dispatcher(model, **args):
from nemo.collections.nlp.models.language_modeling.megatron_gpt_model import MegatronGPTModel
from nemo.collections.nlp.models.language_modeling.megatron_gpt_prompt_learning_model import (
MegatronGPTPromptLearningModel,
)
from nemo.collections.nlp.models.language_modeling.megatron_gpt_model import MegatronGPTModel
from nemo.collections.nlp.models.language_modeling.megatron_retrieval_model import MegatronRetrievalModel
from nemo.collections.nlp.modules.common.retro_inference_strategies import (
RetroFileQAModelTextGenerationStrategy,
RetroModelTextGenerationStrategy,
RetroQAModelTextGenerationStrategy,
RetroFileQAModelTextGenerationStrategy,
)

if isinstance(model, MegatronGPTPromptLearningModel):
4 changes: 2 additions & 2 deletions nemo/core/optim/optimizer_with_main_params.py
@@ -19,10 +19,10 @@
from nemo.utils import logging

try:
import amp_C
from apex.multi_tensor_apply import multi_tensor_applier
from apex.transformer.parallel_state import get_data_parallel_world_size, get_data_parallel_group
from apex.transformer.parallel_state import get_data_parallel_group, get_data_parallel_world_size
from apex.transformer.tensor_parallel import copy_tensor_model_parallel_attributes
import amp_C

HAVE_APEX = True

3 changes: 1 addition & 2 deletions nemo/core/optim/optimizers.py
@@ -43,8 +43,7 @@
}

try:
from apex.optimizers import FusedLAMB
from apex.optimizers import FusedAdam
from apex.optimizers import FusedAdam, FusedLAMB

HAVE_APEX = True

4 changes: 2 additions & 2 deletions nemo/utils/export_utils.py
@@ -207,10 +207,10 @@ def run_ort_and_compare(sess, ort_input, output_example, check_tolerance=0.01):
apex_available = True

try:
from apex.normalization.fused_layer_norm import FusedLayerNorm, MixedFusedLayerNorm
from apex.contrib.layer_norm.layer_norm import FastLayerNorm
from apex.transformer.tensor_parallel.layers import RowParallelLinear, ColumnParallelLinear
from apex.normalization.fused_layer_norm import FusedLayerNorm, MixedFusedLayerNorm
from apex.transformer.functional.fused_softmax import FusedScaleMaskSoftmax
from apex.transformer.tensor_parallel.layers import ColumnParallelLinear, RowParallelLinear

def replace_FusedLayerNorm(n: nn.Module) -> Optional[nn.LayerNorm]:
"""
(Remaining changed files not shown.)
