F401 fixes for ruff
AUTOMATIC1111 committed May 10, 2023
1 parent f741a98 commit 4b85480
Showing 17 changed files with 27 additions and 30 deletions.
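
The pyproject.toml hunk near the end of this commit removes F401 ("module imported but unused") from Ruff's repo-wide ignore list, so every import the rule flags now has to be either deleted or explicitly kept. Imports with no remaining purpose are deleted; imports that are re-exports or are needed only for their import-time side effects are kept and marked with a per-line # noqa: F401 comment. A minimal hypothetical sketch of those two cases, using standard-library names rather than anything from this repo:

```python
# mypkg/__init__.py -- hypothetical package illustrating the two kinds of
# "unused" imports that this commit keeps and suppresses rather than deletes.

# 1. Re-export: nothing in this file uses `dataclass`, but callers are expected
#    to write `from mypkg import dataclass`, so the import must stay. With F401
#    no longer ignored project-wide, the per-line suppression is required.
from dataclasses import dataclass  # noqa: F401

# 2. Side-effect import: the module is wanted for what it does at import time
#    (readline installs line editing for input()), not for any name it provides.
import readline  # noqa: F401
```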
4 changes: 2 additions & 2 deletions extensions-builtin/LDSR/scripts/ldsr_model.py
@@ -7,8 +7,8 @@
 from modules.upscaler import Upscaler, UpscalerData
 from ldsr_model_arch import LDSR
 from modules import shared, script_callbacks
-import sd_hijack_autoencoder
-import sd_hijack_ddpm_v1
+import sd_hijack_autoencoder  # noqa: F401
+import sd_hijack_ddpm_v1  # noqa: F401


 class UpscalerLDSR(Upscaler):
2 changes: 1 addition & 1 deletion modules/cmd_args.py
@@ -1,6 +1,6 @@
 import argparse
 import os
-from modules.paths_internal import models_path, script_path, data_path, extensions_dir, extensions_builtin_dir, sd_default_config, sd_model_file
+from modules.paths_internal import models_path, script_path, data_path, extensions_dir, extensions_builtin_dir, sd_default_config, sd_model_file  # noqa: F401

 parser = argparse.ArgumentParser()

1 change: 0 additions & 1 deletion modules/deepbooru.py
@@ -2,7 +2,6 @@
 import re

 import torch
-from PIL import Image
 import numpy as np

 from modules import modelloader, paths, deepbooru_model, devices, images, shared
2 changes: 1 addition & 1 deletion modules/extensions.py
@@ -6,7 +6,7 @@
 import git

 from modules import shared
-from modules.paths_internal import extensions_dir, extensions_builtin_dir, script_path
+from modules.paths_internal import extensions_dir, extensions_builtin_dir, script_path  # noqa: F401

 extensions = []

2 changes: 1 addition & 1 deletion modules/gfpgan_model.py
@@ -78,7 +78,7 @@ def setup_model(dirname):

     try:
         from gfpgan import GFPGANer
-        from facexlib import detection, parsing
+        from facexlib import detection, parsing  # noqa: F401
         global user_path
         global have_gfpgan
         global gfpgan_constructor
2 changes: 1 addition & 1 deletion modules/models/diffusion/uni_pc/__init__.py
@@ -1 +1 @@
-from .sampler import UniPCSampler
+from .sampler import UniPCSampler  # noqa: F401
4 changes: 2 additions & 2 deletions modules/paths.py
@@ -1,8 +1,8 @@
 import os
 import sys
-from modules.paths_internal import models_path, script_path, data_path, extensions_dir, extensions_builtin_dir
+from modules.paths_internal import models_path, script_path, data_path, extensions_dir, extensions_builtin_dir  # noqa: F401

-import modules.safe
+import modules.safe  # noqa: F401


 # data_path = cmd_opts_pre.data
6 changes: 3 additions & 3 deletions modules/realesrgan_model.py
@@ -17,9 +17,9 @@ def __init__(self, path):
         self.user_path = path
         super().__init__()
         try:
-            from basicsr.archs.rrdbnet_arch import RRDBNet
-            from realesrgan import RealESRGANer
-            from realesrgan.archs.srvgg_arch import SRVGGNetCompact
+            from basicsr.archs.rrdbnet_arch import RRDBNet  # noqa: F401
+            from realesrgan import RealESRGANer  # noqa: F401
+            from realesrgan.archs.srvgg_arch import SRVGGNetCompact  # noqa: F401
             self.enable = True
             self.scalers = []
             scalers = self.load_models(path)
1 change: 0 additions & 1 deletion modules/script_loading.py
@@ -2,7 +2,6 @@
 import sys
 import traceback
 import importlib.util
-from types import ModuleType


 def load_module(path):
2 changes: 1 addition & 1 deletion modules/sd_hijack_inpainting.py
@@ -4,7 +4,7 @@
 import ldm.models.diffusion.ddim
 import ldm.models.diffusion.plms

-from ldm.models.diffusion.ddim import DDIMSampler, noise_like
+from ldm.models.diffusion.ddim import noise_like
 from ldm.models.diffusion.sampling_util import norm_thresholding

4 changes: 1 addition & 3 deletions modules/sd_models.py
@@ -15,7 +15,6 @@
 from ldm.util import instantiate_from_config

 from modules import paths, shared, modelloader, devices, script_callbacks, sd_vae, sd_disable_initialization, errors, hashes, sd_models_config
-from modules.paths import models_path
 from modules.sd_hijack_inpainting import do_inpainting_hijack
 from modules.timer import Timer

@@ -87,8 +86,7 @@ def calculate_shorthash(self):

 try:
     # this silences the annoying "Some weights of the model checkpoint were not used when initializing..." message at start.
-
-    from transformers import logging, CLIPModel
+    from transformers import logging, CLIPModel  # noqa: F401

     logging.set_verbosity_error()
 except Exception:
2 changes: 1 addition & 1 deletion modules/sd_samplers.py
@@ -1,7 +1,7 @@
 from modules import sd_samplers_compvis, sd_samplers_kdiffusion, shared

 # imports for functions that previously were here and are used by other modules
-from modules.sd_samplers_common import samples_to_image_grid, sample_to_image
+from modules.sd_samplers_common import samples_to_image_grid, sample_to_image  # noqa: F401

 all_samplers = [
     *sd_samplers_kdiffusion.samplers_data_k_diffusion,
2 changes: 1 addition & 1 deletion modules/shared.py
@@ -12,7 +12,7 @@
 import modules.styles
 import modules.devices as devices
 from modules import localization, script_loading, errors, ui_components, shared_items, cmd_args
-from modules.paths_internal import models_path, script_path, data_path, sd_configs_path, sd_default_config, sd_model_file, default_sd_model_file, extensions_dir, extensions_builtin_dir
+from modules.paths_internal import models_path, script_path, data_path, sd_configs_path, sd_default_config, sd_model_file, default_sd_model_file, extensions_dir, extensions_builtin_dir  # noqa: F401
 from ldm.models.diffusion.ddpm import LatentDiffusion

 demo = None
4 changes: 2 additions & 2 deletions modules/ui.py
@@ -10,10 +10,10 @@
 import gradio.routes
 import gradio.utils
 import numpy as np
-from PIL import Image, PngImagePlugin
+from PIL import Image, PngImagePlugin  # noqa: F401
 from modules.call_queue import wrap_gradio_gpu_call, wrap_queued_call, wrap_gradio_call

-from modules import sd_hijack, sd_models, localization, script_callbacks, ui_extensions, deepbooru, sd_vae, extra_networks, postprocessing, ui_components, ui_common, ui_postprocessing, progress
+from modules import sd_hijack, sd_models, localization, script_callbacks, ui_extensions, deepbooru, sd_vae, extra_networks, ui_common, ui_postprocessing, progress
 from modules.ui_components import FormRow, FormGroup, ToolButton, FormHTML
 from modules.paths import script_path, data_path

2 changes: 1 addition & 1 deletion modules/upscaler.py
@@ -41,7 +41,7 @@ def __init__(self, create_dirs=False):
             os.makedirs(self.model_path, exist_ok=True)

         try:
-            import cv2
+            import cv2  # noqa: F401
             self.can_tile = True
         except Exception:
             pass
9 changes: 5 additions & 4 deletions pyproject.toml
@@ -1,13 +1,14 @@
 [tool.ruff]

+target-version = "py310"
+
 exclude = ["extensions"]

 ignore = [
-    "E501",
-
-    "F401", # Module imported but unused
+    "E501", # Line too long
+    "E731", # Do not assign a `lambda` expression, use a `def`
 ]


 [tool.ruff.per-file-ignores]
-"webui.py" = ["E402"]  # Module level import not at top of file
+"webui.py" = ["E402"]  # Module level import not at top of file
8 changes: 4 additions & 4 deletions webui.py
@@ -16,12 +16,12 @@
 import logging
 logging.getLogger("xformers").addFilter(lambda record: 'A matching Triton is not available' not in record.getMessage())

-from modules import paths, timer, import_hook, errors
+from modules import paths, timer, import_hook, errors  # noqa: F401

 startup_timer = timer.Timer()

 import torch
-import pytorch_lightning # pytorch_lightning should be imported after torch, but it re-enables warnings on import so import once to disable them
+import pytorch_lightning  # noqa: F401 # pytorch_lightning should be imported after torch, but it re-enables warnings on import so import once to disable them
 warnings.filterwarnings(action="ignore", category=DeprecationWarning, module="pytorch_lightning")
 warnings.filterwarnings(action="ignore", category=UserWarning, module="torchvision")

@@ -31,12 +31,12 @@
 import gradio
 startup_timer.record("import gradio")

-import ldm.modules.encoders.modules
+import ldm.modules.encoders.modules  # noqa: F401
 startup_timer.record("import ldm")

 from modules import extra_networks, ui_extra_networks_checkpoints
 from modules import extra_networks_hypernet, ui_extra_networks_hypernets, ui_extra_networks_textual_inversion
-from modules.call_queue import wrap_queued_call, queue_lock, wrap_gradio_gpu_call
+from modules.call_queue import wrap_queued_call, queue_lock

 # Truncate version number of nightly/local build of PyTorch to not cause exceptions with CodeFormer or Safetensors
 if ".dev" in torch.__version__ or "+git" in torch.__version__:
