diff --git a/Lib/__future__.py b/Lib/__future__.py index 97dc90c6e4..e64374f26b 100644 --- a/Lib/__future__.py +++ b/Lib/__future__.py @@ -101,9 +101,7 @@ def getMandatoryRelease(self): return self.mandatory def __repr__(self): - return "_Feature" + repr((self.optional, - self.mandatory, - self.compiler_flag)) + return f"_Feature{repr((self.optional, self.mandatory, self.compiler_flag))}" nested_scopes = _Feature((2, 1, 0, "beta", 1), diff --git a/Lib/_collections_abc.py b/Lib/_collections_abc.py index 87a9cd2d46..65816eda33 100644 --- a/Lib/_collections_abc.py +++ b/Lib/_collections_abc.py @@ -97,9 +97,7 @@ def __hash__(self): @classmethod def __subclasshook__(cls, C): - if cls is Hashable: - return _check_methods(C, "__hash__") - return NotImplemented + return _check_methods(C, "__hash__") if cls is Hashable else NotImplemented class Awaitable(metaclass=ABCMeta): @@ -112,9 +110,7 @@ def __await__(self): @classmethod def __subclasshook__(cls, C): - if cls is Awaitable: - return _check_methods(C, "__await__") - return NotImplemented + return _check_methods(C, "__await__") if cls is Awaitable else NotImplemented __class_getitem__ = classmethod(GenericAlias) @@ -261,9 +257,7 @@ def __iter__(self): @classmethod def __subclasshook__(cls, C): - if cls is Iterable: - return _check_methods(C, "__iter__") - return NotImplemented + return _check_methods(C, "__iter__") if cls is Iterable else NotImplemented __class_getitem__ = classmethod(GenericAlias) @@ -380,9 +374,7 @@ def __len__(self): @classmethod def __subclasshook__(cls, C): - if cls is Sized: - return _check_methods(C, "__len__") - return NotImplemented + return _check_methods(C, "__len__") if cls is Sized else NotImplemented class Container(metaclass=ABCMeta): @@ -444,9 +436,8 @@ def __parameters__(self): # Looks like a genericalias if hasattr(arg, "__parameters__") and isinstance(arg.__parameters__, tuple): params.extend(arg.__parameters__) - else: - if _is_typevarlike(arg): - params.append(arg) + elif 
_is_typevarlike(arg): + params.append(arg) return tuple(dict.fromkeys(params)) def __repr__(self): @@ -494,10 +485,8 @@ def __getitem__(self, item): f"ParamSpec, or Concatenate. Got {arg}") else: arg = subst[arg] - # Looks like a GenericAlias elif hasattr(arg, '__parameters__') and isinstance(arg.__parameters__, tuple): - subparams = arg.__parameters__ - if subparams: + if subparams := arg.__parameters__: subargs = tuple(subst[x] for x in subparams) arg = arg[subargs] new_args.append(arg) @@ -542,9 +531,7 @@ def _type_repr(obj): return f'{obj.__module__}.{obj.__qualname__}' if obj is Ellipsis: return '...' - if isinstance(obj, FunctionType): - return obj.__name__ - return repr(obj) + return obj.__name__ if isinstance(obj, FunctionType) else repr(obj) class Callable(metaclass=ABCMeta): @@ -557,9 +544,7 @@ def __call__(self, *args, **kwds): @classmethod def __subclasshook__(cls, C): - if cls is Callable: - return _check_methods(C, "__call__") - return NotImplemented + return _check_methods(C, "__call__") if cls is Callable else NotImplemented __class_getitem__ = classmethod(_CallableGenericAlias) @@ -583,12 +568,7 @@ class Set(Collection): def __le__(self, other): if not isinstance(other, Set): return NotImplemented - if len(self) > len(other): - return False - for elem in self: - if elem not in other: - return False - return True + return False if len(self) > len(other) else all(elem in other for elem in self) def __lt__(self, other): if not isinstance(other, Set): @@ -603,12 +583,7 @@ def __gt__(self, other): def __ge__(self, other): if not isinstance(other, Set): return NotImplemented - if len(self) < len(other): - return False - for elem in other: - if elem not in self: - return False - return True + return False if len(self) < len(other) else all(elem in self for elem in other) def __eq__(self, other): if not isinstance(other, Set): @@ -633,10 +608,7 @@ def __and__(self, other): def isdisjoint(self, other): 'Return True if two sets have a null intersection.' 
- for value in other: - if value in self: - return False - return True + return all(value not in self for value in other) def __or__(self, other): if not isinstance(other, Iterable): @@ -868,7 +840,7 @@ class KeysView(MappingView, Set): __slots__ = () @classmethod - def _from_iterable(self, it): + def _from_iterable(cls, it): return set(it) def __contains__(self, key): @@ -886,7 +858,7 @@ class ItemsView(MappingView, Set): __slots__ = () @classmethod - def _from_iterable(self, it): + def _from_iterable(cls, it): return set(it) def __contains__(self, item): @@ -1032,17 +1004,13 @@ def __iter__(self): i = 0 try: while True: - v = self[i] - yield v + yield self[i] i += 1 except IndexError: return def __contains__(self, value): - for v in self: - if v is value or v == value: - return True - return False + return any(v is value or v == value for v in self) def __reversed__(self): for i in reversed(range(len(self))): diff --git a/Lib/_compat_pickle.py b/Lib/_compat_pickle.py index 17b9010278..22e3a4c862 100644 --- a/Lib/_compat_pickle.py +++ b/Lib/_compat_pickle.py @@ -170,14 +170,14 @@ NAME_MAPPING[("multiprocessing", excname)] = ("multiprocessing.context", excname) # Same, but for 3.x to 2.x -REVERSE_IMPORT_MAPPING = dict((v, k) for (k, v) in IMPORT_MAPPING.items()) +REVERSE_IMPORT_MAPPING = {v: k for (k, v) in IMPORT_MAPPING.items()} assert len(REVERSE_IMPORT_MAPPING) == len(IMPORT_MAPPING) -REVERSE_NAME_MAPPING = dict((v, k) for (k, v) in NAME_MAPPING.items()) +REVERSE_NAME_MAPPING = {v: k for (k, v) in NAME_MAPPING.items()} assert len(REVERSE_NAME_MAPPING) == len(NAME_MAPPING) # Non-mutual mappings. 
-IMPORT_MAPPING.update({ +IMPORT_MAPPING |= { 'cPickle': 'pickle', '_elementtree': 'xml.etree.ElementTree', 'FileDialog': 'tkinter.filedialog', @@ -190,46 +190,58 @@ 'UserList': 'collections', 'UserString': 'collections', 'whichdb': 'dbm', - 'StringIO': 'io', + 'StringIO': 'io', 'cStringIO': 'io', -}) +} -REVERSE_IMPORT_MAPPING.update({ +REVERSE_IMPORT_MAPPING |= { '_bz2': 'bz2', '_dbm': 'dbm', '_functools': 'functools', '_gdbm': 'gdbm', '_pickle': 'pickle', -}) +} -NAME_MAPPING.update({ +NAME_MAPPING |= { ('__builtin__', 'basestring'): ('builtins', 'str'), ('exceptions', 'StandardError'): ('builtins', 'Exception'), ('UserDict', 'UserDict'): ('collections', 'UserDict'), ('socket', '_socketobject'): ('socket', 'SocketType'), -}) +} -REVERSE_NAME_MAPPING.update({ +REVERSE_NAME_MAPPING |= { ('_functools', 'reduce'): ('__builtin__', 'reduce'), ('tkinter.filedialog', 'FileDialog'): ('FileDialog', 'FileDialog'), ('tkinter.filedialog', 'LoadFileDialog'): ('FileDialog', 'LoadFileDialog'), ('tkinter.filedialog', 'SaveFileDialog'): ('FileDialog', 'SaveFileDialog'), ('tkinter.simpledialog', 'SimpleDialog'): ('SimpleDialog', 'SimpleDialog'), ('xmlrpc.server', 'ServerHTMLDoc'): ('DocXMLRPCServer', 'ServerHTMLDoc'), - ('xmlrpc.server', 'XMLRPCDocGenerator'): - ('DocXMLRPCServer', 'XMLRPCDocGenerator'), - ('xmlrpc.server', 'DocXMLRPCRequestHandler'): - ('DocXMLRPCServer', 'DocXMLRPCRequestHandler'), - ('xmlrpc.server', 'DocXMLRPCServer'): - ('DocXMLRPCServer', 'DocXMLRPCServer'), - ('xmlrpc.server', 'DocCGIXMLRPCRequestHandler'): - ('DocXMLRPCServer', 'DocCGIXMLRPCRequestHandler'), - ('http.server', 'SimpleHTTPRequestHandler'): - ('SimpleHTTPServer', 'SimpleHTTPRequestHandler'), - ('http.server', 'CGIHTTPRequestHandler'): - ('CGIHTTPServer', 'CGIHTTPRequestHandler'), + ('xmlrpc.server', 'XMLRPCDocGenerator'): ( + 'DocXMLRPCServer', + 'XMLRPCDocGenerator', + ), + ('xmlrpc.server', 'DocXMLRPCRequestHandler'): ( + 'DocXMLRPCServer', + 'DocXMLRPCRequestHandler', + ), + 
('xmlrpc.server', 'DocXMLRPCServer'): ( + 'DocXMLRPCServer', + 'DocXMLRPCServer', + ), + ('xmlrpc.server', 'DocCGIXMLRPCRequestHandler'): ( + 'DocXMLRPCServer', + 'DocCGIXMLRPCRequestHandler', + ), + ('http.server', 'SimpleHTTPRequestHandler'): ( + 'SimpleHTTPServer', + 'SimpleHTTPRequestHandler', + ), + ('http.server', 'CGIHTTPRequestHandler'): ( + 'CGIHTTPServer', + 'CGIHTTPRequestHandler', + ), ('_socket', 'socket'): ('socket', '_socketobject'), -}) +} PYTHON3_OSERROR_EXCEPTIONS = ( 'BrokenPipeError', diff --git a/Lib/_compression.py b/Lib/_compression.py index e8b70aa0a3..02cd910670 100644 --- a/Lib/_compression.py +++ b/Lib/_compression.py @@ -140,7 +140,7 @@ def seek(self, offset, whence=io.SEEK_SET): pass offset = self._size + offset else: - raise ValueError("Invalid value for whence: {}".format(whence)) + raise ValueError(f"Invalid value for whence: {whence}") # Make it so that offset is the number of bytes to skip forward. if offset < self._pos: @@ -150,11 +150,11 @@ def seek(self, offset, whence=io.SEEK_SET): # Read and discard data until we reach the desired position. 
while offset > 0: - data = self.read(min(io.DEFAULT_BUFFER_SIZE, offset)) - if not data: - break - offset -= len(data) + if data := self.read(min(io.DEFAULT_BUFFER_SIZE, offset)): + offset -= len(data) + else: + break return self._pos def tell(self): diff --git a/Lib/_dummy_os.py b/Lib/_dummy_os.py index 5bd5ec0a13..be900abb02 100644 --- a/Lib/_dummy_os.py +++ b/Lib/_dummy_os.py @@ -48,9 +48,9 @@ def fspath(path): if isinstance(path_repr, (str, bytes)): return path_repr else: - raise TypeError("expected {}.__fspath__() to return str or bytes, " - "not {}".format(path_type.__name__, - type(path_repr).__name__)) + raise TypeError( + f"expected {path_type.__name__}.__fspath__() to return str or bytes, not {type(path_repr).__name__}" + ) class PathLike(abc.ABC): diff --git a/Lib/_dummy_thread.py b/Lib/_dummy_thread.py index 424b0b3be5..82d19812de 100644 --- a/Lib/_dummy_thread.py +++ b/Lib/_dummy_thread.py @@ -42,7 +42,7 @@ def start_new_thread(function, args, kwargs={}): """ if type(args) != type(tuple()): raise TypeError("2nd arg must be a tuple") - if type(kwargs) != type(dict()): + if type(kwargs) != type({}): raise TypeError("3rd arg must be a dict") global _main _main = False @@ -115,18 +115,20 @@ def acquire(self, waitflag=None, timeout=-1): aren't triggered and throw a little fit. 
""" - if waitflag is None or waitflag: + if ( + waitflag is not None + and not waitflag + and not self.locked_status + or waitflag is None + or waitflag + ): self.locked_status = True return True else: - if not self.locked_status: - self.locked_status = True - return True - else: - if timeout > 0: - import time - time.sleep(timeout) - return False + if timeout > 0: + import time + time.sleep(timeout) + return False __enter__ = acquire @@ -149,12 +151,7 @@ def _at_fork_reinit(self): self.locked_status = False def __repr__(self): - return "<%s %s.%s object at %s>" % ( - "locked" if self.locked_status else "unlocked", - self.__class__.__module__, - self.__class__.__qualname__, - hex(id(self)) - ) + return f'<{"locked" if self.locked_status else "unlocked"} {self.__class__.__module__}.{self.__class__.__qualname__} object at {hex(id(self))}>' # Used to signal that interrupt_main was called in a "thread" _interrupt = False @@ -166,9 +163,8 @@ def interrupt_main(): KeyboardInterrupt upon exiting.""" if _main: raise KeyboardInterrupt - else: - global _interrupt - _interrupt = True + global _interrupt + _interrupt = True class RLock: def __init__(self): @@ -193,11 +189,4 @@ def locked(self): return self.locked_status != 0 def __repr__(self): - return "<%s %s.%s object owner=%s count=%s at %s>" % ( - "locked" if self.locked_count else "unlocked", - self.__class__.__module__, - self.__class__.__qualname__, - get_ident() if self.locked_count else 0, - self.locked_count, - hex(id(self)) - ) + return f'<{"locked" if self.locked_count else "unlocked"} {self.__class__.__module__}.{self.__class__.__qualname__} object owner={get_ident() if self.locked_count else 0} count={self.locked_count} at {hex(id(self))}>' diff --git a/Lib/_markupbase.py b/Lib/_markupbase.py index 3ad7e27996..9acd402419 100644 --- a/Lib/_markupbase.py +++ b/Lib/_markupbase.py @@ -45,8 +45,7 @@ def updatepos(self, i, j): if i >= j: return j rawdata = self.rawdata - nlines = rawdata.count("\n", i, j) - if nlines: 
+ if nlines := rawdata.count("\n", i, j): self.lineno = self.lineno + nlines pos = rawdata.rindex("\n", i, j) # Should not fail self.offset = j-(pos+1) @@ -110,10 +109,10 @@ def parse_declaration(self, i): self.unknown_decl(data) return j + 1 if c in "\"'": - m = _declstringlit_match(rawdata, j) - if not m: + if m := _declstringlit_match(rawdata, j): + j = m.end() + else: return -1 # incomplete - j = m.end() elif c in "abcdefghijklmnopqrstuvwxyzABCDEFGHIJKLMNOPQRSTUVWXYZ": name, j = self._scan_name(j, i) elif c in self._decl_otherchars: @@ -127,7 +126,7 @@ def parse_declaration(self, i): # also in data attribute specifications of attlist declaration # also link type declaration subsets in linktype declarations # also link attribute specification lists in link declarations - raise AssertionError("unsupported '[' char in %s declaration" % decltype) + raise AssertionError(f"unsupported '[' char in {decltype} declaration") else: raise AssertionError("unexpected '[' char in declaration") else: @@ -212,7 +211,7 @@ def _parse_doctype_subset(self, i, declstartpos): "unknown declaration %r in internal subset" % name ) # handle the individual names - meth = getattr(self, "_parse_doctype_" + name) + meth = getattr(self, f"_parse_doctype_{name}") j = meth(j, declstartpos) if j < 0: return j @@ -230,13 +229,12 @@ def _parse_doctype_subset(self, i, declstartpos): j = j + 1 while j < n and rawdata[j].isspace(): j = j + 1 - if j < n: - if rawdata[j] == ">": - return j - self.updatepos(declstartpos, j) - raise AssertionError("unexpected char after internal subset") - else: + if j >= n: return -1 + if rawdata[j] == ">": + return j + self.updatepos(declstartpos, j) + raise AssertionError("unexpected char after internal subset") elif c.isspace(): j = j + 1 else: @@ -252,9 +250,7 @@ def _parse_doctype_element(self, i, declstartpos): return -1 # style content model; just skip until '>' rawdata = self.rawdata - if '>' in rawdata[j:]: - return rawdata.find(">", j) + 1 - return -1 + return 
rawdata.find(">", j) + 1 if '>' in rawdata[j:] else -1 # Internal -- scan past ': return j + 1 if c in "'\"": - m = _declstringlit_match(rawdata, j) - if not m: + if m := _declstringlit_match(rawdata, j): + j = m.end() + else: return -1 - j = m.end() else: name, j = self._scan_name(j, declstartpos) if j < 0: @@ -359,8 +354,7 @@ def _parse_doctype_entity(self, i, declstartpos): if not c: return -1 if c in "'\"": - m = _declstringlit_match(rawdata, j) - if m: + if m := _declstringlit_match(rawdata, j): j = m.end() else: return -1 # incomplete @@ -378,13 +372,10 @@ def _scan_name(self, i, declstartpos): n = len(rawdata) if i == n: return None, -1 - m = _declname_match(rawdata, i) - if m: + if m := _declname_match(rawdata, i): s = m.group() name = s.strip() - if (i + len(s)) == n: - return None, -1 # end of buffer - return name.lower(), m.end() + return (None, -1) if (i + len(s)) == n else (name.lower(), m.end()) else: self.updatepos(declstartpos, i) raise AssertionError( diff --git a/Lib/_osx_support.py b/Lib/_osx_support.py index aa66c8b9f4..5ea0f2f2a7 100644 --- a/Lib/_osx_support.py +++ b/Lib/_osx_support.py @@ -39,17 +39,16 @@ def _find_executable(executable, path=None): base, ext = os.path.splitext(executable) if (sys.platform == 'win32') and (ext != '.exe'): - executable = executable + '.exe' - - if not os.path.isfile(executable): - for p in paths: - f = os.path.join(p, executable) - if os.path.isfile(f): - # the file exists, we have a shot at spawn working - return f - return None - else: + executable = f'{executable}.exe' + + if os.path.isfile(executable): return executable + for p in paths: + f = os.path.join(p, executable) + if os.path.isfile(f): + # the file exists, we have a shot at spawn working + return f + return None def _read_output(commandstring, capture_stderr=False): @@ -63,23 +62,23 @@ def _read_output(commandstring, capture_stderr=False): import tempfile fp = tempfile.NamedTemporaryFile() except ImportError: - fp = open("/tmp/_osx_support.%s"%( - 
os.getpid(),), "w+b") + fp = open(f"/tmp/_osx_support.{os.getpid()}", "w+b") with contextlib.closing(fp) as fp: if capture_stderr: - cmd = "%s >'%s' 2>&1" % (commandstring, fp.name) + cmd = f"{commandstring} >'{fp.name}' 2>&1" else: - cmd = "%s 2>/dev/null >'%s'" % (commandstring, fp.name) + cmd = f"{commandstring} 2>/dev/null >'{fp.name}'" return fp.read().decode('utf-8').strip() if not os.system(cmd) else None def _find_build_tool(toolname): """Find a build tool on current path or using xcrun""" - return (_find_executable(toolname) - or _read_output("/usr/bin/xcrun -find %s" % (toolname,)) - or '' - ) + return ( + _find_executable(toolname) + or _read_output(f"/usr/bin/xcrun -find {toolname}") + or '' + ) _SYSTEM_VERSION = None @@ -108,8 +107,8 @@ def _get_system_version(): finally: f.close() if m is not None: - _SYSTEM_VERSION = '.'.join(m.group(1).split('.')[:2]) - # else: fall back to the default behaviour + _SYSTEM_VERSION = '.'.join(m[1].split('.')[:2]) + # else: fall back to the default behaviour return _SYSTEM_VERSION @@ -123,8 +122,7 @@ def _get_system_version_tuple(): """ global _SYSTEM_VERSION_TUPLE if _SYSTEM_VERSION_TUPLE is None: - osx_version = _get_system_version() - if osx_version: + if osx_version := _get_system_version(): try: _SYSTEM_VERSION_TUPLE = tuple(int(i) for i in osx_version.split('.')) except ValueError: @@ -157,7 +155,7 @@ def _default_sysroot(cc): if _cache_default_sysroot is not None: return _cache_default_sysroot - contents = _read_output('%s -c -E -v - "): @@ -183,7 +181,7 @@ def _supports_universal_builds(): # is in support of allowing 10.4 universal builds to run on 10.3.x systems. 
osx_version = _get_system_version_tuple() - return bool(osx_version >= (10, 4)) if osx_version else False + return osx_version >= (10, 4) if osx_version else False def _supports_arm64_builds(): """Returns True if arm64 builds are supported on this system""" @@ -251,7 +249,7 @@ def _find_appropriate_compiler(_config_vars): for cv in _COMPILER_CONFIG_VARS: if cv in _config_vars and cv not in os.environ: cv_split = _config_vars[cv].split() - cv_split[0] = cc if cv != 'CXX' else cc + '++' + cv_split[0] = cc if cv != 'CXX' else f'{cc}++' _save_modified_value(_config_vars, cv, ' '.join(cv_split)) return _config_vars @@ -287,13 +285,11 @@ def _remove_unsupported_archs(_config_vars): return _config_vars if re.search(r'-arch\s+ppc', _config_vars['CFLAGS']) is not None: - # NOTE: Cannot use subprocess here because of bootstrap - # issues when building Python itself - status = os.system( + if status := os.system( """echo 'int main{};' | """ """'%s' -c -arch ppc -x c -o /dev/null /dev/null 2>/dev/null""" - %(_config_vars['CC'].replace("'", "'\"'\"'"),)) - if status: + % (_config_vars['CC'].replace("'", "'\"'\"'"),) + ): # The compile failed for some reason. Because of differences # across Xcode and compiler versions, there is no reliable way # to be sure why it failed. 
Assume here it was due to lack of @@ -322,7 +318,7 @@ def _override_all_archs(_config_vars): if cv in _config_vars and '-arch' in _config_vars[cv]: flags = _config_vars[cv] flags = re.sub(r'-arch\s+\w+\s', ' ', flags) - flags = flags + ' ' + arch + flags = f'{flags} {arch}' _save_modified_value(_config_vars, cv, flags) return _config_vars @@ -343,7 +339,7 @@ def _check_for_unavailable_sdk(_config_vars): cflags = _config_vars.get('CFLAGS', '') m = re.search(r'-isysroot\s*(\S+)', cflags) if m is not None: - sdk = m.group(1) + sdk = m[1] if not os.path.exists(sdk): for cv in _UNIVERSAL_CONFIG_VARS: # Do not alter a config var explicitly overridden by env var @@ -394,7 +390,7 @@ def compiler_fixup(compiler_so, cc_args): if 'ARCHFLAGS' in os.environ and not stripArch: # User specified different -arch flags in the environ, # see also distutils.sysconfig - compiler_so = compiler_so + os.environ['ARCHFLAGS'].split() + compiler_so += os.environ['ARCHFLAGS'].split() if stripSysroot: while True: @@ -422,11 +418,9 @@ def compiler_fixup(compiler_so, cc_args): for idx in indices: if argvar[idx] == '-isysroot': sysroot = argvar[idx+1] - break else: sysroot = argvar[idx][len('-isysroot'):] - break - + break if sysroot and not os.path.isdir(sysroot): sys.stderr.write(f"Compiling with an SDK that doesn't seem to exist: {sysroot}\n") sys.stderr.write("Please check your Xcode installation\n") @@ -508,9 +502,7 @@ def get_platform_osx(_config_vars, osname, release, machine): macver = _config_vars.get('MACOSX_DEPLOYMENT_TARGET', '') macrelease = _get_system_version() or macver - macver = macver or macrelease - - if macver: + if macver := macver or macrelease: release = macver osname = "macosx" @@ -518,11 +510,10 @@ def get_platform_osx(_config_vars, osname, release, machine): # return the same machine type for the platform string. # Otherwise, distutils may consider this a cross-compiling # case and disallow installs. 
- cflags = _config_vars.get(_INITPRE+'CFLAGS', - _config_vars.get('CFLAGS', '')) + cflags = _config_vars.get(f'{_INITPRE}CFLAGS', _config_vars.get('CFLAGS', '')) if macrelease: try: - macrelease = tuple(int(i) for i in macrelease.split('.')[0:2]) + macrelease = tuple(int(i) for i in macrelease.split('.')[:2]) except ValueError: macrelease = (10, 3) else: @@ -566,9 +557,5 @@ def get_platform_osx(_config_vars, osname, release, machine): elif machine in ('PowerPC', 'Power_Macintosh'): # Pick a sane name for the PPC architecture. # See 'i386' case - if sys.maxsize >= 2**32: - machine = 'ppc64' - else: - machine = 'ppc' - + machine = 'ppc64' if sys.maxsize >= 2**32 else 'ppc' return (osname, release, machine) diff --git a/Lib/_py_abc.py b/Lib/_py_abc.py index c870ae9048..06236d9d73 100644 --- a/Lib/_py_abc.py +++ b/Lib/_py_abc.py @@ -51,97 +51,99 @@ def __new__(mcls, name, bases, namespace, /, **kwargs): cls._abc_negative_cache_version = ABCMeta._abc_invalidation_counter return cls - def register(cls, subclass): + def register(self, subclass): """Register a virtual subclass of an ABC. Returns the subclass, to allow usage as a class decorator. """ if not isinstance(subclass, type): raise TypeError("Can only register classes") - if issubclass(subclass, cls): + if issubclass(subclass, self): return subclass # Already a subclass # Subtle: test for cycles *after* testing for "already a subclass"; # this means we allow X.register(X) and interpret it as a no-op. 
- if issubclass(cls, subclass): + if issubclass(self, subclass): # This would create a cycle, which is bad for the algorithm below raise RuntimeError("Refusing to create an inheritance cycle") - cls._abc_registry.add(subclass) + self._abc_registry.add(subclass) ABCMeta._abc_invalidation_counter += 1 # Invalidate negative cache return subclass - def _dump_registry(cls, file=None): + def _dump_registry(self, file=None): """Debug helper to print the ABC registry.""" - print(f"Class: {cls.__module__}.{cls.__qualname__}", file=file) + print(f"Class: {self.__module__}.{self.__qualname__}", file=file) print(f"Inv. counter: {get_cache_token()}", file=file) - for name in cls.__dict__: + for name in self.__dict__: if name.startswith("_abc_"): - value = getattr(cls, name) + value = getattr(self, name) if isinstance(value, WeakSet): value = set(value) print(f"{name}: {value!r}", file=file) - def _abc_registry_clear(cls): + def _abc_registry_clear(self): """Clear the registry (for debugging or testing).""" - cls._abc_registry.clear() + self._abc_registry.clear() - def _abc_caches_clear(cls): + def _abc_caches_clear(self): """Clear the caches (for debugging or testing).""" - cls._abc_cache.clear() - cls._abc_negative_cache.clear() + self._abc_cache.clear() + self._abc_negative_cache.clear() - def __instancecheck__(cls, instance): + def __instancecheck__(self, instance): """Override for isinstance(instance, cls).""" # Inline the cache checking subclass = instance.__class__ - if subclass in cls._abc_cache: + if subclass in self._abc_cache: return True subtype = type(instance) if subtype is subclass: - if (cls._abc_negative_cache_version == - ABCMeta._abc_invalidation_counter and - subclass in cls._abc_negative_cache): + if ( + self._abc_negative_cache_version + == ABCMeta._abc_invalidation_counter + and subclass in self._abc_negative_cache + ): return False # Fall back to the subclass check. 
- return cls.__subclasscheck__(subclass) - return any(cls.__subclasscheck__(c) for c in (subclass, subtype)) + return self.__subclasscheck__(subclass) + return any(self.__subclasscheck__(c) for c in (subclass, subtype)) - def __subclasscheck__(cls, subclass): + def __subclasscheck__(self, subclass): """Override for issubclass(subclass, cls).""" if not isinstance(subclass, type): raise TypeError('issubclass() arg 1 must be a class') # Check cache - if subclass in cls._abc_cache: + if subclass in self._abc_cache: return True # Check negative cache; may have to invalidate - if cls._abc_negative_cache_version < ABCMeta._abc_invalidation_counter: + if self._abc_negative_cache_version < ABCMeta._abc_invalidation_counter: # Invalidate the negative cache - cls._abc_negative_cache = WeakSet() - cls._abc_negative_cache_version = ABCMeta._abc_invalidation_counter - elif subclass in cls._abc_negative_cache: + self._abc_negative_cache = WeakSet() + self._abc_negative_cache_version = ABCMeta._abc_invalidation_counter + elif subclass in self._abc_negative_cache: return False # Check the subclass hook - ok = cls.__subclasshook__(subclass) + ok = self.__subclasshook__(subclass) if ok is not NotImplemented: assert isinstance(ok, bool) if ok: - cls._abc_cache.add(subclass) + self._abc_cache.add(subclass) else: - cls._abc_negative_cache.add(subclass) + self._abc_negative_cache.add(subclass) return ok # Check if it's a direct subclass - if cls in getattr(subclass, '__mro__', ()): - cls._abc_cache.add(subclass) + if self in getattr(subclass, '__mro__', ()): + self._abc_cache.add(subclass) return True # Check if it's a subclass of a registered class (recursive) - for rcls in cls._abc_registry: + for rcls in self._abc_registry: if issubclass(subclass, rcls): - cls._abc_cache.add(subclass) + self._abc_cache.add(subclass) return True # Check if it's a subclass of a subclass (recursive) - for scls in cls.__subclasses__(): + for scls in self.__subclasses__(): if issubclass(subclass, scls): - 
cls._abc_cache.add(subclass) + self._abc_cache.add(subclass) return True # No dice; update negative cache - cls._abc_negative_cache.add(subclass) + self._abc_negative_cache.add(subclass) return False diff --git a/Lib/_pycodecs.py b/Lib/_pycodecs.py index 0741504cc9..56721bdb9e 100644 --- a/Lib/_pycodecs.py +++ b/Lib/_pycodecs.py @@ -76,10 +76,7 @@ def mbcs_decode(): def readbuffer_encode( obj, errors='strict'): """None """ - if isinstance(obj, str): - res = obj.encode() - else: - res = bytes(obj) + res = obj.encode() if isinstance(obj, str) else bytes(obj) return res, len(obj) def escape_encode( obj, errors='strict'): @@ -157,10 +154,7 @@ def charmap_encode(obj, errors='strict', mapping='latin-1'): def charmap_build(s): return {ord(c): i for i, c in enumerate(s)} -if sys.maxunicode == 65535: - unicode_bytes = 2 -else: - unicode_bytes = 4 +unicode_bytes = 2 if sys.maxunicode == 65535 else 4 def unicode_internal_encode( obj, errors='strict'): """None @@ -170,44 +164,42 @@ def unicode_internal_encode( obj, errors='strict'): t = [ord(x) for x in obj] for i in t: b = bytearray() - for j in range(unicode_bytes): + for _ in range(unicode_bytes): b.append(i%256) i >>= 8 if sys.byteorder == "big": b.reverse() p += b res = bytes(p) - return res, len(res) else: res = "You can do better than this" # XXX make this right - return res, len(res) + + return res, len(res) def unicode_internal_decode( unistr, errors='strict'): """None """ if type(unistr) == str: return unistr, len(unistr) + p = [] + i = 0 + if sys.byteorder == "big": + start = unicode_bytes - 1 + stop = -1 + step = -1 else: - p = [] - i = 0 - if sys.byteorder == "big": - start = unicode_bytes - 1 - stop = -1 - step = -1 - else: - start = 0 - stop = unicode_bytes - step = 1 - while i < len(unistr)-unicode_bytes+1: - t = 0 - h = 0 - for j in range(start, stop, step): - t += ord(unistr[i+j])<<(h*8) - h += 1 - i += unicode_bytes - p += chr(t) - res = ''.join(p) - return res, len(res) + start = 0 + stop = unicode_bytes + 
step = 1 + while i < len(unistr)-unicode_bytes+1: + t = sum( + ord(unistr[i + j]) << (h * 8) + for h, j in enumerate(range(start, stop, step)) + ) + i += unicode_bytes + p += chr(t) + res = ''.join(p) + return res, len(res) def utf_16_ex_decode( data, errors='strict', byteorder=0, final=0): """None @@ -238,37 +230,36 @@ def escape_decode(data, errors='strict'): i += 1 if i >= l: raise ValueError("Trailing \\ in string") - else: - if data[i] == '\\': - res += b'\\' - elif data[i] == 'n': - res += b'\n' - elif data[i] == 't': - res += b'\t' - elif data[i] == 'r': - res += b'\r' - elif data[i] == 'b': - res += b'\b' - elif data[i] == '\'': - res += b'\'' - elif data[i] == '\"': - res += b'\"' - elif data[i] == 'f': - res += b'\f' - elif data[i] == 'a': - res += b'\a' - elif data[i] == 'v': - res += b'\v' - elif '0' <= data[i] <= '9': - # emulate a strange wrap-around behavior of CPython: - # \400 is the same as \000 because 0400 == 256 - octal = data[i:i+3] - res.append(int(octal, 8) & 0xFF) - i += 2 - elif data[i] == 'x': - hexa = data[i+1:i+3] - res.append(int(hexa, 16)) - i += 2 + if data[i] == '\\': + res += b'\\' + elif data[i] == 'n': + res += b'\n' + elif data[i] == 't': + res += b'\t' + elif data[i] == 'r': + res += b'\r' + elif data[i] == 'b': + res += b'\b' + elif data[i] == '\'': + res += b'\'' + elif data[i] == '\"': + res += b'\"' + elif data[i] == 'f': + res += b'\f' + elif data[i] == 'a': + res += b'\a' + elif data[i] == 'v': + res += b'\v' + elif '0' <= data[i] <= '9': + # emulate a strange wrap-around behavior of CPython: + # \400 is the same as \000 because 0400 == 256 + octal = data[i:i+3] + res.append(int(octal, 8) & 0xFF) + i += 2 + elif data[i] == 'x': + hexa = data[i+1:i+3] + res.append(int(hexa, 16)) + i += 2 else: res.append(data[i]) i += 1 @@ -436,20 +427,20 @@ def PyUnicode_DecodeUTF7(s, size, errors): while i < size: ch = bytes([s[i]]) - if (inShift): + if inShift: + i += 1 + if ((ch == b'-') or not B64CHAR(ch)): inShift = 0 - i += 1 - 
while (bitsleft >= 16): outCh = ((charsleft) >> (bitsleft-16)) & 0xffff bitsleft -= 16 - - if (surrogate): + + if surrogate: ## We have already generated an error for the high surrogate ## so let's not bother seeing if the low surrogate is correct or not surrogate = 0 - elif (0xDC00 <= (outCh) and (outCh) <= 0xDFFF): + elif 0xDC00 <= outCh <= 0xDFFF: ## This is a surrogate pair. Unfortunately we can't represent ## it in a 16-bit character surrogate = 1 @@ -467,7 +458,7 @@ def PyUnicode_DecodeUTF7(s, size, errors): ## but that is not the case here */ msg = "partial character in shift sequence" out, x = unicode_call_errorhandler(errors, 'utf-7', msg, s, i-1, i) - + ## /* According to RFC2152 the remaining bits should be zero. We ## choose to signal an error/insert a replacement character ## here so indicate the potential of a misencoded character. */ @@ -479,17 +470,15 @@ def PyUnicode_DecodeUTF7(s, size, errors): if ((i < size) and (s[i] == '-')) : p += '-' inShift = 1 - + elif SPECIAL(ch, 0, 0) : raise UnicodeDecodeError("unexpected special character") - + else: p.append(chr(ord(ch))) else: charsleft = (charsleft << 6) | UB64(ch) bitsleft += 6 - i += 1 -## /* p, charsleft, bitsleft, surrogate = */ DECODE(p, charsleft, bitsleft, surrogate); elif ( ch == b'+' ): startinpos = i i += 1 @@ -499,7 +488,7 @@ def PyUnicode_DecodeUTF7(s, size, errors): else: inShift = 1 bitsleft = 0 - + elif (SPECIAL(ch, 0, 0)): i += 1 raise UnicodeDecodeError("unexpected special character") @@ -511,31 +500,18 @@ def PyUnicode_DecodeUTF7(s, size, errors): #XXX This aint right endinpos = size raise UnicodeDecodeError("unterminated shift sequence") - + return p def PyUnicode_EncodeUTF7(s, size, encodeSetO, encodeWhiteSpace, errors): # /* It might be possible to tighten this worst case */ inShift = False - i = 0 bitsleft = 0 charsleft = 0 out = [] - for ch in s: - if (not inShift) : - if (ch == '+'): - out.append(b'+-') - elif (SPECIAL(ch, encodeSetO, encodeWhiteSpace)): - charsleft = ord(ch) 
- bitsleft = 16 - out.append(b'+') - p, bitsleft = ENCODE( charsleft, bitsleft) - out.append(p) - inShift = bitsleft > 0 - else: - out.append(bytes([ord(ch)])) - else: + for i, ch in enumerate(s): + if inShift: if (not SPECIAL(ch, encodeSetO, encodeWhiteSpace)): out.append(B64((charsleft) << (6-bitsleft))) charsleft = 0 @@ -570,12 +546,19 @@ def PyUnicode_EncodeUTF7(s, size, encodeSetO, encodeWhiteSpace, errors): else: out.append(b'-') inShift = False - i += 1 - - if (bitsleft): - out.append(B64(charsleft << (6-bitsleft) ) ) - out.append(b'-') - + elif (ch == '+'): + out.append(b'+-') + elif (SPECIAL(ch, encodeSetO, encodeWhiteSpace)): + charsleft = ord(ch) + bitsleft = 16 + out.append(b'+') + p, bitsleft = ENCODE( charsleft, bitsleft) + out.append(p) + inShift = bitsleft > 0 + else: + out.append(bytes([ord(ch)])) + if bitsleft: + out.extend((B64(charsleft << (6-bitsleft) ), b'-')) return out unicode_empty = '' @@ -592,40 +575,36 @@ def unicodeescape_string(s, size, quotes): while (pos < size): ch = s[pos] #/* Escape quotes */ - if (quotes and (ch == p[1] or ch == '\\')): + if quotes and ch in [p[1], '\\']: p.append(b'\\%c' % ord(ch)) pos += 1 continue -#ifdef Py_UNICODE_WIDE - #/* Map 21-bit characters to '\U00xxxxxx' */ elif (ord(ch) >= 0x10000): p.append(b'\\U%08x' % ord(ch)) pos += 1 - continue -#endif - #/* Map UTF-16 surrogate pairs to Unicode \UXXXXXXXX escapes */ + continue elif (ord(ch) >= 0xD800 and ord(ch) < 0xDC00): pos += 1 ch2 = s[pos] - + if (ord(ch2) >= 0xDC00 and ord(ch2) <= 0xDFFF): ucs = (((ord(ch) & 0x03FF) << 10) | (ord(ch2) & 0x03FF)) + 0x00010000 p.append(b'\\U%08x' % ucs) pos += 1 continue - + #/* Fall through: isolated surrogates are copied as-is */ pos -= 1 - + #/* Map 16-bit characters to '\uxxxx' */ if (ord(ch) >= 256): p.append(b'\\u%04x' % ord(ch)) - + #/* Map special whitespace to '\t', \n', '\r' */ elif (ch == '\t'): p.append(b'\\t') - + elif (ch == '\n'): p.append(b'\\n') @@ -675,7 +654,7 @@ def PyUnicode_EncodeASCII(p, size, 
errors): def PyUnicode_AsASCIIString(unistr): - if not type(unistr) == str: + if type(unistr) != str: raise TypeError return PyUnicode_EncodeASCII(str(unistr), len(str), @@ -693,7 +672,7 @@ def PyUnicode_DecodeUTF16Stateful(s, size, errors, byteorder='native', final=Tru else: ihi = 0 ilo = 1 - + #/* Unpack UTF-16 encoded data */ @@ -703,32 +682,35 @@ def PyUnicode_DecodeUTF16Stateful(s, size, errors, byteorder='native', final=Tru ## stream as-is (giving a ZWNBSP character). */ q = 0 p = [] - if byteorder == 'native': + if byteorder == 'little': + bo = -1 + elif byteorder == 'native': if (size >= 2): bom = (s[ihi] << 8) | s[ilo] #ifdef BYTEORDER_IS_LITTLE_ENDIAN - if sys.byteorder == 'little': - if (bom == 0xFEFF): - q += 2 - bo = -1 - elif bom == 0xFFFE: - q += 2 - bo = 1 - else: - if bom == 0xFEFF: - q += 2 - bo = 1 - elif bom == 0xFFFE: - q += 2 - bo = -1 - elif byteorder == 'little': - bo = -1 + if ( + sys.byteorder == 'little' + and bom == 0xFEFF + or sys.byteorder != 'little' + and bom != 0xFEFF + and bom == 0xFFFE + ): + q += 2 + bo = -1 + elif ( + sys.byteorder == 'little' + and bom == 0xFFFE + or sys.byteorder != 'little' + and bom == 0xFEFF + ): + q += 2 + bo = 1 else: bo = 1 - + if (size == 0): return [''], 0, bo - + if (bo == -1): #/* force LE */ ihi = 1 @@ -740,7 +722,7 @@ def PyUnicode_DecodeUTF16Stateful(s, size, errors, byteorder='native', final=Tru ilo = 1 while (q < len(s)): - + #/* remaining bytes at the end? 
(size should be even) */ if (len(s)-q<2): if not final: @@ -751,14 +733,14 @@ def PyUnicode_DecodeUTF16Stateful(s, size, errors, byteorder='native', final=Tru unicode_call_errorhandler(errors, 'utf-16', errmsg, s, startinpos, endinpos, True) # /* The remaining input chars are ignored if the callback ## chooses to skip the input */ - + ch = (s[q+ihi] << 8) | s[q+ilo] q += 2 - + if (ch < 0xD800 or ch > 0xDFFF): p.append(chr(ch)) continue - + #/* UTF-16 code pair: */ if (q >= len(s)): errmsg = "unexpected end of data" @@ -766,10 +748,10 @@ def PyUnicode_DecodeUTF16Stateful(s, size, errors, byteorder='native', final=Tru endinpos = len(s) unicode_call_errorhandler(errors, 'utf-16', errmsg, s, startinpos, endinpos, True) - if (0xD800 <= ch and ch <= 0xDBFF): + if ch <= 0xDBFF: ch2 = (s[q+ihi] << 8) | s[q+ilo] q += 2 - if (0xDC00 <= ch2 and ch2 <= 0xDFFF): + if 0xDC00 <= ch2 <= 0xDFFF: #ifndef Py_UNICODE_WIDE if sys.maxunicode < 65536: p += [chr(ch), chr(ch2)] @@ -783,12 +765,12 @@ def PyUnicode_DecodeUTF16Stateful(s, size, errors, byteorder='native', final=Tru startinpos = q-4 endinpos = startinpos+2 unicode_call_errorhandler(errors, 'utf-16', errmsg, s, startinpos, endinpos, True) - + errmsg = "illegal encoding" startinpos = q-2 endinpos = startinpos+2 unicode_call_errorhandler(errors, 'utf-16', errmsg, s, startinpos, endinpos, True) - + return p, q, bo # moved out of local scope, especially because it didn't @@ -797,10 +779,7 @@ def PyUnicode_DecodeUTF16Stateful(s, size, errors, byteorder='native', final=Tru def STORECHAR(CH, byteorder): hi = (CH >> 8) & 0xff lo = CH & 0xff - if byteorder == 'little': - return [lo, hi] - else: - return [hi, lo] + return [lo, hi] if byteorder == 'little' else [hi, lo] def PyUnicode_EncodeUTF16(s, size, errors, byteorder='little'): @@ -860,7 +839,9 @@ def unicode_call_errorhandler(errors, encoding, raise IndexError( "position %d from error handler out of bounds" % newpos) return res[0], newpos else: - raise TypeError("encoding error 
handler must return (unicode, int) tuple, not %s" % repr(res)) + raise TypeError( + f"encoding error handler must return (unicode, int) tuple, not {repr(res)}" + ) #/* --- Latin-1 Codec ------------------------------------------------------ */ @@ -908,8 +889,7 @@ def unicode_encode_ucs1(p, size, errors, limit): return res def PyUnicode_EncodeLatin1(p, size, errors): - res = unicode_encode_ucs1(p, size, errors, 256) - return res + return unicode_encode_ucs1(p, size, errors, 256) hexdigits = [ord(hex(i)[-1]) for i in range(16)]+[ord(hex(i)[-1].upper()) for i in range(10, 16)] @@ -936,8 +916,7 @@ def hexescape(s, pos, digits, message, errors): elif (ch <= 0x10ffff): ch -= 0x10000 - p.append(chr(0xD800 + (ch >> 10))) - p.append(chr(0xDC00 + (ch & 0x03FF))) + p.extend((chr(0xD800 + (ch >> 10)), chr(0xDC00 + (ch & 0x03FF)))) pos += digits else: message = "illegal Unicode character" diff --git a/Lib/_pydecimal.py b/Lib/_pydecimal.py index e7df67dc9b..2bf390e9ed 100644 --- a/Lib/_pydecimal.py +++ b/Lib/_pydecimal.py @@ -596,10 +596,7 @@ def __new__(cls, value="0", context=None): return context._raise_error(ConversionSyntax, "Invalid literal for Decimal: %r" % value) - if m.group('sign') == "-": - self._sign = 1 - else: - self._sign = 0 + self._sign = 1 if m.group('sign') == "-" else 0 intpart = m.group('int') if intpart is not None: # finite number @@ -613,10 +610,7 @@ def __new__(cls, value="0", context=None): if diag is not None: # NaN self._int = str(int(diag or '0')).lstrip('0') - if m.group('signal'): - self._exp = 'N' - else: - self._exp = 'n' + self._exp = 'N' if m.group('signal') else 'n' else: # infinity self._int = '0' @@ -626,10 +620,7 @@ def __new__(cls, value="0", context=None): # From an integer if isinstance(value, int): - if value >= 0: - self._sign = 0 - else: - self._sign = 1 + self._sign = 0 if value >= 0 else 1 self._exp = 0 self._int = str(abs(value)) self._is_special = False @@ -658,7 +649,7 @@ def __new__(cls, value="0", context=None): 'from list or 
tuple. The list or tuple ' 'should have exactly three elements.') # process sign. The isinstance test rejects floats - if not (isinstance(value[0], int) and value[0] in (0,1)): + if not isinstance(value[0], int) or value[0] not in (0, 1): raise ValueError("Invalid sign. The first value in the tuple " "should be an integer; either 0 for a " "positive number or 1 for a negative number.") @@ -672,14 +663,13 @@ def __new__(cls, value="0", context=None): # process and validate the digits in value[1] digits = [] for digit in value[1]: - if isinstance(digit, int) and 0 <= digit <= 9: - # skip leading zeros - if digits or digit != 0: - digits.append(digit) - else: + if not isinstance(digit, int) or not 0 <= digit <= 9: raise ValueError("The second value in the tuple must " "be composed of integers in the range " "0 through 9.") + # skip leading zeros + if digits or digit != 0: + digits.append(digit) if value[2] in ('n', 'N'): # NaN: digits form the diagnostic self._int = ''.join(map(str, digits)) @@ -739,17 +729,11 @@ def from_float(cls, f): raise TypeError("argument must be int or float.") if _math.isinf(f) or _math.isnan(f): return cls(repr(f)) - if _math.copysign(1.0, f) == 1.0: - sign = 0 - else: - sign = 1 + sign = 0 if _math.copysign(1.0, f) == 1.0 else 1 n, d = abs(f).as_integer_ratio() k = d.bit_length() - 1 result = _dec_from_triple(sign, str(n*5**k), -k) - if cls is Decimal: - return result - else: - return cls(result) + return result if cls is Decimal else cls(result) def _isnan(self): """Returns whether the number is not actually one. 
@@ -774,9 +758,7 @@ def _isinfinity(self): -1 if -INF """ if self._exp == 'F': - if self._sign: - return -1 - return 1 + return -1 if self._sign else 1 return 0 def _check_nans(self, other=None, context=None): @@ -790,11 +772,7 @@ def _check_nans(self, other=None, context=None): """ self_is_nan = self._isnan() - if other is None: - other_is_nan = False - else: - other_is_nan = other._isnan() - + other_is_nan = False if other is None else other._isnan() if self_is_nan or other_is_nan: if context is None: context = getcontext() @@ -805,10 +783,7 @@ def _check_nans(self, other=None, context=None): if other_is_nan == 2: return context._raise_error(InvalidOperation, 'sNaN', other) - if self_is_nan: - return self._fix_nan(context) - - return other._fix_nan(context) + return self._fix_nan(context) if self_is_nan else other._fix_nan(context) return 0 def _compare_check_nans(self, other, context): @@ -869,10 +844,7 @@ def _cmp(self, other): # check for zeros; Decimal('0') == Decimal('-0') if not self: - if not other: - return 0 - else: - return -((-1)**other._sign) + return 0 if not other else -((-1)**other._sign) if not other: return (-1)**self._sign @@ -919,16 +891,13 @@ def __eq__(self, other, context=None): self, other = _convert_for_comparison(self, other, equality_op=True) if other is NotImplemented: return other - if self._check_nans(other, context): - return False - return self._cmp(other) == 0 + return False if self._check_nans(other, context) else self._cmp(other) == 0 def __lt__(self, other, context=None): self, other = _convert_for_comparison(self, other) if other is NotImplemented: return other - ans = self._compare_check_nans(other, context) - if ans: + if ans := self._compare_check_nans(other, context): return False return self._cmp(other) < 0 @@ -936,8 +905,7 @@ def __le__(self, other, context=None): self, other = _convert_for_comparison(self, other) if other is NotImplemented: return other - ans = self._compare_check_nans(other, context) - if ans: + if ans 
:= self._compare_check_nans(other, context): return False return self._cmp(other) <= 0 @@ -945,8 +913,7 @@ def __gt__(self, other, context=None): self, other = _convert_for_comparison(self, other) if other is NotImplemented: return other - ans = self._compare_check_nans(other, context) - if ans: + if ans := self._compare_check_nans(other, context): return False return self._cmp(other) > 0 @@ -954,8 +921,7 @@ def __ge__(self, other, context=None): self, other = _convert_for_comparison(self, other) if other is NotImplemented: return other - ans = self._compare_check_nans(other, context) - if ans: + if ans := self._compare_check_nans(other, context): return False return self._cmp(other) >= 0 @@ -971,8 +937,7 @@ def compare(self, other, context=None): # Compare(NaN, NaN) = NaN if (self._is_special or other and other._is_special): - ans = self._check_nans(other, context) - if ans: + if ans := self._check_nans(other, context): return ans return Decimal(self._cmp(other)) @@ -990,11 +955,7 @@ def __hash__(self): elif self.is_nan(): return _PyHASH_NAN else: - if self._sign: - return -_PyHASH_INF - else: - return _PyHASH_INF - + return -_PyHASH_INF if self._sign else _PyHASH_INF if self._exp >= 0: exp_hash = pow(10, self._exp, _PyHASH_MODULUS) else: @@ -1049,8 +1010,7 @@ def as_integer_ratio(self): # (n & -n).bit_length() - 1 counts trailing zeros in binary # representation of n (provided n is nonzero). d2 = -self._exp - shift2 = min((n & -n).bit_length() - 1, d2) - if shift2: + if shift2 := min((n & -n).bit_length() - 1, d2): n >>= shift2 d2 -= shift2 @@ -1063,7 +1023,7 @@ def as_integer_ratio(self): def __repr__(self): """Represents the number as an instance of Decimal.""" # Invariant: eval(repr(d)) == d - return "Decimal('%s')" % str(self) + return f"Decimal('{str(self)}')" def __str__(self, eng=False, context=None): """Return string representation of the number in scientific notation. 
@@ -1074,11 +1034,11 @@ def __str__(self, eng=False, context=None): sign = ['', '-'][self._sign] if self._is_special: if self._exp == 'F': - return sign + 'Infinity' + return f'{sign}Infinity' elif self._exp == 'n': - return sign + 'NaN' + self._int + return f'{sign}NaN{self._int}' else: # self._exp == 'N' - return sign + 'sNaN' + self._int + return f'{sign}sNaN{self._int}' # number of digits of self._int to left of decimal point leftdigits = self._exp + len(self._int) @@ -1107,7 +1067,7 @@ def __str__(self, eng=False, context=None): fracpart = '' else: intpart = self._int[:dotplace] - fracpart = '.' + self._int[dotplace:] + fracpart = f'.{self._int[dotplace:]}' if leftdigits == dotplace: exp = '' else: @@ -1139,13 +1099,13 @@ def __neg__(self, context=None): if context is None: context = getcontext() - if not self and context.rounding != ROUND_FLOOR: + if self or context.rounding == ROUND_FLOOR: + ans = self.copy_negate() + + else: # -Decimal('0') is Decimal('0'), not Decimal('-0'), except # in ROUND_FLOOR rounding mode. ans = self.copy_abs() - else: - ans = self.copy_negate() - return ans._fix(context) def __pos__(self, context=None): @@ -1161,12 +1121,12 @@ def __pos__(self, context=None): if context is None: context = getcontext() - if not self and context.rounding != ROUND_FLOOR: - # + (-0) = 0, except in ROUND_FLOOR rounding mode. - ans = self.copy_abs() - else: + if self or context.rounding == ROUND_FLOOR: ans = Decimal(self) + else: + # + (-0) = 0, except in ROUND_FLOOR rounding mode. 
+ ans = self.copy_abs() return ans._fix(context) def __abs__(self, round=True, context=None): @@ -1223,9 +1183,7 @@ def __add__(self, other, context=None): negativezero = 1 if not self and not other: - sign = min(self._sign, other._sign) - if negativezero: - sign = 1 + sign = 1 if negativezero else min(self._sign, other._sign) ans = _dec_from_triple(sign, '0', exp) ans = ans._fix(context) return ans @@ -1267,11 +1225,7 @@ def __add__(self, other, context=None): result.sign = 0 # Now, op1 > abs(op2) > 0 - if op2.sign == 0: - result.int = op1.int + op2.int - else: - result.int = op1.int - op2.int - + result.int = op1.int + op2.int if op2.sign == 0 else op1.int - op2.int result.exp = op1.exp ans = Decimal(result) ans = ans._fix(context) @@ -1286,8 +1240,7 @@ def __sub__(self, other, context=None): return other if self._is_special or other._is_special: - ans = self._check_nans(other, context=context) - if ans: + if ans := self._check_nans(other, context=context): return ans # self - other is computed as self + other.copy_negate() @@ -1424,11 +1377,7 @@ def _divide(self, other, context): infinite and that other is nonzero. 
""" sign = self._sign ^ other._sign - if other._isinfinity(): - ideal_exp = self._exp - else: - ideal_exp = min(self._exp, other._exp) - + ideal_exp = self._exp if other._isinfinity() else min(self._exp, other._exp) expdiff = self.adjusted() - other.adjusted() if not self or other._isinfinity() or expdiff <= -2: return (_dec_from_triple(sign, '0', 0), @@ -1468,27 +1417,24 @@ def __divmod__(self, other, context=None): if context is None: context = getcontext() - ans = self._check_nans(other, context) - if ans: + if ans := self._check_nans(other, context): return (ans, ans) sign = self._sign ^ other._sign if self._isinfinity(): - if other._isinfinity(): - ans = context._raise_error(InvalidOperation, 'divmod(INF, INF)') - return ans, ans - else: + if not other._isinfinity(): return (_SignedInfinity[sign], context._raise_error(InvalidOperation, 'INF % x')) + ans = context._raise_error(InvalidOperation, 'divmod(INF, INF)') + return ans, ans if not other: - if not self: - ans = context._raise_error(DivisionUndefined, 'divmod(0, 0)') - return ans, ans - else: + if self: return (context._raise_error(DivisionByZero, 'x // 0', sign), context._raise_error(InvalidOperation, 'x % 0')) + ans = context._raise_error(DivisionUndefined, 'divmod(0, 0)') + return ans, ans quotient, remainder = self._divide(other, context) remainder = remainder._fix(context) return quotient, remainder @@ -1511,8 +1457,7 @@ def __mod__(self, other, context=None): if context is None: context = getcontext() - ans = self._check_nans(other, context) - if ans: + if ans := self._check_nans(other, context): return ans if self._isinfinity(): @@ -1618,8 +1563,7 @@ def __floordiv__(self, other, context=None): if context is None: context = getcontext() - ans = self._check_nans(other, context) - if ans: + if ans := self._check_nans(other, context): return ans if self._isinfinity(): diff --git a/Lib/_pyio.py b/Lib/_pyio.py index 56e9a0cb33..df0e8148b2 100644 --- a/Lib/_pyio.py +++ b/Lib/_pyio.py @@ -271,10 +271,8 @@ 
def open(file, mode="r", buffering=-1, encoding=None, errors=None, buffer = BufferedRandom(raw, buffering) elif creating or writing or appending: buffer = BufferedWriter(raw, buffering) - elif reading: - buffer = BufferedReader(raw, buffering) else: - raise ValueError("unknown mode: %r" % mode) + buffer = BufferedReader(raw, buffering) result = buffer if binary: return result @@ -373,8 +371,7 @@ class IOBase(metaclass=abc.ABCMeta): def _unsupported(self, name): """Internal: raise an OSError exception for unsupported operations.""" - raise UnsupportedOperation("%s.%s() not supported" % - (self.__class__.__name__, name)) + raise UnsupportedOperation(f"{self.__class__.__name__}.{name}() not supported") ### Positioning ### @@ -593,10 +590,10 @@ def __iter__(self): return self def __next__(self): - line = self.readline() - if not line: + if line := self.readline(): + return line + else: raise StopIteration - return line def readlines(self, hint=None): """Return a list of lines from the stream. @@ -668,11 +665,7 @@ def readall(self): if not data: break res += data - if res: - return bytes(res) - else: - # b'' or None - return data + return bytes(res) if res else data def readinto(self, b): """Read bytes into a pre-allocated bytes-like object b. 
@@ -768,10 +761,7 @@ def _readinto(self, b, read1): b = memoryview(b) b = b.cast('B') - if read1: - data = self.read1(len(b)) - else: - data = self.read(len(b)) + data = self.read1(len(b)) if read1 else self.read(len(b)) n = len(data) b[:n] = data @@ -895,7 +885,7 @@ def __repr__(self): try: name = self.name except AttributeError: - return "<{}.{}>".format(modname, clsname) + return f"<{modname}.{clsname}>" else: return "<{}.{} name={!r}>".format(modname, clsname, name) @@ -1106,10 +1096,7 @@ def _read_unlocked(self, n=None): self._reset_read_buf() if hasattr(self.raw, 'readall'): chunk = self.raw.readall() - if chunk is None: - return buf[pos:] or None - else: - return buf[pos:] + chunk + return buf[pos:] or None if chunk is None else buf[pos:] + chunk chunks = [buf[pos:]] # Strip the consumed bytes. current_size = 0 while True: @@ -1162,8 +1149,7 @@ def _peek_unlocked(self, n=0): have = len(self._read_buf) - self._read_pos if have < want or have <= 0: to_read = self.buffer_size - have - current = self.raw.read(to_read) - if current: + if current := self.raw.read(to_read): self._read_buf = self._read_buf[self._read_pos:] + current self._read_pos = 0 return self._read_buf[self._read_pos:] @@ -1202,11 +1188,9 @@ def _readinto(self, buf, read1): with self._read_lock: while written < len(buf): - # First try to read from internal buffer - avail = min(len(self._read_buf) - self._read_pos, len(buf)) - if avail: + if avail := min(len(self._read_buf) - self._read_pos, len(buf)): buf[written:written+avail] = \ - self._read_buf[self._read_pos:self._read_pos+avail] + self._read_buf[self._read_pos:self._read_pos+avail] self._read_pos += avail written += avail if written == len(buf): @@ -1215,14 +1199,12 @@ def _readinto(self, buf, read1): # If remaining space in callers buffer is larger than # internal buffer, read directly into callers buffer if len(buf) - written > self.buffer_size: - n = self.raw.readinto(buf[written:]) - if not n: - break # eof - written += n + if n := 
self.raw.readinto(buf[written:]): + written += n - # Otherwise refill internal buffer - unless we're - # in read1 mode and already got some data - elif not (read1 and written): + else: + break # eof + elif not read1 or not written: if not self._peek_unlocked(1): break # eof @@ -1537,9 +1519,9 @@ def __init__(self, file, mode='r', closefd=True, opener=None): fd = -1 if not isinstance(mode, str): - raise TypeError('invalid mode: %s' % (mode,)) + raise TypeError(f'invalid mode: {mode}') if not set(mode) <= set('xrwab+'): - raise ValueError('invalid mode: %s' % (mode,)) + raise ValueError(f'invalid mode: {mode}') if sum(c in 'rwax' for c in mode) != 1 or mode.count('+') > 1: raise ValueError('Must have exactly one of create/read/write/append ' 'mode and at most one plus') @@ -1591,8 +1573,7 @@ def __init__(self, file, mode='r', closefd=True, opener=None): raise OSError('Negative file descriptor') owned_fd = fd if not noinherit_flag: - os.set_inheritable(fd, False) - + os.set_inheritable(owned_fd, False) self._closefd = closefd fdfstat = os.fstat(fd) try: @@ -1638,10 +1619,9 @@ def __getstate__(self): raise TypeError(f"cannot pickle {self.__class__.__name__!r} object") def __repr__(self): - class_name = '%s.%s' % (self.__class__.__module__, - self.__class__.__qualname__) + class_name = f'{self.__class__.__module__}.{self.__class__.__qualname__}' if self.closed: - return '<%s [closed]>' % class_name + return f'<{class_name} [closed]>' try: name = self.name except AttributeError: @@ -1822,20 +1802,11 @@ def closefd(self): def mode(self): """String giving the file mode""" if self._created: - if self._readable: - return 'xb+' - else: - return 'xb' + return 'xb+' if self._readable else 'xb' elif self._appending: - if self._readable: - return 'ab+' - else: - return 'ab' + return 'ab+' if self._readable else 'ab' elif self._readable: - if self._writable: - return 'rb+' - else: - return 'rb' + return 'rb+' if self._writable else 'rb' else: return 'wb' @@ -2113,8 +2084,7 @@ def 
_configure(self, encoding=None, errors=None, newline=None, # - "chars_..." for integer variables that count decoded characters def __repr__(self): - result = "<{}.{}".format(self.__class__.__module__, - self.__class__.__qualname__) + result = f"<{self.__class__.__module__}.{self.__class__.__qualname__}" try: name = self.name except AttributeError: @@ -2164,18 +2134,14 @@ def reconfigure(self, *, "after the first read") if errors is None: - if encoding is None: - errors = self._errors - else: - errors = 'strict' + errors = self._errors if encoding is None else 'strict' elif not isinstance(errors, str): raise TypeError("invalid errors: %r" % errors) if encoding is None: encoding = self._encoding - else: - if not isinstance(encoding, str): - raise TypeError("invalid encoding: %r" % encoding) + elif not isinstance(encoding, str): + raise TypeError("invalid encoding: %r" % encoding) if newline is Ellipsis: newline = self._readnl @@ -2231,8 +2197,7 @@ def write(self, s): if self.closed: raise ValueError("write to closed file") if not isinstance(s, str): - raise TypeError("can't write %s to text stream" % - s.__class__.__name__) + raise TypeError(f"can't write {s.__class__.__name__} to text stream") length = len(s) haslf = (self._writetranslate or self._line_buffering) and "\n" in s if haslf and self._writetranslate and self._writenl != "\n": @@ -2551,7 +2516,6 @@ def read(self, size=None): decoder.decode(self.buffer.read(), final=True)) self._set_decoded_chars('') self._snapshot = None - return result else: # Keep reading chunks until we have size characters to return. 
eof = False @@ -2559,7 +2523,8 @@ def read(self, size=None): while len(result) < size and not eof: eof = not self._read_chunk() result += self._get_decoded_chars(size - len(result)) - return result + + return result def __next__(self): self._telling = False diff --git a/Lib/_sitebuiltins.py b/Lib/_sitebuiltins.py index 3e07ead16e..c699ee6d21 100644 --- a/Lib/_sitebuiltins.py +++ b/Lib/_sitebuiltins.py @@ -16,7 +16,7 @@ def __init__(self, name, eof): self.name = name self.eof = eof def __repr__(self): - return 'Use %s() or %s to exit' % (self.name, self.eof) + return f'Use {self.name}() or {self.eof} to exit' def __call__(self, code=None): # Shells like IDLE catch the SystemExit, but listen when their # stdin wrapper is closed. diff --git a/Lib/_threading_local.py b/Lib/_threading_local.py index e520433998..ca5dc3e351 100644 --- a/Lib/_threading_local.py +++ b/Lib/_threading_local.py @@ -156,7 +156,7 @@ def __init__(self): # The key used in the Thread objects' attribute dicts. # We keep it a string for speed but make it unlikely to clash with # a "real" attribute. - self.key = '_threading_local._localimpl.' + str(id(self)) + self.key = f'_threading_local._localimpl.{id(self)}' # { id(Thread) -> (ref(Thread), thread-local dict) } self.dicts = {} diff --git a/Lib/abc.py b/Lib/abc.py index bfccab2dfc..b6dbe1684c 100644 --- a/Lib/abc.py +++ b/Lib/abc.py @@ -111,40 +111,44 @@ def __new__(mcls, name, bases, namespace, /, **kwargs): _abc_init(cls) return cls - def register(cls, subclass): + def register(self, subclass): """Register a virtual subclass of an ABC. Returns the subclass, to allow usage as a class decorator. 
""" - return _abc_register(cls, subclass) + return _abc_register(self, subclass) - def __instancecheck__(cls, instance): + def __instancecheck__(self, instance): """Override for isinstance(instance, cls).""" - return _abc_instancecheck(cls, instance) + return _abc_instancecheck(self, instance) - def __subclasscheck__(cls, subclass): + def __subclasscheck__(self, subclass): """Override for issubclass(subclass, cls).""" - return _abc_subclasscheck(cls, subclass) + return _abc_subclasscheck(self, subclass) - def _dump_registry(cls, file=None): + def _dump_registry(self, file=None): """Debug helper to print the ABC registry.""" - print(f"Class: {cls.__module__}.{cls.__qualname__}", file=file) + print(f"Class: {self.__module__}.{self.__qualname__}", file=file) print(f"Inv. counter: {get_cache_token()}", file=file) - (_abc_registry, _abc_cache, _abc_negative_cache, - _abc_negative_cache_version) = _get_dump(cls) + ( + _abc_registry, + _abc_cache, + _abc_negative_cache, + _abc_negative_cache_version, + ) = _get_dump(self) print(f"_abc_registry: {_abc_registry!r}", file=file) print(f"_abc_cache: {_abc_cache!r}", file=file) print(f"_abc_negative_cache: {_abc_negative_cache!r}", file=file) print(f"_abc_negative_cache_version: {_abc_negative_cache_version!r}", file=file) - def _abc_registry_clear(cls): + def _abc_registry_clear(self): """Clear the registry (for debugging or testing).""" - _reset_registry(cls) + _reset_registry(self) - def _abc_caches_clear(cls): + def _abc_caches_clear(self): """Clear the caches (for debugging or testing).""" - _reset_caches(cls) + _reset_caches(self) def update_abstractmethods(cls): diff --git a/Lib/aifc.py b/Lib/aifc.py index 1916e7ef8e..7ce87e1a93 100644 --- a/Lib/aifc.py +++ b/Lib/aifc.py @@ -171,10 +171,7 @@ def _read_ushort(file): def _read_string(file): length = ord(file.read(1)) - if length == 0: - data = b'' - else: - data = file.read(length) + data = b'' if length == 0 else file.read(length) if length & 1 == 0: dummy = file.read(1) 
return data @@ -409,9 +406,7 @@ def getparams(self): self.getcomptype(), self.getcompname()) def getmarkers(self): - if len(self._markers) == 0: - return None - return self._markers + return None if len(self._markers) == 0 else self._markers def getmark(self, id): for marker in self._markers: @@ -429,8 +424,7 @@ def readframes(self, nframes): if self._ssnd_seek_needed: self._ssnd_chunk.seek(0) dummy = self._ssnd_chunk.read(8) - pos = self._soundpos * self._framesize - if pos: + if pos := self._soundpos * self._framesize: self._ssnd_chunk.seek(pos + 8) self._ssnd_seek_needed = 0 if nframes == 0: @@ -485,7 +479,7 @@ def _read_comm_chunk(self, chunk): if kludge: length = ord(chunk.file.read(1)) if length & 1 == 0: - length = length + 1 + length += 1 chunk.chunksize = chunk.chunksize + length chunk.file.seek(-1, 1) #DEBUG end @@ -509,7 +503,7 @@ def _readmark(self, chunk): # Some files appear to contain invalid counts. # Cope with this by testing for EOF. try: - for i in range(nmarkers): + for _ in range(nmarkers): id = _read_short(chunk) pos = _read_long(chunk) name = _read_string(chunk) @@ -519,9 +513,7 @@ def _readmark(self, chunk): # a position 0 and name '' self._markers.append((id, pos, name)) except EOFError: - w = ('Warning: MARK chunk contains only %s marker%s instead of %s' % - (len(self._markers), '' if len(self._markers) == 1 else 's', - nmarkers)) + w = f"Warning: MARK chunk contains only {len(self._markers)} marker{'' if len(self._markers) == 1 else 's'} instead of {nmarkers}" warnings.warn(w) class Aifc_write: @@ -714,9 +706,7 @@ def getmark(self, id): raise Error('marker {0!r} does not exist'.format(id)) def getmarkers(self): - if len(self._markers) == 0: - return None - return self._markers + return None if len(self._markers) == 0 else self._markers def tell(self): return self._nframeswritten @@ -779,20 +769,21 @@ def _lin2adpcm(self, data): return data def _ensure_header_written(self, datasize): - if not self._nframeswritten: - if self._comptype in 
(b'ULAW', b'ulaw', b'ALAW', b'alaw', b'G722'): - if not self._sampwidth: - self._sampwidth = 2 - if self._sampwidth != 2: - raise Error('sample width must be 2 when compressing ' - 'with ulaw/ULAW, alaw/ALAW or G7.22 (ADPCM)') - if not self._nchannels: - raise Error('# channels not specified') + if self._nframeswritten: + return + if self._comptype in (b'ULAW', b'ulaw', b'ALAW', b'alaw', b'G722'): if not self._sampwidth: - raise Error('sample width not specified') - if not self._framerate: - raise Error('sampling rate not specified') - self._write_header(datasize) + self._sampwidth = 2 + if self._sampwidth != 2: + raise Error('sample width must be 2 when compressing ' + 'with ulaw/ULAW, alaw/ALAW or G7.22 (ADPCM)') + if not self._nchannels: + raise Error('# channels not specified') + if not self._sampwidth: + raise Error('sample width not specified') + if not self._framerate: + raise Error('sampling rate not specified') + self._write_header(datasize) def _init_compression(self): if self._comptype == b'G722': @@ -909,10 +900,7 @@ def _writemarkers(self): def open(f, mode=None): if mode is None: - if hasattr(f, 'mode'): - mode = f.mode - else: - mode = 'rb' + mode = f.mode if hasattr(f, 'mode') else 'rb' if mode in ('r', 'rb'): return Aifc_read(f) elif mode in ('w', 'wb'): @@ -944,8 +932,8 @@ def openfp(f, mode=None): with open(gn, 'w') as g: g.setparams(f.getparams()) while 1: - data = f.readframes(1024) - if not data: + if data := f.readframes(1024): + g.writeframes(data) + else: break - g.writeframes(data) print("Done.") diff --git a/Lib/antigravity.py b/Lib/antigravity.py index 6dc5207335..a4a3b32225 100644 --- a/Lib/antigravity.py +++ b/Lib/antigravity.py @@ -13,5 +13,5 @@ def geohash(latitude, longitude, datedow): ''' # https://xkcd.com/426/ h = hashlib.md5(datedow, usedforsecurity=False).hexdigest() - p, q = [('%f' % float.fromhex('0.' 
+ x)) for x in (h[:16], h[16:32])] + p, q = ['%f' % float.fromhex(f'0.{x}') for x in (h[:16], h[16:32])] print('%d%s %d%s' % (latitude, p[1:], longitude, q[1:])) diff --git a/Lib/argparse.py b/Lib/argparse.py index 7761908861..ab9fbd2346 100644 --- a/Lib/argparse.py +++ b/Lib/argparse.py @@ -117,18 +117,16 @@ class _AttributeHolder(object): def __repr__(self): type_name = type(self).__name__ - arg_strings = [] star_args = {} - for arg in self._get_args(): - arg_strings.append(repr(arg)) + arg_strings = [repr(arg) for arg in self._get_args()] for name, value in self._get_kwargs(): if name.isidentifier(): arg_strings.append('%s=%r' % (name, value)) else: star_args[name] = value if star_args: - arg_strings.append('**%s' % repr(star_args)) - return '%s(%s)' % (type_name, ', '.join(arg_strings)) + arg_strings.append(f'**{repr(star_args)}') + return f"{type_name}({', '.join(arg_strings)})" def _get_kwargs(self): return list(self.__dict__.items()) @@ -263,9 +261,10 @@ def add_argument(self, action): # find all invocations get_invocation = self._format_action_invocation invocations = [get_invocation(action)] - for subaction in self._iter_indented_subactions(action): - invocations.append(get_invocation(subaction)) - + invocations.extend( + get_invocation(subaction) + for subaction in self._iter_indented_subactions(action) + ) # update the maximum item length invocation_length = max(map(len, invocations)) action_length = invocation_length + self._current_indent @@ -302,11 +301,9 @@ def _format_usage(self, usage, actions, groups, prefix): if usage is not None: usage = usage % dict(prog=self._prog) - # if no optionals or positionals are available, usage is just prog elif usage is None and not actions: usage = '%(prog)s' % dict(prog=self._prog) - # if optionals and positionals are available, calculate usage elif usage is None: prog = '%(prog)s' % dict(prog=self._prog) @@ -345,10 +342,7 @@ def _format_usage(self, usage, actions, groups, prefix): def get_lines(parts, indent, 
prefix=None): lines = [] line = [] - if prefix is not None: - line_len = len(prefix) - 1 - else: - line_len = len(indent) - 1 + line_len = len(prefix) - 1 if prefix is not None else len(indent) - 1 for part in parts: if line_len + 1 + len(part) > text_width and line: lines.append(indent + ' '.join(line)) @@ -441,7 +435,6 @@ def _format_actions_usage(self, actions, groups): elif inserts.get(i + 1) == '|': inserts.pop(i + 1) - # produce all arg strings elif not action.option_strings: default = self._get_default_metavar_for_positional(action) part = self._format_args(action, default) @@ -454,7 +447,6 @@ def _format_actions_usage(self, actions, groups): # add the action string to the list parts.append(part) - # produce the first way to invoke the option in brackets else: option_string = action.option_strings[0] @@ -463,16 +455,14 @@ def _format_actions_usage(self, actions, groups): if action.nargs == 0: part = action.format_usage() - # if the Optional takes a value, format is: - # -s ARGS or --long ARGS else: default = self._get_default_metavar_for_optional(action) args_string = self._format_args(action, default) - part = '%s %s' % (option_string, args_string) + part = f'{option_string} {args_string}' # make it look optional if it's not required or in a group if not action.required and action not in group_actions: - part = '[%s]' % part + part = f'[{part}]' # add the action string to the list parts.append(part) @@ -487,9 +477,9 @@ def _format_actions_usage(self, actions, groups): # clean up separators for mutually exclusive groups open = r'[\[(]' close = r'[\])]' - text = _re.sub(r'(%s) ' % open, r'\1', text) - text = _re.sub(r' (%s)' % close, r'\1', text) - text = _re.sub(r'%s *%s' % (open, close), r'', text) + text = _re.sub(f'({open}) ', r'\1', text) + text = _re.sub(f' ({close})', r'\1', text) + text = _re.sub(f'{open} *{close}', r'', text) text = _re.sub(r'\(([^|]*)\)', r'\1', text) text = text.strip() @@ -533,21 +523,18 @@ def _format_action(self, action): # if 
there was help for the action, add lines of help text if action.help and action.help.strip(): - help_text = self._expand_help(action) - if help_text: + if help_text := self._expand_help(action): help_lines = self._split_lines(help_text, help_width) parts.append('%*s%s\n' % (indent_first, '', help_lines[0])) - for line in help_lines[1:]: - parts.append('%*s%s\n' % (help_position, '', line)) - - # or add a newline if the description doesn't end with one + parts.extend('%*s%s\n' % (help_position, '', line) for line in help_lines[1:]) elif not action_header.endswith('\n'): parts.append('\n') # if there are any sub-actions, add their help as well - for subaction in self._iter_indented_subactions(action): - parts.append(self._format_action(subaction)) - + parts.extend( + self._format_action(subaction) + for subaction in self._iter_indented_subactions(action) + ) # return a single string return self._join_parts(parts) @@ -565,14 +552,13 @@ def _format_action_invocation(self, action): if action.nargs == 0: parts.extend(action.option_strings) - # if the Optional takes a value, format is: - # -s ARGS, --long ARGS else: default = self._get_default_metavar_for_optional(action) args_string = self._format_args(action, default) - for option_string in action.option_strings: - parts.append('%s %s' % (option_string, args_string)) - + parts.extend( + f'{option_string} {args_string}' + for option_string in action.option_strings + ) return ', '.join(parts) def _metavar_formatter(self, action, default_metavar): @@ -585,39 +571,33 @@ def _metavar_formatter(self, action, default_metavar): result = default_metavar def format(tuple_size): - if isinstance(result, tuple): - return result - else: - return (result, ) * tuple_size + return result if isinstance(result, tuple) else (result, ) * tuple_size + return format def _format_args(self, action, default_metavar): get_metavar = self._metavar_formatter(action, default_metavar) if action.nargs is None: - result = '%s' % get_metavar(1) + return 
'%s' % get_metavar(1) elif action.nargs == OPTIONAL: - result = '[%s]' % get_metavar(1) + return '[%s]' % get_metavar(1) elif action.nargs == ZERO_OR_MORE: metavar = get_metavar(1) - if len(metavar) == 2: - result = '[%s [%s ...]]' % metavar - else: - result = '[%s ...]' % metavar + return '[%s [%s ...]]' % metavar if len(metavar) == 2 else '[%s ...]' % metavar elif action.nargs == ONE_OR_MORE: - result = '%s [%s ...]' % get_metavar(2) + return '%s [%s ...]' % get_metavar(2) elif action.nargs == REMAINDER: - result = '...' + return '...' elif action.nargs == PARSER: - result = '%s ...' % get_metavar(1) + return '%s ...' % get_metavar(1) elif action.nargs == SUPPRESS: - result = '' + return '' else: try: formats = ['%s' for _ in range(action.nargs)] except TypeError: raise ValueError("invalid nargs value") from None - result = ' '.join(formats) % get_metavar(action.nargs) + return ' '.join(formats) % get_metavar(action.nargs) def _expand_help(self, action): params = dict(vars(action), prog=self._prog) @@ -891,7 +871,7 @@ def __init__(self, _option_strings.append(option_string) if option_string.startswith('--'): - option_string = '--no-' + option_string[2:] + option_string = f'--no-{option_string[2:]}' _option_strings.append(option_string) super().__init__( @@ -1146,7 +1126,7 @@ class _ChoicesPseudoAction(Action): def __init__(self, name, aliases, help): metavar = dest = name if aliases: - metavar += ' (%s)' % ', '.join(aliases) + metavar += f" ({', '.join(aliases)})" sup = super(_SubParsersAction._ChoicesPseudoAction, self) sup.__init__(option_strings=[], dest=dest, help=help, metavar=metavar) @@ -1177,7 +1157,7 @@ def __init__(self, def add_parser(self, name, **kwargs): # set prog from the existing prefix if kwargs.get('prog') is None: - kwargs['prog'] = '%s %s' % (self._prog_prefix, name) + kwargs['prog'] = f'{self._prog_prefix} {name}' aliases = kwargs.pop('aliases', ()) @@ -1299,7 +1279,7 @@ def __repr__(self): args_str = ', '.join([repr(arg) for arg in
args if arg != -1] + ['%s=%r' % (kw, arg) for kw, arg in kwargs if arg is not None]) - return '%s(%s)' % (type(self).__name__, args_str) + return f'{type(self).__name__}({args_str})' # =========================== # Optional and Positional Parsing @@ -1439,7 +1419,7 @@ def add_argument(self, *args, **kwargs): # create the action object, and add it to the parser action_class = self._pop_action_class(kwargs) if not callable(action_class): - raise ValueError('unknown action "%s"' % (action_class,)) + raise ValueError(f'unknown action "{action_class}"') action = action_class(**kwargs) # raise an error if the action type is not callable @@ -1556,7 +1536,7 @@ def _get_optional_kwargs(self, *args, **kwargs): long_option_strings = [] for option_string in args: # error on strings that don't start with an appropriate prefix - if not option_string[0] in self.prefix_chars: + if option_string[0] not in self.prefix_chars: args = {'option': option_string, 'prefix_chars': self.prefix_chars} msg = _('invalid option string %(option)r: ' @@ -1590,7 +1570,7 @@ def _pop_action_class(self, kwargs, default=None): def _get_handler(self): # determine function from conflict handler string - handler_func_name = '_handle_conflict_%s' % self.conflict_handler + handler_func_name = f'_handle_conflict_{self.conflict_handler}' try: return getattr(self, handler_func_name) except AttributeError: @@ -1769,6 +1749,7 @@ def __init__(self, # register types def identity(string): return string + self.register('type', None, identity) # add help argument if necessary @@ -1776,9 +1757,12 @@ def identity(string): default_prefix = '-' if '-' in prefix_chars else prefix_chars[0] if self.add_help: self.add_argument( - default_prefix+'h', default_prefix*2+'help', - action='help', default=SUPPRESS, - help=_('show this help message and exit')) + f'{default_prefix}h', + default_prefix * 2 + 'help', + action='help', + default=SUPPRESS, + help=_('show this help message and exit'), + ) # add parent arguments and 
defaults for parent in parents: @@ -1866,13 +1850,7 @@ def parse_args(self, args=None, namespace=None): return args def parse_known_args(self, args=None, namespace=None): - if args is None: - # args default to the system args - args = _sys.argv[1:] - else: - # make sure that args are mutable - args = list(args) - + args = _sys.argv[1:] if args is None else list(args) # default Namespace built from parser defaults if namespace is None: namespace = Namespace() @@ -2081,10 +2059,9 @@ def consume_positionals(start_index): while start_index <= max_option_string_index: # consume any Positionals preceding the next option - next_option_string_index = min([ - index - for index in option_string_indices - if index >= start_index]) + next_option_string_index = min( + index for index in option_string_indices if index >= start_index + ) if start_index != next_option_string_index: positionals_end_index = consume_positionals(start_index) @@ -2162,14 +2139,12 @@ def _read_args_from_files(self, arg_strings): if not arg_string or arg_string[0] not in self.fromfile_prefix_chars: new_arg_strings.append(arg_string) - # replace arguments referencing files with the file content else: try: with open(arg_string[1:]) as args_file: arg_strings = [] for arg_line in args_file.read().splitlines(): - for arg in self.convert_arg_line_to_args(arg_line): - arg_strings.append(arg) + arg_strings.extend(iter(self.convert_arg_line_to_args(arg_line))) arg_strings = self._read_args_from_files(arg_strings) new_arg_strings.extend(arg_strings) except OSError as err: @@ -2201,7 +2176,7 @@ def _match_argument(self, action, arg_strings_pattern): raise ArgumentError(action, msg) # return the number of arguments matched - return len(match.group(1)) + return len(match[1]) def _match_arguments_partial(self, actions, arg_strings_pattern): # progressively shorten the actions list by slicing off the @@ -2225,7 +2200,7 @@ def _parse_optional(self, arg_string): return None # if it doesn't start with a prefix, it was 
meant to be positional - if not arg_string[0] in self.prefix_chars: + if arg_string[0] not in self.prefix_chars: return None # if the option string is present in the parser, return the action @@ -2270,12 +2245,7 @@ def _parse_optional(self, arg_string): return None # if it contains a space, it was meant to be a positional - if ' ' in arg_string: - return None - - # it was meant to be an optional but there is no such option - # in this parser (though it might be a valid option in a subparser) - return None, arg_string, None + return None if ' ' in arg_string else (None, arg_string, None) def _get_option_tuples(self, option_string): result = [] @@ -2296,10 +2266,7 @@ def _get_option_tuples(self, option_string): tup = action, option_string, explicit_arg result.append(tup) - # single character options can be concatenated with their arguments - # but multiple character options always have to have their argument - # separate - elif option_string[0] in chars and option_string[1] not in chars: + elif option_string[0] in chars: option_prefix = option_string explicit_arg = None short_option_prefix = option_string[:2] @@ -2315,7 +2282,6 @@ def _get_option_tuples(self, option_string): tup = action, option_string, explicit_arg result.append(tup) - # shouldn't ever get here else: self.error(_('unexpected option string: %s') % option_string) @@ -2331,33 +2297,26 @@ def _get_nargs_pattern(self, action): if nargs is None: nargs_pattern = '(-*A-*)' - # allow zero or one arguments elif nargs == OPTIONAL: nargs_pattern = '(-*A?-*)' - # allow zero or more arguments elif nargs == ZERO_OR_MORE: nargs_pattern = '(-*[A-]*)' - # allow one or more arguments elif nargs == ONE_OR_MORE: nargs_pattern = '(-*A[A-]*)' - # allow any number of options or arguments elif nargs == REMAINDER: nargs_pattern = '([-AO]*)' - # allow one argument followed by any number of options or arguments elif nargs == PARSER: nargs_pattern = '(-*A[-AO]*)' - # suppress action, like nargs=0 elif nargs == SUPPRESS: 
nargs_pattern = '(-*-*)' - # all others should be integers else: - nargs_pattern = '(-*%s-*)' % '-*'.join('A' * nargs) + nargs_pattern = f"(-*{'-*'.join('A' * nargs)}-*)" # if this is an optional action, -- is not allowed if action.option_strings: @@ -2392,11 +2351,12 @@ def parse_known_intermixed_args(self, args=None, namespace=None): # namespace positionals = self._get_positional_actions() - a = [action for action in positionals - if action.nargs in [PARSER, REMAINDER]] - if a: - raise TypeError('parse_intermixed_args: positional arg' - ' with nargs=%s'%a[0].nargs) + if a := [ + action for action in positionals if action.nargs in [PARSER, REMAINDER] + ]: + raise TypeError( + f'parse_intermixed_args: positional arg with nargs={a[0].nargs}' + ) if [action.dest for group in self._mutually_exclusive_groups for action in group._group_actions if action in positionals]: @@ -2423,7 +2383,7 @@ def parse_known_intermixed_args(self, args=None, namespace=None): if (hasattr(namespace, action.dest) and getattr(namespace, action.dest)==[]): from warnings import warn - warn('Do not expect %s in %s' % (action.dest, namespace)) + warn(f'Do not expect {action.dest} in {namespace}') delattr(namespace, action.dest) finally: # restore nargs and usage before exiting @@ -2465,44 +2425,31 @@ def _get_values(self, action, arg_strings): # optional argument produces a default when not present if not arg_strings and action.nargs == OPTIONAL: - if action.option_strings: - value = action.const - else: - value = action.default + value = action.const if action.option_strings else action.default if isinstance(value, str): value = self._get_value(action, value) self._check_value(action, value) - # when nargs='*' on a positional, if there were no command-line - # args, use the default if it is anything other than None elif (not arg_strings and action.nargs == ZERO_OR_MORE and not action.option_strings): - if action.default is not None: - value = action.default - else: - value = arg_strings + value 
= action.default if action.default is not None else arg_strings self._check_value(action, value) - # single argument or optional argument produces a single value elif len(arg_strings) == 1 and action.nargs in [None, OPTIONAL]: arg_string, = arg_strings value = self._get_value(action, arg_string) self._check_value(action, value) - # REMAINDER arguments convert all values, checking none elif action.nargs == REMAINDER: value = [self._get_value(action, v) for v in arg_strings] - # PARSER arguments convert all values, but check only the first elif action.nargs == PARSER: value = [self._get_value(action, v) for v in arg_strings] self._check_value(action, value[0]) - # SUPPRESS argument does not put anything in the namespace elif action.nargs == SUPPRESS: value = SUPPRESS - # all other types of nargs produce a list else: value = [self._get_value(action, v) for v in arg_strings] for v in value: diff --git a/Lib/ast.py b/Lib/ast.py index 4f5f982714..c065fefd93 100644 --- a/Lib/ast.py +++ b/Lib/ast.py @@ -68,19 +68,19 @@ def _raise_malformed_node(node): msg = "malformed node or string" if lno := getattr(node, 'lineno', None): msg += f' on line {lno}' - raise ValueError(msg + f': {node!r}') + raise ValueError(f'{msg}: {node!r}') + def _convert_num(node): if not isinstance(node, Constant) or type(node.value) not in (int, float, complex): _raise_malformed_node(node) return node.value + def _convert_signed_num(node): if isinstance(node, UnaryOp) and isinstance(node.op, (UAdd, USub)): operand = _convert_num(node.operand) - if isinstance(node.op, UAdd): - return + operand - else: - return - operand + return + operand if isinstance(node.op, UAdd) else - operand return _convert_num(node) + def _convert(node): if isinstance(node, Constant): return node.value @@ -102,11 +102,9 @@ def _convert(node): left = _convert_signed_num(node.left) right = _convert_num(node.right) if isinstance(left, (int, float)) and isinstance(right, complex): - if isinstance(node.op, Add): - return left + 
right - else: - return left - right + return left + right if isinstance(node.op, Add) else left - right return _convert_signed_num(node) + return _convert(node_or_string) @@ -147,7 +145,7 @@ def _format(node, level=0): value, simple = _format(value, level) allsimple = allsimple and simple if keywords: - args.append('%s=%s' % (name, value)) + args.append(f'{name}={value}') else: args.append(value) if include_attributes and node._attributes: @@ -160,14 +158,14 @@ def _format(node, level=0): continue value, simple = _format(value, level) allsimple = allsimple and simple - args.append('%s=%s' % (name, value)) + args.append(f'{name}={value}') if allsimple and len(args) <= 3: - return '%s(%s)' % (node.__class__.__name__, ', '.join(args)), not args - return '%s(%s%s)' % (node.__class__.__name__, prefix, sep.join(args)), False + return f"{node.__class__.__name__}({', '.join(args)})", not args + return f'{node.__class__.__name__}({prefix}{sep.join(args)})', False elif isinstance(node, list): if not node: return '[]', True - return '[%s%s]' % (prefix, sep.join(_format(x, level)[0] for x in node)), False + return f'[{prefix}{sep.join(_format(x, level)[0] for x in node)}]', False return repr(node), True if not isinstance(node, AST): @@ -331,13 +329,7 @@ def _splitlines_no_ff(source): def _pad_whitespace(source): r"""Replace all chars except '\f\t' in a line with spaces.""" - result = '' - for c in source: - if c in '\f\t': - result += c - else: - result += ' ' - return result + return ''.join(c if c in '\f\t' else ' ' for c in source) def get_source_segment(source, node, *, padded=False): @@ -413,7 +405,7 @@ class name of the node. 
So a `TryFinally` node visit function would def visit(self, node): """Visit a node.""" - method = 'visit_' + node.__class__.__name__ + method = f'visit_{node.__class__.__name__}' visitor = getattr(self, method, self.generic_visit) return visitor(node) @@ -436,7 +428,7 @@ def visit_Constant(self, node): type_name = name break if type_name is not None: - method = 'visit_' + type_name + method = f'visit_{type_name}' try: visitor = getattr(self, method) except AttributeError: @@ -525,23 +517,22 @@ def _setter(self, value): class _ABC(type): - def __init__(cls, *args): - cls.__doc__ = """Deprecated AST node class. Use ast.Constant instead""" + def __init__(self, *args): + self.__doc__ = """Deprecated AST node class. Use ast.Constant instead""" - def __instancecheck__(cls, inst): + def __instancecheck__(self, inst): if not isinstance(inst, Constant): return False - if cls in _const_types: + if self in _const_types: try: value = inst.value except AttributeError: return False else: - return ( - isinstance(value, _const_types[cls]) and - not isinstance(value, _const_types_not.get(cls, ())) + return isinstance(value, _const_types[self]) and not isinstance( + value, _const_types_not.get(self, ()) ) - return type.__instancecheck__(cls, inst) + return type.__instancecheck__(self, inst) def _new(cls, *args, **kwargs): for key in kwargs: @@ -629,20 +620,15 @@ def _dims_setter(self, value): class Suite(mod): """Deprecated AST node class. Unused in Python 3.""" - class AugLoad(expr_context): """Deprecated AST node class. Unused in Python 3.""" - class AugStore(expr_context): """Deprecated AST node class. Unused in Python 3.""" - class Param(expr_context): """Deprecated AST node class. Unused in Python 3.""" - - # Large float and imaginary literals get turned into infinities in the AST. # We unparse those infinities to INFSTR. 
-_INFSTR = "1e" + repr(sys.float_info.max_10_exp + 1) +_INFSTR = f"1e{repr(sys.float_info.max_10_exp + 1)}" class _Precedence(IntEnum): """Precedence table that originated from python grammar.""" @@ -761,10 +747,7 @@ def delimit(self, start, end): self.write(end) def delimit_if(self, start, end, condition): - if condition: - return self.delimit(start, end) - else: - return nullcontext() + return self.delimit(start, end) if condition else nullcontext() def require_parens(self, precedence, node): """Shortcut to adding precedence related parens""" @@ -875,7 +858,7 @@ def visit_Assign(self, node): def visit_AugAssign(self, node): self.fill() self.traverse(node.target) - self.write(" " + self.binop[node.op.__class__.__name__] + "= ") + self.write(f" {self.binop[node.op.__class__.__name__]}= ") self.traverse(node.value) def visit_AnnAssign(self, node): @@ -950,7 +933,7 @@ def visit_Raise(self, node): self.fill("raise") if not node.exc: if node.cause: - raise ValueError(f"Node can't use cause without an exception.") + raise ValueError("Node can't use cause without an exception.") return self.write(" ") self.traverse(node.exc) @@ -989,7 +972,7 @@ def visit_ClassDef(self, node): for deco in node.decorator_list: self.fill("@") self.traverse(deco) - self.fill("class " + node.name) + self.fill(f"class {node.name}") with self.delimit_if("(", ")", condition = node.bases or node.keywords): comma = False for e in node.bases: @@ -1019,7 +1002,7 @@ def _function_helper(self, node, fill_suffix): for deco in node.decorator_list: self.fill("@") self.traverse(deco) - def_str = fill_suffix + " " + node.name + def_str = f"{fill_suffix} {node.name}" self.fill(def_str) with self.delimit("(", ")"): self.traverse(node.args) @@ -1146,7 +1129,7 @@ def visit_JoinedStr(self, node): # Constant parts of the f-string, and allow escapes accordingly. 
buffer = [] for value in node.values: - meth = getattr(self, "_fstring_" + type(value).__name__) + meth = getattr(self, f"_fstring_{type(value).__name__}") meth(value, self.buffer_writer) buffer.append((self.buffer, isinstance(value, Constant))) new_buffer = [] @@ -1169,7 +1152,7 @@ def visit_FormattedValue(self, node): def _fstring_JoinedStr(self, node, write): for value in node.values: - meth = getattr(self, "_fstring_" + type(value).__name__) + meth = getattr(self, f"_fstring_{type(value).__name__}") meth(value, write) def _fstring_Constant(self, node, write): @@ -1195,7 +1178,7 @@ def _fstring_FormattedValue(self, node, write): write(f"!{conversion}") if node.format_spec: write(":") - meth = getattr(self, "_fstring_" + type(node.format_spec).__name__) + meth = getattr(self, f"_fstring_{type(node.format_spec).__name__}") meth(node.format_spec, write) write("}") @@ -1411,7 +1394,7 @@ def visit_Compare(self, node): self.set_precedence(_Precedence.CMP.next(), node.left, *node.comparators) self.traverse(node.left) for o, e in zip(node.ops, node.comparators): - self.write(" " + self.cmpops[o.__class__.__name__] + " ") + self.write(f" {self.cmpops[o.__class__.__name__]} ") self.traverse(e) boolops = {"And": "and", "Or": "or"} @@ -1535,11 +1518,11 @@ def visit_arguments(self, node): else: self.write(", ") self.write("*") - if node.vararg: - self.write(node.vararg.arg) - if node.vararg.annotation: - self.write(": ") - self.traverse(node.vararg.annotation) + if node.vararg: + self.write(node.vararg.arg) + if node.vararg.annotation: + self.write(": ") + self.traverse(node.vararg.annotation) # keyword-only arguments if node.kwonlyargs: @@ -1556,7 +1539,7 @@ def visit_arguments(self, node): first = False else: self.write(", ") - self.write("**" + node.kwarg.arg) + self.write(f"**{node.kwarg.arg}") if node.kwarg.annotation: self.write(": ") self.traverse(node.kwarg.annotation) @@ -1580,7 +1563,7 @@ def visit_Lambda(self, node): def visit_alias(self, node): 
self.write(node.name) if node.asname: - self.write(" as " + node.asname) + self.write(f" as {node.asname}") def visit_withitem(self, node): self.traverse(node.context_expr) diff --git a/Lib/asynchat.py b/Lib/asynchat.py index fc1146adbb..ea887d316f 100644 --- a/Lib/asynchat.py +++ b/Lib/asynchat.py @@ -169,19 +169,16 @@ def handle_read(self): # This does the Right Thing if the terminator # is changed here. self.found_terminator() + elif index := find_prefix_at_end(self.ac_in_buffer, terminator): + if index != lb: + # we found a prefix, collect up to the prefix + self.collect_incoming_data(self.ac_in_buffer[:-index]) + self.ac_in_buffer = self.ac_in_buffer[-index:] + break else: - # check for a prefix of the terminator - index = find_prefix_at_end(self.ac_in_buffer, terminator) - if index: - if index != lb: - # we found a prefix, collect up to the prefix - self.collect_incoming_data(self.ac_in_buffer[:-index]) - self.ac_in_buffer = self.ac_in_buffer[-index:] - break - else: - # no prefix, collect it all - self.collect_incoming_data(self.ac_in_buffer) - self.ac_in_buffer = b'' + # no prefix, collect it all + self.collect_incoming_data(self.ac_in_buffer) + self.ac_in_buffer = b'' def handle_write(self): self.initiate_send() @@ -278,11 +275,11 @@ def more(self): if len(self.data) > self.buffer_size: result = self.data[:self.buffer_size] self.data = self.data[self.buffer_size:] - return result else: result = self.data self.data = b'' - return result + + return result # Given 'haystack', see if any prefix of 'needle' is at its end. 
This diff --git a/Lib/asyncore.py b/Lib/asyncore.py index 0e92be3ad1..6bc0bdbe07 100644 --- a/Lib/asyncore.py +++ b/Lib/asyncore.py @@ -69,9 +69,7 @@ def _strerror(err): try: return os.strerror(err) except (ValueError, OverflowError, NameError): - if err in errorcode: - return errorcode[err] - return "Unknown error %s" %err + return errorcode[err] if err in errorcode else f"Unknown error {err}" class ExitNow(Exception): pass @@ -193,11 +191,7 @@ def loop(timeout=30.0, use_poll=False, map=None, count=None): if map is None: map = socket_map - if use_poll and hasattr(select, 'poll'): - poll_fun = poll2 - else: - poll_fun = poll - + poll_fun = poll2 if use_poll and hasattr(select, 'poll') else poll if count is None: while map: poll_fun(timeout, map) @@ -218,11 +212,7 @@ class dispatcher: ignore_log_types = frozenset({'warning'}) def __init__(self, sock=None, map=None): - if map is None: - self._map = socket_map - else: - self._map = map - + self._map = socket_map if map is None else map self._fileno = None if sock: @@ -250,7 +240,7 @@ def __init__(self, sock=None, map=None): self.socket = None def __repr__(self): - status = [self.__class__.__module__+"."+self.__class__.__qualname__] + status = [f"{self.__class__.__module__}.{self.__class__.__qualname__}"] if self.accepting and self.addr: status.append('listening') elif self.connected: @@ -330,14 +320,13 @@ def connect(self, address): self.connecting = True err = self.socket.connect_ex(address) if err in (EINPROGRESS, EALREADY, EWOULDBLOCK) \ - or err == EINVAL and os.name == 'nt': + or err == EINVAL and os.name == 'nt': self.addr = address return - if err in (0, EISCONN): - self.addr = address - self.handle_connect_event() - else: + if err not in (0, EISCONN): raise OSError(err, errorcode[err]) + self.addr = address + self.handle_connect_event() def accept(self): # XXX can return either an address pair or None @@ -355,8 +344,7 @@ def accept(self): def send(self, data): try: - result = self.socket.send(data) - return 
result + return self.socket.send(data) except OSError as why: if why.args[0] == EWOULDBLOCK: return 0 @@ -368,21 +356,17 @@ def send(self, data): def recv(self, buffer_size): try: - data = self.socket.recv(buffer_size) - if not data: - # a closed connection is indicated by signaling - # a read condition, and having recv() return 0. - self.handle_close() - return b'' - else: + if data := self.socket.recv(buffer_size): return data + # a closed connection is indicated by signaling + # a read condition, and having recv() return 0. + self.handle_close() + return b'' except OSError as why: - # winsock sometimes raises ENOTCONN - if why.args[0] in _DISCONNECTED: - self.handle_close() - return b'' - else: + if why.args[0] not in _DISCONNECTED: raise + self.handle_close() + return b'' def close(self): self.connected = False @@ -405,7 +389,7 @@ def log(self, message): def log_info(self, message, type='info'): if type not in self.ignore_log_types: - print('%s: %s' % (type, message)) + print(f'{type}: {message}') def handle_read_event(self): if self.accepting: @@ -463,14 +447,9 @@ def handle_error(self): self_repr = '<__repr__(self) failed for object at %0x>' % id(self) self.log_info( - 'uncaptured python exception, closing channel %s (%s:%s %s)' % ( - self_repr, - t, - v, - tbinfo - ), - 'error' - ) + f'uncaptured python exception, closing channel {self_repr} ({t}:{v} {tbinfo})', + 'error', + ) self.handle_close() def handle_expt(self): @@ -522,7 +501,7 @@ def writable(self): def send(self, data): if self.debug: - self.log_info('sending %s' % repr(data)) + self.log_info(f'sending {repr(data)}') self.out_buffer = self.out_buffer + data self.initiate_send() diff --git a/Lib/base64.py b/Lib/base64.py index 7e9c2a2ca4..c5ba9ac9dd 100755 --- a/Lib/base64.py +++ b/Lib/base64.py @@ -389,10 +389,7 @@ def a85decode(b, *, foldspaces=False, adobe=False, ignorechars=b' \t\n\r\v'): "Ascii85 encoded byte sequences must end " "with {!r}".format(_A85END) ) - if b.startswith(_A85START): - b = 
b[2:-2] # Strip off start/end markers - else: - b = b[:-2] + b = b[2:-2] if b.startswith(_A85START) else b[:-2] # # We have to go through this stepwise, so as to ignore spaces and handle # special short sequences @@ -430,8 +427,7 @@ def a85decode(b, *, foldspaces=False, adobe=False, ignorechars=b' \t\n\r\v'): raise ValueError('Non-Ascii85 digit found: %c' % x) result = b''.join(decoded) - padding = 4 - len(curr) - if padding: + if padding := 4 - len(curr): # Throw away the extra padding result = result[:-padding] return result @@ -513,10 +509,10 @@ def encode(input, output): if not s: break while len(s) < MAXBINSIZE: - ns = input.read(MAXBINSIZE-len(s)) - if not ns: + if ns := input.read(MAXBINSIZE - len(s)): + s += ns + else: break - s += ns line = binascii.b2a_base64(s) output.write(line) @@ -534,7 +530,7 @@ def _input_type_check(s): try: m = memoryview(s) except TypeError as err: - msg = "expected bytes-like object, not %s" % s.__class__.__name__ + msg = f"expected bytes-like object, not {s.__class__.__name__}" raise TypeError(msg) from err if m.format not in ('c', 'b', 'B'): msg = ("expected single byte elements, not %r from %s" % diff --git a/Lib/bdb.py b/Lib/bdb.py index 75d6113576..95e511a666 100644 --- a/Lib/bdb.py +++ b/Lib/bdb.py @@ -44,7 +44,7 @@ def canonic(self, filename): angle brackets, such as "", generated in interactive mode, are returned unchanged. """ - if filename == "<" + filename[1:-1] + ">": + if filename == f"<{filename[1:-1]}>": return filename canonic = self.fncache.get(filename) if not canonic: @@ -194,25 +194,18 @@ def is_skipped_module(self, module_name): "Return True if module_name matches any skip pattern." 
if module_name is None: # some modules do not have names return False - for pattern in self.skip: - if fnmatch.fnmatch(module_name, pattern): - return True - return False + return any(fnmatch.fnmatch(module_name, pattern) for pattern in self.skip) def stop_here(self, frame): "Return True if frame is below the starting frame in the stack." # (CT) stopframe may now also be None, see dispatch_call. # (CT) the former test for None is therefore removed from here. if self.skip and \ - self.is_skipped_module(frame.f_globals.get('__name__')): + self.is_skipped_module(frame.f_globals.get('__name__')): return False if frame is self.stopframe: - if self.stoplineno == -1: - return False - return frame.f_lineno >= self.stoplineno - if not self.stopframe: - return True - return False + return False if self.stoplineno == -1 else frame.f_lineno >= self.stoplineno + return not self.stopframe def break_here(self, frame): """Return True if there is an effective breakpoint for this line. @@ -228,8 +221,8 @@ def break_here(self, frame): # The line itself has no breakpoint, but maybe the line is the # first line of a function with breakpoint set by function name. lineno = frame.f_code.co_firstlineno - if lineno not in self.breaks[filename]: - return False + if lineno not in self.breaks[filename]: + return False # flag says ok to delete temp. 
bp (bp, flag) = effective(filename, lineno, frame) @@ -420,7 +413,7 @@ def clear_break(self, filename, lineno): """ filename = self.canonic(filename) if filename not in self.breaks: - return 'There are no breakpoints in %s' % filename + return f'There are no breakpoints in {filename}' if lineno not in self.breaks[filename]: return 'There is no breakpoint at %s:%d' % (filename, lineno) # If there's only one bp in the list for that file,line @@ -450,7 +443,7 @@ def clear_all_file_breaks(self, filename): """ filename = self.canonic(filename) if filename not in self.breaks: - return 'There are no breakpoints in %s' % filename + return f'There are no breakpoints in {filename}' for line in self.breaks[filename]: blist = Breakpoint.bplist[filename, line] for bp in blist: @@ -482,7 +475,7 @@ def get_bpbynumber(self, arg): try: number = int(arg) except ValueError: - raise ValueError('Non-numeric breakpoint number %s' % arg) from None + raise ValueError(f'Non-numeric breakpoint number {arg}') from None try: bp = Breakpoint.bpbynumber[number] except IndexError: @@ -513,10 +506,7 @@ def get_file_breaks(self, filename): If no breakpoints are set, return an empty list.
""" filename = self.canonic(filename) - if filename in self.breaks: - return self.breaks[filename] - else: - return [] + return self.breaks[filename] if filename in self.breaks else [] def get_all_breaks(self): """Return all breakpoints that are set.""" @@ -561,17 +551,13 @@ def format_stack_entry(self, frame_lineno, lprefix=': '): frame, lineno = frame_lineno filename = self.canonic(frame.f_code.co_filename) s = '%s(%r)' % (filename, lineno) - if frame.f_code.co_name: - s += frame.f_code.co_name - else: - s += "" + s += frame.f_code.co_name if frame.f_code.co_name else "" s += '()' if '__return__' in frame.f_locals: rv = frame.f_locals['__return__'] s += '->' s += reprlib.repr(rv) - line = linecache.getline(filename, lineno, frame.f_globals) - if line: + if line := linecache.getline(filename, lineno, frame.f_globals): s += lprefix + line.strip() return s @@ -744,14 +730,9 @@ def bpformat(self): ignore, and number of times hit. """ - if self.temporary: - disp = 'del ' - else: - disp = 'keep ' - if self.enabled: - disp = disp + 'yes ' - else: - disp = disp + 'no ' + disp = ('del ' if self.temporary else 'keep ') + ( + 'yes ' if self.enabled else 'no ' + ) ret = '%-4dbreakpoint %s at %s:%d' % (self.number, disp, self.file, self.line) if self.cond: @@ -759,16 +740,13 @@ def bpformat(self): if self.ignore: ret += '\n\tignore next %d hits' % (self.ignore,) if self.hits: - if self.hits > 1: - ss = 's' - else: - ss = '' + ss = 's' if self.hits > 1 else '' ret += '\n\tbreakpoint already hit %d time%s' % (self.hits, ss) return ret def __str__(self): "Return a condensed description of the breakpoint." - return 'breakpoint %s at %s:%s' % (self.number, self.file, self.line) + return f'breakpoint {self.number} at {self.file}:{self.line}' # -----------end of Breakpoint class---------- @@ -782,13 +760,7 @@ def checkfuncname(b, frame): the right function and if it is on the first executable line. """ if not b.funcname: - # Breakpoint was set via line number. 
- if b.line != frame.f_lineno: - # Breakpoint was set at a line with a def statement and the function - # defined is called: don't break. - return False - return True - + return b.line == frame.f_lineno # Breakpoint set via function name. if frame.f_code.co_name != b.funcname: # It's not a function call, but rather execution of def statement. @@ -799,10 +771,7 @@ def checkfuncname(b, frame): # The function is entered for the 1st time. b.func_first_executable_line = frame.f_lineno - if b.func_first_executable_line != frame.f_lineno: - # But we are not at the first line number: don't break. - return False - return True + return b.func_first_executable_line == frame.f_lineno # Determines if there is an effective (active) breakpoint at this @@ -823,33 +792,29 @@ def effective(file, line, frame): continue # Count every hit when bp is enabled b.hits += 1 - if not b.cond: - # If unconditional, and ignoring go on to next, else break - if b.ignore > 0: - b.ignore -= 1 - continue - else: - # breakpoint and marker that it's ok to delete if temporary - return (b, True) - else: + if b.cond: # Conditional bp. # Ignore count applies only to those bpt hits where the # condition evaluates to true. try: - val = eval(b.cond, frame.f_globals, frame.f_locals) - if val: + if val := eval(b.cond, frame.f_globals, frame.f_locals): if b.ignore > 0: b.ignore -= 1 # continue else: return (b, True) - # else: - # continue + # else: + # continue except: # if eval fails, most conservative thing is to stop on # breakpoint regardless of ignore count. Don't delete # temporary, as another hint to user. return (b, False) + elif b.ignore > 0: + b.ignore -= 1 + else: + # breakpoint and marker that it's ok to delete if temporary + return (b, True) return (None, None) diff --git a/Lib/binhex.py b/Lib/binhex.py index ace5217d27..d06d07f213 100644 --- a/Lib/binhex.py +++ b/Lib/binhex.py @@ -206,10 +206,7 @@ def _write(self, data): def _writecrc(self): # XXXX Should this be here?? 
# self.crc = binascii.crc_hqx('\0\0', self.crc) - if self.crc < 0: - fmt = '>h' - else: - fmt = '>H' + fmt = '>h' if self.crc < 0 else '>H' self.ofp.write(struct.pack(fmt, self.crc)) self.crc = 0 @@ -258,16 +255,18 @@ def binhex(inp, out): with io.open(inp, 'rb') as ifp: # XXXX Do textfile translation on non-mac systems while True: - d = ifp.read(128000) - if not d: break - ofp.write(d) + if d := ifp.read(128000): + ofp.write(d) + else: + break ofp.close_data() ifp = openrsrc(inp, 'rb') while True: - d = ifp.read(128000) - if not d: break - ofp.write_rsrc(d) + if d := ifp.read(128000): + ofp.write_rsrc(d) + else: + break ofp.close() ifp.close() @@ -295,16 +294,14 @@ def read(self, totalwtd): # loop. # while True: - try: + with contextlib.suppress(binascii.Incomplete): with _ignore_deprecation_warning(): decdatacur, self.eof = binascii.a2b_hqx(data) break - except binascii.Incomplete: - pass - newdata = self.ifp.read(1) - if not newdata: + if newdata := self.ifp.read(1): + data = data + newdata + else: raise Error('Premature EOF on binhex file') - data = data + newdata decdata = decdata + decdatacur wtd = totalwtd - len(decdata) if not decdata and not self.eof: @@ -335,7 +332,7 @@ def _fill(self, wtd): if self.ifp.eof: with _ignore_deprecation_warning(): self.post_buffer = self.post_buffer + \ - binascii.rledecode_hqx(self.pre_buffer) + binascii.rledecode_hqx(self.pre_buffer) self.pre_buffer = b'' return @@ -352,19 +349,17 @@ def _fill(self, wtd): # mark = len(self.pre_buffer) if self.pre_buffer[-3:] == RUNCHAR + b'\0' + RUNCHAR: - mark = mark - 3 + mark -= 3 elif self.pre_buffer[-1:] == RUNCHAR: - mark = mark - 2 + mark -= 2 elif self.pre_buffer[-2:] == RUNCHAR + b'\0': - mark = mark - 2 - elif self.pre_buffer[-2:-1] == RUNCHAR: - pass # Decode all - else: - mark = mark - 1 + mark -= 2 + elif self.pre_buffer[-2:-1] != RUNCHAR: + mark -= 1 with _ignore_deprecation_warning(): self.post_buffer = self.post_buffer + \ - binascii.rledecode_hqx(self.pre_buffer[:mark]) + 
binascii.rledecode_hqx(self.pre_buffer[:mark]) self.pre_buffer = self.pre_buffer[mark:] def close(self): @@ -438,7 +433,7 @@ def read(self, *n): n = self.dlen rv = b'' while len(rv) < n: - rv = rv + self._read(n-len(rv)) + rv += self._read(n-len(rv)) self.dlen = self.dlen - n return rv @@ -484,19 +479,20 @@ def hexbin(inp, out): with io.open(out, 'wb') as ofp: # XXXX Do translation on non-mac systems while True: - d = ifp.read(128000) - if not d: break - ofp.write(d) + if d := ifp.read(128000): + ofp.write(d) + else: + break ifp.close_data() - d = ifp.read_rsrc(128000) - if d: + if d := ifp.read_rsrc(128000): ofp = openrsrc(out, 'wb') ofp.write(d) while True: - d = ifp.read_rsrc(128000) - if not d: break - ofp.write(d) + if d := ifp.read_rsrc(128000): + ofp.write(d) + else: + break ofp.close() ifp.close() diff --git a/Lib/bz2.py b/Lib/bz2.py index fabe4f73c8..e93aaf0d02 100644 --- a/Lib/bz2.py +++ b/Lib/bz2.py @@ -263,9 +263,7 @@ def seek(self, offset, whence=io.SEEK_SET): def tell(self): """Return the current file position.""" self._check_not_closed() - if self._mode == _MODE_READ: - return self._buffer.tell() - return self._pos + return self._buffer.tell() if self._mode == _MODE_READ else self._pos def open(filename, mode="rb", compresslevel=9, diff --git a/Lib/calendar.py b/Lib/calendar.py index 657396439c..51ee151e0b 100644 --- a/Lib/calendar.py +++ b/Lib/calendar.py @@ -134,17 +134,11 @@ def _monthlen(year, month): def _prevmonth(year, month): - if month == 1: - return year-1, 12 - else: - return year, month-1 + return (year-1, 12) if month == 1 else (year, month-1) def _nextmonth(year, month): - if month == 12: - return year+1, 1 - else: - return year, month+1 + return (year+1, 1) if month == 12 else (year, month+1) class Calendar(object): @@ -308,10 +302,7 @@ def formatday(self, day, weekday, width): """ Returns a formatted day. 
""" - if day == 0: - s = '' - else: - s = '%2i' % day # right-align single-digit days + s = '' if day == 0 else '%2i' % day return s.center(width) def formatweek(self, theweek, width): @@ -324,10 +315,7 @@ def formatweekday(self, day, width): """ Returns a formatted week day name. """ - if width >= 9: - names = day_name - else: - names = day_abbr + names = day_name if width >= 9 else day_abbr return names[day][:width].center(width) def formatweekheader(self, width): @@ -388,7 +376,7 @@ def formatyear(self, theyear, w=2, l=1, c=6, m=3): for k in months) a(formatstring(names, colwidth, c).rstrip()) a('\n'*l) - headers = (header for k in months) + headers = (header for _ in months) a(formatstring(headers, colwidth, c).rstrip()) a('\n'*l) # max number of weeks for this row @@ -441,7 +429,7 @@ def formatday(self, day, weekday): """ if day == 0: # day outside month - return ' ' % self.cssclass_noday + return f' ' else: return '%d' % (self.cssclasses[weekday], day) @@ -450,32 +438,30 @@ def formatweek(self, theweek): Return a complete week as a table row. """ s = ''.join(self.formatday(d, wd) for (d, wd) in theweek) - return '%s' % s + return f'{s}' def formatweekday(self, day): """ Return a weekday name as a table header. """ - return '%s' % ( - self.cssclasses_weekday_head[day], day_abbr[day]) + return f'{day_abbr[day]}' def formatweekheader(self): """ Return a header for a week as a table row. """ s = ''.join(self.formatweekday(i) for i in self.iterweekdays()) - return '%s' % s + return f'{s}' def formatmonthname(self, theyear, themonth, withyear=True): """ Return a month name as a table row. 
""" if withyear: - s = '%s %s' % (month_name[themonth], theyear) + s = f'{month_name[themonth]} {theyear}' else: - s = '%s' % month_name[themonth] - return '%s' % ( - self.cssclass_month_head, s) + s = f'{month_name[themonth]}' + return f'{s}' def formatmonth(self, theyear, themonth, withyear=True): """ @@ -483,8 +469,9 @@ def formatmonth(self, theyear, themonth, withyear=True): """ v = [] a = v.append - a('' % ( - self.cssclass_month)) + a( + f'
' + ) a('\n') a(self.formatmonthname(theyear, themonth, withyear=withyear)) a('\n') @@ -504,8 +491,9 @@ def formatyear(self, theyear, width=3): v = [] a = v.append width = max(width, 1) - a('
' % - self.cssclass_year) + a( + f'
' + ) a('\n') a('' % ( width, self.cssclass_year_head, theyear)) @@ -656,8 +644,7 @@ def timegm(tuple): days = datetime.date(year, month, 1).toordinal() - _EPOCH_ORD + day - 1 hours = days*24 + hour minutes = hours*60 + minute - seconds = minutes*60 + second - return seconds + return minutes*60 + second def main(args): @@ -726,10 +713,7 @@ def main(args): locale = options.locale, options.encoding if options.type == "html": - if options.locale: - cal = LocaleHTMLCalendar(locale=locale) - else: - cal = HTMLCalendar() + cal = LocaleHTMLCalendar(locale=locale) if options.locale else HTMLCalendar() encoding = options.encoding if encoding is None: encoding = sys.getdefaultencoding() @@ -743,10 +727,7 @@ def main(args): parser.error("incorrect number of arguments") sys.exit(1) else: - if options.locale: - cal = LocaleTextCalendar(locale=locale) - else: - cal = TextCalendar() + cal = LocaleTextCalendar(locale=locale) if options.locale else TextCalendar() optdict = dict(w=options.width, l=options.lines) if options.month is None: optdict["c"] = options.spacing diff --git a/Lib/cgi.py b/Lib/cgi.py index c22c71b387..889f023c07 100755 --- a/Lib/cgi.py +++ b/Lib/cgi.py @@ -82,10 +82,7 @@ def initlog(*allargs): logfp = open(logfile, "a") except OSError: pass - if not logfp: - log = nolog - else: - log = dolog + log = nolog if not logfp else dolog log(*allargs) def dolog(fmt, *args): @@ -140,16 +137,12 @@ def parse(fp=None, environ=os.environ, keep_blank_values=0, strict_parsing=0): # field keys and values (except for files) are returned as strings # an encoding is required to decode the bytes read from self.fp - if hasattr(fp,'encoding'): - encoding = fp.encoding - else: - encoding = 'latin-1' - + encoding = fp.encoding if hasattr(fp,'encoding') else 'latin-1' # fp.read() must return bytes if isinstance(fp, TextIOWrapper): fp = fp.buffer - if not 'REQUEST_METHOD' in environ: + if 'REQUEST_METHOD' not in environ: environ['REQUEST_METHOD'] = 'GET' # For testing stand-alone if 
environ['REQUEST_METHOD'] == 'POST': ctype, pdict = parse_header(environ['CONTENT_TYPE']) @@ -163,19 +156,18 @@ def parse(fp=None, environ=os.environ, keep_blank_values=0, strict_parsing=0): else: qs = '' # Unknown content-type if 'QUERY_STRING' in environ: - if qs: qs = qs + '&' + if qs: + qs = f'{qs}&' qs = qs + environ['QUERY_STRING'] elif sys.argv[1:]: - if qs: qs = qs + '&' + if qs: + qs = f'{qs}&' qs = qs + sys.argv[1] environ['QUERY_STRING'] = qs # XXX Shouldn't, really elif 'QUERY_STRING' in environ: qs = environ['QUERY_STRING'] else: - if sys.argv[1:]: - qs = sys.argv[1] - else: - qs = "" + qs = sys.argv[1] if sys.argv[1:] else "" environ['QUERY_STRING'] = qs # XXX Shouldn't, really return urllib.parse.parse_qs(qs, keep_blank_values, strict_parsing, encoding=encoding) @@ -197,7 +189,7 @@ def parse_multipart(fp, pdict, encoding="utf-8", errors="replace"): # RFC 2026, Section 5.1 : The "multipart" boundary delimiters are always # represented as 7bit US-ASCII. boundary = pdict['boundary'].decode('ascii') - ctype = "multipart/form-data; boundary={}".format(boundary) + ctype = f"multipart/form-data; boundary={boundary}" headers = Message() headers.set_type(ctype) headers['Content-Length'] = pdict['CONTENT-LENGTH'] @@ -223,7 +215,7 @@ def parse_header(line): Return the main content-type and a dictionary of options. 
""" - parts = _parseparam(';' + line) + parts = _parseparam(f';{line}') key = parts.__next__() pdict = {} for p in parts: @@ -363,7 +355,7 @@ def __init__(self, fp=None, headers=None, outerboundary=b'', if 'REQUEST_METHOD' in environ: method = environ['REQUEST_METHOD'].upper() self.qs_on_post = None - if method == 'GET' or method == 'HEAD': + if method in ['GET', 'HEAD']: if 'QUERY_STRING' in environ: qs = environ['QUERY_STRING'] elif sys.argv[1:]: @@ -386,27 +378,26 @@ def __init__(self, fp=None, headers=None, outerboundary=b'', self.qs_on_post = environ['QUERY_STRING'] if 'CONTENT_LENGTH' in environ: headers['content-length'] = environ['CONTENT_LENGTH'] - else: - if not (isinstance(headers, (Mapping, Message))): - raise TypeError("headers must be mapping or an instance of " - "email.message.Message") + elif not (isinstance(headers, (Mapping, Message))): + raise TypeError("headers must be mapping or an instance of " + "email.message.Message") self.headers = headers if fp is None: self.fp = sys.stdin.buffer - # self.fp.read() must return bytes elif isinstance(fp, TextIOWrapper): self.fp = fp.buffer - else: - if not (hasattr(fp, 'read') and hasattr(fp, 'readline')): - raise TypeError("fp must be file pointer") + elif hasattr(fp, 'read') and hasattr(fp, 'readline'): self.fp = fp + else: + raise TypeError("fp must be file pointer") self.encoding = encoding self.errors = errors if not isinstance(outerboundary, bytes): - raise TypeError('outerboundary must be bytes, not %s' - % type(outerboundary).__name__) + raise TypeError( + f'outerboundary must be bytes, not {type(outerboundary).__name__}' + ) self.outerboundary = outerboundary self.bytes_read = 0 @@ -510,54 +501,37 @@ def __getitem__(self, key): """Dictionary style indexing.""" if self.list is None: raise TypeError("not indexable") - found = [] - for item in self.list: - if item.name == key: found.append(item) - if not found: - raise KeyError(key) - if len(found) == 1: - return found[0] + if found := [item for item 
in self.list if item.name == key]: + return found[0] if len(found) == 1 else found else: - return found + raise KeyError(key) def getvalue(self, key, default=None): """Dictionary style get() method, including 'value' lookup.""" - if key in self: - value = self[key] - if isinstance(value, list): - return [x.value for x in value] - else: - return value.value - else: + if key not in self: return default + value = self[key] + return [x.value for x in value] if isinstance(value, list) else value.value def getfirst(self, key, default=None): """ Return the first value received.""" - if key in self: - value = self[key] - if isinstance(value, list): - return value[0].value - else: - return value.value - else: + if key not in self: return default + value = self[key] + return value[0].value if isinstance(value, list) else value.value def getlist(self, key): """ Return list of received values.""" - if key in self: - value = self[key] - if isinstance(value, list): - return [x.value for x in value] - else: - return [value.value] - else: + if key not in self: return [] + value = self[key] + return [x.value for x in value] if isinstance(value, list) else [value.value] def keys(self): """Dictionary style keys() method.""" if self.list is None: raise TypeError("not indexable") - return list(set(item.name for item in self.list)) + return list({item.name for item in self.list}) def __contains__(self, key): """Dictionary style __contains__ method.""" @@ -578,11 +552,10 @@ def read_urlencoded(self): """Internal: read data in query string format.""" qs = self.fp.read(self.length) if not isinstance(qs, bytes): - raise ValueError("%s should return bytes, got %s" \ - % (self.fp, type(qs).__name__)) + raise ValueError(f"{self.fp} should return bytes, got {type(qs).__name__}") qs = qs.decode(self.encoding, self.errors) if self.qs_on_post: - qs += '&' + self.qs_on_post + qs += f'&{self.qs_on_post}' query = urllib.parse.parse_qsl( qs, self.keep_blank_values, self.strict_parsing, 
encoding=self.encoding, errors=self.errors, @@ -608,8 +581,9 @@ def read_multi(self, environ, keep_blank_values, strict_parsing): klass = self.FieldStorageClass or self.__class__ first_line = self.fp.readline() # bytes if not isinstance(first_line, bytes): - raise ValueError("%s should return bytes, got %s" \ - % (self.fp, type(first_line).__name__)) + raise ValueError( + f"{self.fp} should return bytes, got {type(first_line).__name__}" + ) self.bytes_read += len(first_line) # Ensure that we consume the file until we've hit our inner boundary @@ -643,7 +617,7 @@ def read_multi(self, environ, keep_blank_values, strict_parsing): del headers['content-length'] limit = None if self.limit is None \ - else self.limit - self.bytes_read + else self.limit - self.bytes_read part = klass(self.fp, headers, ib, environ, keep_blank_values, strict_parsing, limit, self.encoding, self.errors, max_num_fields) @@ -680,8 +654,7 @@ def read_binary(self): while todo > 0: data = self.fp.read(min(todo, self.bufsize)) # bytes if not isinstance(data, bytes): - raise ValueError("%s should return bytes, got %s" - % (self.fp, type(data).__name__)) + raise ValueError(f"{self.fp} should return bytes, got {type(data).__name__}") self.bytes_read += len(data) if not data: self.done = -1 @@ -877,10 +850,9 @@ def print_exception(type=None, value=None, tb=None, limit=None): print("

Traceback (most recent call last):

") list = traceback.format_tb(tb, limit) + \ traceback.format_exception_only(type, value) - print("
%s%s
" % ( - html.escape("".join(list[:-1])), - html.escape(list[-1]), - )) + print( + f'
{html.escape("".join(list[:-1]))}{html.escape(list[-1])}
' + ) del tb def print_environ(environ=os.environ): @@ -903,10 +875,10 @@ def print_form(form): print("

No form fields.") print("

") for key in keys: - print("
" + html.escape(key) + ":", end=' ') + print(f"
{html.escape(key)}:", end=' ') value = form[key] - print("" + html.escape(repr(type(value))) + "") - print("
" + html.escape(repr(value))) + print(f"{html.escape(repr(type(value)))}") + print(f"
{html.escape(repr(value))}") print("
") print() diff --git a/Lib/cgitb.py b/Lib/cgitb.py index 4f81271be3..905ca4c284 100644 --- a/Lib/cgitb.py +++ b/Lib/cgitb.py @@ -44,22 +44,13 @@ def reset(): __UNDEF__ = [] # a special sentinel object def small(text): - if text: - return '' + text + '' - else: - return '' + return f'{text}' if text else '' def strong(text): - if text: - return '' + text + '' - else: - return '' + return f'{text}' if text else '' def grey(text): - if text: - return '' + text + '' - else: - return '' + return f'{text}' if text else '' def lookup(name, frame, locals): """Find the value for a given name in the given environment.""" @@ -72,9 +63,8 @@ def lookup(name, frame, locals): if type(builtins) is type({}): if name in builtins: return 'builtin', builtins[name] - else: - if hasattr(builtins, name): - return 'builtin', getattr(builtins, name) + elif hasattr(builtins, name): + return 'builtin', getattr(builtins, name) return None, __UNDEF__ def scanvars(reader, frame, locals): @@ -91,7 +81,7 @@ def scanvars(reader, frame, locals): where, value = lookup(token, frame, locals) vars.append((token, where, value)) elif token == '.': - prefix += lasttoken + '.' + prefix += f'{lasttoken}.' parent = value else: parent, prefix = None, '' @@ -103,14 +93,22 @@ def html(einfo, context=5): etype, evalue, etb = einfo if isinstance(etype, type): etype = etype.__name__ - pyver = 'Python ' + sys.version.split()[0] + ': ' + sys.executable + pyver = f'Python {sys.version.split()[0]}: {sys.executable}' date = time.ctime(time.time()) - head = '' + pydoc.html.heading( - '%s' % - strong(pydoc.html.escape(str(etype))), - '#ffffff', '#6622aa', pyver + '
' + date) + ''' + head = ( + ( + '' + + pydoc.html.heading( + f'{strong(pydoc.html.escape(str(etype)))}', + '#ffffff', + '#6622aa', + f'{pyver}
{date}', + ) + ) + + '''

A problem occurred in a Python script. Here is the sequence of function calls leading up to the error, in the order they occurred.

''' + ) indent = '' + small(' ' * 5) + ' ' frames = [] @@ -118,36 +116,41 @@ def html(einfo, context=5): for frame, file, lnum, func, lines, index in records: if file: file = os.path.abspath(file) - link = '%s' % (file, pydoc.html.escape(file)) + link = f'{pydoc.html.escape(file)}' else: file = link = '?' args, varargs, varkw, locals = inspect.getargvalues(frame) call = '' if func != '?': - call = 'in ' + strong(pydoc.html.escape(func)) + call = f'in {strong(pydoc.html.escape(func))}' if func != "": - call += inspect.formatargvalues(args, varargs, varkw, locals, - formatvalue=lambda value: '=' + pydoc.html.repr(value)) + call += inspect.formatargvalues( + args, + varargs, + varkw, + locals, + formatvalue=lambda value: f'={pydoc.html.repr(value)}', + ) highlight = {} def reader(lnum=[lnum]): highlight[lnum[0]] = 1 try: return linecache.getline(file, lnum[0]) finally: lnum[0] += 1 + vars = scanvars(reader, frame, locals) - rows = ['' % - (' ', link, call)] + rows = [f''] if index is not None: i = lnum - index for line in lines: num = small(' ' * (5-len(str(i))) + str(i)) + ' ' if i in highlight: - line = '=>%s%s' % (num, pydoc.html.preformat(line)) - rows.append('' % line) + line = f'=>{num}{pydoc.html.preformat(line)}' + rows.append(f'') else: - line = '  %s%s' % (num, pydoc.html.preformat(line)) - rows.append('' % grey(line)) + line = f'  {num}{pydoc.html.preformat(line)}' + rows.append(f'') i += 1 done, dump = {}, [] @@ -156,22 +159,23 @@ def reader(lnum=[lnum]): done[name] = 1 if value is not __UNDEF__: if where in ('global', 'builtin'): - name = ('%s ' % where) + strong(name) + name = f'{where} {strong(name)}' elif where == 'local': name = strong(name) else: name = where + strong(name.split('.')[-1]) - dump.append('%s = %s' % (name, pydoc.html.repr(value))) + dump.append(f'{name} = {pydoc.html.repr(value)}') else: - dump.append(name + ' undefined') + dump.append(f'{name} undefined') - rows.append('' % small(grey(', '.join(dump)))) + rows.append(f"") 
frames.append('''
%s
%s%s %s
 {link} {call}
%s
{line}
%s
{grey(line)}
%s
{small(grey(', '.join(dump)))}
%s
''' % '\n'.join(rows)) - exception = ['

%s: %s' % (strong(pydoc.html.escape(str(etype))), - pydoc.html.escape(str(evalue)))] + exception = [ + f'

{strong(pydoc.html.escape(str(etype)))}: {pydoc.html.escape(str(evalue))}' + ] for name in dir(evalue): if name[:1] == '_': continue value = pydoc.html.repr(getattr(evalue, name)) @@ -194,7 +198,7 @@ def text(einfo, context=5): etype, evalue, etb = einfo if isinstance(etype, type): etype = etype.__name__ - pyver = 'Python ' + sys.version.split()[0] + ': ' + sys.executable + pyver = f'Python {sys.version.split()[0]}: {sys.executable}' date = time.ctime(time.time()) head = "%s\n%s\n%s\n" % (str(etype), pyver, date) + ''' A problem occurred in a Python script. Here is the sequence of @@ -208,19 +212,25 @@ def text(einfo, context=5): args, varargs, varkw, locals = inspect.getargvalues(frame) call = '' if func != '?': - call = 'in ' + func + call = f'in {func}' if func != "": - call += inspect.formatargvalues(args, varargs, varkw, locals, - formatvalue=lambda value: '=' + pydoc.text.repr(value)) + call += inspect.formatargvalues( + args, + varargs, + varkw, + locals, + formatvalue=lambda value: f'={pydoc.text.repr(value)}', + ) highlight = {} def reader(lnum=[lnum]): highlight[lnum[0]] = 1 try: return linecache.getline(file, lnum[0]) finally: lnum[0] += 1 + vars = scanvars(reader, frame, locals) - rows = [' %s %s' % (file, call)] + rows = [f' {file} {call}'] if index is not None: i = lnum - index for line in lines: @@ -233,16 +243,17 @@ def reader(lnum=[lnum]): if name in done: continue done[name] = 1 if value is not __UNDEF__: - if where == 'global': name = 'global ' + name + if where == 'global': + name = f'global {name}' elif where != 'local': name = where + name.split('.')[-1] - dump.append('%s = %s' % (name, pydoc.text.repr(value))) + dump.append(f'{name} = {pydoc.text.repr(value)}') else: - dump.append(name + ' undefined') + dump.append(f'{name} undefined') rows.append('\n'.join(dump)) frames.append('\n%s\n' % '\n'.join(rows)) - exception = ['%s: %s' % (str(etype), str(evalue))] + exception = [f'{str(etype)}: {str(evalue)}'] for name in dir(evalue): value = 
pydoc.text.repr(getattr(evalue, name)) exception.append('\n%s%s = %s' % (" "*4, name, value)) @@ -285,7 +296,7 @@ def handle(self, info=None): if self.display: if plain: doc = pydoc.html.escape(doc) - self.file.write('

' + doc + '
\n') + self.file.write(f'
{doc}' + '
\n') else: self.file.write(doc + '\n') else: @@ -298,9 +309,9 @@ def handle(self, info=None): try: with os.fdopen(fd, 'w') as file: file.write(doc) - msg = '%s contains the description of this error.' % path + msg = f'{path} contains the description of this error.' except: - msg = 'Tried to save traceback to %s, but failed.' % path + msg = f'Tried to save traceback to {path}, but failed.' if self.format == 'html': self.file.write('

%s

\n' % msg) diff --git a/Lib/chunk.py b/Lib/chunk.py index d94dd39807..517651b7b8 100644 --- a/Lib/chunk.py +++ b/Lib/chunk.py @@ -53,16 +53,13 @@ def __init__(self, file, align=True, bigendian=True, inclheader=False): import struct self.closed = False self.align = align # whether to align to word (2-byte) boundaries - if bigendian: - strflag = '>' - else: - strflag = '<' + strflag = '>' if bigendian else '<' self.file = file self.chunkname = file.read(4) if len(self.chunkname) < 4: raise EOFError try: - self.chunksize = struct.unpack_from(strflag+'L', file.read(4))[0] + self.chunksize = struct.unpack_from(f'{strflag}L', file.read(4))[0] except struct.error: raise EOFError if inclheader: @@ -131,13 +128,12 @@ def read(self, size=-1): return b'' if size < 0: size = self.chunksize - self.size_read - if size > self.chunksize - self.size_read: - size = self.chunksize - self.size_read + size = min(size, self.chunksize - self.size_read) data = self.file.read(size) self.size_read = self.size_read + len(data) if self.size_read == self.chunksize and \ - self.align and \ - (self.chunksize & 1): + self.align and \ + (self.chunksize & 1): dummy = self.file.read(1) self.size_read = self.size_read + len(dummy) return data diff --git a/Lib/cmd.py b/Lib/cmd.py index 859e91096d..5ceaab5b2e 100644 --- a/Lib/cmd.py +++ b/Lib/cmd.py @@ -84,14 +84,8 @@ def __init__(self, completekey='tab', stdin=None, stdout=None): sys.stdin and sys.stdout are used. 
""" - if stdin is not None: - self.stdin = stdin - else: - self.stdin = sys.stdin - if stdout is not None: - self.stdout = stdout - else: - self.stdout = sys.stdout + self.stdin = stdin if stdin is not None else sys.stdin + self.stdout = stdout if stdout is not None else sys.stdout self.cmdqueue = [] self.completekey = completekey @@ -108,7 +102,7 @@ def cmdloop(self, intro=None): import readline self.old_completer = readline.get_completer() readline.set_completer(self.complete) - readline.parse_and_bind(self.completekey+": complete") + readline.parse_and_bind(f"{self.completekey}: complete") except ImportError: pass try: @@ -120,20 +114,16 @@ def cmdloop(self, intro=None): while not stop: if self.cmdqueue: line = self.cmdqueue.pop(0) + elif self.use_rawinput: + try: + line = input(self.prompt) + except EOFError: + line = 'EOF' else: - if self.use_rawinput: - try: - line = input(self.prompt) - except EOFError: - line = 'EOF' - else: - self.stdout.write(self.prompt) - self.stdout.flush() - line = self.stdin.readline() - if not len(line): - line = 'EOF' - else: - line = line.rstrip('\r\n') + self.stdout.write(self.prompt) + self.stdout.flush() + line = self.stdin.readline() + line = 'EOF' if not len(line) else line.rstrip('\r\n') line = self.precmd(line) stop = self.onecmd(line) stop = self.postcmd(stop, line) @@ -178,14 +168,15 @@ def parseline(self, line): if not line: return None, None, line elif line[0] == '?': - line = 'help ' + line[1:] + line = f'help {line[1:]}' elif line[0] == '!': if hasattr(self, 'do_shell'): - line = 'shell ' + line[1:] + line = f'shell {line[1:]}' else: return None, None, line i, n = 0, len(line) - while i < n and line[i] in self.identchars: i = i+1 + while i < n and line[i] in self.identchars: + i += 1 cmd, arg = line[:i], line[i:].strip() return cmd, arg, line @@ -209,12 +200,11 @@ def onecmd(self, line): self.lastcmd = '' if cmd == '': return self.default(line) - else: - try: - func = getattr(self, 'do_' + cmd) - except 
AttributeError: - return self.default(line) - return func(arg) + try: + func = getattr(self, f'do_{cmd}') + except AttributeError: + return self.default(line) + return func(arg) def emptyline(self): """Called when an empty line is entered in response to the prompt. @@ -245,7 +235,7 @@ def completedefault(self, *ignored): return [] def completenames(self, text, *ignored): - dotext = 'do_'+text + dotext = f'do_{text}' return [a[3:] for a in self.get_names() if a.startswith(dotext)] def complete(self, text, state): @@ -267,7 +257,7 @@ def complete(self, text, state): compfunc = self.completedefault else: try: - compfunc = getattr(self, 'complete_' + cmd) + compfunc = getattr(self, f'complete_{cmd}') except AttributeError: compfunc = self.completedefault else: @@ -285,8 +275,7 @@ def get_names(self): def complete_help(self, *args): commands = set(self.completenames(*args)) - topics = set(a[5:] for a in self.get_names() - if a.startswith('help_' + args[0])) + topics = {a[5:] for a in self.get_names() if a.startswith(f'help_{args[0]}')} return list(commands | topics) def do_help(self, arg): @@ -294,11 +283,10 @@ def do_help(self, arg): if arg: # XXX check arg syntax try: - func = getattr(self, 'help_' + arg) + func = getattr(self, f'help_{arg}') except AttributeError: try: - doc=getattr(self, 'do_' + arg).__doc__ - if doc: + if doc := getattr(self, f'do_{arg}').__doc__: self.stdout.write("%s\n"%str(doc)) return except AttributeError: @@ -310,10 +298,7 @@ def do_help(self, arg): names = self.get_names() cmds_doc = [] cmds_undoc = [] - help = {} - for name in names: - if name[:5] == 'help_': - help[name[5:]]=1 + help = {name[5:]: 1 for name in names if name[:5] == 'help_'} names.sort() # There can be duplicates if routines overridden prevname = '' @@ -353,11 +338,12 @@ def columnize(self, list, displaywidth=80): self.stdout.write("\n") return - nonstrings = [i for i in range(len(list)) - if not isinstance(list[i], str)] - if nonstrings: - raise TypeError("list[i] not a 
string for i in %s" - % ", ".join(map(str, nonstrings))) + if nonstrings := [ + i for i in range(len(list)) if not isinstance(list[i], str) + ]: + raise TypeError( + f'list[i] not a string for i in {", ".join(map(str, nonstrings))}' + ) size = len(list) if size == 1: self.stdout.write('%s\n'%str(list[0])) @@ -389,13 +375,10 @@ def columnize(self, list, displaywidth=80): texts = [] for col in range(ncols): i = row + nrows*col - if i >= size: - x = "" - else: - x = list[i] + x = "" if i >= size else list[i] texts.append(x) while texts and not texts[-1]: del texts[-1] for col in range(len(texts)): texts[col] = texts[col].ljust(colwidths[col]) - self.stdout.write("%s\n"%str(" ".join(texts))) + self.stdout.write("%s\n" % " ".join(texts)) diff --git a/Lib/code.py b/Lib/code.py index 23295f4cf5..78b4f3b5e9 100644 --- a/Lib/code.py +++ b/Lib/code.py @@ -210,8 +210,8 @@ def interact(self, banner=None, exitmsg=None): sys.ps2 except AttributeError: sys.ps2 = "... " - cprt = 'Type "help", "copyright", "credits" or "license" for more information.' if banner is None: + cprt = 'Type "help", "copyright", "credits" or "license" for more information.' 
self.write("Python %s on %s\n%s\n(%s)\n" % (sys.version, sys.platform, cprt, self.__class__.__name__)) @@ -220,10 +220,7 @@ def interact(self, banner=None, exitmsg=None): more = 0 while 1: try: - if more: - prompt = sys.ps2 - else: - prompt = sys.ps1 + prompt = sys.ps2 if more else sys.ps1 try: line = self.raw_input(prompt) except EOFError: @@ -307,8 +304,5 @@ def interact(banner=None, readfunc=None, local=None, exitmsg=None): parser.add_argument('-q', action='store_true', help="don't print version and copyright messages") args = parser.parse_args() - if args.q or sys.flags.quiet: - banner = '' - else: - banner = None + banner = '' if args.q or sys.flags.quiet else None interact(banner) diff --git a/Lib/codecs.py b/Lib/codecs.py index e6ad6e3a05..04b5cb91ae 100644 --- a/Lib/codecs.py +++ b/Lib/codecs.py @@ -7,6 +7,7 @@ """ + import builtins import sys @@ -15,7 +16,7 @@ try: from _codecs import * except ImportError as why: - raise SystemError('Failed to load the builtin codecs: %s' % why) + raise SystemError(f'Failed to load the builtin codecs: {why}') __all__ = ["register", "lookup", "open", "EncodedFile", "BOM", "BOM_BE", "BOM_LE", "BOM32_BE", "BOM32_LE", "BOM64_BE", "BOM64_LE", @@ -492,10 +493,7 @@ def read(self, size=-1, chars=-1, firstline=False): if len(self.charbuffer) >= chars: break # we need more data - if size < 0: - newdata = self.stream.read() - else: - newdata = self.stream.read(size) + newdata = self.stream.read() if size < 0 else self.stream.read(size) # decode bytes (those remaining from the last call included) data = self.bytebuffer + newdata if not data: @@ -503,13 +501,12 @@ def read(self, size=-1, chars=-1, firstline=False): try: newchars, decodedbytes = self.decode(data, self.errors) except UnicodeDecodeError as exc: - if firstline: - newchars, decodedbytes = \ + if not firstline: + raise + newchars, decodedbytes = \ self.decode(data[:exc.start], self.errors) - lines = newchars.splitlines(keepends=True) - if len(lines)<=1: - raise - else: + 
lines = newchars.splitlines(keepends=True) + if len(lines)<=1: raise # keep undecoded bytes until the next call self.bytebuffer = data[decodedbytes:] @@ -561,12 +558,11 @@ def readline(self, size=None, keepends=True): # be a "\n") to get a proper line ending. If the stream is # temporarily exhausted we return the wrong line ending. if (isinstance(data, str) and data.endswith("\r")) or \ - (isinstance(data, bytes) and data.endswith(b"\r")): + (isinstance(data, bytes) and data.endswith(b"\r")): data += self.read(size=1, chars=1) line += data - lines = line.splitlines(keepends=True) - if lines: + if lines := line.splitlines(keepends=True): if len(lines) > 1: # More than one line result; the first line is a full line # to return @@ -588,11 +584,8 @@ def readline(self, size=None, keepends=True): if line0withend != line0withoutend: # We really have a line end # Put the rest back together and keep it until the next call self.charbuffer = self._empty_charbuffer.join(lines[1:]) + \ - self.charbuffer - if keepends: - line = line0withend - else: - line = line0withoutend + self.charbuffer + line = line0withend if keepends else line0withoutend break # we didn't get anything or this was our only try if not data or size is not None: @@ -642,8 +635,7 @@ def seek(self, offset, whence=0): def __next__(self): """ Return the next decoded line from the input stream.""" - line = self.readline() - if line: + if line := self.readline(): return line raise StopIteration @@ -808,10 +800,7 @@ def read(self, size=-1): def readline(self, size=None): - if size is None: - data = self.reader.readline() - else: - data = self.reader.readline(size) + data = self.reader.readline() if size is None else self.reader.readline(size) data, bytesencoded = self.encode(data, self.errors) return data @@ -901,7 +890,7 @@ def open(filename, mode='r', encoding=None, errors='strict', buffering=-1): if encoding is not None and \ 'b' not in mode: # Force opening of the file in binary mode - mode = mode + 'b' + mode = 
f'{mode}b' file = builtins.open(filename, mode, buffering) if encoding is None: return file @@ -1033,11 +1022,9 @@ def iterencode(iterator, encoding, errors='strict', **kwargs): """ encoder = getincrementalencoder(encoding)(errors, **kwargs) for input in iterator: - output = encoder.encode(input) - if output: + if output := encoder.encode(input): yield output - output = encoder.encode("", True) - if output: + if output := encoder.encode("", True): yield output def iterdecode(iterator, encoding, errors='strict', **kwargs): @@ -1051,11 +1038,9 @@ def iterdecode(iterator, encoding, errors='strict', **kwargs): """ decoder = getincrementaldecoder(encoding)(errors, **kwargs) for input in iterator: - output = decoder.decode(input) - if output: + if output := decoder.decode(input): yield output - output = decoder.decode(b"", True) - if output: + if output := decoder.decode(b"", True): yield output ### Helpers for charmap-based codecs @@ -1085,10 +1070,7 @@ def make_encoding_map(decoding_map): """ m = {} for k,v in decoding_map.items(): - if not v in m: - m[v] = k - else: - m[v] = None + m[v] = k if v not in m else None return m ### error handlers diff --git a/Lib/colorsys.py b/Lib/colorsys.py index 12b432537b..63aeafaff6 100644 --- a/Lib/colorsys.py +++ b/Lib/colorsys.py @@ -48,18 +48,12 @@ def yiq_to_rgb(y, i, q): g = y - 0.27478764629897834*i - 0.6356910791873801*q b = y - 1.1085450346420322*i + 1.7090069284064666*q - if r < 0.0: - r = 0.0 - if g < 0.0: - g = 0.0 - if b < 0.0: - b = 0.0 - if r > 1.0: - r = 1.0 - if g > 1.0: - g = 1.0 - if b > 1.0: - b = 1.0 + r = max(r, 0.0) + g = max(g, 0.0) + b = max(b, 0.0) + r = min(r, 1.0) + g = min(g, 1.0) + b = min(b, 1.0) return (r, g, b) @@ -94,10 +88,7 @@ def rgb_to_hls(r, g, b): def hls_to_rgb(h, l, s): if s == 0.0: return l, l, l - if l <= 0.5: - m2 = l * (1.0+s) - else: - m2 = l+s-(l*s) + m2 = l * (1.0+s) if l <= 0.5 else l+s-(l*s) m1 = 2.0*l - m2 return (_v(m1, m2, h+ONE_THIRD), _v(m1, m2, h), _v(m1, m2, h-ONE_THIRD)) @@ 
-107,9 +98,7 @@ def _v(m1, m2, hue): return m1 + (m2-m1)*hue*6.0 if hue < 0.5: return m2 - if hue < TWO_THIRD: - return m1 + (m2-m1)*(TWO_THIRD-hue)*6.0 - return m1 + return m1 + (m2-m1)*(TWO_THIRD-hue)*6.0 if hue < TWO_THIRD else m1 # HSV: Hue, Saturation, Value @@ -144,7 +133,7 @@ def hsv_to_rgb(h, s, v): p = v*(1.0 - s) q = v*(1.0 - s*f) t = v*(1.0 - s*(1.0-f)) - i = i%6 + i %= 6 if i == 0: return v, t, p if i == 1: diff --git a/Lib/compileall.py b/Lib/compileall.py index 1c9ceb6930..f8bf87846a 100644 --- a/Lib/compileall.py +++ b/Lib/compileall.py @@ -40,10 +40,7 @@ def _walk_dir(dir, ddir=None, maxlevels=10, quiet=0): if name == '__pycache__': continue fullname = os.path.join(dir, name) - if ddir is not None: - dfile = os.path.join(ddir, name) - else: - dfile = None + dfile = os.path.join(ddir, name) if ddir is not None else None if not os.path.isdir(fullname): yield fullname elif (maxlevels > 0 and name != os.curdir and name != os.pardir and @@ -110,17 +107,13 @@ def compile_file(fullname, ddir=None, force=False, rx=None, quiet=0, if quiet < 2 and isinstance(fullname, os.PathLike): fullname = os.fspath(fullname) name = os.path.basename(fullname) - if ddir is not None: - dfile = os.path.join(ddir, name) - else: - dfile = None + dfile = os.path.join(ddir, name) if ddir is not None else None if rx is not None: - mo = rx.search(fullname) - if mo: + if mo := rx.search(fullname): return success if os.path.isfile(fullname): if legacy: - cfile = fullname + 'c' + cfile = f'{fullname}c' else: if optimize >= 0: opt = optimize if optimize >= 1 else '' @@ -168,7 +161,7 @@ def compile_file(fullname, ddir=None, force=False, rx=None, quiet=0, print('*** Error compiling {!r}...'.format(fullname)) else: print('*** ', end='') - print(e.__class__.__name__ + ':', e) + print(f'{e.__class__.__name__}:', e) else: if ok == 0: success = False @@ -247,11 +240,7 @@ def main(): args.rx = re.compile(args.rx) - if args.recursion is not None: - maxlevels = args.recursion - else: - maxlevels 
= args.maxlevels - + maxlevels = args.recursion if args.recursion is not None else args.maxlevels # if flist is provided then load it if args.flist: try: @@ -260,7 +249,7 @@ def main(): compile_dests.append(line.strip()) except OSError: if args.quiet < 2: - print("Error reading file list {}".format(args.flist)) + print(f"Error reading file list {args.flist}") return False if args.workers is not None: @@ -268,21 +257,19 @@ def main(): success = True try: - if compile_dests: - for dest in compile_dests: - if os.path.isfile(dest): - if not compile_file(dest, args.ddir, args.force, args.rx, - args.quiet, args.legacy): - success = False - else: - if not compile_dir(dest, maxlevels, args.ddir, - args.force, args.rx, args.quiet, - args.legacy, workers=args.workers): - success = False - return success - else: + if not compile_dests: return compile_path(legacy=args.legacy, force=args.force, quiet=args.quiet) + for dest in compile_dests: + if os.path.isfile(dest): + if not compile_file(dest, args.ddir, args.force, args.rx, + args.quiet, args.legacy): + success = False + elif not compile_dir(dest, maxlevels, args.ddir, + args.force, args.rx, args.quiet, + args.legacy, workers=args.workers): + success = False + return success except KeyboardInterrupt: if args.quiet < 2: print("\n[interrupted]") diff --git a/Lib/configparser.py b/Lib/configparser.py index df2d7e335d..b6757f0e02 100644 --- a/Lib/configparser.py +++ b/Lib/configparser.py @@ -561,10 +561,7 @@ def before_set(self, parser, section, option, value): @staticmethod def _interpolation_replace(match, parser): s = match.group(1) - if s is None: - return match.group() - else: - return "%%(%s)s" % parser.optionxform(s) + return match.group() if s is None else "%%(%s)s" % parser.optionxform(s) class RawConfigParser(MutableMapping): @@ -928,7 +925,7 @@ def write(self, fp, space_around_delimiters=True): preserved when writing the configuration back. 
""" if space_around_delimiters: - d = " {} ".format(self._delimiters[0]) + d = f" {self._delimiters[0]} " else: d = self._delimiters[0] if self._defaults: @@ -940,7 +937,7 @@ def write(self, fp, space_around_delimiters=True): def _write_section(self, fp, section_name, section_items, delimiter): """Write a single section to the specified `fp`.""" - fp.write("[{}]\n".format(section_name)) + fp.write(f"[{section_name}]\n") for key, value in section_items: value = self._interpolation.before_write(self, section_name, key, value) @@ -948,7 +945,7 @@ def _write_section(self, fp, section_name, section_items, delimiter): value = delimiter + str(value).replace('\n', '\n\t') else: value = "" - fp.write("{}{}\n".format(key, value)) + fp.write(f"{key}{value}\n") fp.write("\n") def remove_option(self, section, option): @@ -1074,12 +1071,9 @@ def _read(self, fp, fpname): if (cursect is not None and optname and cur_indent_level > indent_level): cursect[optname].append(value) - # a section header or option header? else: indent_level = cur_indent_level - # is it a section header? - mo = self.SECTCRE.match(value) - if mo: + if mo := self.SECTCRE.match(value): sectname = mo.group('header') if sectname in self._sections: if self._strict and sectname in elements_added: @@ -1096,13 +1090,10 @@ def _read(self, fp, fpname): elements_added.add(sectname) # So sections can't start with a continuation line optname = None - # no section header in the file? elif cursect is None: raise MissingSectionHeaderError(fpname, lineno, line) - # an option line? else: - mo = self._optcre.match(value) - if mo: + if mo := self._optcre.match(value): optname, vi, optval = mo.group('option', 'vi', 'value') if not optname: e = self._handle_error(e, fpname, lineno, line) @@ -1179,7 +1170,7 @@ def _convert_to_boolean(self, value): """Return a boolean value translating from other types if necessary. 
""" if value.lower() not in self.BOOLEAN_STATES: - raise ValueError('Not a boolean: %s' % value) + raise ValueError(f'Not a boolean: {value}') return self.BOOLEAN_STATES[value.lower()] def _validate_value_types(self, *, section="", option="", value=""): @@ -1261,12 +1252,12 @@ def __init__(self, parser, name): self._parser = parser self._name = name for conv in parser.converters: - key = 'get' + conv + key = f'get{conv}' getter = functools.partial(self.get, _impl=getattr(parser, key)) setattr(self, key, getter) def __repr__(self): - return ''.format(self._name) + return f'' def __getitem__(self, key): if not self._parser.has_option(self._name, key): @@ -1347,10 +1338,9 @@ def __getitem__(self, key): def __setitem__(self, key, value): try: - k = 'get' + key + k = f'get{key}' except TypeError: - raise ValueError('Incompatible key: {} (type: {})' - ''.format(key, type(key))) + raise ValueError(f'Incompatible key: {key} (type: {type(key)})') if k == 'get': raise ValueError('Incompatible key: cannot use "" as a name') self._data[key] = value diff --git a/Lib/contextlib.py b/Lib/contextlib.py index 58e9a49887..831994609c 100644 --- a/Lib/contextlib.py +++ b/Lib/contextlib.py @@ -710,11 +710,7 @@ def _fix_exception_context(new_exc, old_exc): while self._exit_callbacks: is_sync, cb = self._exit_callbacks.pop() try: - if is_sync: - cb_suppress = cb(*exc_details) - else: - cb_suppress = await cb(*exc_details) - + cb_suppress = cb(*exc_details) if is_sync else await cb(*exc_details) if cb_suppress: suppressed_exc = True pending_raise = False diff --git a/Lib/copy.py b/Lib/copy.py index 1b276afe08..10aeb900b8 100644 --- a/Lib/copy.py +++ b/Lib/copy.py @@ -90,16 +90,12 @@ def copy(x): reductor = getattr(x, "__reduce_ex__", None) if reductor is not None: rv = reductor(4) + elif reductor := getattr(x, "__reduce__", None): + rv = reductor() else: - reductor = getattr(x, "__reduce__", None) - if reductor: - rv = reductor() - else: - raise Error("un(shallow)copyable object of type 
%s" % cls) + raise Error(f"un(shallow)copyable object of type {cls}") - if isinstance(rv, str): - return x - return _reconstruct(x, None, *rv) + return x if isinstance(rv, str) else _reconstruct(x, None, *rv) _copy_dispatch = d = {} @@ -144,33 +140,24 @@ def deepcopy(x, memo=None, _nil=[]): copier = _deepcopy_dispatch.get(cls) if copier is not None: y = copier(x, memo) + elif issubclass(cls, type): + y = _deepcopy_atomic(x, memo) else: - if issubclass(cls, type): - y = _deepcopy_atomic(x, memo) - else: - copier = getattr(x, "__deepcopy__", None) - if copier is not None: - y = copier(memo) + copier = getattr(x, "__deepcopy__", None) + if copier is None: + if reductor := dispatch_table.get(cls): + rv = reductor(x) else: - reductor = dispatch_table.get(cls) - if reductor: - rv = reductor(x) - else: - reductor = getattr(x, "__reduce_ex__", None) - if reductor is not None: - rv = reductor(4) - else: - reductor = getattr(x, "__reduce__", None) - if reductor: - rv = reductor() - else: - raise Error( - "un(deep)copyable object of type %s" % cls) - if isinstance(rv, str): - y = x + reductor = getattr(x, "__reduce_ex__", None) + if reductor is not None: + rv = reductor(4) + elif reductor := getattr(x, "__reduce__", None): + rv = reductor() else: - y = _reconstruct(x, memo, *rv) - + raise Error(f"un(deep)copyable object of type {cls}") + y = x if isinstance(rv, str) else _reconstruct(x, memo, *rv) + else: + y = copier(memo) # If is its own copy, don't memoize. 
if y is not x: memo[d] = y @@ -283,22 +270,16 @@ def _reconstruct(x, memo, func, args, setattr(y, key, value) if listiter is not None: - if deep: - for item in listiter: + for item in listiter: + if deep: item = deepcopy(item, memo) - y.append(item) - else: - for item in listiter: - y.append(item) + y.append(item) if dictiter is not None: - if deep: - for key, value in dictiter: + for key, value in dictiter: + if deep: key = deepcopy(key, memo) value = deepcopy(value, memo) - y[key] = value - else: - for key, value in dictiter: - y[key] = value + y[key] = value return y del types, weakref, PyStringMap diff --git a/Lib/copyreg.py b/Lib/copyreg.py index dfc463c49a..2934ff0cb2 100644 --- a/Lib/copyreg.py +++ b/Lib/copyreg.py @@ -61,9 +61,9 @@ def _reduce_ex(self, proto): base = object # not really reachable if base is object: state = None + elif base is cls: + raise TypeError(f"cannot pickle {cls.__name__!r} object") else: - if base is cls: - raise TypeError(f"cannot pickle {cls.__name__!r} object") state = base(self) args = (cls, base, state) try: @@ -80,10 +80,7 @@ def _reduce_ex(self, proto): dict = None else: dict = getstate() - if dict: - return _reconstructor, args, dict - else: - return _reconstructor, args + return (_reconstructor, args, dict) if dict else (_reconstructor, args) # Helper for __reduce_ex__ protocol 2 @@ -114,10 +111,7 @@ class found there. (This assumes classes don't modify their # Not cached -- calculate the value names = [] - if not hasattr(cls, "__slots__"): - # This class has no slots - pass - else: + if hasattr(cls, "__slots__"): # Slots found -- gather slot names from all base classes for c in cls.__mro__: if "__slots__" in c.__dict__: @@ -129,11 +123,9 @@ class found there. 
(This assumes classes don't modify their # special descriptors if name in ("__dict__", "__weakref__"): continue - # mangled names elif name.startswith('__') and not name.endswith('__'): - stripped = c.__name__.lstrip('_') - if stripped: - names.append('_%s%s' % (stripped, name)) + if stripped := c.__name__.lstrip('_'): + names.append(f'_{stripped}{name}') else: names.append(name) else: @@ -172,11 +164,13 @@ def add_extension(module, name, code): _inverted_registry.get(code) == key): return # Redundant registrations are benign if key in _extension_registry: - raise ValueError("key %s is already registered with code %s" % - (key, _extension_registry[key])) + raise ValueError( + f"key {key} is already registered with code {_extension_registry[key]}" + ) if code in _inverted_registry: - raise ValueError("code %s is already in use for key %s" % - (code, _inverted_registry[code])) + raise ValueError( + f"code {code} is already in use for key {_inverted_registry[code]}" + ) _extension_registry[key] = code _inverted_registry[code] = key @@ -185,8 +179,7 @@ def remove_extension(module, name, code): key = (module, name) if (_extension_registry.get(key) != code or _inverted_registry.get(code) != key): - raise ValueError("key %s is not registered with code %s" % - (key, code)) + raise ValueError(f"key {key} is not registered with code {code}") del _extension_registry[key] del _inverted_registry[code] if code in _extension_cache: diff --git a/Lib/csv.py b/Lib/csv.py index 2f38bb1a19..45b0003012 100644 --- a/Lib/csv.py +++ b/Lib/csv.py @@ -128,8 +128,7 @@ def __init__(self, f, fieldnames, restval="", extrasaction="raise", self.fieldnames = fieldnames # list of keys for the dict self.restval = restval # for writing short dicts if extrasaction.lower() not in ("raise", "ignore"): - raise ValueError("extrasaction (%s) must be 'raise' or 'ignore'" - % extrasaction) + raise ValueError(f"extrasaction ({extrasaction}) must be 'raise' or 'ignore'") self.extrasaction = extrasaction 
self.writer = writer(f, dialect, *args, **kwds) @@ -139,8 +138,7 @@ def writeheader(self): def _dict_to_list(self, rowdict): if self.extrasaction == "raise": - wrong_fields = rowdict.keys() - self.fieldnames - if wrong_fields: + if wrong_fields := rowdict.keys() - self.fieldnames: raise ValueError("dict contains fields not in fieldnames: " + ", ".join([repr(x) for x in wrong_fields])) return (rowdict.get(key, self.restval) for key in self.fieldnames) @@ -260,15 +258,11 @@ def _guess_quote_and_delimiter(self, data, delimiters): # double quoted format dq_regexp = re.compile( r"((%(delim)s)|^)\W*%(quote)s[^%(delim)s\n]*%(quote)s[^%(delim)s\n]*%(quote)s\W*((%(delim)s)|$)" % \ - {'delim':re.escape(delim), 'quote':quotechar}, re.MULTILINE) + {'delim':re.escape(delim), 'quote':quotechar}, re.MULTILINE) - if dq_regexp.search(data): - doublequote = True - else: - doublequote = False - + doublequote = bool(dq_regexp.search(data)) return (quotechar, doublequote, delim, skipinitialspace) @@ -302,6 +296,8 @@ def _guess_delimiter(self, data, delimiters): modes = {} delims = {} start, end = 0, chunkLength + # minimum consistency threshold + threshold = 0.9 while start < len(data): iteration += 1 for line in data[start:end]: @@ -313,7 +309,7 @@ def _guess_delimiter(self, data, delimiters): metaFrequency[freq] = metaFrequency.get(freq, 0) + 1 charFrequency[char] = metaFrequency - for char in charFrequency.keys(): + for char in charFrequency: items = list(charFrequency[char].items()) if len(items) == 1 and items[0][0] == 0: continue @@ -333,9 +329,7 @@ def _guess_delimiter(self, data, delimiters): total = float(min(chunkLength * iteration, len(data))) # (rows of consistent data) / (number of rows) = 100% consistency = 1.0 - # minimum consistency threshold - threshold = 0.9 - while len(delims) == 0 and consistency >= threshold: + while not delims and consistency >= threshold: for k, v in modeList: if v[0] > 0 and v[1] > 0: if ((v[1]/total) >= consistency and @@ -359,7 +353,7 @@ def 
_guess_delimiter(self, data, delimiters): # if there's more than one, fall back to a 'preferred' list if len(delims) > 1: for d in self.preferred: - if d in delims.keys(): + if d in delims: skipinitialspace = (data[0].count(d) == data[0].count("%c " % d)) return (d, skipinitialspace) @@ -390,9 +384,7 @@ def has_header(self, sample): header = next(rdr) # assume first row is header columns = len(header) - columnTypes = {} - for i in range(columns): columnTypes[i] = None - + columnTypes = {i: None for i in range(columns)} checked = 0 for row in rdr: # arbitrary number of rows to check, to keep it sane diff --git a/Lib/dataclasses.py b/Lib/dataclasses.py index e1687a117d..937837331d 100644 --- a/Lib/dataclasses.py +++ b/Lib/dataclasses.py @@ -303,8 +303,7 @@ def __repr__(self): # with the default value, so the end result is a descriptor that # had __set_name__ called on it at the right time. def __set_name__(self, owner, name): - func = getattr(type(self.default), '__set_name__', None) - if func: + if func := getattr(type(self.default), '__set_name__', None): # There is a __set_name__ method on the descriptor, call # it. func(self.default, owner, name) @@ -451,49 +450,27 @@ def _field_init(f, frozen, globals, self_name, slots): default_name = f'_dflt_{f.name}' if f.default_factory is not MISSING: - if f.init: - # This field has a default factory. If a parameter is - # given, use it. If not, call the factory. - globals[default_name] = f.default_factory - value = (f'{default_name}() ' - f'if {f.name} is _HAS_DEFAULT_FACTORY ' - f'else {f.name}') - else: - # This is a field that's not in the __init__ params, but - # has a default factory function. It needs to be - # initialized here by calling the factory function, - # because there's no other way to initialize it. - - # For a field initialized with a default=defaultvalue, the - # class dict just has the default value - # (cls.fieldname=defaultvalue). 
But that won't work for a - # default factory, the factory must be called in __init__ - # and we must assign that to self.fieldname. We can't - # fall back to the class dict's value, both because it's - # not set, and because it might be different per-class - # (which, after all, is why we have a factory function!). - - globals[default_name] = f.default_factory - value = f'{default_name}()' + value = ( + f'{default_name}() if {f.name} is _HAS_DEFAULT_FACTORY else {f.name}' + if f.init + else f'{default_name}()' + ) + # This field has a default factory. If a parameter is + # given, use it. If not, call the factory. + globals[default_name] = f.default_factory + elif f.init: + if f.default is not MISSING: + globals[default_name] = f.default + # There's no default, just do an assignment. + value = f.name + elif slots and f.default is not MISSING: + globals[default_name] = f.default + value = default_name else: - # No default factory. - if f.init: - if f.default is MISSING: - # There's no default, just do an assignment. - value = f.name - elif f.default is not MISSING: - globals[default_name] = f.default - value = f.name - else: - # If the class has slots, then initialize this field. - if slots and f.default is not MISSING: - globals[default_name] = f.default - value = default_name - else: - # This field does not need initialization: reading from it will - # just use the class attribute that contains the default. - # Signify that to the caller by returning None. - return None + # This field does not need initialization: reading from it will + # just use the class attribute that contains the default. + # Signify that to the caller by returning None. + return None # Only test this now, so that we can create variables for the # default. However, return None to signify that we're not going @@ -518,7 +495,7 @@ def _init_param(f): # There's a default, this will be the name that's used to look # it up. 
default = f'=_dflt_{f.name}' - elif f.default_factory is not MISSING: + else: # There's a factory function. Set a marker. default = '=_HAS_DEFAULT_FACTORY' return f'{f.name}:_type_{f.name}{default}' @@ -538,25 +515,20 @@ def _init_fn(fields, std_fields, kw_only_fields, frozen, has_post_init, for f in std_fields: # Only consider the non-kw-only fields in the __init__ call. if f.init: - if not (f.default is MISSING and f.default_factory is MISSING): + if f.default is not MISSING or f.default_factory is not MISSING: seen_default = True elif seen_default: raise TypeError(f'non-default argument {f.name!r} ' 'follows default argument') - locals = {f'_type_{f.name}': f.type for f in fields} - locals.update({ + locals = {f'_type_{f.name}': f.type for f in fields} | { 'MISSING': MISSING, '_HAS_DEFAULT_FACTORY': _HAS_DEFAULT_FACTORY, '__dataclass_builtins_object__': object, - }) - + } body_lines = [] for f in fields: - line = _field_init(f, frozen, locals, self_name, slots) - # line is None means that this field doesn't require - # initialization (it's a pseudo-field). Just skip it. - if line: + if line := _field_init(f, frozen, locals, self_name, slots): body_lines.append(line) # Does this class have a post-init function? @@ -603,21 +575,27 @@ def _frozen_get_del_attr(cls, fields, globals): else: # Special case for the zero-length tuple. 
fields_str = '()' - return (_create_fn('__setattr__', - ('self', 'name', 'value'), - (f'if type(self) is cls or name in {fields_str}:', - ' raise FrozenInstanceError(f"cannot assign to field {name!r}")', - f'super(cls, self).__setattr__(name, value)'), - locals=locals, - globals=globals), - _create_fn('__delattr__', - ('self', 'name'), - (f'if type(self) is cls or name in {fields_str}:', - ' raise FrozenInstanceError(f"cannot delete field {name!r}")', - f'super(cls, self).__delattr__(name)'), - locals=locals, - globals=globals), - ) + return _create_fn( + '__setattr__', + ('self', 'name', 'value'), + ( + f'if type(self) is cls or name in {fields_str}:', + ' raise FrozenInstanceError(f"cannot assign to field {name!r}")', + 'super(cls, self).__setattr__(name, value)', + ), + locals=locals, + globals=globals, + ), _create_fn( + '__delattr__', + ('self', 'name'), + ( + f'if type(self) is cls or name in {fields_str}:', + ' raise FrozenInstanceError(f"cannot delete field {name!r}")', + 'super(cls, self).__delattr__(name)', + ), + locals=locals, + globals=globals, + ) def _cmp_fn(name, op, self_tuple, other_tuple, globals): @@ -661,59 +639,17 @@ def _is_kw_only(a_type, dataclasses): def _is_type(annotation, cls, a_module, a_type, is_type_predicate): - # Given a type annotation string, does it refer to a_type in - # a_module? For example, when checking that annotation denotes a - # ClassVar, then a_module is typing, and a_type is - # typing.ClassVar. - - # It's possible to look up a_module given a_type, but it involves - # looking in sys.modules (again!), and seems like a waste since - # the caller already knows a_module. - - # - annotation is a string type annotation - # - cls is the class that this annotation was found in - # - a_module is the module we want to match - # - a_type is the type in that module we want to match - # - is_type_predicate is a function called with (obj, a_module) - # that determines if obj is of the desired type. 
- - # Since this test does not do a local namespace lookup (and - # instead only a module (global) lookup), there are some things it - # gets wrong. - - # With string annotations, cv0 will be detected as a ClassVar: - # CV = ClassVar - # @dataclass - # class C0: - # cv0: CV - - # But in this example cv1 will not be detected as a ClassVar: - # @dataclass - # class C1: - # CV = ClassVar - # cv1: CV - - # In C1, the code in this function (_is_type) will look up "CV" in - # the module and not find it, so it will not consider cv1 as a - # ClassVar. This is a fairly obscure corner case, and the best - # way to fix it would be to eval() the string "CV" with the - # correct global and local namespaces. However that would involve - # a eval() penalty for every single field of every dataclass - # that's defined. It was judged not worth it. - - match = _MODULE_IDENTIFIER_RE.match(annotation) - if match: + if match := _MODULE_IDENTIFIER_RE.match(annotation): ns = None - module_name = match.group(1) - if not module_name: - # No module name, assume the class's module did - # "from dataclasses import InitVar". - ns = sys.modules.get(cls.__module__).__dict__ - else: + if module_name := match.group(1): # Look up module_name in the class's module. module = sys.modules.get(cls.__module__) if module and module.__dict__.get(module_name) is a_module: ns = sys.modules.get(a_type.__module__).__dict__ + else: + # No module name, assume the class's module did + # "from dataclasses import InitVar". + ns = sys.modules.get(cls.__module__).__dict__ if ns and is_type_predicate(ns.get(match.group(2)), a_module): return True return False @@ -745,22 +681,7 @@ def _get_field(cls, a_name, a_type, default_kw_only): # is just a normal field. f._field_type = _FIELD - # In addition to checking for actual types here, also check for - # string annotations. 
get_type_hints() won't always work for us - # (see https://github.com/python/typing/issues/508 for example), - # plus it's expensive and would require an eval for every string - # annotation. So, make a best effort to see if this is a ClassVar - # or InitVar using regex's and checking that the thing referenced - # is actually of the correct type. - - # For the complete discussion, see https://bugs.python.org/issue33453 - - # If typing has not been imported, then it's impossible for any - # annotation to be a ClassVar. So, only look for ClassVar if - # typing has been imported by any module (not necessarily cls's - # module). - typing = sys.modules.get('typing') - if typing: + if typing := sys.modules.get('typing'): if (_is_classvar(a_type, typing) or (isinstance(f.type, str) and _is_type(f.type, cls, typing, typing.ClassVar, @@ -975,7 +896,7 @@ def _process_class(cls, init, repr, eq, order, unsafe_hash, frozen, # Do we have any Field members that don't also have annotations? for name, value in cls.__dict__.items(): - if isinstance(value, Field) and not name in cls_annotations: + if isinstance(value, Field) and name not in cls_annotations: raise TypeError(f'{name!r} is a field but has no type annotation') # Check rules that apply if we are derived from any dataclasses. @@ -1000,8 +921,9 @@ def _process_class(cls, init, repr, eq, order, unsafe_hash, frozen, # that such a __hash__ == None was not auto-generated, but it # close enough. class_hash = cls.__dict__.get('__hash__', MISSING) - has_explicit_hash = not (class_hash is MISSING or - (class_hash is None and '__eq__' in cls.__dict__)) + has_explicit_hash = class_hash is not MISSING and ( + class_hash is not None or '__eq__' not in cls.__dict__ + ) # If we're generating ordering methods, we must be generating the # eq methods. 
@@ -1077,12 +999,9 @@ def _process_class(cls, init, repr, eq, order, unsafe_hash, frozen, raise TypeError(f'Cannot overwrite attribute {fn.__name__} ' f'in class {cls.__name__}') - # Decide if/how we're going to create a hash function. - hash_action = _hash_action[bool(unsafe_hash), - bool(eq), - bool(frozen), - has_explicit_hash] - if hash_action: + if hash_action := _hash_action[ + bool(unsafe_hash), bool(eq), bool(frozen), has_explicit_hash + ]: # No need to call _set_new_attribute here, since by the time # we're here the overwriting is unconditional. cls.__hash__ = hash_action(cls, field_list, globals) @@ -1176,12 +1095,7 @@ def wrap(cls): frozen, match_args, kw_only, slots) # See if we're being called as @dataclass or @dataclass(). - if cls is None: - # We're called with parens. - return wrap - - # We're called as @dataclass without parens. - return wrap(cls) + return wrap if cls is None else wrap(cls) def fields(class_or_instance): diff --git a/Lib/datetime.py b/Lib/datetime.py index 353e48b68c..43a664dbbe 100644 --- a/Lib/datetime.py +++ b/Lib/datetime.py @@ -51,9 +51,7 @@ def _days_before_year(year): def _days_in_month(year, month): "year, month -> number of days in that month in that year." assert 1 <= month <= 12, month - if month == 2 and _is_leap(year): - return 29 - return _DAYS_IN_MONTH[month] + return 29 if month == 2 and _is_leap(year) else _DAYS_IN_MONTH[month] def _days_before_month(year, month): "year, month -> number of days in year preceding first day of month." @@ -193,8 +191,8 @@ def _format_offset(off): if ss or ss.microseconds: s += ":%02d" % ss.seconds - if ss.microseconds: - s += '.%06d' % ss.microseconds + if ss.microseconds: + s += '.%06d' % ss.microseconds return s # Correctly substitute for %z and %Z escapes in strftime formats. 
@@ -233,8 +231,7 @@ def _wrap_strftime(object, format, timetuple): h, rest = divmod(offset, timedelta(hours=1)) m, rest = divmod(rest, timedelta(minutes=1)) s = rest.seconds - u = offset.microseconds - if u: + if u := offset.microseconds: zreplace = '%c%02d%02d%02d.%06d' % (sign, h, m, s, u) elif s: zreplace = '%c%02d%02d%02d' % (sign, h, m, s) @@ -265,18 +262,16 @@ def _wrap_strftime(object, format, timetuple): def _parse_isoformat_date(dtstr): # It is assumed that this function will only be called with a # string of length exactly 10, and (though this is not used) ASCII-only - year = int(dtstr[0:4]) + year = int(dtstr[:4]) if dtstr[4] != '-': - raise ValueError('Invalid date separator: %s' % dtstr[4]) - - month = int(dtstr[5:7]) + raise ValueError(f'Invalid date separator: {dtstr[4]}') if dtstr[7] != '-': raise ValueError('Invalid date separator') - day = int(dtstr[8:10]) + month = int(dtstr[5:7]) - return [year, month, day] + return [year, month, int(dtstr[8:10])] def _parse_hh_mm_ss_ff(tstr): # Parses things of the form HH[:MM[:SS[.fff[fff]]]] @@ -304,16 +299,15 @@ def _parse_hh_mm_ss_ff(tstr): if pos < len_str: if tstr[pos] != '.': raise ValueError('Invalid microsecond component') - else: - pos += 1 + pos += 1 - len_remainder = len_str - pos - if len_remainder not in (3, 6): - raise ValueError('Invalid microsecond component') + len_remainder = len_str - pos + if len_remainder not in (3, 6): + raise ValueError('Invalid microsecond component') - time_comps[3] = int(tstr[pos:]) - if len_remainder == 3: - time_comps[3] *= 1000 + time_comps[3] = int(tstr[pos:]) + if len_remainder == 3: + time_comps[3] *= 1000 return time_comps @@ -416,8 +410,7 @@ def _check_tzinfo_arg(tz): raise TypeError("tzinfo argument must be None or of a tzinfo subclass") def _cmperror(x, y): - raise TypeError("can't compare '%s' to '%s'" % ( - type(x).__name__, type(y).__name__)) + raise TypeError(f"can't compare '{type(x).__name__}' to '{type(y).__name__}'") def _divide_and_round(a, b): 
"""divide a by b and round result to the nearest integer @@ -524,16 +517,13 @@ def __new__(cls, days=0, seconds=0, microseconds=0, if isinstance(microseconds, float): microseconds = round(microseconds + usdouble) seconds, microseconds = divmod(microseconds, 1000000) - days, seconds = divmod(seconds, 24*3600) - d += days - s += seconds else: microseconds = int(microseconds) seconds, microseconds = divmod(microseconds, 1000000) - days, seconds = divmod(seconds, 24*3600) - d += days - s += seconds microseconds = round(microseconds + usdouble) + days, seconds = divmod(seconds, 24*3600) + d += days + s += seconds assert isinstance(s, int) assert isinstance(microseconds, int) assert abs(s) <= 3 * 24 * 3600 @@ -569,9 +559,7 @@ def __repr__(self): args.append("microseconds=%d" % self._microseconds) if not args: args.append('0') - return "%s.%s(%s)" % (self.__class__.__module__, - self.__class__.__qualname__, - ', '.join(args)) + return f"{self.__class__.__module__}.{self.__class__.__qualname__}({', '.join(args)})" def __str__(self): mm, ss = divmod(self._seconds, 60) @@ -627,9 +615,7 @@ def __sub__(self, other): return NotImplemented def __rsub__(self, other): - if isinstance(other, timedelta): - return -self + other - return NotImplemented + return -self + other if isinstance(other, timedelta) else NotImplemented def __neg__(self): # for CPython compatibility, we cannot use @@ -642,10 +628,7 @@ def __pos__(self): return self def __abs__(self): - if self._days < 0: - return -self - else: - return self + return -self if self._days < 0 else self def __mul__(self, other): if isinstance(other, int): @@ -715,10 +698,7 @@ def __le__(self, other): return NotImplemented def __lt__(self, other): - if isinstance(other, timedelta): - return self._cmp(other) < 0 - else: - return NotImplemented + return self._cmp(other) < 0 if isinstance(other, timedelta) else NotImplemented def __ge__(self, other): if isinstance(other, timedelta): @@ -727,10 +707,7 @@ def __ge__(self, other): return 
NotImplemented def __gt__(self, other): - if isinstance(other, timedelta): - return self._cmp(other) > 0 - else: - return NotImplemented + return self._cmp(other) > 0 if isinstance(other, timedelta) else NotImplemented def _cmp(self, other): assert isinstance(other, timedelta) @@ -930,10 +907,8 @@ def strftime(self, fmt): def __format__(self, fmt): if not isinstance(fmt, str): - raise TypeError("must be str, not %s" % type(fmt).__name__) - if len(fmt) != 0: - return self.strftime(fmt) - return str(self) + raise TypeError(f"must be str, not {type(fmt).__name__}") + return self.strftime(fmt) if len(fmt) != 0 else str(self) def isoformat(self): """Return the date formatted according to ISO. @@ -993,29 +968,19 @@ def replace(self, year=None, month=None, day=None): # Comparisons of date objects with other. def __eq__(self, other): - if isinstance(other, date): - return self._cmp(other) == 0 - return NotImplemented + return self._cmp(other) == 0 if isinstance(other, date) else NotImplemented def __le__(self, other): - if isinstance(other, date): - return self._cmp(other) <= 0 - return NotImplemented + return self._cmp(other) <= 0 if isinstance(other, date) else NotImplemented def __lt__(self, other): - if isinstance(other, date): - return self._cmp(other) < 0 - return NotImplemented + return self._cmp(other) < 0 if isinstance(other, date) else NotImplemented def __ge__(self, other): - if isinstance(other, date): - return self._cmp(other) >= 0 - return NotImplemented + return self._cmp(other) >= 0 if isinstance(other, date) else NotImplemented def __gt__(self, other): - if isinstance(other, date): - return self._cmp(other) > 0 - return NotImplemented + return self._cmp(other) > 0 if isinstance(other, date) else NotImplemented def _cmp(self, other): assert isinstance(other, date) @@ -1152,8 +1117,7 @@ def fromutc(self, dt): dtdst = dt.dst() if dtdst is None: raise ValueError("fromutc() requires a non-None dst() result") - delta = dtoff - dtdst - if delta: + if delta := 
dtoff - dtdst: dt += delta dtdst = dt.dst() if dtdst is None: @@ -1164,13 +1128,11 @@ def fromutc(self, dt): # Pickle support. def __reduce__(self): - getinitargs = getattr(self, "__getinitargs__", None) - if getinitargs: + if getinitargs := getattr(self, "__getinitargs__", None): args = getinitargs() else: args = () - getstate = getattr(self, "__getstate__", None) - if getstate: + if getstate := getattr(self, "__getstate__", None): state = getstate() else: state = getattr(self, "__dict__", None) or None @@ -1246,8 +1208,11 @@ def __new__(cls, hour=0, minute=0, second=0, microsecond=0, tzinfo=None, *, fold tzinfo (default to None) fold (keyword only, default to zero) """ - if (isinstance(hour, (bytes, str)) and len(hour) == 6 and - ord(hour[0:1])&0x7F < 24): + if ( + isinstance(hour, (bytes, str)) + and len(hour) == 6 + and ord(hour[:1]) & 0x7F < 24 + ): # Pickle support if isinstance(hour, str): try: @@ -1316,28 +1281,16 @@ def __eq__(self, other): return NotImplemented def __le__(self, other): - if isinstance(other, time): - return self._cmp(other) <= 0 - else: - return NotImplemented + return self._cmp(other) <= 0 if isinstance(other, time) else NotImplemented def __lt__(self, other): - if isinstance(other, time): - return self._cmp(other) < 0 - else: - return NotImplemented + return self._cmp(other) < 0 if isinstance(other, time) else NotImplemented def __ge__(self, other): - if isinstance(other, time): - return self._cmp(other) >= 0 - else: - return NotImplemented + return self._cmp(other) >= 0 if isinstance(other, time) else NotImplemented def __gt__(self, other): - if isinstance(other, time): - return self._cmp(other) > 0 - else: - return NotImplemented + return self._cmp(other) > 0 if isinstance(other, time) else NotImplemented def _cmp(self, other, allow_mixed=False): assert isinstance(other, time) @@ -1370,22 +1323,19 @@ def _cmp(self, other, allow_mixed=False): def __hash__(self): """Hash.""" if self._hashcode == -1: - if self.fold: - t = 
self.replace(fold=0) - else: - t = self - tzoff = t.utcoffset() - if not tzoff: # zero or None - self._hashcode = hash(t._getstate()[0]) - else: + t = self.replace(fold=0) if self.fold else self + if tzoff := t.utcoffset(): h, m = divmod(timedelta(hours=self.hour, minutes=self.minute) - tzoff, timedelta(hours=1)) assert not m % timedelta(minutes=1), "whole minute" m //= timedelta(minutes=1) - if 0 <= h < 24: - self._hashcode = hash(time(h, m, self.second, self.microsecond)) - else: - self._hashcode = hash((h, m, self.second, self.microsecond)) + self._hashcode = ( + hash(time(h, m, self.second, self.microsecond)) + if 0 <= h < 24 + else hash((h, m, self.second, self.microsecond)) + ) + else: + self._hashcode = hash(t._getstate()[0]) return self._hashcode # Conversion to string @@ -1411,7 +1361,7 @@ def __repr__(self): s = s[:-1] + ", tzinfo=%r" % self._tzinfo + ")" if self._fold: assert s[-1:] == ")" - s = s[:-1] + ", fold=1)" + s = f"{s[:-1]}, fold=1)" return s def isoformat(self, timespec='auto'): @@ -1426,8 +1376,7 @@ def isoformat(self, timespec='auto'): """ s = _format_time(self._hour, self._minute, self._second, self._microsecond, timespec) - tz = self._tzstr() - if tz: + if tz := self._tzstr(): s += tz return s @@ -1458,10 +1407,8 @@ def strftime(self, fmt): def __format__(self, fmt): if not isinstance(fmt, str): - raise TypeError("must be str, not %s" % type(fmt).__name__) - if len(fmt) != 0: - return self.strftime(fmt) - return str(self) + raise TypeError(f"must be str, not {type(fmt).__name__}") + return self.strftime(fmt) if len(fmt) != 0 else str(self) # Timezone functions @@ -1529,10 +1476,7 @@ def _getstate(self, protocol=3): h += 128 basestate = bytes([h, self._minute, self._second, us1, us2, us3]) - if self._tzinfo is None: - return (basestate,) - else: - return (basestate, self._tzinfo) + return (basestate, ) if self._tzinfo is None else (basestate, self._tzinfo) def __setstate(self, string, tzinfo): if tzinfo is not None and not isinstance(tzinfo, 
_tzinfo_class): @@ -1724,7 +1668,7 @@ def fromisoformat(cls, date_string): raise TypeError('fromisoformat: argument must be str') # Split this at the separator - dstr = date_string[0:10] + dstr = date_string[:10] tstr = date_string[11:] try: @@ -1783,25 +1727,19 @@ def local(u): t2 = local(u2) if t2 == t: return u2 - if t1 == t: - return u1 - # We have found both offsets a and b, but neither t - a nor t - b is - # a solution. This means t is in the gap. - return (max, min)[self.fold](u1, u2) + return u1 if t1 == t else (max, min)[self.fold](u1, u2) def timestamp(self): "Return POSIX timestamp as float" - if self._tzinfo is None: - s = self._mktime() - return s + self.microsecond / 1e6 - else: + if self._tzinfo is not None: return (self - _EPOCH).total_seconds() + s = self._mktime() + return s + self.microsecond / 1e6 def utctimetuple(self): "Return UTC time tuple compatible with time.gmtime()." - offset = self.utcoffset() - if offset: + if offset := self.utcoffset(): self -= offset y, m, d = self.year, self.month, self.day hh, mm, ss = self.hour, self.minute, self.second @@ -1915,8 +1853,7 @@ def isoformat(self, sep='T', timespec='auto'): self._microsecond, timespec)) off = self.utcoffset() - tz = _format_offset(off) - if tz: + if tz := _format_offset(off): s += tz return s @@ -1929,15 +1866,13 @@ def __repr__(self): del L[-1] if L[-1] == 0: del L[-1] - s = "%s.%s(%s)" % (self.__class__.__module__, - self.__class__.__qualname__, - ", ".join(map(str, L))) + s = f'{self.__class__.__module__}.{self.__class__.__qualname__}({", ".join(map(str, L))})' if self._tzinfo is not None: assert s[-1:] == ")" s = s[:-1] + ", tzinfo=%r" % self._tzinfo + ")" if self._fold: assert s[-1:] == ")" - s = s[:-1] + ", fold=1)" + s = f"{s[:-1]}, fold=1)" return s def __str__(self): @@ -2064,7 +1999,7 @@ def _cmp(self, other, allow_mixed=False): diff = self - other # this will take offsets into account if diff.days < 0: return -1 - return diff and 1 or 0 + return 1 if diff else 0 def 
__add__(self, other): "Add a datetime and a timedelta." @@ -2090,10 +2025,7 @@ def __add__(self, other): def __sub__(self, other): "Subtract two datetimes, or a datetime and a timedelta." if not isinstance(other, datetime): - if isinstance(other, timedelta): - return self + -other - return NotImplemented - + return self + -other if isinstance(other, timedelta) else NotImplemented days1 = self.toordinal() days2 = other.toordinal() secs1 = self._second + self._minute * 60 + self._hour * 3600 @@ -2113,10 +2045,7 @@ def __sub__(self, other): def __hash__(self): if self._hashcode == -1: - if self.fold: - t = self.replace(fold=0) - else: - t = self + t = self.replace(fold=0) if self.fold else self tzoff = t.utcoffset() if tzoff is None: self._hashcode = hash(t._getstate()[0]) @@ -2138,10 +2067,7 @@ def _getstate(self, protocol=3): basestate = bytes([yhi, ylo, m, self._day, self._hour, self._minute, self._second, us1, us2, us3]) - if self._tzinfo is None: - return (basestate,) - else: - return (basestate, self._tzinfo) + return (basestate, ) if self._tzinfo is None else (basestate, self._tzinfo) def __setstate(self, string, tzinfo): if tzinfo is not None and not isinstance(tzinfo, _tzinfo_class): @@ -2211,9 +2137,7 @@ def _create(cls, offset, name=None): def __getinitargs__(self): """pickle support""" - if self._name is None: - return (self._offset,) - return (self._offset, self._name) + return (self._offset, ) if self._name is None else (self._offset, self._name) def __eq__(self, other): if isinstance(other, timezone): @@ -2290,8 +2214,7 @@ def _name_from_offset(delta): hours, rest = divmod(delta, timedelta(hours=1)) minutes, rest = divmod(rest, timedelta(minutes=1)) seconds = rest.seconds - microseconds = rest.microseconds - if microseconds: + if microseconds := rest.microseconds: return (f'UTC{sign}{hours:02d}:{minutes:02d}:{seconds:02d}' f'.{microseconds:06d}') if seconds: diff --git a/Lib/difflib.py b/Lib/difflib.py index 0b14d3c779..6153bd42d0 100644 --- 
a/Lib/difflib.py +++ b/Lib/difflib.py @@ -37,9 +37,7 @@ Match = _namedtuple('Match', 'a b size') def _calculate_ratio(matches, length): - if length: - return 2.0 * matches / length - return 1.0 + return 2.0 * matches / length if length else 1.0 class SequenceMatcher: @@ -315,9 +313,8 @@ def __chain_b(self): # Purge junk elements self.bjunk = junk = set() - isjunk = self.isjunk - if isjunk: - for elt in b2j.keys(): + if isjunk := self.isjunk: + for elt in b2j: if isjunk(elt): junk.add(elt) for elt in junk: # separate loop avoids separate list of keys @@ -617,7 +614,7 @@ def get_grouped_opcodes(self, n=3): group = [] i1, j1 = max(i1, i2-n), max(j1, j2-n) group.append((tag, i1, i2, j1 ,j2)) - if group and not (len(group)==1 and group[0][0] == 'equal'): + if group and (len(group) != 1 or group[0][0] != 'equal'): yield group def ratio(self): @@ -665,10 +662,7 @@ def quick_ratio(self): avail = {} availhas, matches = avail.__contains__, 0 for elt in self.a: - if availhas(elt): - numb = avail[elt] - else: - numb = fullbcount.get(elt, 0) + numb = avail[elt] if availhas(elt) else fullbcount.get(elt, 0) avail[elt] = numb - 1 if numb > 0: matches = matches + 1 @@ -717,7 +711,7 @@ def get_close_matches(word, possibilities, n=3, cutoff=0.6): ['except'] """ - if not n > 0: + if n <= 0: raise ValueError("n must be > 0: %r" % (n,)) if not 0.0 <= cutoff <= 1.0: raise ValueError("cutoff must be in [0.0, 1.0]: %r" % (cutoff,)) @@ -907,7 +901,7 @@ def compare(self, a, b): def _dump(self, tag, x, lo, hi): """Generate comparison results for a same-tagged range.""" for i in range(lo, hi): - yield '%s %s' % (tag, x[i]) + yield f'{tag} {x[i]}' def _plain_replace(self, a, alo, ahi, b, blo, bhi): assert alo < ahi and blo < bhi @@ -968,8 +962,8 @@ def _fancy_replace(self, a, alo, ahi, b, blo, bhi): # time it's called on a sequence pair; the expensive part # of the computation is cached by cruncher if cruncher.real_quick_ratio() > best_ratio and \ - cruncher.quick_ratio() > best_ratio and \ - 
cruncher.ratio() > best_ratio: + cruncher.quick_ratio() > best_ratio and \ + cruncher.ratio() > best_ratio: best_ratio, best_i, best_j = cruncher.ratio(), i, j if best_ratio < cutoff: # no non-identical "pretty close" pair @@ -1012,7 +1006,7 @@ def _fancy_replace(self, a, alo, ahi, b, blo, bhi): yield from self._qformat(aelt, belt, atags, btags) else: # the synch pair is identical - yield ' ' + aelt + yield f' {aelt}' # pump out diffs from after the synch point yield from self._fancy_helper(a, best_i+1, ahi, b, best_j+1, bhi) @@ -1048,11 +1042,11 @@ def _qformat(self, aline, bline, atags, btags): atags = _keep_original_ws(aline, atags).rstrip() btags = _keep_original_ws(bline, btags).rstrip() - yield "- " + aline + yield f"- {aline}" if atags: yield f"? {atags}\n" - yield "+ " + bline + yield f"+ {bline}" if btags: yield f"? {btags}\n" @@ -1120,10 +1114,10 @@ def _format_range_unified(start, stop): beginning = start + 1 # lines start numbering with one length = stop - start if length == 1: - return '{}'.format(beginning) + return f'{beginning}' if not length: beginning -= 1 # empty ranges begin at line just before the range - return '{},{}'.format(beginning, length) + return f'{beginning},{length}' def unified_diff(a, b, fromfile='', tofile='', fromfiledate='', tofiledate='', n=3, lineterm='\n'): @@ -1171,27 +1165,27 @@ def unified_diff(a, b, fromfile='', tofile='', fromfiledate='', for group in SequenceMatcher(None,a,b).get_grouped_opcodes(n): if not started: started = True - fromdate = '\t{}'.format(fromfiledate) if fromfiledate else '' - todate = '\t{}'.format(tofiledate) if tofiledate else '' - yield '--- {}{}{}'.format(fromfile, fromdate, lineterm) - yield '+++ {}{}{}'.format(tofile, todate, lineterm) + fromdate = f'\t{fromfiledate}' if fromfiledate else '' + todate = f'\t{tofiledate}' if tofiledate else '' + yield f'--- {fromfile}{fromdate}{lineterm}' + yield f'+++ {tofile}{todate}{lineterm}' first, last = group[0], group[-1] file1_range = 
_format_range_unified(first[1], last[2]) file2_range = _format_range_unified(first[3], last[4]) - yield '@@ -{} +{} @@{}'.format(file1_range, file2_range, lineterm) + yield f'@@ -{file1_range} +{file2_range} @@{lineterm}' for tag, i1, i2, j1, j2 in group: if tag == 'equal': for line in a[i1:i2]: - yield ' ' + line + yield f' {line}' continue if tag in {'replace', 'delete'}: for line in a[i1:i2]: - yield '-' + line + yield f'-{line}' if tag in {'replace', 'insert'}: for line in b[j1:j2]: - yield '+' + line + yield f'+{line}' ######################################################################## @@ -1206,8 +1200,8 @@ def _format_range_context(start, stop): if not length: beginning -= 1 # empty ranges begin at line just before the range if length <= 1: - return '{}'.format(beginning) - return '{},{}'.format(beginning, beginning + length - 1) + return f'{beginning}' + return f'{beginning},{beginning + length - 1}' # See http://www.unix.org/single_unix_specification/ def context_diff(a, b, fromfile='', tofile='', @@ -1260,16 +1254,16 @@ def context_diff(a, b, fromfile='', tofile='', for group in SequenceMatcher(None,a,b).get_grouped_opcodes(n): if not started: started = True - fromdate = '\t{}'.format(fromfiledate) if fromfiledate else '' - todate = '\t{}'.format(tofiledate) if tofiledate else '' - yield '*** {}{}{}'.format(fromfile, fromdate, lineterm) - yield '--- {}{}{}'.format(tofile, todate, lineterm) + fromdate = f'\t{fromfiledate}' if fromfiledate else '' + todate = f'\t{tofiledate}' if tofiledate else '' + yield f'*** {fromfile}{fromdate}{lineterm}' + yield f'--- {tofile}{todate}{lineterm}' first, last = group[0], group[-1] - yield '***************' + lineterm + yield f'***************{lineterm}' file1_range = _format_range_context(first[1], last[2]) - yield '*** {} ****{}'.format(file1_range, lineterm) + yield f'*** {file1_range} ****{lineterm}' if any(tag in {'replace', 'delete'} for tag, _, _, _, _ in group): for tag, i1, i2, _, _ in group: @@ -1278,7 
+1272,7 @@ def context_diff(a, b, fromfile='', tofile='', yield prefix[tag] + line file2_range = _format_range_context(first[3], last[4]) - yield '--- {} ----{}'.format(file2_range, lineterm) + yield f'--- {file2_range} ----{lineterm}' if any(tag in {'replace', 'insert'} for tag, _, _, _, _ in group): for tag, _, _, j1, j2 in group: @@ -1448,13 +1442,13 @@ def _make_line(lines, format_key, side, num_lines=[0,0]): def record_sub_info(match_object,sub_info=sub_info): sub_info.append([match_object.group(1)[0],match_object.span()]) return match_object.group(1) + change_re.sub(record_sub_info,markers) # process each tuple inserting our special marks that won't be # noticed by an xml/html escaper. for key,(begin,end) in reversed(sub_info): - text = text[0:begin]+'\0'+key+text[begin:end]+'\1'+text[end:] + text = text[:begin] + '\0' + key + text[begin:end] + '\1' + text[end:] text = text[2:] - # Handle case of add/delete entire line else: text = lines.pop(0)[2:] # if line of text is just a newline, insert a space so there is @@ -1573,7 +1567,7 @@ def _line_pair_iterator(): fromlines,tolines=[],[] while True: # Collecting lines of text until we have a from/to pair - while (len(fromlines)==0 or len(tolines)==0): + while not fromlines or not tolines: try: from_line, to_line, found_diff = next(line_iterator) except StopIteration: @@ -1592,8 +1586,6 @@ def _line_pair_iterator(): line_pair_iterator = _line_pair_iterator() if context is None: yield from line_pair_iterator - # Handle case where user wants context differencing. We must do some - # storage of lines until we know for sure that they are to be yielded. else: context += 1 lines_to_write = 0 @@ -1603,7 +1595,7 @@ def _line_pair_iterator(): # we need for context. 
index, contextLines = 0, [None]*(context) found_diff = False - while(found_diff is False): + while not found_diff: try: from_line, to_line, found_diff = next(line_pair_iterator) except StopIteration: @@ -1815,14 +1807,12 @@ def _split_line(self,data_list,line_num,text): if text[i] == '\0': i += 1 mark = text[i] - i += 1 elif text[i] == '\1': - i += 1 mark = '' else: - i += 1 n += 1 + i += 1 # wrap point is inside text, break it up into separate lines line1 = text[:i] line2 = text[i:] @@ -1858,14 +1848,8 @@ def _line_wrapper(self,diffs): # yield from/to line in pairs inserting blank lines as # necessary when one side has more wrapped lines while fromlist or tolist: - if fromlist: - fromdata = fromlist.pop(0) - else: - fromdata = ('',' ') - if tolist: - todata = tolist.pop(0) - else: - todata = ('',' ') + fromdata = fromlist.pop(0) if fromlist else ('', ' ') + todata = tolist.pop(0) if tolist else ('', ' ') yield fromdata,todata,flag def _collect_lines(self,diffs): @@ -1899,7 +1883,7 @@ def _format_line(self,side,flag,linenum,text): """ try: linenum = '%d' % linenum - id = ' id="%s%s"' % (self._prefix[side],linenum) + id = f' id="{self._prefix[side]}{linenum}"' except TypeError: # handle blank lines where linenum is '>' or '' id = '' @@ -1909,8 +1893,7 @@ def _format_line(self,side,flag,linenum,text): # make space non-breakable so they don't get compressed or line wrapped text = text.replace(' ',' ').rstrip() - return '%s%s' \ - % (id,linenum,text) + return f'{linenum}{text}' def _make_prefix(self): """Create unique anchor prefixes""" @@ -1964,9 +1947,9 @@ def _convert_flags(self,fromlist,tolist,flaglist,context,numlines): fromlist = tolist = [' Empty File '] # if not a change on first line, drop a link if not flaglist[0]: - next_href[0] = 'f' % toprefix + next_href[0] = f'f' # redo the last link to link to the top - next_href[last] = 't' % (toprefix) + next_href[last] = f't' return fromlist,tolist,flaglist,next_href,next_id @@ -1997,10 +1980,7 @@ def 
make_table(self,fromlines,tolines,fromdesc='',todesc='',context=False, fromlines,tolines = self._tab_newline_replace(fromlines,tolines) # create diffs iterator which generates side by side from/to data - if context: - context_lines = numlines - else: - context_lines = None + context_lines = numlines if context else None diffs = _mdiff(fromlines,tolines,context_lines,linejunk=self._linejunk, charjunk=self._charjunk) @@ -2017,7 +1997,7 @@ def make_table(self,fromlines,tolines,fromdesc='',todesc='',context=False, s = [] fmt = ' %s%s' + \ - '%s%s\n' + '%s%s\n' for i in range(len(flaglist)): if flaglist[i] is None: # mdiff yields None on separator lines skip the bogus ones @@ -2028,11 +2008,7 @@ def make_table(self,fromlines,tolines,fromdesc='',todesc='',context=False, s.append( fmt % (next_id[i],next_href[i],fromlist[i], next_href[i],tolist[i])) if fromdesc or todesc: - header_row = '%s%s%s%s' % ( - '
', - '%s' % fromdesc, - '
', - '%s' % todesc) + header_row = f'
{fromdesc}
{todesc}' else: header_row = '' @@ -2042,10 +2018,10 @@ def make_table(self,fromlines,tolines,fromdesc='',todesc='',context=False, prefix=self._prefix[1]) return table.replace('\0+',''). \ - replace('\0-',''). \ - replace('\0^',''). \ - replace('\1',''). \ - replace('\t',' ') + replace('\0-',''). \ + replace('\0^',''). \ + replace('\1',''). \ + replace('\t',' ') del re diff --git a/Lib/doctest.py b/Lib/doctest.py index 65466b4983..f1e330957f 100644 --- a/Lib/doctest.py +++ b/Lib/doctest.py @@ -286,21 +286,17 @@ def _ellipsis_match(want, got): # Deal with exact matches possibly needed at one or both ends. startpos, endpos = 0, len(got) - w = ws[0] - if w: # starts with exact match - if got.startswith(w): - startpos = len(w) - del ws[0] - else: + if w := ws[0]: + if not got.startswith(w): return False - w = ws[-1] - if w: # ends with exact match - if got.endswith(w): - endpos -= len(w) - del ws[-1] - else: + startpos = len(w) + del ws[0] + if w := ws[-1]: + if not got.endswith(w): return False + endpos -= len(w) + del ws[-1] if startpos > endpos: # Exact end matches required more characters than we have, as in # _ellipsis_match('aa...aa', 'aaa') @@ -322,11 +318,7 @@ def _ellipsis_match(want, got): def _comment_line(line): "Return a commented form of the given line" - line = line.rstrip() - if line: - return '# '+line - else: - return '#' + return f'# {line}' if (line := line.rstrip()) else '#' def _strip_exception_details(msg): # Support for IGNORE_EXCEPTION_DETAIL. 
@@ -550,9 +542,7 @@ def __repr__(self): examples = '1 example' else: examples = '%d examples' % len(self.examples) - return ('<%s %s from %s:%s (%s)>' % - (self.__class__.__name__, - self.name, self.filename, self.lineno, examples)) + return f'<{self.__class__.__name__} {self.name} from {self.filename}:{self.lineno} ({examples})>' def __eq__(self, other): if type(self) is not type(other): @@ -725,13 +715,7 @@ def _parse_example(self, m, name, lineno): lineno + len(source_lines)) want = '\n'.join([wl[indent:] for wl in want_lines]) - # If `want` contains a traceback message, then extract it. - m = self._EXCEPTION_RE.match(want) - if m: - exc_msg = m.group('msg') - else: - exc_msg = None - + exc_msg = m.group('msg') if (m := self._EXCEPTION_RE.match(want)) else None # Extract options from the source. options = self._find_options(source, name, lineno) @@ -780,10 +764,7 @@ def _find_options(self, source, name, lineno): def _min_indent(self, s): "Return the minimum indentation of any non-blank line in `s`" indents = [len(indent) for indent in self._INDENT_RE.findall(s)] - if len(indents) > 0: - return min(indents) - else: - return 0 + return min(indents, default=0) def _check_prompt_blank(self, lines, indent, name, lineno): """ @@ -884,10 +865,10 @@ def find(self, obj, name=None, module=None, globs=None, extraglobs=None): # If name was not specified, then extract it from the object. if name is None: name = getattr(obj, '__name__', None) - if name is None: - raise ValueError("DocTestFinder.find: name must be given " - "when obj.__name__ doesn't exist: %r" % - (type(obj),)) + if name is None: + raise ValueError("DocTestFinder.find: name must be given " + "when obj.__name__ doesn't exist: %r" % + (type(obj),)) # Find the module that contains the given object (if obj is # a module, then module=obj.). 
Note: this may fail, in which @@ -909,7 +890,7 @@ def find(self, obj, name=None, module=None, globs=None, extraglobs=None): # Check to see if it's one of our special internal "files" # (see __patched_linecache_getlines). file = inspect.getfile(obj) - if not file[0]+file[-2:] == '<]>': file = None + if file[0] + file[-2:] != '<]>': file = None if file is None: source_lines = None else: @@ -927,10 +908,7 @@ def find(self, obj, name=None, module=None, globs=None, extraglobs=None): # Initialize globals, and merge in extraglobs. if globs is None: - if module is None: - globs = {} - else: - globs = module.__dict__.copy() + globs = {} if module is None else module.__dict__.copy() else: globs = globs.copy() if extraglobs is not None: @@ -993,7 +971,7 @@ def _find(self, tests, obj, name, module, source_lines, globs, seen): add them to `tests`. """ if self._verbose: - print('Finding tests in %s' % name) + print(f'Finding tests in {name}') # If we've already processed this object, then ignore it. if id(obj) in seen: @@ -1008,7 +986,7 @@ def _find(self, tests, obj, name, module, source_lines, globs, seen): # Look for tests in a module's contained objects. if inspect.ismodule(obj) and self._recurse: for valname, val in obj.__dict__.items(): - valname = '%s.%s' % (name, valname) + valname = f'{name}.{valname}' # Recurse to functions & classes. 
if ((self._is_routine(val) or inspect.isclass(val)) and @@ -1029,7 +1007,7 @@ def _find(self, tests, obj, name, module, source_lines, globs, seen): "must be strings, functions, methods, " "classes, or modules: %r" % (type(val),)) - valname = '%s.__test__.%s' % (name, valname) + valname = f'{name}.__test__.{valname}' self._find(tests, val, valname, module, source_lines, globs, seen) @@ -1044,7 +1022,7 @@ def _find(self, tests, obj, name, module, source_lines, globs, seen): if ((inspect.isroutine(val) or inspect.isclass(val) or isinstance(val, property)) and self._from_module(module, val)): - valname = '%s.%s' % (name, valname) + valname = f'{name}.{valname}' self._find(tests, val, valname, module, source_lines, globs, seen) @@ -1280,10 +1258,9 @@ def _failure_header(self, test, example): lineno = test.lineno + example.lineno + 1 else: lineno = '?' - out.append('File "%s", line %s, in %s' % - (test.filename, lineno, test.name)) + out.append(f'File "{test.filename}", line {lineno}, in {test.name}') else: - out.append('Line %s, in %s' % (example.lineno+1, test.name)) + out.append(f'Line {example.lineno + 1}, in {test.name}') out.append('Failed example:') source = example.source out.append(_indent(source)) @@ -1433,11 +1410,10 @@ def __record_outcome(self, test, f, t): r'\[(?P\d+)\]>$') def __patched_linecache_getlines(self, filename, module_globals=None): m = self.__LINECACHE_FILENAME_RE.match(filename) - if m and m.group('name') == self.test.name: - example = self.test.examples[int(m.group('examplenum'))] - return example.source.splitlines(keepends=True) - else: + if not m or m.group('name') != self.test.name: return self.save_linecache_getlines(filename, module_globals) + example = self.test.examples[int(m.group('examplenum'))] + return example.source.splitlines(keepends=True) def run(self, test, compileflags=None, out=None, clear_globs=True): """ @@ -2077,10 +2053,7 @@ class doctest.Tester, then merges the results into (or creates) name = os.path.basename(filename) 
# Assemble the globals. - if globs is None: - globs = {} - else: - globs = globs.copy() + globs = {} if globs is None else globs.copy() if extraglobs is not None: globs.update(extraglobs) if '__name__' not in globs: @@ -2184,9 +2157,9 @@ def __init__(self, test, optionflags=0, setUp=None, tearDown=None, self._dt_tearDown = tearDown def setUp(self): - test = self._dt_test - if self._dt_setUp is not None: + test = self._dt_test + self._dt_setUp(test) def tearDown(self): @@ -2225,10 +2198,7 @@ def runTest(self): def format_failure(self, err): test = self._dt_test - if test.lineno is None: - lineno = 'unknown line number' - else: - lineno = '%s' % test.lineno + lineno = 'unknown line number' if test.lineno is None else f'{test.lineno}' lname = '.'.join(test.name.split('.')[-1:]) return ('Failed doctest test for %s\n' ' File "%s", line %s, in %s\n\n%s' @@ -2326,12 +2296,12 @@ def __hash__(self): def __repr__(self): name = self._dt_test.name.split('.') - return "%s (%s)" % (name[-1], '.'.join(name[:-1])) + return f"{name[-1]} ({'.'.join(name[:-1])})" __str__ = object.__str__ def shortDescription(self): - return "Doctest: " + self._dt_test.name + return f"Doctest: {self._dt_test.name}" class SkipDocTestCase(DocTestCase): def __init__(self, module): @@ -2345,7 +2315,7 @@ def test_skip(self): pass def shortDescription(self): - return "Skipping tests from %s" % self.module.__name__ + return f"Skipping tests from {self.module.__name__}" __str__ = shortDescription @@ -2436,11 +2406,7 @@ def format_failure(self, err): def DocFileTest(path, module_relative=True, package=None, globs=None, parser=DocTestParser(), encoding=None, **options): - if globs is None: - globs = {} - else: - globs = globs.copy() - + globs = {} if globs is None else globs.copy() if package and not module_relative: raise ValueError("Package may only be specified for module-" "relative paths.") @@ -2595,11 +2561,9 @@ def script_from_examples(s): if isinstance(piece, Example): # Add the example's source code 
(strip trailing NL) output.append(piece.source[:-1]) - # Add the expected output: - want = piece.want - if want: + if want := piece.want: output.append('# Expected:') - output += ['## '+l for l in want.split('\n')[:-1]] + output += [f'## {l}' for l in want.split('\n')[:-1]] else: # Add non-example text. output += [_comment_line(l) @@ -2627,8 +2591,7 @@ def testsource(module, name): if not test: raise ValueError(name, "not found in tests") test = test[0] - testsrc = script_from_examples(test.docstring) - return testsrc + return script_from_examples(test.docstring) def debug_src(src, pm=False, globs=None): """Debug a single doctest docstring, in argument `src`'""" @@ -2639,11 +2602,7 @@ def debug_script(src, pm=False, globs=None): "Debug a test script. `src` is the script, as a string." import pdb - if globs: - globs = globs.copy() - else: - globs = {} - + globs = globs.copy() if globs else {} if pm: try: exec(src, globs, globs) diff --git a/Lib/enum.py b/Lib/enum.py index 31afdd3a24..700b8a9b55 100644 --- a/Lib/enum.py +++ b/Lib/enum.py @@ -98,8 +98,7 @@ def __setitem__(self, key, value): else: value = list(value) self._ignore = value - already = set(value) & set(self._member_names) - if already: + if already := set(value) & set(self._member_names): raise ValueError( '_ignore_ cannot specify already set names: %r' % (already, ) @@ -179,9 +178,7 @@ def __new__(metacls, cls, bases, classdict): # adjust the sunders _order_ = classdict.pop('_order_', None) - # check for illegal enum names (any others?) 
- invalid_names = set(enum_members) & {'mro', ''} - if invalid_names: + if invalid_names := set(enum_members) & {'mro', ''}: raise ValueError('Invalid enum member name: {0}'.format( ','.join(invalid_names))) @@ -220,7 +217,7 @@ def __new__(metacls, cls, bases, classdict): if member_type is not object: methods = ('__getnewargs_ex__', '__getnewargs__', '__reduce_ex__', '__reduce__') - if not any(m in member_type.__dict__ for m in methods): + if all(m not in member_type.__dict__ for m in methods): _make_class_unpicklable(enum_class) # instantiate them, checking for duplicates as we go @@ -229,10 +226,7 @@ def __new__(metacls, cls, bases, classdict): # auto-numbering ;) for member_name in classdict._member_names: value = enum_members[member_name] - if not isinstance(value, tuple): - args = (value, ) - else: - args = value + args = (value, ) if not isinstance(value, tuple) else value if member_type is tuple: # special case for tuple enums args = (args, ) # wrap it one more time if not use_args: @@ -242,10 +236,7 @@ def __new__(metacls, cls, bases, classdict): else: enum_member = __new__(enum_class, *args) if not hasattr(enum_member, '_value_'): - if member_type is object: - enum_member._value_ = value - else: - enum_member._value_ = member_type(*args) + enum_member._value_ = value if member_type is object else member_type(*args) value = enum_member._value_ enum_member._name_ = member_name enum_member.__objclass__ = enum_class @@ -310,7 +301,7 @@ def __bool__(self): """ return True - def __call__(cls, value, names=None, *, module=None, qualname=None, type=None, start=1): + def __call__(self, value, names=None, *, module=None, qualname=None, type=None, start=1): """ Either returns an existing member, or creates a new enum class. @@ -336,29 +327,24 @@ def __call__(cls, value, names=None, *, module=None, qualname=None, type=None, s `type`, if set, will be mixed in as the first base class. 
""" if names is None: # simple value lookup - return cls.__new__(cls, value) + return self.__new__(self, value) # otherwise, functional API: we're creating a new Enum type - return cls._create_( - value, - names, - module=module, - qualname=qualname, - type=type, - start=start, - ) + return self._create_( + value, names, module=module, qualname=qualname, type=type, start=start + ) - def __contains__(cls, member): + def __contains__(self, member): if not isinstance(member, Enum): raise TypeError( - "unsupported operand type(s) for 'in': '%s' and '%s'" % ( - type(member).__qualname__, cls.__class__.__qualname__)) - return isinstance(member, cls) and member._name_ in cls._member_map_ + f"unsupported operand type(s) for 'in': '{type(member).__qualname__}' and '{self.__class__.__qualname__}'" + ) + return isinstance(member, self) and member._name_ in self._member_map_ def __delattr__(cls, attr): # nicer error message when someone tries to delete an attribute # (see issue19025). if attr in cls._member_map_: - raise AttributeError("%s: cannot delete Enum member." % cls.__name__) + raise AttributeError(f"{cls.__name__}: cannot delete Enum member.") super().__delattr__(attr) def __dir__(self): @@ -367,7 +353,7 @@ def __dir__(self): + self._member_names_ ) - def __getattr__(cls, name): + def __getattr__(self, name): """ Return the enum member matching `name` @@ -379,21 +365,21 @@ def __getattr__(cls, name): if _is_dunder(name): raise AttributeError(name) try: - return cls._member_map_[name] + return self._member_map_[name] except KeyError: raise AttributeError(name) from None - def __getitem__(cls, name): - return cls._member_map_[name] + def __getitem__(self, name): + return self._member_map_[name] - def __iter__(cls): + def __iter__(self): """ Returns members in definition order. 
""" return (cls._member_map_[name] for name in cls._member_names_) - def __len__(cls): - return len(cls._member_names_) + def __len__(self): + return len(self._member_names_) @property def __members__(cls): @@ -405,10 +391,10 @@ def __members__(cls): """ return MappingProxyType(cls._member_map_) - def __repr__(cls): - return "" % cls.__name__ + def __repr__(self): + return "" % self.__name__ - def __reversed__(cls): + def __reversed__(self): """ Returns members in reverse definition order. """ @@ -427,7 +413,7 @@ def __setattr__(cls, name, value): raise AttributeError('Cannot reassign members.') super().__setattr__(name, value) - def _create_(cls, class_name, names, *, module=None, qualname=None, type=None, start=1): + def _create_(self, class_name, names, *, module=None, qualname=None, type=None, start=1): """ Convenience method to create a new Enum class. @@ -439,9 +425,9 @@ def _create_(cls, class_name, names, *, module=None, qualname=None, type=None, s * An iterable of (member name, value) pairs. * A mapping of member name -> value pairs. """ - metacls = cls.__class__ - bases = (cls, ) if type is None else (type, cls) - _, first_enum = cls._get_mixins_(cls, bases) + metacls = self.__class__ + bases = (self, ) if type is None else (type, self) + _, first_enum = self._get_mixins_(self, bases) classdict = metacls.__prepare__(class_name, bases) # special processing needed for names? 
@@ -480,7 +466,7 @@ def _create_(cls, class_name, names, *, module=None, qualname=None, type=None, s return enum_class - def _convert_(cls, name, module, filter, source=None): + def _convert_(self, name, module, filter, source=None): """ Create a new Enum subclass that replaces a collection of global constants """ @@ -490,10 +476,7 @@ def _convert_(cls, name, module, filter, source=None): # also, replace the __reduce_ex__ method so unpickling works in # previous Python versions module_globals = vars(sys.modules[module]) - if source: - source = vars(source) - else: - source = module_globals + source = vars(source) if source else module_globals # _value2member_map_ is populated in the same order every time # for a consistent reverse mapping of number to name when there # are multiple names for the same number. @@ -507,17 +490,17 @@ def _convert_(cls, name, module, filter, source=None): except TypeError: # unless some values aren't comparable, in which case sort by name members.sort(key=lambda t: t[0]) - cls = cls(name, members, module=module) - cls.__reduce_ex__ = _reduce_ex_by_name - module_globals.update(cls.__members__) - module_globals[name] = cls - return cls + self = self(name, members, module=module) + self.__reduce_ex__ = _reduce_ex_by_name + module_globals |= self.__members__ + module_globals[name] = self + return self - def _convert(cls, *args, **kwargs): + def _convert(self, *args, **kwargs): import warnings warnings.warn("_convert is deprecated and will be removed in 3.9, use " "_convert_ instead.", DeprecationWarning, stacklevel=2) - return cls._convert_(*args, **kwargs) + return self._convert_(*args, **kwargs) @staticmethod def _check_for_existing_members(class_name, bases): @@ -615,10 +598,7 @@ def _find_new_(classdict, member_type, first_enum): # if a non-object.__new__ is used then whatever value/tuple was # assigned to the enum member name will be passed to __new__ and to the # new enum member's __init__ - if __new__ is object.__new__: - use_args = 
False - else: - use_args = True + use_args = __new__ is not object.__new__ return __new__, save_new, use_args @@ -656,19 +636,18 @@ def __new__(cls, value): result = None if isinstance(result, cls): return result - else: - ve_exc = ValueError("%r is not a valid %s" % (value, cls.__name__)) - if result is None and exc is None: - raise ve_exc - elif exc is None: - exc = TypeError( - 'error in %s._missing_: returned %r instead of None or a valid member' - % (cls.__name__, result) - ) - exc.__context__ = ve_exc - raise exc - - def _generate_next_value_(name, start, count, last_values): + ve_exc = ValueError("%r is not a valid %s" % (value, cls.__name__)) + if result is None and exc is None: + raise ve_exc + elif exc is None: + exc = TypeError( + 'error in %s._missing_: returned %r instead of None or a valid member' + % (cls.__name__, result) + ) + exc.__context__ = ve_exc + raise exc + + def _generate_next_value_(self, start, count, last_values): """ Generate the next value when not given. @@ -694,7 +673,7 @@ def __repr__(self): self.__class__.__name__, self._name_, self._value_) def __str__(self): - return "%s.%s" % (self.__class__.__name__, self._name_) + return f"{self.__class__.__name__}.{self._name_}" def __dir__(self): """ @@ -763,7 +742,7 @@ class Flag(Enum): Support for flags """ - def _generate_next_value_(name, start, count, last_values): + def _generate_next_value_(self, start, count, last_values): """ Generate the next value when not given. 
@@ -821,8 +800,8 @@ def __contains__(self, other): """ if not isinstance(other, self.__class__): raise TypeError( - "unsupported operand type(s) for 'in': '%s' and '%s'" % ( - type(other).__qualname__, self.__class__.__qualname__)) + f"unsupported operand type(s) for 'in': '{type(other).__qualname__}' and '{self.__class__.__qualname__}'" + ) return other._value_ & self._value_ == other._value_ def __repr__(self): @@ -839,15 +818,12 @@ def __repr__(self): def __str__(self): cls = self.__class__ if self._name_ is not None: - return '%s.%s' % (cls.__name__, self._name_) + return f'{cls.__name__}.{self._name_}' members, uncovered = _decompose(cls, self._value_) if len(members) == 1 and members[0]._name_ is None: return '%s.%r' % (cls.__name__, members[0]._value_) else: - return '%s.%s' % ( - cls.__name__, - '|'.join([str(m._name_ or m._value_) for m in members]), - ) + return f"{cls.__name__}.{'|'.join([str(m._name_ or m._value_) for m in members])}" def __bool__(self): return bool(self._value_) @@ -888,8 +864,7 @@ def _missing_(cls, value): """ if not isinstance(value, int): raise ValueError("%r is not a valid %s" % (value, cls.__name__)) - new_member = cls._create_pseudo_member_(value) - return new_member + return cls._create_pseudo_member_(value) @classmethod def _create_pseudo_member_(cls, value): @@ -927,8 +902,7 @@ def _create_pseudo_member_(cls, value): def __or__(self, other): if not isinstance(other, (self.__class__, int)): return NotImplemented - result = self.__class__(self._value_ | self.__class__(other)._value_) - return result + return self.__class__(self._value_ | self.__class__(other)._value_) def __and__(self, other): if not isinstance(other, (self.__class__, int)): @@ -945,8 +919,7 @@ def __xor__(self, other): __rxor__ = __xor__ def __invert__(self): - result = self.__class__(~self._value_) - return result + return self.__class__(~self._value_) def _high_bit(value): @@ -959,13 +932,14 @@ def unique(enumeration): """ Class decorator for enumerations 
ensuring unique member values. """ - duplicates = [] - for name, member in enumeration.__members__.items(): - if name != member.name: - duplicates.append((name, member.name)) - if duplicates: + if duplicates := [ + (name, member.name) + for name, member in enumeration.__members__.items() + if name != member.name + ]: alias_details = ', '.join( - ["%s -> %s" % (alias, name) for (alias, name) in duplicates]) + [f"{alias} -> {name}" for (alias, name) in duplicates] + ) raise ValueError('duplicate values found in %r: %s' % (enumeration, alias_details)) return enumeration @@ -1008,6 +982,4 @@ def _decompose(flag, value): return members, not_covered def _power_of_two(value): - if value < 1: - return False - return value == 2 ** _high_bit(value) + return False if value < 1 else value == 2 ** _high_bit(value) diff --git a/Lib/filecmp.py b/Lib/filecmp.py index 950b2afd4c..1a933c723b 100644 --- a/Lib/filecmp.py +++ b/Lib/filecmp.py @@ -127,14 +127,8 @@ class dircmp: def __init__(self, a, b, ignore=None, hide=None): # Initialize self.left = a self.right = b - if hide is None: - self.hide = [os.curdir, os.pardir] # Names never to be shown - else: - self.hide = hide - if ignore is None: - self.ignore = DEFAULT_IGNORES - else: - self.ignore = ignore + self.hide = [os.curdir, os.pardir] if hide is None else hide + self.ignore = DEFAULT_IGNORES if ignore is None else ignore def phase0(self): # Compare everything except common subdirectories self.left_list = _filter(os.listdir(self.left), diff --git a/Lib/fileinput.py b/Lib/fileinput.py index 2ce2f91143..ae8e1ff2be 100644 --- a/Lib/fileinput.py +++ b/Lib/fileinput.py @@ -190,10 +190,7 @@ def __init__(self, files=None, inplace=False, backup="", *, else: if files is None: files = sys.argv[1:] - if not files: - files = ('-',) - else: - files = tuple(files) + files = ('-', ) if not files else tuple(files) self._files = files self._inplace = inplace self._backup = backup @@ -253,8 +250,7 @@ def __iter__(self): def __next__(self): while 
True: - line = self._readline() - if line: + if line := self._readline(): self._filelineno += 1 return line if not self._file: @@ -278,13 +274,12 @@ def __getitem__(self, i): raise IndexError("end of input reached") def nextfile(self): - savestdout = self._savestdout - self._savestdout = None - if savestdout: + if savestdout := self._savestdout: sys.stdout = savestdout output = self._output self._output = None + self._savestdout = None try: if output: output.close() @@ -320,10 +315,7 @@ def readline(self): def _readline(self): if not self._files: - if 'b' in self._mode: - return b'' - else: - return '' + return b'' if 'b' in self._mode else '' self._filename = self._files[0] self._files = self._files[1:] self._startlineno = self.lineno() @@ -333,61 +325,60 @@ def _readline(self): self._backupfilename = 0 # EncodingWarning is emitted in __init__() already - if "b" not in self._mode: - encoding = self._encoding or "locale" - else: - encoding = None - + encoding = self._encoding or "locale" if "b" not in self._mode else None if self._filename == '-': self._filename = '' - if 'b' in self._mode: - self._file = getattr(sys.stdin, 'buffer', sys.stdin) - else: - self._file = sys.stdin + self._file = ( + getattr(sys.stdin, 'buffer', sys.stdin) + if 'b' in self._mode + else sys.stdin + ) self._isstdin = True - else: - if self._inplace: - self._backupfilename = ( - os.fspath(self._filename) + (self._backup or ".bak")) + elif self._inplace: + self._backupfilename = ( + os.fspath(self._filename) + (self._backup or ".bak")) + try: + os.unlink(self._backupfilename) + except OSError: + pass + # The next few lines may raise OSError + os.rename(self._filename, self._backupfilename) + self._file = open(self._backupfilename, self._mode, + encoding=encoding, errors=self._errors) + try: + perm = os.fstat(self._file.fileno()).st_mode + except OSError: + self._output = open(self._filename, self._write_mode, + encoding=encoding, errors=self._errors) + else: + mode = os.O_CREAT | 
os.O_WRONLY | os.O_TRUNC + if hasattr(os, 'O_BINARY'): + mode |= os.O_BINARY + + fd = os.open(self._filename, mode, perm) + self._output = os.fdopen(fd, self._write_mode, + encoding=encoding, errors=self._errors) try: - os.unlink(self._backupfilename) + os.chmod(self._filename, perm) except OSError: pass - # The next few lines may raise OSError - os.rename(self._filename, self._backupfilename) - self._file = open(self._backupfilename, self._mode, - encoding=encoding, errors=self._errors) - try: - perm = os.fstat(self._file.fileno()).st_mode - except OSError: - self._output = open(self._filename, self._write_mode, - encoding=encoding, errors=self._errors) - else: - mode = os.O_CREAT | os.O_WRONLY | os.O_TRUNC - if hasattr(os, 'O_BINARY'): - mode |= os.O_BINARY - - fd = os.open(self._filename, mode, perm) - self._output = os.fdopen(fd, self._write_mode, - encoding=encoding, errors=self._errors) - try: - os.chmod(self._filename, perm) - except OSError: - pass - self._savestdout = sys.stdout - sys.stdout = self._output - else: - # This may raise OSError - if self._openhook: + self._savestdout = sys.stdout + sys.stdout = self._output + elif self._openhook: # Custom hooks made previous to Python 3.10 didn't have # encoding argument - if self._encoding is None: - self._file = self._openhook(self._filename, self._mode) - else: - self._file = self._openhook( - self._filename, self._mode, encoding=self._encoding, errors=self._errors) - else: - self._file = open(self._filename, self._mode, encoding=encoding, errors=self._errors) + self._file = ( + self._openhook(self._filename, self._mode) + if self._encoding is None + else self._openhook( + self._filename, + self._mode, + encoding=self._encoding, + errors=self._errors, + ) + ) + else: + self._file = open(self._filename, self._mode, encoding=encoding, errors=self._errors) self._readline = self._file.readline # hide FileInput._readline return self._readline() @@ -401,12 +392,11 @@ def filelineno(self): return self._filelineno 
def fileno(self): - if self._file: - try: - return self._file.fileno() - except ValueError: - return -1 - else: + if not self._file: + return -1 + try: + return self._file.fileno() + except ValueError: return -1 def isfirstline(self): @@ -449,8 +439,10 @@ def _test(): backup = False opts, args = getopt.getopt(sys.argv[1:], "ib:") for o, a in opts: - if o == '-i': inplace = True - if o == '-b': backup = a + if o == '-b': + backup = a + elif o == '-i': + inplace = True for line in input(args, inplace=inplace, backup=backup): if line[-1:] == '\n': line = line[:-1] if line[-1:] == '\r': line = line[:-1] diff --git a/Lib/fnmatch.py b/Lib/fnmatch.py index fee59bf73f..9fb3306ee4 100644 --- a/Lib/fnmatch.py +++ b/Lib/fnmatch.py @@ -58,13 +58,9 @@ def filter(names, pat): match = _compile_pattern(pat) if os.path is posixpath: # normcase on posix is NOP. Optimize it away from the loop. - for name in names: - if match(name): - result.append(name) + result.extend(name for name in names if match(name)) else: - for name in names: - if match(os.path.normcase(name)): - result.append(name) + result.extend(name for name in names if match(os.path.normcase(name))) return result def fnmatchcase(name, pat): @@ -89,7 +85,7 @@ def translate(pat): i, n = 0, len(pat) while i < n: c = pat[i] - i = i+1 + i += 1 if c == '*': # compress consecutive `*` into one if (not res) or res[-1] is not STAR: @@ -99,11 +95,11 @@ def translate(pat): elif c == '[': j = i if j < n and pat[j] == '!': - j = j+1 + j += 1 if j < n and pat[j] == ']': - j = j+1 + j += 1 while j < n and pat[j] != ']': - j = j+1 + j += 1 if j >= n: add('\\[') else: @@ -120,8 +116,7 @@ def translate(pat): chunks.append(pat[i:k]) i = k+1 k = k+3 - chunk = pat[i:j] - if chunk: + if chunk := pat[i:j]: chunks.append(chunk) else: chunks[-1] += '-' @@ -145,7 +140,7 @@ def translate(pat): add('.') else: if stuff[0] == '!': - stuff = '^' + stuff[1:] + stuff = f'^{stuff[1:]}' elif stuff[0] in ('^', '['): stuff = '\\' + stuff add(f'[{stuff}]') 
diff --git a/Lib/formatter.py b/Lib/formatter.py index e2394de8c2..e86f59d88a 100644 --- a/Lib/formatter.py +++ b/Lib/formatter.py @@ -173,9 +173,7 @@ def format_roman(self, case, counter): s = s + ones[index]*x label = s + label index = index + 1 - if case == 'I': - return label.upper() - return label + return label.upper() if case == 'I' else label def add_flowing_data(self, data): if not data: return @@ -186,14 +184,13 @@ def add_flowing_data(self, data): return elif prespace or self.softspace: if not data: - if not self.nospace: - self.softspace = 1 - self.parskip = 0 + self.softspace = 1 + self.parskip = 0 return if not self.nospace: - data = ' ' + data + data = f' {data}' self.hard_break = self.nospace = self.para_end = \ - self.parskip = self.have_label = 0 + self.parskip = self.have_label = 0 self.softspace = postspace self.writer.send_flowing_data(data) @@ -250,10 +247,7 @@ def push_font(self, font): def pop_font(self): if self.font_stack: del self.font_stack[-1] - if self.font_stack: - font = self.font_stack[-1] - else: - font = None + font = self.font_stack[-1] if self.font_stack else None self.writer.new_font(font) def push_margin(self, margin): @@ -267,10 +261,7 @@ def pop_margin(self): if self.margin_stack: del self.margin_stack[-1] fstack = [m for m in self.margin_stack if m] - if fstack: - margin = fstack[-1] - else: - margin = None + margin = fstack[-1] if fstack else None self.writer.new_margin(margin, len(fstack)) def set_spacing(self, spacing): diff --git a/Lib/fractions.py b/Lib/fractions.py index e4fcc8901b..7823287b79 100644 --- a/Lib/fractions.py +++ b/Lib/fractions.py @@ -24,9 +24,7 @@ def gcd(a, b): warnings.warn('fractions.gcd() is deprecated. 
Use math.gcd() instead.', DeprecationWarning, 2) if type(a) is int is type(b): - if (b or a) < 0: - return -math.gcd(a, b) - return math.gcd(a, b) + return -math.gcd(a, b) if (b or a) < 0 else math.gcd(a, b) return _gcd(a, b) def _gcd(a, b): @@ -137,18 +135,15 @@ def __new__(cls, numerator=0, denominator=None, *, _normalize=True): raise ValueError('Invalid literal for Fraction: %r' % numerator) numerator = int(m.group('num') or '0') - denom = m.group('denom') - if denom: + if denom := m.group('denom'): denominator = int(denom) else: denominator = 1 - decimal = m.group('decimal') - if decimal: + if decimal := m.group('decimal'): scale = 10**len(decimal) numerator = numerator * scale + int(decimal) denominator *= scale - exp = m.group('exp') - if exp: + if exp := m.group('exp'): exp = int(exp) if exp >= 0: numerator *= 10**exp @@ -175,7 +170,7 @@ def __new__(cls, numerator=0, denominator=None, *, _normalize=True): "Rational instances") if denominator == 0: - raise ZeroDivisionError('Fraction(%s, 0)' % numerator) + raise ZeroDivisionError(f'Fraction({numerator}, 0)') if _normalize: if type(numerator) is int is type(denominator): # *very* normal case @@ -274,10 +269,7 @@ def limit_denominator(self, max_denominator=1000000): k = (max_denominator-q0)//q1 bound1 = Fraction(p0+k*p1, q0+k*q1) bound2 = Fraction(p1, q1) - if abs(bound2 - self) <= abs(bound1-self): - return bound2 - else: - return bound1 + return bound2 if abs(bound2 - self) <= abs(bound1-self) else bound1 @property def numerator(a): @@ -289,15 +281,14 @@ def denominator(a): def __repr__(self): """repr(self)""" - return '%s(%s, %s)' % (self.__class__.__name__, - self._numerator, self._denominator) + return f'{self.__class__.__name__}({self._numerator}, {self._denominator})' def __str__(self): """str(self)""" if self._denominator == 1: return str(self._numerator) else: - return '%s/%s' % (self._numerator, self._denominator) + return f'{self._numerator}/{self._denominator}' def 
_operator_fallbacks(monomorphic_operator, fallback_operator): """Generates forward and reverse operators given a purely-rational @@ -388,7 +379,8 @@ def forward(a, b): return fallback_operator(complex(a), b) else: return NotImplemented - forward.__name__ = '__' + fallback_operator.__name__ + '__' + + forward.__name__ = f'__{fallback_operator.__name__}__' forward.__doc__ = monomorphic_operator.__doc__ def reverse(b, a): @@ -401,58 +393,56 @@ def reverse(b, a): return fallback_operator(complex(a), complex(b)) else: return NotImplemented - reverse.__name__ = '__r' + fallback_operator.__name__ + '__' + + reverse.__name__ = f'__r{fallback_operator.__name__}__' reverse.__doc__ = monomorphic_operator.__doc__ return forward, reverse - def _add(a, b): + def _add(self, b): """a + b""" - da, db = a.denominator, b.denominator - return Fraction(a.numerator * db + b.numerator * da, - da * db) + da, db = self.denominator, b.denominator + return Fraction(self.numerator * db + b.numerator * da, da * db) __add__, __radd__ = _operator_fallbacks(_add, operator.add) - def _sub(a, b): + def _sub(self, b): """a - b""" - da, db = a.denominator, b.denominator - return Fraction(a.numerator * db - b.numerator * da, - da * db) + da, db = self.denominator, b.denominator + return Fraction(self.numerator * db - b.numerator * da, da * db) __sub__, __rsub__ = _operator_fallbacks(_sub, operator.sub) - def _mul(a, b): + def _mul(self, b): """a * b""" - return Fraction(a.numerator * b.numerator, a.denominator * b.denominator) + return Fraction(self.numerator * b.numerator, self.denominator * b.denominator) __mul__, __rmul__ = _operator_fallbacks(_mul, operator.mul) - def _div(a, b): + def _div(self, b): """a / b""" - return Fraction(a.numerator * b.denominator, - a.denominator * b.numerator) + return Fraction(self.numerator * b.denominator, self.denominator * b.numerator) __truediv__, __rtruediv__ = _operator_fallbacks(_div, operator.truediv) - def _floordiv(a, b): + def _floordiv(self, b): """a // 
b""" - return (a.numerator * b.denominator) // (a.denominator * b.numerator) + return self.numerator * b.denominator // (self.denominator * b.numerator) __floordiv__, __rfloordiv__ = _operator_fallbacks(_floordiv, operator.floordiv) - def _divmod(a, b): + def _divmod(self, b): """(a // b, a % b)""" - da, db = a.denominator, b.denominator - div, n_mod = divmod(a.numerator * db, da * b.numerator) + da, db = self.denominator, b.denominator + div, n_mod = divmod(self.numerator * db, da * b.numerator) return div, Fraction(n_mod, da * db) __divmod__, __rdivmod__ = _operator_fallbacks(_divmod, divmod) - def _mod(a, b): + def _mod(self, b): """a % b""" - da, db = a.denominator, b.denominator - return Fraction((a.numerator * db) % (b.numerator * da), da * db) + da, db = self.denominator, b.denominator + return Fraction(self.numerator * db % (b.numerator * da), da * db) __mod__, __rmod__ = _operator_fallbacks(_mod, operator.mod) @@ -486,47 +476,44 @@ def __pow__(a, b): else: return float(a) ** b - def __rpow__(b, a): + def __rpow__(self, a): """a ** b""" - if b._denominator == 1 and b._numerator >= 0: + if self._denominator == 1 and self._numerator >= 0: # If a is an int, keep it that way if possible. 
- return a ** b._numerator + return a**self._numerator if isinstance(a, numbers.Rational): - return Fraction(a.numerator, a.denominator) ** b + return Fraction(a.numerator, a.denominator)**self - if b._denominator == 1: - return a ** b._numerator + return a**self._numerator if self._denominator == 1 else a**float(self) - return a ** float(b) - - def __pos__(a): + def __pos__(self): """+a: Coerces a subclass instance to Fraction""" - return Fraction(a._numerator, a._denominator, _normalize=False) + return Fraction(self._numerator, self._denominator, _normalize=False) - def __neg__(a): + def __neg__(self): """-a""" - return Fraction(-a._numerator, a._denominator, _normalize=False) + return Fraction(-self._numerator, self._denominator, _normalize=False) - def __abs__(a): + def __abs__(self): """abs(a)""" - return Fraction(abs(a._numerator), a._denominator, _normalize=False) + return Fraction(abs(self._numerator), self._denominator, _normalize=False) - def __trunc__(a): + def __trunc__(self): """trunc(a)""" - if a._numerator < 0: - return -(-a._numerator // a._denominator) + if self._numerator < 0: + return -(-self._numerator // self._denominator) else: - return a._numerator // a._denominator + return self._numerator // self._denominator - def __floor__(a): + def __floor__(self): """math.floor(a)""" - return a.numerator // a.denominator + return self.numerator // self.denominator - def __ceil__(a): + def __ceil__(self): """math.ceil(a)""" # The negations cleverly convince floordiv to return the ceiling. 
- return -(-a.numerator // a.denominator) + return -(-self.numerator // self.denominator) def __round__(self, ndigits=None): """round(self, ndigits) @@ -535,12 +522,11 @@ def __round__(self, ndigits=None): """ if ndigits is None: floor, remainder = divmod(self.numerator, self.denominator) - if remainder * 2 < self.denominator: - return floor - elif remainder * 2 > self.denominator: - return floor + 1 - # Deal with the half case: - elif floor % 2 == 0: + if ( + remainder * 2 < self.denominator + or remainder * 2 <= self.denominator + and floor % 2 == 0 + ): return floor else: return floor + 1 @@ -556,41 +542,28 @@ def __round__(self, ndigits=None): def __hash__(self): """hash(self)""" - # XXX since this method is expensive, consider caching the result - - # In order to make sure that the hash of a Fraction agrees - # with the hash of a numerically equal integer, float or - # Decimal instance, we follow the rules for numeric hashes - # outlined in the documentation. (See library docs, 'Built-in - # Types'). - - # dinv is the inverse of self._denominator modulo the prime - # _PyHASH_MODULUS, or 0 if self._denominator is divisible by - # _PyHASH_MODULUS. 
- dinv = pow(self._denominator, _PyHASH_MODULUS - 2, _PyHASH_MODULUS) - if not dinv: - hash_ = _PyHASH_INF - else: + if dinv := pow(self._denominator, _PyHASH_MODULUS - 2, _PyHASH_MODULUS): hash_ = abs(self._numerator) * dinv % _PyHASH_MODULUS + else: + hash_ = _PyHASH_INF result = hash_ if self >= 0 else -hash_ return -2 if result == -1 else result - def __eq__(a, b): + def __eq__(self, b): """a == b""" if type(b) is int: - return a._numerator == b and a._denominator == 1 + return self._numerator == b and self._denominator == 1 if isinstance(b, numbers.Rational): - return (a._numerator == b.numerator and - a._denominator == b.denominator) + return self._numerator == b.numerator and self._denominator == b.denominator if isinstance(b, numbers.Complex) and b.imag == 0: b = b.real if isinstance(b, float): if math.isnan(b) or math.isinf(b): # comparisons with an infinity or nan should behave in # the same way for any finite a, so treat a as zero. - return 0.0 == b + return b == 0.0 else: - return a == a.from_float(b) + return self == self.from_float(b) else: # Since a doesn't know how to compare with b, let's give b # a chance to compare itself with a. @@ -618,27 +591,27 @@ def _richcmp(self, other, op): else: return NotImplemented - def __lt__(a, b): + def __lt__(self, b): """a < b""" - return a._richcmp(b, operator.lt) + return self._richcmp(b, operator.lt) - def __gt__(a, b): + def __gt__(self, b): """a > b""" - return a._richcmp(b, operator.gt) + return self._richcmp(b, operator.gt) - def __le__(a, b): + def __le__(self, b): """a <= b""" - return a._richcmp(b, operator.le) + return self._richcmp(b, operator.le) - def __ge__(a, b): + def __ge__(self, b): """a >= b""" - return a._richcmp(b, operator.ge) + return self._richcmp(b, operator.ge) - def __bool__(a): + def __bool__(self): """a != 0""" # bpo-39274: Use bool() because (a._numerator != 0) can return an # object which is not a bool. 
- return bool(a._numerator) + return bool(self._numerator) # support for pickling, copy, and deepcopy diff --git a/Lib/ftplib.py b/Lib/ftplib.py index 58a46bca4a..d558cb305f 100644 --- a/Lib/ftplib.py +++ b/Lib/ftplib.py @@ -337,14 +337,13 @@ def ntransfercmd(self, cmd, rest=None): marker used to tell the server to skip over any data up to the given marker. """ - size = None if self.passiveserver: host, port = self.makepasv() conn = socket.create_connection((host, port), self.timeout, source_address=self.source_address) try: if rest is not None: - self.sendcmd("REST %s" % rest) + self.sendcmd(f"REST {rest}") resp = self.sendcmd(cmd) # Some servers apparently send a 200 reply to # a LIST or STOR command, before the 150 reply @@ -362,7 +361,7 @@ def ntransfercmd(self, cmd, rest=None): else: with self.makeport() as sock: if rest is not None: - self.sendcmd("REST %s" % rest) + self.sendcmd(f"REST {rest}") resp = self.sendcmd(cmd) # See above. if resp[0] == '2': @@ -372,9 +371,7 @@ def ntransfercmd(self, cmd, rest=None): conn, sockaddr = sock.accept() if self.timeout is not _GLOBAL_DEFAULT_TIMEOUT: conn.settimeout(self.timeout) - if resp[:3] == '150': - # this is conditional in case we received a 125 - size = parse150(resp) + size = parse150(resp) if resp[:3] == '150' else None return conn, size def transfercmd(self, cmd, rest=None): @@ -397,12 +394,12 @@ def login(self, user = '', passwd = '', acct = ''): # - We want to stop SPAM # - We don't want to let ftp sites to discriminate by the user, # host or country. 
- passwd = passwd + 'anonymous@' - resp = self.sendcmd('USER ' + user) + passwd = f'{passwd}anonymous@' + resp = self.sendcmd(f'USER {user}') if resp[0] == '3': - resp = self.sendcmd('PASS ' + passwd) + resp = self.sendcmd(f'PASS {passwd}') if resp[0] == '3': - resp = self.sendcmd('ACCT ' + acct) + resp = self.sendcmd(f'ACCT {acct}') if resp[0] != '2': raise error_reply(resp) return resp @@ -424,10 +421,10 @@ def retrbinary(self, cmd, callback, blocksize=8192, rest=None): self.voidcmd('TYPE I') with self.transfercmd(cmd, rest) as conn: while 1: - data = conn.recv(blocksize) - if not data: + if data := conn.recv(blocksize): + callback(data) + else: break - callback(data) # shutdown ssl layer if _SSLSocket is not None and isinstance(conn, _SSLSocket): conn.unwrap() @@ -530,14 +527,14 @@ def storlines(self, cmd, fp, callback=None): def acct(self, password): '''Send new account name.''' - cmd = 'ACCT ' + password + cmd = f'ACCT {password}' return self.voidcmd(cmd) def nlst(self, *args): '''Return a list of files in a given directory (default the current).''' cmd = 'NLST' for arg in args: - cmd = cmd + (' ' + arg) + cmd = f'{cmd} {arg}' files = [] self.retrlines(cmd, files.append) return files @@ -554,7 +551,7 @@ def dir(self, *args): args, func = args[:-1], args[-1] for arg in args: if arg: - cmd = cmd + (' ' + arg) + cmd = f'{cmd} {arg}' self.retrlines(cmd, func) def mlsd(self, path="", facts=[]): @@ -571,10 +568,7 @@ def mlsd(self, path="", facts=[]): ''' if facts: self.sendcmd("OPTS MLST " + ";".join(facts) + ";") - if path: - cmd = "MLSD %s" % path - else: - cmd = "MLSD" + cmd = f"MLSD {path}" if path else "MLSD" lines = [] self.retrlines(cmd, lines.append) for line in lines: @@ -587,14 +581,14 @@ def mlsd(self, path="", facts=[]): def rename(self, fromname, toname): '''Rename a file.''' - resp = self.sendcmd('RNFR ' + fromname) + resp = self.sendcmd(f'RNFR {fromname}') if resp[0] != '3': raise error_reply(resp) - return self.voidcmd('RNTO ' + toname) + return 
self.voidcmd(f'RNTO {toname}') def delete(self, filename): '''Delete a file.''' - resp = self.sendcmd('DELE ' + filename) + resp = self.sendcmd(f'DELE {filename}') if resp[:3] in {'250', '200'}: return resp else: @@ -610,38 +604,34 @@ def cwd(self, dirname): raise elif dirname == '': dirname = '.' # does nothing, but could return error - cmd = 'CWD ' + dirname + cmd = f'CWD {dirname}' return self.voidcmd(cmd) def size(self, filename): '''Retrieve the size of a file.''' # The SIZE command is defined in RFC-3659 - resp = self.sendcmd('SIZE ' + filename) + resp = self.sendcmd(f'SIZE {filename}') if resp[:3] == '213': s = resp[3:].strip() return int(s) def mkd(self, dirname): '''Make a directory, return its full pathname.''' - resp = self.voidcmd('MKD ' + dirname) + resp = self.voidcmd(f'MKD {dirname}') # fix around non-compliant implementations such as IIS shipped # with Windows server 2003 - if not resp.startswith('257'): - return '' - return parse257(resp) + return '' if not resp.startswith('257') else parse257(resp) def rmd(self, dirname): '''Remove a directory.''' - return self.voidcmd('RMD ' + dirname) + return self.voidcmd(f'RMD {dirname}') def pwd(self): '''Return current working directory.''' resp = self.voidcmd('PWD') # fix around non-compliant implementations such as IIS shipped # with Windows server 2003 - if not resp.startswith('257'): - return '' - return parse257(resp) + return '' if not resp.startswith('257') else parse257(resp) def quit(self): '''Quit, and close the connection.''' @@ -812,9 +802,7 @@ def parse150(resp): _150_re = re.compile( r"150 .* \((\d+) bytes\)", re.IGNORECASE | re.ASCII) m = _150_re.match(resp) - if not m: - return None - return int(m.group(1)) + return None if not m else int(m.group(1)) _227_re = None @@ -875,11 +863,11 @@ def parse257(resp): n = len(resp) while i < n: c = resp[i] - i = i+1 + i += 1 if c == '"': if i >= n or resp[i] != '"': break - i = i+1 + i += 1 dirname = dirname + c return dirname @@ -893,7 +881,7 @@ def
ftpcp(source, sourcename, target, targetname = '', type = 'I'): '''Copy file from one FTP-instance to another.''' if not targetname: targetname = sourcename - type = 'TYPE ' + type + type = f'TYPE {type}' source.voidcmd(type) target.voidcmd(type) sourcehost, sourceport = parse227(source.sendcmd('PASV')) @@ -901,10 +889,10 @@ def ftpcp(source, sourcename, target, targetname = '', type = 'I'): # RFC 959: the user must "listen" [...] BEFORE sending the # transfer request. # So: STOR before RETR, because here the target is a "user". - treply = target.sendcmd('STOR ' + targetname) + treply = target.sendcmd(f'STOR {targetname}') if treply[:3] not in {'125', '150'}: raise error_proto # RFC 959 - sreply = source.sendcmd('RETR ' + sourcename) + sreply = source.sendcmd(f'RETR {sourcename}') if sreply[:3] not in {'125', '150'}: raise error_proto # RFC 959 source.voidresp() @@ -958,13 +946,13 @@ def test(): ftp.dir(file[2:]) elif file[:2] == '-d': cmd = 'CWD' - if file[2:]: cmd = cmd + ' ' + file[2:] + if file[2:]: + cmd = f'{cmd} {file[2:]}' resp = ftp.sendcmd(cmd) elif file == '-p': ftp.set_pasv(not ftp.passiveserver) else: - ftp.retrbinary('RETR ' + file, \ - sys.stdout.write, 1024) + ftp.retrbinary(f'RETR {file}', sys.stdout.write, 1024) ftp.quit() diff --git a/Lib/functools.py b/Lib/functools.py index 8decc874e1..e56ab3b043 100644 --- a/Lib/functools.py +++ b/Lib/functools.py @@ -101,9 +101,7 @@ def _le_from_lt(self, other, NotImplemented=NotImplemented): def _ge_from_lt(self, other, NotImplemented=NotImplemented): 'Return a >= b. Computed by @total_ordering from (not a < b).' op_result = self.__lt__(other) - if op_result is NotImplemented: - return op_result - return not op_result + return op_result if op_result is NotImplemented else not op_result def _ge_from_le(self, other, NotImplemented=NotImplemented): 'Return a >= b. Computed by @total_ordering from (not a <= b) or (a == b).' 
@@ -122,9 +120,7 @@ def _lt_from_le(self, other, NotImplemented=NotImplemented): def _gt_from_le(self, other, NotImplemented=NotImplemented): 'Return a > b. Computed by @total_ordering from (not a <= b).' op_result = self.__le__(other) - if op_result is NotImplemented: - return op_result - return not op_result + return op_result if op_result is NotImplemented else not op_result def _lt_from_gt(self, other, NotImplemented=NotImplemented): 'Return a < b. Computed by @total_ordering from (not a > b) and (a != b).' @@ -141,9 +137,7 @@ def _ge_from_gt(self, other, NotImplemented=NotImplemented): def _le_from_gt(self, other, NotImplemented=NotImplemented): 'Return a <= b. Computed by @total_ordering from (not a > b).' op_result = self.__gt__(other) - if op_result is NotImplemented: - return op_result - return not op_result + return op_result if op_result is NotImplemented else not op_result def _le_from_ge(self, other, NotImplemented=NotImplemented): 'Return a <= b. Computed by @total_ordering from (not a >= b) or (a == b).' @@ -162,9 +156,7 @@ def _gt_from_ge(self, other, NotImplemented=NotImplemented): def _lt_from_ge(self, other, NotImplemented=NotImplemented): 'Return a < b. Computed by @total_ordering from (not a >= b).' op_result = self.__ge__(other) - if op_result is NotImplemented: - return op_result - return not op_result + return op_result if op_result is NotImplemented else not op_result _convert = { '__lt__': [('__gt__', _gt_from_lt), @@ -701,23 +693,23 @@ def _c3_mro(cls, abcs=None): resulting MRO, their ordering depends on the order of types in *abcs*. """ - for i, base in enumerate(reversed(cls.__bases__)): - if hasattr(base, '__abstractmethods__'): - boundary = len(cls.__bases__) - i - break # Bases up to the last explicit ABC are considered first. 
- else: - boundary = 0 + boundary = next( + ( + len(cls.__bases__) - i + for i, base in enumerate(reversed(cls.__bases__)) + if hasattr(base, '__abstractmethods__') + ), + 0, + ) abcs = list(abcs) if abcs else [] explicit_bases = list(cls.__bases__[:boundary]) - abstract_bases = [] other_bases = list(cls.__bases__[boundary:]) - for base in abcs: - if issubclass(cls, base) and not any( - issubclass(b, base) for b in cls.__bases__ - ): - # If *cls* is the class that introduces behaviour described by - # an ABC *base*, insert said ABC to its MRO. - abstract_bases.append(base) + abstract_bases = [ + base + for base in abcs + if issubclass(cls, base) + and not any(issubclass(b, base) for b in cls.__bases__) + ] for base in abstract_bases: abcs.remove(base) explicit_c3_mros = [_c3_mro(base, abcs=abcs) for base in explicit_bases] @@ -789,8 +781,7 @@ def _find_impl(cls, registry): if (t in registry and t not in cls.__mro__ and match not in cls.__mro__ and not issubclass(match, t)): - raise RuntimeError("Ambiguous dispatch: {} or {}".format( - match, t)) + raise RuntimeError(f"Ambiguous dispatch: {match} or {t}") break if t in registry: match = t diff --git a/Lib/genericpath.py b/Lib/genericpath.py index 309759af25..01192a5cf2 100644 --- a/Lib/genericpath.py +++ b/Lib/genericpath.py @@ -80,10 +80,7 @@ def commonprefix(m): m = tuple(map(os.fspath, m)) s1 = min(m) s2 = max(m) - for i, c in enumerate(s1): - if c != s2[i]: - return s1[:i] - return s1 + return next((s1[:i] for i, c in enumerate(s1) if c != s2[i]), s1) # Are two stat buffers (obtained from stat, fstat or lstat) # describing the same file? 
diff --git a/Lib/getopt.py b/Lib/getopt.py index 9d4cab1bac..5f6e1f68b7 100644 --- a/Lib/getopt.py +++ b/Lib/getopt.py @@ -81,10 +81,7 @@ def getopt(args, shortopts, longopts = []): """ opts = [] - if type(longopts) == type(""): - longopts = [longopts] - else: - longopts = list(longopts) + longopts = [longopts] if type(longopts) == type("") else list(longopts) while args and args[0].startswith('-') and args[0] != '-': if args[0] == '--': args = args[1:] @@ -113,11 +110,7 @@ def gnu_getopt(args, shortopts, longopts = []): opts = [] prog_args = [] - if isinstance(longopts, str): - longopts = [longopts] - else: - longopts = list(longopts) - + longopts = [longopts] if isinstance(longopts, str) else list(longopts) # Allow options after non-option arguments? if shortopts.startswith('+'): shortopts = shortopts[1:] @@ -136,13 +129,12 @@ def gnu_getopt(args, shortopts, longopts = []): opts, args = do_longs(opts, args[0][2:], longopts, args[1:]) elif args[0][:1] == '-' and args[0] != '-': opts, args = do_shorts(opts, args[0][1:], shortopts, args[1:]) + elif all_options_first: + prog_args += args + break else: - if all_options_first: - prog_args += args - break - else: - prog_args.append(args[0]) - args = args[1:] + prog_args.append(args[0]) + args = args[1:] return opts, prog_args @@ -162,7 +154,7 @@ def do_longs(opts, opt, longopts, args): optarg, args = args[0], args[1:] elif optarg is not None: raise GetoptError(_('option --%s must not have an argument') % opt, opt) - opts.append(('--' + opt, optarg or '')) + opts.append((f'--{opt}', optarg or '')) return opts, args # Return: @@ -175,7 +167,7 @@ def long_has_args(opt, longopts): # Is there an exact match? if opt in possibilities: return False, opt - elif opt + '=' in possibilities: + elif f'{opt}=' in possibilities: return True, opt # No exact match, so better be unique. 
if len(possibilities) > 1: @@ -201,7 +193,7 @@ def do_shorts(opts, optstring, shortopts, args): optarg, optstring = optstring, '' else: optarg = '' - opts.append(('-' + opt, optarg)) + opts.append((f'-{opt}', optarg)) return opts, args def short_has_arg(opt, shortopts): diff --git a/Lib/getpass.py b/Lib/getpass.py index 6970d8adfb..29b753e640 100644 --- a/Lib/getpass.py +++ b/Lib/getpass.py @@ -104,14 +104,11 @@ def win_getpass(prompt='Password: ', stream=None): pw = "" while 1: c = msvcrt.getwch() - if c == '\r' or c == '\n': + if c in ['\r', '\n']: break if c == '\003': raise KeyboardInterrupt - if c == '\b': - pw = pw[:-1] - else: - pw = pw + c + pw = pw[:-1] if c == '\b' else pw + c msvcrt.putwch('\r') msvcrt.putwch('\n') return pw @@ -132,8 +129,7 @@ def _raw_input(prompt="", stream=None, input=None): stream = sys.stderr if not input: input = sys.stdin - prompt = str(prompt) - if prompt: + if prompt := str(prompt): try: stream.write(prompt) except UnicodeEncodeError: @@ -160,8 +156,7 @@ def getuser(): """ for name in ('LOGNAME', 'USER', 'LNAME', 'USERNAME'): - user = os.environ.get(name) - if user: + if user := os.environ.get(name): return user # If this fails, the exception will "explain" why diff --git a/Lib/gettext.py b/Lib/gettext.py index 4c3b80b023..a031526506 100644 --- a/Lib/gettext.py +++ b/Lib/gettext.py @@ -90,13 +90,13 @@ def _tokenize(plural): continue value = mo.group(kind) if kind == 'INVALID': - raise ValueError('invalid token in plural form: %s' % value) + raise ValueError(f'invalid token in plural form: {value}') yield value yield '' def _error(value): if value: - return ValueError('unexpected token in plural form: %s' % value) + return ValueError(f'unexpected token in plural form: {value}') else: return ValueError('unexpected end of plural form') @@ -120,11 +120,11 @@ def _parse(tokens, priority=-1): if nexttok == '(': sub, nexttok = _parse(tokens) - result = '%s(%s)' % (result, sub) + result = f'{result}({sub})' if nexttok != ')': raise 
ValueError('unbalanced parenthesis in plural form') elif nexttok == 'n': - result = '%s%s' % (result, nexttok) + result = f'{result}{nexttok}' else: try: value = int(nexttok, 10) @@ -140,23 +140,23 @@ def _parse(tokens, priority=-1): break # Break chained comparisons if i in (3, 4) and j in (3, 4): # '==', '!=', '<', '>', '<=', '>=' - result = '(%s)' % result + result = f'({result})' # Replace some C operators by their Python equivalents op = _c2py_ops.get(nexttok, nexttok) right, nexttok = _parse(tokens, i + 1) - result = '%s %s %s' % (result, op, right) + result = f'{result} {op} {right}' j = i if j == priority == 4: # '<', '>', '<=', '>=' - result = '(%s)' % result + result = f'({result})' if nexttok == '?' and priority <= 0: if_true, nexttok = _parse(tokens, 0) if nexttok != ':': raise _error(nexttok) if_false, nexttok = _parse(tokens) - result = '%s if %s else %s' % (if_true, result, if_false) + result = f'{if_true} if {result} else {if_false}' if priority == 0: - result = '(%s)' % result + result = f'({result})' return result, nexttok @@ -164,12 +164,15 @@ def _as_int(n): try: i = round(n) except TypeError: - raise TypeError('Plural value must be an integer, got %s' % - (n.__class__.__name__,)) from None + raise TypeError( + f'Plural value must be an integer, got {n.__class__.__name__}' + ) from None import warnings - warnings.warn('Plural value must be an integer, got %s' % - (n.__class__.__name__,), - DeprecationWarning, 4) + warnings.warn( + f'Plural value must be an integer, got {n.__class__.__name__}', + DeprecationWarning, + 4, + ) return n def c2py(plural): @@ -269,9 +272,7 @@ def add_fallback(self, fallback): self._fallback = fallback def gettext(self, message): - if self._fallback: - return self._fallback.gettext(message) - return message + return self._fallback.gettext(message) if self._fallback else message def lgettext(self, message): if self._fallback: @@ -283,18 +284,12 @@ def lgettext(self, message): def ngettext(self, msgid1, msgid2, n): if 
self._fallback: return self._fallback.ngettext(msgid1, msgid2, n) - if n == 1: - return msgid1 - else: - return msgid2 + return msgid1 if n == 1 else msgid2 def lngettext(self, msgid1, msgid2, n): if self._fallback: return self._fallback.lngettext(msgid1, msgid2, n) - if n == 1: - tmsg = msgid1 - else: - tmsg = msgid2 + tmsg = msgid1 if n == 1 else msgid2 if self._output_charset: return tmsg.encode(self._output_charset) return tmsg.encode(locale.getpreferredencoding()) @@ -363,7 +358,7 @@ def _parse(self, fp): major_version, minor_version = self._get_versions(version) if major_version not in self.VERSIONS: - raise OSError(0, 'Bad version number ' + str(major_version), filename) + raise OSError(0, f'Bad version number {str(major_version)}', filename) # Now put all messages from the .mo file buffer into the catalog # dictionary. @@ -372,11 +367,10 @@ def _parse(self, fp): mend = moff + mlen tlen, toff = unpack(ii, buf[transidx:transidx+8]) tend = toff + tlen - if mend < buflen and tend < buflen: - msg = buf[moff:mend] - tmsg = buf[toff:tend] - else: + if mend >= buflen or tend >= buflen: raise OSError(0, 'File is corrupt', filename) + msg = buf[moff:mend] + tmsg = buf[toff:tend] # See if we're looking at GNU .mo conventions for metadata if mlen == 0: # Catalog description @@ -440,10 +434,7 @@ def lngettext(self, msgid1, msgid2, n): except KeyError: if self._fallback: return self._fallback.lngettext(msgid1, msgid2, n) - if n == 1: - tmsg = msgid1 - else: - tmsg = msgid2 + tmsg = msgid1 if n == 1 else msgid2 if self._output_charset: return tmsg.encode(self._output_charset) return tmsg.encode(locale.getpreferredencoding()) @@ -452,9 +443,7 @@ def gettext(self, message): missing = object() tmsg = self._catalog.get(message, missing) if tmsg is missing: - if self._fallback: - return self._fallback.gettext(message) - return message + return self._fallback.gettext(message) if self._fallback else message return tmsg def ngettext(self, msgid1, msgid2, n): @@ -463,10 +452,7 @@ 
def ngettext(self, msgid1, msgid2, n): except KeyError: if self._fallback: return self._fallback.ngettext(msgid1, msgid2, n) - if n == 1: - tmsg = msgid1 - else: - tmsg = msgid2 + tmsg = msgid1 if n == 1 else msgid2 return tmsg @@ -478,8 +464,7 @@ def find(domain, localedir=None, languages=None, all=False): if languages is None: languages = [] for envar in ('LANGUAGE', 'LC_ALL', 'LC_MESSAGES', 'LANG'): - val = os.environ.get(envar) - if val: + if val := os.environ.get(envar): languages = val.split(':') break if 'C' not in languages: @@ -491,14 +476,11 @@ def find(domain, localedir=None, languages=None, all=False): if nelang not in nelangs: nelangs.append(nelang) # select a language - if all: - result = [] - else: - result = None + result = [] if all else None for lang in nelangs: if lang == 'C': break - mofile = os.path.join(localedir, lang, 'LC_MESSAGES', '%s.mo' % domain) + mofile = os.path.join(localedir, lang, 'LC_MESSAGES', f'{domain}.mo') if os.path.exists(mofile): if all: result.append(mofile) @@ -603,10 +585,7 @@ def dngettext(domain, msgid1, msgid2, n): t = translation(domain, _localedirs.get(domain, None), codeset=_localecodesets.get(domain)) except OSError: - if n == 1: - return msgid1 - else: - return msgid2 + return msgid1 if n == 1 else msgid2 return t.ngettext(msgid1, msgid2, n) def ldngettext(domain, msgid1, msgid2, n): @@ -614,10 +593,7 @@ def ldngettext(domain, msgid1, msgid2, n): try: t = translation(domain, _localedirs.get(domain, None), codeset=codeset) except OSError: - if n == 1: - tmsg = msgid1 - else: - tmsg = msgid2 + tmsg = msgid1 if n == 1 else msgid2 return tmsg.encode(codeset or locale.getpreferredencoding()) return t.lngettext(msgid1, msgid2, n) diff --git a/Lib/glob.py b/Lib/glob.py index a7256422d5..f9c172720d 100644 --- a/Lib/glob.py +++ b/Lib/glob.py @@ -42,19 +42,13 @@ def iglob(pathname, *, root_dir=None, dir_fd=None, recursive=False, """ sys.audit("glob.glob", pathname, recursive) sys.audit("glob.glob/2", pathname, recursive, 
root_dir, dir_fd) - if root_dir is not None: - root_dir = os.fspath(root_dir) - else: - root_dir = pathname[:0] + root_dir = os.fspath(root_dir) if root_dir is not None else pathname[:0] it = _iglob(pathname, root_dir, dir_fd, recursive, False, include_hidden=include_hidden) if not pathname or recursive and _isrecursive(pathname[:2]): - try: - s = next(it) # skip empty string - if s: + with contextlib.suppress(StopIteration): + if s := next(it): it = itertools.chain((s,), it) - except StopIteration: - pass return it def _iglob(pathname, root_dir, dir_fd, recursive, dironly, @@ -62,13 +56,13 @@ def _iglob(pathname, root_dir, dir_fd, recursive, dironly, dirname, basename = os.path.split(pathname) if not has_magic(pathname): assert not dironly - if basename: - if _lexists(_join(root_dir, pathname), dir_fd): - yield pathname - else: - # Patterns ending with a slash should match only directories - if _isdir(_join(root_dir, dirname), dir_fd): - yield pathname + if ( + basename + and _lexists(_join(root_dir, pathname), dir_fd) + or not basename + and _isdir(_join(root_dir, dirname), dir_fd) + ): + yield pathname return if not dirname: if recursive and _isrecursive(basename): @@ -87,10 +81,7 @@ def _iglob(pathname, root_dir, dir_fd, recursive, dironly, else: dirs = [dirname] if has_magic(basename): - if recursive and _isrecursive(basename): - glob_in_dir = _glob2 - else: - glob_in_dir = _glob1 + glob_in_dir = _glob2 if recursive and _isrecursive(basename) else _glob1 else: glob_in_dir = _glob0 for dirname in dirs: @@ -109,14 +100,13 @@ def _glob1(dirname, pattern, dir_fd, dironly, include_hidden=False): return fnmatch.filter(names, pattern) def _glob0(dirname, basename, dir_fd, dironly, include_hidden=False): - if basename: - if _lexists(_join(dirname, basename), dir_fd): - return [basename] - else: - # `os.path.split()` returns an empty basename for paths ending with a - # directory separator. 'q*x/' should match only directories. 
- if _isdir(dirname, dir_fd): - return [basename] + if ( + basename + and _lexists(_join(dirname, basename), dir_fd) + or not basename + and _isdir(dirname, dir_fd) + ): + return [basename] return [] # Following functions are not public but can be used by third-party code. @@ -158,14 +148,12 @@ def _iterdir(dirname, dir_fd, dironly): try: with os.scandir(arg) as it: for entry in it: - try: + with contextlib.suppress(OSError): if not dironly or entry.is_dir(): if fsencode is not None: yield fsencode(entry.name) else: yield entry.name - except OSError: - pass finally: if fd is not None: os.close(fd) @@ -230,10 +218,7 @@ def _ishidden(path): return path[0] in ('.', b'.'[0]) def _isrecursive(pattern): - if isinstance(pattern, bytes): - return pattern == b'**' - else: - return pattern == '**' + return pattern == b'**' if isinstance(pattern, bytes) else pattern == '**' def escape(pathname): """Escape all special characters. diff --git a/Lib/graphlib.py b/Lib/graphlib.py index 636545648e..1639ecaa94 100644 --- a/Lib/graphlib.py +++ b/Lib/graphlib.py @@ -97,13 +97,8 @@ def prepare(self): self._ready_nodes = [ i.node for i in self._node2info.values() if i.npredecessors == 0 ] - # ready_nodes is set before we look for cycles on purpose: - # if the user wants to catch the CycleError, that's fine, - # they can continue using the instance to grab as many - # nodes as possible before cycles block more progress - cycle = self._find_cycle() - if cycle: - raise CycleError(f"nodes are in a cycle", cycle) + if cycle := self._find_cycle(): + raise CycleError("nodes are in a cycle", cycle) def get_ready(self): """Return a tuple of all the nodes that are ready. 
diff --git a/Lib/gzip.py b/Lib/gzip.py index 5b20e5ba69..c13e880713 100644 --- a/Lib/gzip.py +++ b/Lib/gzip.py @@ -86,15 +86,14 @@ def __init__(self, f, prepend=b''): def read(self, size): if self._read is None: return self.file.read(size) + read = self._read if self._read + size <= self._length: - read = self._read self._read += size return self._buffer[read:self._read] else: - read = self._read self._read = None return self._buffer[read:] + \ - self.file.read(size-self._length+read) + self.file.read(size-self._length+read) def prepend(self, prepend=b''): if self._read is None: @@ -217,7 +216,7 @@ def filename(self): import warnings warnings.warn("use the name attribute", DeprecationWarning, 2) if self.mode == WRITE and self.name[-3:] != ".gz": - return self.name + ".gz" + return f"{self.name}.gz" return self.name @property @@ -227,7 +226,7 @@ def mtime(self): def __repr__(self): s = repr(self.fileobj) - return '' + return f'' def _init_write(self, filename): self.name = filename @@ -250,9 +249,7 @@ def _write_gzip_header(self, compresslevel): fname = fname[:-3] except UnicodeEncodeError: fname = b'' - flags = 0 - if fname: - flags = FNAME + flags = FNAME if fname else 0 self.fileobj.write(chr(flags).encode('latin-1')) mtime = self._write_mtime if mtime is None: @@ -338,8 +335,7 @@ def close(self): elif self.mode == READ: self._buffer.close() finally: - myfileobj = self.myfileobj - if myfileobj: + if myfileobj := self.myfileobj: self.myfileobj = None myfileobj.close() @@ -385,7 +381,7 @@ def seek(self, offset, whence=io.SEEK_SET): raise OSError('Negative seek in write mode') count = offset - self.offset chunk = b'\0' * 1024 - for i in range(count // 1024): + for _ in range(count // 1024): self.write(chunk) self.write(b'\0' * (count % 1024)) elif self.mode == READ: @@ -407,11 +403,11 @@ def _read_exact(fp, n): ''' data = fp.read(n) while len(data) < n: - b = fp.read(n - len(data)) - if not b: + if b := fp.read(n - len(data)): + data += b + else: raise 
EOFError("Compressed file ended before the " "end-of-stream marker was reached") - data += b return data @@ -533,8 +529,7 @@ def _read_eof(self): # stored is the true file size mod 2**32. crc32, isize = struct.unpack("', @@ -54,7 +55,7 @@ # We try to get them from dis to avoid duplication mod_dict = globals() for k, v in dis.COMPILER_FLAG_NAMES.items(): - mod_dict["CO_" + v] = k + mod_dict[f"CO_{v}"] = k # See Include/object.h TPFLAGS_IS_ABSTRACT = 1 << 20 @@ -175,9 +176,7 @@ def _has_code_flag(f, flag): while ismethod(f): f = f.__func__ f = functools._unwrap_partial(f) - if not isfunction(f): - return False - return bool(f.__code__.co_flags & flag) + return False if not isfunction(f) else bool(f.__code__.co_flags & flag) def isgeneratorfunction(obj): """Return true if the object is a user-defined generator function. @@ -328,10 +327,7 @@ def isabstract(object): def getmembers(object, predicate=None): """Return all members of an object as (name, value) pairs sorted by name. Optionally, only return members that satisfy a given predicate.""" - if isclass(object): - mro = (object,) + getmro(object) - else: - mro = () + mro = (object,) + getmro(object) if isclass(object) else () results = [] processed = set() names = dir(object) @@ -340,9 +336,11 @@ def getmembers(object, predicate=None): # attribute with the same name as a DynamicClassAttribute exists try: for base in object.__bases__: - for k, v in base.__dict__.items(): - if isinstance(v, types.DynamicClassAttribute): - names.append(k) + names.extend( + k + for k, v in base.__dict__.items() + if isinstance(v, types.DynamicClassAttribute) + ) except AttributeError: pass for key in names: @@ -408,9 +406,11 @@ def classify_class_attrs(cls): # this may result in duplicate entries if, for example, a virtual # attribute with the same name as a DynamicClassAttribute exists. 
for base in mro: - for k, v in base.__dict__.items(): - if isinstance(v, types.DynamicClassAttribute): - names.append(k) + names.extend( + k + for k, v in base.__dict__.items() + if isinstance(v, types.DynamicClassAttribute) + ) result = [] processed = set() @@ -540,9 +540,7 @@ def _findclass(func): return None for name in func.__qualname__.split('.')[:-1]: cls = getattr(cls, name) - if not isclass(cls): - return None - return cls + return None if not isclass(cls) else cls def _finddoc(obj): if isclass(obj): @@ -573,13 +571,11 @@ def _finddoc(obj): elif isbuiltin(obj): name = obj.__name__ self = obj.__self__ - if (isclass(self) and - self.__qualname__ + '.' + name == obj.__qualname__): + if isclass(self) and f'{self.__qualname__}.{name}' == obj.__qualname__: # classmethod cls = self else: cls = self.__class__ - # Should be tested before isdatadescriptor(). elif isinstance(obj, property): func = obj.fget name = func.__name__ @@ -621,9 +617,7 @@ def getdoc(object): doc = _finddoc(object) except (AttributeError, TypeError): return None - if not isinstance(doc, str): - return None - return cleandoc(doc) + return None if not isinstance(doc, str) else cleandoc(doc) def cleandoc(doc): """Clean up indentation from docstrings. @@ -638,8 +632,7 @@ def cleandoc(doc): # Find minimum indentation of any non-blank lines after first line. margin = sys.maxsize for line in lines[1:]: - content = len(line.lstrip()) - if content: + if content := len(line.lstrip()): indent = len(line) - content margin = min(margin, indent) # Remove indentation. 
@@ -676,9 +669,9 @@ def getfile(object): object = object.f_code if iscode(object): return object.co_filename - raise TypeError('module, class, method, function, traceback, frame, or ' - 'code object was expected, got {}'.format( - type(object).__name__)) + raise TypeError( + f'module, class, method, function, traceback, frame, or code object was expected, got {type(object).__name__}' + ) def getmodulename(path): """Return the module name for a given file, or None.""" @@ -687,10 +680,14 @@ def getmodulename(path): suffixes = [(-len(suffix), suffix) for suffix in importlib.machinery.all_suffixes()] suffixes.sort() # try longest suffixes first, in case they overlap - for neglen, suffix in suffixes: - if fname.endswith(suffix): - return fname[:neglen] - return None + return next( + ( + fname[:neglen] + for neglen, suffix in suffixes + if fname.endswith(suffix) + ), + None, + ) def getsourcefile(object): """Return the filename that can be used to locate an object's source. @@ -792,8 +789,7 @@ def findsource(object): if not (file.startswith('<') and file.endswith('>')): raise OSError('source code not available') - module = getmodule(object, file) - if module: + if module := getmodule(object, file): lines = linecache.getlines(file, module.__dict__) else: lines = linecache.getlines(file) @@ -811,21 +807,19 @@ def findsource(object): # that's most probably not inside a function definition. 
candidates = [] for i in range(len(lines)): - match = pat.match(lines[i]) - if match: + if match := pat.match(lines[i]): # if it's at toplevel, it's already the best one if lines[i][0] == 'c': return lines, i # else add whitespace to candidate list - candidates.append((match.group(1), i)) - if candidates: - # this will sort by whitespace, and by line number, - # less whitespace first - candidates.sort() - return lines, candidates[0][1] - else: + candidates.append((match[1], i)) + if not candidates: raise OSError('could not find class definition') + # this will sort by whitespace, and by line number, + # less whitespace first + candidates.sort() + return lines, candidates[0][1] if ismethod(object): object = object.__func__ if isfunction(object): @@ -856,20 +850,17 @@ def getcomments(object): return None if ismodule(object): - # Look for a comment block at the top of the file. - start = 0 - if lines and lines[0][:2] == '#!': start = 1 + start = 1 if lines and lines[0][:2] == '#!' else 0 while start < len(lines) and lines[start].strip() in ('', '#'): - start = start + 1 + start += 1 if start < len(lines) and lines[start][:1] == '#': comments = [] end = start while end < len(lines) and lines[end][:1] == '#': comments.append(lines[end].expandtabs()) - end = end + 1 + end += 1 return ''.join(comments) - # Look for a preceding block of comments at the same indentation. 
elif lnum > 0: indent = indentsize(lines[lnum]) end = lnum - 1 @@ -1020,9 +1011,7 @@ def getclasstree(classes, unique=False): if unique and parent in classes: break elif c not in roots: roots.append(c) - for parent in children: - if parent not in classes: - roots.append(parent) + roots.extend(parent for parent in children if parent not in classes) return walktree(roots, children, None) # ------------------------------------------------ argument list extraction @@ -1050,9 +1039,7 @@ def getargs(co): if co.co_flags & CO_VARARGS: varargs = co.co_varnames[nargs] nargs = nargs + 1 - varkw = None - if co.co_flags & CO_VARKEYWORDS: - varkw = co.co_varnames[nargs] + varkw = co.co_varnames[nargs] if co.co_flags & CO_VARKEYWORDS else None return Arguments(args + kwonlyargs, varargs, varkw) ArgSpec = namedtuple('ArgSpec', 'args varargs keywords defaults') @@ -1201,7 +1188,7 @@ def formatannotation(annotation, base_module=None): if isinstance(annotation, type): if annotation.__module__ in ('builtins', base_module): return annotation.__qualname__ - return annotation.__module__+'.'+annotation.__qualname__ + return f'{annotation.__module__}.{annotation.__qualname__}' return repr(annotation) def formatannotationrelativeto(object): @@ -1210,14 +1197,7 @@ def _formatannotation(annotation): return formatannotation(annotation, module) return _formatannotation -def formatargspec(args, varargs=None, varkw=None, defaults=None, - kwonlyargs=(), kwonlydefaults={}, annotations={}, - formatarg=str, - formatvarargs=lambda name: '*' + name, - formatvarkw=lambda name: '**' + name, - formatvalue=lambda value: '=' + repr(value), - formatreturns=lambda text: ' -> ' + text, - formatannotation=formatannotation): +def formatargspec(args, varargs=None, varkw=None, defaults=None, kwonlyargs=(), kwonlydefaults={}, annotations={}, formatarg=str, formatvarargs=lambda name: f'*{name}', formatvarkw=lambda name: f'**{name}', formatvalue=lambda value: f'={repr(value)}', formatreturns=lambda text: f' -> 
{text}', formatannotation=formatannotation): """Format an argument spec from the values returned by getfullargspec. The first seven arguments are (args, varargs, varkw, defaults, @@ -1240,8 +1220,9 @@ def formatargspec(args, varargs=None, varkw=None, defaults=None, def formatargandannotation(arg): result = formatarg(arg) if arg in annotations: - result += ': ' + formatannotation(annotations[arg]) + result += f': {formatannotation(annotations[arg])}' return result + specs = [] if defaults: firstdefault = len(args) - len(defaults) @@ -1268,11 +1249,7 @@ def formatargandannotation(arg): result += formatreturns(formatannotation(annotations['return'])) return result -def formatargvalues(args, varargs, varkw, locals, - formatarg=str, - formatvarargs=lambda name: '*' + name, - formatvarkw=lambda name: '**' + name, - formatvalue=lambda value: '=' + repr(value)): +def formatargvalues(args, varargs, varkw, locals, formatarg=str, formatvarargs=lambda name: f'*{name}', formatvarkw=lambda name: f'**{name}', formatvalue=lambda value: f'={repr(value)}'): """Format an argument spec from the 4 values returned by getargvalues. The first four arguments are (args, varargs, varkw, locals). 
The @@ -1337,9 +1314,6 @@ def getcallargs(func, /, *positional, **named): spec = getfullargspec(func) args, varargs, varkw, defaults, kwonlyargs, kwonlydefaults, ann = spec f_name = func.__name__ - arg2value = {} - - if ismethod(func) and func.__self__ is not None: # implicit 'self' (or 'cls' for classmethods) argument positional = (func.__self__,) + positional @@ -1348,8 +1322,7 @@ def getcallargs(func, /, *positional, **named): num_defaults = len(defaults) if defaults else 0 n = min(num_pos, num_args) - for i in range(n): - arg2value[args[i]] = positional[i] + arg2value = {args[i]: positional[i] for i in range(n)} if varargs: arg2value[varargs] = tuple(positional[n:]) possible_kwargs = set(args + kwonlyargs) @@ -1561,9 +1534,11 @@ def _shadowed_dict(klass): except KeyError: pass else: - if not (type(class_dict) is types.GetSetDescriptorType and - class_dict.__name__ == "__dict__" and - class_dict.__objclass__ is entry): + if ( + type(class_dict) is not types.GetSetDescriptorType + or class_dict.__name__ != "__dict__" + or class_dict.__objclass__ is not entry + ): return class_dict return _sentinel @@ -1633,9 +1608,7 @@ def getgeneratorstate(generator): return GEN_RUNNING if generator.gi_frame is None: return GEN_CLOSED - if generator.gi_frame.f_lasti == -1: - return GEN_CREATED - return GEN_SUSPENDED + return GEN_CREATED if generator.gi_frame.f_lasti == -1 else GEN_SUSPENDED def getgeneratorlocals(generator): @@ -1649,10 +1622,7 @@ def getgeneratorlocals(generator): raise TypeError("{!r} is not a Python generator".format(generator)) frame = getattr(generator, "gi_frame", None) - if frame is not None: - return generator.gi_frame.f_locals - else: - return {} + return generator.gi_frame.f_locals if frame is not None else {} # ------------------------------------------------ coroutine introspection @@ -1675,9 +1645,7 @@ def getcoroutinestate(coroutine): return CORO_RUNNING if coroutine.cr_frame is None: return CORO_CLOSED - if coroutine.cr_frame.f_lasti == -1: - 
return CORO_CREATED - return CORO_SUSPENDED + return CORO_CREATED if coroutine.cr_frame.f_lasti == -1 else CORO_SUSPENDED def getcoroutinelocals(coroutine): @@ -1687,10 +1655,7 @@ def getcoroutinelocals(coroutine): A dict is returned, with the keys the local variable names and values the bound values.""" frame = getattr(coroutine, "cr_frame", None) - if frame is not None: - return frame.f_locals - else: - return {} + return frame.f_locals if frame is not None else {} ############################################################################### @@ -1815,14 +1780,10 @@ def _signature_bound_method(sig): # Drop first parameter: # '(p1, p2[, ...])' -> '(p2[, ...])' params = params[1:] - else: - if kind is not _VAR_POSITIONAL: - # Unless we add a new parameter type we never - # get here - raise ValueError('invalid argument type') - # It's a var-positional parameter. - # Do nothing. '(*args[, ...])' -> '(*args[, ...])' - + elif kind is not _VAR_POSITIONAL: + # Unless we add a new parameter type we never + # get here + raise ValueError('invalid argument type') return sig.replace(parameters=params) @@ -1949,7 +1910,7 @@ def _signature_strip_non_python_syntax(signature): if delayed_comma: delayed_comma = False - if not ((type == OP) and (string == ')')): + if type != OP or string != ')': add(', ') add(string) if (string == ','): @@ -1971,7 +1932,7 @@ def _signature_fromstr(cls, obj, s, skip_bound_arg=True): clean_signature, self_parameter, last_positional_only = \ _signature_strip_non_python_syntax(s) - program = "def foo" + clean_signature + ": pass" + program = f"def foo{clean_signature}: pass" try: module = ast.parse(program) @@ -2105,12 +2066,11 @@ def _signature_from_builtin(cls, func, skip_bound_arg=True): raise TypeError("{!r} is not a Python builtin " "function".format(func)) - s = getattr(func, "__text_signature__", None) - if not s: + if s := getattr(func, "__text_signature__", None): + return _signature_fromstr(cls, func, s, skip_bound_arg) + else: raise 
ValueError("no signature found for builtin {!r}".format(func)) - return _signature_fromstr(cls, func, s, skip_bound_arg) - def _signature_from_function(cls, func, skip_bound_arg=True): """Private helper: constructs Signature for the given python function.""" @@ -2124,8 +2084,7 @@ def _signature_from_function(cls, func, skip_bound_arg=True): # of pure function: raise TypeError('{!r} is not a Python function'.format(func)) - s = getattr(func, "__text_signature__", None) - if s: + if s := getattr(func, "__text_signature__", None): return _signature_fromstr(cls, func, s, skip_bound_arg) Parameter = cls._parameter_cls @@ -2142,11 +2101,7 @@ def _signature_from_function(cls, func, skip_bound_arg=True): defaults = func.__defaults__ kwdefaults = func.__kwdefaults__ - if defaults: - pos_default_count = len(defaults) - else: - pos_default_count = 0 - + pos_default_count = len(defaults) if defaults else 0 parameters = [] non_default_count = pos_count - pos_default_count diff --git a/Lib/ipaddress.py b/Lib/ipaddress.py index 756f1bc38c..0d99e138a0 100644 --- a/Lib/ipaddress.py +++ b/Lib/ipaddress.py @@ -192,9 +192,7 @@ def _count_righthand_zero_bits(number, bits): The number of zero bits on the right hand side of the number. 
""" - if number == 0: - return bits - return min(bits, (~number & (number-1)).bit_length()) + return bits if number == 0 else min(bits, (~number & (number-1)).bit_length()) def summarize_address_range(first, last): @@ -227,8 +225,7 @@ def summarize_address_range(first, last): isinstance(last, _BaseAddress))): raise TypeError('first and last must be IP addresses, not networks') if first.version != last.version: - raise TypeError("%s and %s are not of the same version" % ( - first, last)) + raise TypeError(f"{first} and {last} are not of the same version") if first > last: raise ValueError('last IP address must be greater than first') @@ -245,8 +242,7 @@ def summarize_address_range(first, last): while first_int <= last_int: nbits = min(_count_righthand_zero_bits(first_int, ip_bits), (last_int - first_int + 1).bit_length() - 1) - net = ip((first_int, ip_bits - nbits)) - yield net + yield ip((first_int, ip_bits - nbits)) first_int += 1 << nbits if first_int - 1 == ip._ALL_ONES: break @@ -327,28 +323,21 @@ def collapse_addresses(addresses): for ip in addresses: if isinstance(ip, _BaseAddress): if ips and ips[-1]._version != ip._version: - raise TypeError("%s and %s are not of the same version" % ( - ip, ips[-1])) + raise TypeError(f"{ip} and {ips[-1]} are not of the same version") ips.append(ip) elif ip._prefixlen == ip._max_prefixlen: if ips and ips[-1]._version != ip._version: - raise TypeError("%s and %s are not of the same version" % ( - ip, ips[-1])) + raise TypeError(f"{ip} and {ips[-1]} are not of the same version") try: ips.append(ip.ip) except AttributeError: ips.append(ip.network_address) + elif nets and nets[-1]._version != ip._version: + raise TypeError(f"{ip} and {nets[-1]} are not of the same version") else: - if nets and nets[-1]._version != ip._version: - raise TypeError("%s and %s are not of the same version" % ( - ip, nets[-1])) nets.append(ip) - # sort and dedup - ips = sorted(set(ips)) - - # find consecutive address ranges in the sorted sequence and 
summarize them - if ips: + if ips := sorted(set(ips)): for first, last in _find_address_range(ips): addrs.extend(summarize_address_range(first, last)) @@ -550,9 +539,7 @@ def _split_addr_prefix(cls, address): address = _split_optional_netmask(address) # Constructing from a tuple (addr, [mask]) - if len(address) > 1: - return address - return address[0], cls._max_prefixlen + return address if len(address) > 1 else (address[0], cls._max_prefixlen) def __reduce__(self): return self.__class__, (str(self),) @@ -585,11 +572,8 @@ def __lt__(self, other): if not isinstance(other, _BaseAddress): return NotImplemented if self._version != other._version: - raise TypeError('%s and %s are not of the same version' % ( - self, other)) - if self._ip != other._ip: - return self._ip < other._ip - return False + raise TypeError(f'{self} and {other} are not of the same version') + return self._ip < other._ip if self._ip != other._ip else False # Shorthand for Integer addition and subtraction. This is not # meant to ever support addition/subtraction of addresses. 
@@ -649,16 +633,8 @@ def __format__(self, fmt): # Set some defaults if fmt_base == 'n': - if self._version == 4: - fmt_base = 'b' # Binary is default for ipv4 - else: - fmt_base = 'x' # Hex is default for ipv6 - - if fmt_base == 'b': - padlen = self._max_prefixlen - else: - padlen = self._max_prefixlen // 4 - + fmt_base = 'b' if self._version == 4 else 'x' + padlen = self._max_prefixlen if fmt_base == 'b' else self._max_prefixlen // 4 if grouping: padlen += padlen // 4 - 1 @@ -717,13 +693,10 @@ def __lt__(self, other): if not isinstance(other, _BaseNetwork): return NotImplemented if self._version != other._version: - raise TypeError('%s and %s are not of the same version' % ( - self, other)) + raise TypeError(f'{self} and {other} are not of the same version') if self.network_address != other.network_address: return self.network_address < other.network_address - if self.netmask != other.netmask: - return self.netmask < other.netmask - return False + return self.netmask < other.netmask if self.netmask != other.netmask else False def __eq__(self, other): try: @@ -770,11 +743,11 @@ def with_prefixlen(self): @property def with_netmask(self): - return '%s/%s' % (self.network_address, self.netmask) + return f'{self.network_address}/{self.netmask}' @property def with_hostmask(self): - return '%s/%s' % (self.network_address, self.hostmask) + return f'{self.network_address}/{self.hostmask}' @property def num_addresses(self): @@ -829,21 +802,19 @@ def address_exclude(self, other): ValueError: If other is not completely contained by self. 
""" - if not self._version == other._version: - raise TypeError("%s and %s are not of the same version" % ( - self, other)) + if self._version != other._version: + raise TypeError(f"{self} and {other} are not of the same version") if not isinstance(other, _BaseNetwork): - raise TypeError("%s is not a network object" % other) + raise TypeError(f"{other} is not a network object") if not other.subnet_of(self): - raise ValueError('%s not contained in %s' % (other, self)) + raise ValueError(f'{other} not contained in {self}') if other == self: return # Make sure we're comparing the network of other. - other = other.__class__('%s/%s' % (other.network_address, - other.prefixlen)) + other = other.__class__(f'{other.network_address}/{other.prefixlen}') s1, s2 = self.subnets() while s1 != other and s2 != other: @@ -902,8 +873,7 @@ def compare_networks(self, other): """ # does this need to raise a ValueError? if self._version != other._version: - raise TypeError('%s and %s are not of the same type' % ( - self, other)) + raise TypeError(f'{self} and {other} are not of the same type') # self._version == other._version below here: if self.network_address < other.network_address: return -1 @@ -912,9 +882,7 @@ def compare_networks(self, other): # self.network_address == other.network_address below here: if self.netmask < other.netmask: return -1 - if self.netmask > other.netmask: - return 1 - return 0 + return 1 if self.netmask > other.netmask else 0 def _get_networks_key(self): """Network-only key function. @@ -976,8 +944,7 @@ def subnets(self, prefixlen_diff=1, new_prefix=None): end = int(self.broadcast_address) + 1 step = (int(self.hostmask) + 1) >> prefixlen_diff for new_addr in range(start, end, step): - current = self.__class__((new_addr, new_prefixlen)) - yield current + yield self.__class__((new_addr, new_prefixlen)) def supernet(self, prefixlen_diff=1, new_prefix=None): """The supernet containing the current network. 
@@ -1433,18 +1400,15 @@ def ip(self): @property def with_prefixlen(self): - return '%s/%s' % (self._string_from_ip_int(self._ip), - self._prefixlen) + return f'{self._string_from_ip_int(self._ip)}/{self._prefixlen}' @property def with_netmask(self): - return '%s/%s' % (self._string_from_ip_int(self._ip), - self.netmask) + return f'{self._string_from_ip_int(self._ip)}/{self.netmask}' @property def with_hostmask(self): - return '%s/%s' % (self._string_from_ip_int(self._ip), - self.hostmask) + return f'{self._string_from_ip_int(self._ip)}/{self.hostmask}' class IPv4Network(_BaseV4, _BaseNetwork): @@ -1504,7 +1468,7 @@ def __init__(self, address, strict=True): packed = int(self.network_address) if packed & int(self.netmask) != packed: if strict: - raise ValueError('%s has host bits set' % self) + raise ValueError(f'{self} has host bits set') else: self.network_address = IPv4Address(packed & int(self.netmask)) @@ -1524,9 +1488,10 @@ def is_global(self): iana-ipv4-special-registry. """ - return (not (self.network_address in IPv4Network('100.64.0.0/10') and - self.broadcast_address in IPv4Network('100.64.0.0/10')) and - not self.is_private) + return ( + self.network_address not in IPv4Network('100.64.0.0/10') + or self.broadcast_address not in IPv4Network('100.64.0.0/10') + ) and not self.is_private class _IPv4Constants: @@ -1917,7 +1882,7 @@ def __init__(self, address): def __str__(self): ip_str = super().__str__() - return ip_str + '%' + self._scope_id if self._scope_id else ip_str + return f'{ip_str}%{self._scope_id}' if self._scope_id else ip_str def __hash__(self): return hash((self._ip, self._scope_id)) @@ -2051,9 +2016,11 @@ def ipv4_mapped(self): IPv4 mapped address. Return None otherwise. 
""" - if (self._ip >> 32) != 0xFFFF: - return None - return IPv4Address(self._ip & 0xFFFFFFFF) + return ( + None + if (self._ip >> 32) != 0xFFFF + else IPv4Address(self._ip & 0xFFFFFFFF) + ) @property def teredo(self): @@ -2137,18 +2104,15 @@ def ip(self): @property def with_prefixlen(self): - return '%s/%s' % (self._string_from_ip_int(self._ip), - self._prefixlen) + return f'{self._string_from_ip_int(self._ip)}/{self._prefixlen}' @property def with_netmask(self): - return '%s/%s' % (self._string_from_ip_int(self._ip), - self.netmask) + return f'{self._string_from_ip_int(self._ip)}/{self.netmask}' @property def with_hostmask(self): - return '%s/%s' % (self._string_from_ip_int(self._ip), - self.hostmask) + return f'{self._string_from_ip_int(self._ip)}/{self.hostmask}' @property def is_unspecified(self): @@ -2213,7 +2177,7 @@ def __init__(self, address, strict=True): packed = int(self.network_address) if packed & int(self.netmask) != packed: if strict: - raise ValueError('%s has host bits set' % self) + raise ValueError(f'{self} has host bits set') else: self.network_address = IPv6Address(packed & int(self.netmask)) diff --git a/Lib/linecache.py b/Lib/linecache.py index 8f011b93af..84719a6b4e 100644 --- a/Lib/linecache.py +++ b/Lib/linecache.py @@ -31,9 +31,7 @@ def getline(filename, lineno, module_globals=None): Update the cache if it doesn't contain an entry for this file already.""" lines = getlines(filename, module_globals) - if 1 <= lineno <= len(lines): - return lines[lineno - 1] - return '' + return lines[lineno - 1] if 1 <= lineno <= len(lines) else '' def getlines(filename, module_globals=None): @@ -43,8 +41,7 @@ def getlines(filename, module_globals=None): if filename in cache: entry = cache[filename] if len(entry) != 1: - return cache[filename][2] - + return entry[2] try: return updatecache(filename, module_globals) except MemoryError: @@ -161,10 +158,7 @@ def lazycache(filename, module_globals): filename, and the filename must not be already cached. 
""" if filename in cache: - if len(cache[filename]) == 1: - return True - else: - return False + return len(cache[filename]) == 1 if not filename or (filename.startswith('<') and filename.endswith('>')): return False # Try for a __loader__, if available diff --git a/Lib/locale.py b/Lib/locale.py index 7a7694e1bf..94bac1d8fe 100644 --- a/Lib/locale.py +++ b/Lib/locale.py @@ -191,23 +191,19 @@ def _format(percent, value, grouping=False, monetary=False, *additional): # Transform formatted as locale number according to the locale settings def _localize(formatted, grouping=False, monetary=False): + seps = 0 # floats and decimal ints need special action! if '.' in formatted: - seps = 0 parts = formatted.split('.') if grouping: parts[0], seps = _group(parts[0], monetary=monetary) decimal_point = localeconv()[monetary and 'mon_decimal_point' or 'decimal_point'] formatted = decimal_point.join(parts) - if seps: - formatted = _strip_padding(formatted, seps) - else: - seps = 0 - if grouping: - formatted, seps = _group(formatted, monetary=monetary) - if seps: - formatted = _strip_padding(formatted, seps) + elif grouping: + formatted, seps = _group(formatted, monetary=monetary) + if seps: + formatted = _strip_padding(formatted, seps) return formatted def format_string(f, val, grouping=False, monetary=False): @@ -220,8 +216,8 @@ def format_string(f, val, grouping=False, monetary=False): percents = list(_percent_re.finditer(f)) new_f = _percent_re.sub('%s', f) + new_val = [] if isinstance(val, _collections_abc.Mapping): - new_val = [] for perc in percents: if perc.group()[-1]=='%': new_val.append('%') @@ -230,7 +226,6 @@ def format_string(f, val, grouping=False, monetary=False): else: if not isinstance(val, tuple): val = (val,) - new_val = [] i = 0 for perc in percents: if perc.group()[-1]=='%': @@ -275,7 +270,7 @@ def currency(val, symbol=True, grouping=False, international=False): s = _localize(f'{abs(val):.{digits}f}', grouping, monetary=True) # '<' and '>' are markers if the 
sign must be inserted between symbol and value - s = '<' + s + '>' + s = f'<{s}>' if symbol: smb = conv[international and 'int_curr_symbol' or 'currency_symbol'] @@ -293,9 +288,7 @@ def currency(val, symbol=True, grouping=False, international=False): sign = conv[val<0 and 'negative_sign' or 'positive_sign'] if sign_pos == 0: - s = '(' + s + ')' - elif sign_pos == 1: - s = sign + s + s = f'({s})' elif sign_pos == 2: s = s + sign elif sign_pos == 3: @@ -318,14 +311,10 @@ def delocalize(string): conv = localeconv() - #First, get rid of the grouping - ts = conv['thousands_sep'] - if ts: + if ts := conv['thousands_sep']: string = string.replace(ts, '') - #next, replace the decimal point with a dot - dd = conv['decimal_point'] - if dd: + if dd := conv['decimal_point']: string = string.replace(dd, '.') return string @@ -360,10 +349,7 @@ def _test(): _setlocale = setlocale def _replace_encoding(code, encoding): - if '.' in code: - langname = code[:code.index('.')] - else: - langname = code + langname = code[:code.index('.')] if '.' in code else code # Convert the encoding to a C lib compatible encoding string norm_encoding = encodings.normalize_encoding(encoding) #print('norm encoding: %r' % norm_encoding) @@ -380,18 +366,18 @@ def _replace_encoding(code, encoding): if norm_encoding in locale_encoding_alias: encoding = locale_encoding_alias[norm_encoding] #print('found encoding %r' % encoding) - return langname + '.' + encoding + return f'{langname}.{encoding}' def _append_modifier(code, modifier): if modifier == 'euro': if '.' 
not in code: - return code + '.ISO8859-15' + return f'{code}.ISO8859-15' _, _, encoding = code.partition('.') if encoding in ('ISO8859-15', 'UTF-8'): return code if encoding == 'ISO8859-1': return _replace_encoding(code, 'ISO8859-15') - return code + '@' + modifier + return f'{code}@{modifier}' def normalize(localename): @@ -429,10 +415,10 @@ def normalize(localename): if encoding: norm_encoding = encoding.replace('-', '') norm_encoding = norm_encoding.replace('_', '') - lang_enc += '.' + norm_encoding + lang_enc += f'.{norm_encoding}' lookup_name = lang_enc if modifier: - lookup_name += '@' + modifier + lookup_name += f'@{modifier}' code = locale_alias.get(lookup_name, None) if code is not None: return code @@ -453,14 +439,14 @@ def normalize(localename): # Third try: langname (without encoding, possibly with modifier) lookup_name = langname if modifier: - lookup_name += '@' + modifier + lookup_name += f'@{modifier}' code = locale_alias.get(lookup_name, None) if code is not None: #print('lookup without encoding succeeded') if '@' not in code: return _replace_encoding(code, encoding) code, modifier = code.split('@', 1) - return _replace_encoding(code, encoding) + '@' + modifier + return f'{_replace_encoding(code, encoding)}@{modifier}' if modifier: # Fourth try: langname (without encoding and modifier) @@ -472,7 +458,7 @@ def normalize(localename): return _append_modifier(code, modifier) code, defmod = code.split('@', 1) if defmod.lower() == modifier: - return _replace_encoding(code, encoding) + '@' + defmod + return f'{_replace_encoding(code, encoding)}@{defmod}' return localename @@ -508,7 +494,7 @@ def _parse_localename(localename): # On macOS "LC_CTYPE=UTF-8" is a valid locale setting # for getting UTF-8 handling for text. 
return None, 'UTF-8' - raise ValueError('unknown locale: %s' % localename) + raise ValueError(f'unknown locale: {localename}') def _build_localename(localetuple): @@ -523,10 +509,7 @@ def _build_localename(localetuple): if language is None: language = 'C' - if encoding is None: - return language - else: - return language + '.' + encoding + return language if encoding is None else f'{language}.{encoding}' except (TypeError, ValueError): raise TypeError('Locale must be None, a string, or an iterable of ' 'two strings -- language code, encoding.') from None @@ -670,9 +653,7 @@ def getpreferredencoding(do_setlocale=True): warnings.warn( "UTF-8 Mode affects locale.getpreferredencoding(). Consider locale.getencoding() instead.", EncodingWarning, 2) - if sys.flags.utf8_mode: - return 'utf-8' - return getencoding() + return 'utf-8' if sys.flags.utf8_mode else getencoding() else: # On Unix, if CODESET is available, use that. def getpreferredencoding(do_setlocale=True): diff --git a/Lib/mailbox.py b/Lib/mailbox.py index 70da07ed2e..a193e2426e 100644 --- a/Lib/mailbox.py +++ b/Lib/mailbox.py @@ -52,10 +52,8 @@ def __delitem__(self, key): def discard(self, key): """If the keyed message exists, remove it.""" - try: + with contextlib.suppress(KeyError): self.remove(key) - except KeyError: - pass def __setitem__(self, key, message): """Replace the keyed message; raise KeyError if it doesn't exist.""" @@ -72,9 +70,8 @@ def __getitem__(self, key): """Return the keyed message; raise KeyError if it doesn't exist.""" if not self._factory: return self.get_message(key) - else: - with contextlib.closing(self.get_file(key)) as file: - return self._factory(file) + with contextlib.closing(self.get_file(key)) as file: + return self._factory(file) def get_message(self, key): """Return a Message representation or raise a KeyError.""" @@ -259,7 +256,7 @@ def _dump_message(self, message, target, mangle_from_=False): # Make sure the message ends with a newline target.write(linesep) else: - raise 
TypeError('Invalid message type: %s' % type(message)) + raise TypeError(f'Invalid message type: {type(message)}') __class_getitem__ = classmethod(GenericAlias) @@ -278,12 +275,11 @@ def __init__(self, dirname, factory=None, create=True): 'cur': os.path.join(self._path, 'cur'), } if not os.path.exists(self._path): - if create: - os.mkdir(self._path, 0o700) - for path in self._paths.values(): - os.mkdir(path, 0o700) - else: + if not create: raise NoSuchMailboxError(self._path) + os.mkdir(self._path, 0o700) + for path in self._paths.values(): + os.mkdir(path, 0o700) self._toc = {} self._toc_mtimes = {'cur': 0, 'new': 0} self._last_read = 0 # Records last time we read cur/new @@ -325,8 +321,7 @@ def add(self, message): except OSError as e: os.remove(tmp_file.name) if e.errno == errno.EEXIST: - raise ExternalClashError('Name clash with existing message: %s' - % dest) + raise ExternalClashError(f'Name clash with existing message: {dest}') else: raise return uniq @@ -338,10 +333,8 @@ def remove(self, key): def discard(self, key): """If the keyed message exists, remove it.""" # This overrides an inapplicable implementation in the superclass. - try: + with contextlib.suppress(KeyError, FileNotFoundError): self.remove(key) - except (KeyError, FileNotFoundError): - pass def __setitem__(self, key, message): """Replace the keyed message; raise KeyError if it doesn't exist.""" @@ -374,10 +367,7 @@ def get_message(self, key): """Return a Message representation or raise a KeyError.""" subpath = self._lookup(key) with open(os.path.join(self._path, subpath), 'rb') as f: - if self._factory: - msg = self._factory(f) - else: - msg = MaildirMessage(f) + msg = self._factory(f) if self._factory else MaildirMessage(f) subdir, name = os.path.split(subpath) msg.set_subdir(subdir) if self.colon in name: @@ -435,22 +425,25 @@ def close(self): def list_folders(self): """Return a list of folder names.""" - result = [] - for entry in os.listdir(self._path): - if len(entry) > 1 and entry[0] == '.' 
and \ - os.path.isdir(os.path.join(self._path, entry)): - result.append(entry[1:]) - return result + return [ + entry[1:] + for entry in os.listdir(self._path) + if len(entry) > 1 + and entry[0] == '.' + and os.path.isdir(os.path.join(self._path, entry)) + ] def get_folder(self, folder): """Return a Maildir instance for the named folder.""" - return Maildir(os.path.join(self._path, '.' + folder), - factory=self._factory, - create=False) + return Maildir( + os.path.join(self._path, f'.{folder}'), + factory=self._factory, + create=False, + ) def add_folder(self, folder): """Create a folder and return a Maildir instance representing it.""" - path = os.path.join(self._path, '.' + folder) + path = os.path.join(self._path, f'.{folder}') result = Maildir(path, factory=self._factory) maildirfolder_path = os.path.join(path, 'maildirfolder') if not os.path.exists(maildirfolder_path): @@ -460,16 +453,15 @@ def add_folder(self, folder): def remove_folder(self, folder): """Delete the named folder, which must be empty.""" - path = os.path.join(self._path, '.' 
+ folder) + path = os.path.join(self._path, f'.{folder}') for entry in os.listdir(os.path.join(path, 'new')) + \ - os.listdir(os.path.join(path, 'cur')): + os.listdir(os.path.join(path, 'cur')): if len(entry) < 1 or entry[0] != '.': - raise NotEmptyError('Folder contains message(s): %s' % folder) + raise NotEmptyError(f'Folder contains message(s): {folder}') for entry in os.listdir(path): if entry != 'new' and entry != 'cur' and entry != 'tmp' and \ - os.path.isdir(os.path.join(path, entry)): - raise NotEmptyError("Folder contains subdirectory '%s': %s" % - (folder, entry)) + os.path.isdir(os.path.join(path, entry)): + raise NotEmptyError(f"Folder contains subdirectory '{folder}': {entry}") for root, dirs, files in os.walk(path, topdown=False): for entry in files: os.remove(os.path.join(root, entry)) @@ -495,21 +487,16 @@ def _create_tmp(self): hostname = hostname.replace('/', r'\057') if ':' in hostname: hostname = hostname.replace(':', r'\072') - uniq = "%s.M%sP%sQ%s.%s" % (int(now), int(now % 1 * 1e6), os.getpid(), - Maildir._count, hostname) + uniq = f"{int(now)}.M{int(now % 1 * 1000000.0)}P{os.getpid()}Q{Maildir._count}.{hostname}" path = os.path.join(self._path, 'tmp', uniq) try: os.stat(path) except FileNotFoundError: Maildir._count += 1 - try: + with contextlib.suppress(FileExistsError): return _create_carefully(path) - except FileExistsError: - pass - # Fall through to here if stat succeeded or open raised EEXIST. 
- raise ExternalClashError('Name clash prevented file creation: %s' % - path) + raise ExternalClashError(f'Name clash prevented file creation: {path}') def _refresh(self): """Update table of contents mapping.""" @@ -549,16 +536,14 @@ def _refresh(self): def _lookup(self, key): """Use TOC to return subpath for given key, or raise a KeyError.""" - try: + with contextlib.suppress(KeyError): if os.path.exists(os.path.join(self._path, self._toc[key])): return self._toc[key] - except KeyError: - pass self._refresh() try: return self._toc[key] except KeyError: - raise KeyError('No message with key: %s' % key) from None + raise KeyError(f'No message with key: {key}') from None # This method is for backward compatibility only. def next(self): @@ -683,11 +668,12 @@ def flush(self): self._pre_message_hook(new_file) new_start = new_file.tell() while True: - buffer = self._file.read(min(4096, - stop - self._file.tell())) - if not buffer: + if buffer := self._file.read( + min(4096, stop - self._file.tell()) + ): + new_file.write(buffer) + else: break - new_file.write(buffer) new_toc[key] = (new_start, new_file.tell()) self._post_message_hook(new_file) self._file_length = new_file.tell() @@ -744,7 +730,7 @@ def _lookup(self, key=None): try: return self._toc[key] except KeyError: - raise KeyError('No message with key: %s' % key) from None + raise KeyError(f'No message with key: {key}') from None def _append_message(self, message): """Append message to mailbox and return (start, stop) offsets.""" @@ -939,21 +925,17 @@ def __init__(self, path, factory=None, create=True): """Initialize an MH instance.""" Mailbox.__init__(self, path, factory, create) if not os.path.exists(self._path): - if create: - os.mkdir(self._path, 0o700) - os.close(os.open(os.path.join(self._path, '.mh_sequences'), - os.O_CREAT | os.O_EXCL | os.O_WRONLY, 0o600)) - else: + if not create: raise NoSuchMailboxError(self._path) + os.mkdir(self._path, 0o700) + os.close(os.open(os.path.join(self._path, 
'.mh_sequences'), + os.O_CREAT | os.O_EXCL | os.O_WRONLY, 0o600)) self._locked = False def add(self, message): """Add message and return assigned key.""" keys = self.keys() - if len(keys) == 0: - new_key = 1 - else: - new_key = max(keys) + 1 + new_key = 1 if len(keys) == 0 else max(keys) + 1 new_path = os.path.join(self._path, str(new_key)) f = _create_carefully(new_path) closed = False @@ -988,7 +970,7 @@ def remove(self, key): f = open(path, 'rb+') except OSError as e: if e.errno == errno.ENOENT: - raise KeyError('No message with key: %s' % key) + raise KeyError(f'No message with key: {key}') else: raise else: @@ -1002,7 +984,7 @@ def __setitem__(self, key, message): f = open(path, 'rb+') except OSError as e: if e.errno == errno.ENOENT: - raise KeyError('No message with key: %s' % key) + raise KeyError(f'No message with key: {key}') else: raise try: @@ -1028,7 +1010,7 @@ def get_message(self, key): f = open(os.path.join(self._path, str(key)), 'rb') except OSError as e: if e.errno == errno.ENOENT: - raise KeyError('No message with key: %s' % key) + raise KeyError(f'No message with key: {key}') else: raise with f: @@ -1053,7 +1035,7 @@ def get_bytes(self, key): f = open(os.path.join(self._path, str(key)), 'rb') except OSError as e: if e.errno == errno.ENOENT: - raise KeyError('No message with key: %s' % key) + raise KeyError(f'No message with key: {key}') else: raise with f: @@ -1071,7 +1053,7 @@ def get_file(self, key): f = open(os.path.join(self._path, str(key)), 'rb') except OSError as e: if e.errno == errno.ENOENT: - raise KeyError('No message with key: %s' % key) + raise KeyError(f'No message with key: {key}') else: raise return _ProxyFile(f) @@ -1115,11 +1097,11 @@ def close(self): def list_folders(self): """Return a list of folder names.""" - result = [] - for entry in os.listdir(self._path): - if os.path.isdir(os.path.join(self._path, entry)): - result.append(entry) - return result + return [ + entry + for entry in os.listdir(self._path) + if 
os.path.isdir(os.path.join(self._path, entry)) + ] def get_folder(self, folder): """Return an MH instance for the named folder.""" @@ -1137,10 +1119,8 @@ def remove_folder(self, folder): entries = os.listdir(path) if entries == ['.mh_sequences']: os.remove(os.path.join(path, '.mh_sequences')) - elif entries == []: - pass - else: - raise NotEmptyError('Folder not empty: %s' % self._path) + elif entries != []: + raise NotEmptyError(f'Folder not empty: {self._path}') os.rmdir(path) def get_sequences(self): @@ -1159,12 +1139,11 @@ def get_sequences(self): start, stop = (int(x) for x in spec.split('-')) keys.update(range(start, stop + 1)) results[name] = [key for key in sorted(keys) \ - if key in all_keys] + if key in all_keys] if len(results[name]) == 0: del results[name] except ValueError: - raise FormatError('Invalid sequence specification: %s' % - line.rstrip()) + raise FormatError(f'Invalid sequence specification: {line.rstrip()}') return results def set_sequences(self, sequences): @@ -1175,7 +1154,7 @@ def set_sequences(self, sequences): for name, keys in sequences.items(): if len(keys) == 0: continue - f.write(name + ':') + f.write(f'{name}:') prev = None completing = False for key in sorted(set(keys)): @@ -1185,9 +1164,9 @@ def set_sequences(self, sequences): f.write('-') elif completing: completing = False - f.write('%s %s' % (prev, key)) + f.write(f'{prev} {key}') else: - f.write(' %s' % key) + f.write(f' {key}') prev = key if completing: f.write(str(prev) + '\n') @@ -1214,7 +1193,7 @@ def pack(self): os.unlink(os.path.join(self._path, str(key))) prev += 1 self._next_key = prev + 1 - if len(changes) == 0: + if not changes: return for name, key_list in sequences.items(): for old, new in changes: @@ -1348,7 +1327,7 @@ def _generate_toc(self): in self._file.readline()[1:].split(b',') if label.strip()] label_lists.append(labels) - elif line == b'\037' or line == b'\037' + linesep: + elif line in [b'\037', b'\037' + linesep]: if len(stops) < len(starts): 
stops.append(line_pos - len(linesep)) elif not line: @@ -1426,10 +1405,10 @@ def _install_message(self, message): if line == b'\n' or not line: break while True: - buffer = orig_buffer.read(4096) # Buffer size is arbitrary. - if not buffer: + if buffer := orig_buffer.read(4096): + self._file.write(buffer.replace(b'\n', linesep)) + else: break - self._file.write(buffer.replace(b'\n', linesep)) elif isinstance(message, (bytes, str, io.StringIO)): if isinstance(message, io.StringIO): warnings.warn("Use of StringIO input is deprecated, " @@ -1438,7 +1417,7 @@ def _install_message(self, message): if isinstance(message, str): message = self._string_to_bytes(message) body_start = message.find(b'\n\n') + 2 - if body_start - 2 != -1: + if body_start != 1: self._file.write(message[:body_start].replace(b'\n', linesep)) self._file.write(b'*** EOOH ***' + linesep) self._file.write(message[:body_start].replace(b'\n', linesep)) @@ -1462,12 +1441,11 @@ def _install_message(self, message): line = line[:-1] + b'\n' self._file.write(line.replace(b'\n', linesep)) if line == b'\n' or not line: - if first_pass: - first_pass = False - self._file.write(b'*** EOOH ***' + linesep) - message.seek(original_pos) - else: + if not first_pass: break + first_pass = False + self._file.write(b'*** EOOH ***' + linesep) + message.seek(original_pos) while True: line = message.readline() if not line: @@ -1481,7 +1459,7 @@ def _install_message(self, message): line = line[:-1] + linesep self._file.write(line) else: - raise TypeError('Invalid message type: %s' % type(message)) + raise TypeError(f'Invalid message type: {type(message)}') stop = self._file.tell() return (start, stop) @@ -1506,7 +1484,7 @@ def __init__(self, message=None): elif message is None: email.message.Message.__init__(self) else: - raise TypeError('Invalid message type: %s' % type(message)) + raise TypeError(f'Invalid message type: {type(message)}') def _become_message(self, message): """Assume the non-format-specific state of 
message.""" @@ -1541,17 +1519,14 @@ def get_subdir(self): def set_subdir(self, subdir): """Set subdir to 'new' or 'cur'.""" - if subdir == 'new' or subdir == 'cur': + if subdir in ['new', 'cur']: self._subdir = subdir else: - raise ValueError("subdir must be 'new' or 'cur': %s" % subdir) + raise ValueError(f"subdir must be 'new' or 'cur': {subdir}") def get_flags(self): """Return as a string the flags that are set.""" - if self._info.startswith('2,'): - return self._info[2:] - else: - return '' + return self._info[2:] if self._info.startswith('2,') else '' def set_flags(self, flags): """Set the given flags and unset all others.""" @@ -1575,7 +1550,7 @@ def set_date(self, date): try: self._date = float(date) except ValueError: - raise TypeError("can't convert to float: %s" % date) from None + raise TypeError(f"can't convert to float: {date}") from None def get_info(self): """Get the message's "info" as a string.""" @@ -1586,7 +1561,7 @@ def set_info(self, info): if isinstance(info, str): self._info = info else: - raise TypeError('info must be a string: %s' % type(info)) + raise TypeError(f'info must be a string: {type(info)}') def _explain_to(self, message): """Copy Maildir-specific state to message insofar as possible.""" @@ -1625,11 +1600,8 @@ def _explain_to(self, message): message.add_label('answered') if 'P' in flags: message.add_label('forwarded') - elif isinstance(message, Message): - pass - else: - raise TypeError('Cannot convert to specified type: %s' % - type(message)) + elif not isinstance(message, Message): + raise TypeError(f'Cannot convert to specified type: {type(message)}') class _mboxMMDFMessage(Message): @@ -1655,7 +1627,7 @@ def set_from(self, from_, time_=None): if time_ is not None: if time_ is True: time_ = time.gmtime() - from_ += ' ' + time.asctime(time_) + from_ += f' {time.asctime(time_)}' self._from = from_ def get_flags(self): @@ -1710,11 +1682,9 @@ def _explain_to(self, message): del message['status'] del message['x-status'] maybe_date = 
' '.join(self.get_from().split()[-5:]) - try: + with contextlib.suppress(ValueError, OverflowError): message.set_date(calendar.timegm(time.strptime(maybe_date, '%a %b %d %H:%M:%S %Y'))) - except (ValueError, OverflowError): - pass elif isinstance(message, _mboxMMDFMessage): message.set_flags(self.get_flags()) message.set_from(self.get_from()) @@ -1738,11 +1708,8 @@ def _explain_to(self, message): message.add_label('answered') del message['status'] del message['x-status'] - elif isinstance(message, Message): - pass - else: - raise TypeError('Cannot convert to specified type: %s' % - type(message)) + elif not isinstance(message, Message): + raise TypeError(f'Cannot convert to specified type: {type(message)}') class mboxMessage(_mboxMMDFMessage): @@ -1769,18 +1736,15 @@ def set_sequences(self, sequences): def add_sequence(self, sequence): """Add sequence to list of sequences including the message.""" - if isinstance(sequence, str): - if not sequence in self._sequences: - self._sequences.append(sequence) - else: - raise TypeError('sequence type must be str: %s' % type(sequence)) + if not isinstance(sequence, str): + raise TypeError(f'sequence type must be str: {type(sequence)}') + if sequence not in self._sequences: + self._sequences.append(sequence) def remove_sequence(self, sequence): """Remove sequence from the list of sequences including the message.""" - try: + with contextlib.suppress(ValueError): self._sequences.remove(sequence) - except ValueError: - pass def _explain_to(self, message): """Copy MH-specific state to message insofar as possible.""" @@ -1814,11 +1778,8 @@ def _explain_to(self, message): message.add_label('unseen') if 'replied' in sequences: message.add_label('answered') - elif isinstance(message, Message): - pass - else: - raise TypeError('Cannot convert to specified type: %s' % - type(message)) + elif not isinstance(message, Message): + raise TypeError(f'Cannot convert to specified type: {type(message)}') class BabylMessage(Message): @@ -1842,18 
+1803,15 @@ def set_labels(self, labels): def add_label(self, label): """Add label to list of labels on the message.""" - if isinstance(label, str): - if label not in self._labels: - self._labels.append(label) - else: - raise TypeError('label must be a string: %s' % type(label)) + if not isinstance(label, str): + raise TypeError(f'label must be a string: {type(label)}') + if label not in self._labels: + self._labels.append(label) def remove_label(self, label): """Remove label from the list of labels on the message.""" - try: + with contextlib.suppress(ValueError): self._labels.remove(label) - except ValueError: - pass def get_visible(self): """Return a Message representation of visible headers.""" @@ -1909,11 +1867,8 @@ def _explain_to(self, message): message.set_visible(self.get_visible()) for label in self.get_labels(): message.add_label(label) - elif isinstance(message, Message): - pass - else: - raise TypeError('Cannot convert to specified type: %s' % - type(message)) + elif not isinstance(message, Message): + raise TypeError(f'Cannot convert to specified type: {type(message)}') class MMDFMessage(_mboxMMDFMessage): @@ -1926,10 +1881,7 @@ class _ProxyFile: def __init__(self, f, pos=None): """Initialize a _ProxyFile.""" self._file = f - if pos is None: - self._pos = f.tell() - else: - self._pos = pos + self._pos = f.tell() if pos is None else pos def read(self, size=None): """Read bytes.""" @@ -1957,10 +1909,10 @@ def readlines(self, sizehint=None): def __iter__(self): """Iterate over lines.""" while True: - line = self.readline() - if not line: + if line := self.readline(): + yield line + else: return - yield line def tell(self): """Return the position.""" @@ -2014,9 +1966,7 @@ def flush(self): def closed(self): if not hasattr(self, '_file'): return True - if not hasattr(self._file, 'closed'): - return False - return self._file.closed + return False if not hasattr(self._file, 'closed') else self._file.closed __class_getitem__ = classmethod(GenericAlias) @@ 
-2069,13 +2019,12 @@ def _lock_file(f, dotlock=True): fcntl.lockf(f, fcntl.LOCK_EX | fcntl.LOCK_NB) except OSError as e: if e.errno in (errno.EAGAIN, errno.EACCES, errno.EROFS): - raise ExternalClashError('lockf: lock unavailable: %s' % - f.name) + raise ExternalClashError(f'lockf: lock unavailable: {f.name}') else: raise if dotlock: try: - pre_lock = _create_temporary(f.name + '.lock') + pre_lock = _create_temporary(f'{f.name}.lock') pre_lock.close() except OSError as e: if e.errno in (errno.EACCES, errno.EROFS): @@ -2084,30 +2033,29 @@ def _lock_file(f, dotlock=True): raise try: try: - os.link(pre_lock.name, f.name + '.lock') + os.link(pre_lock.name, f'{f.name}.lock') dotlock_done = True except (AttributeError, PermissionError): - os.rename(pre_lock.name, f.name + '.lock') + os.rename(pre_lock.name, f'{f.name}.lock') dotlock_done = True else: os.unlink(pre_lock.name) except FileExistsError: os.remove(pre_lock.name) - raise ExternalClashError('dot lock unavailable: %s' % - f.name) + raise ExternalClashError(f'dot lock unavailable: {f.name}') except: if fcntl: fcntl.lockf(f, fcntl.LOCK_UN) if dotlock_done: - os.remove(f.name + '.lock') + os.remove(f'{f.name}.lock') raise def _unlock_file(f): """Unlock file f using lockf and dot locking.""" if fcntl: fcntl.lockf(f, fcntl.LOCK_UN) - if os.path.exists(f.name + '.lock'): - os.remove(f.name + '.lock') + if os.path.exists(f'{f.name}.lock'): + os.remove(f'{f.name}.lock') def _create_carefully(path): """Create a file if it doesn't exist and open for reading and writing.""" @@ -2119,9 +2067,9 @@ def _create_carefully(path): def _create_temporary(path): """Create a temp file based on path and open for reading and writing.""" - return _create_carefully('%s.%s.%s.%s' % (path, int(time.time()), - socket.gethostname(), - os.getpid())) + return _create_carefully( + f'{path}.{int(time.time())}.{socket.gethostname()}.{os.getpid()}' + ) def _sync_flush(f): """Ensure changes to file f are physically on disk.""" diff --git 
a/crawl_sourcecode.py b/crawl_sourcecode.py index 2daad4f682..db7c0267a3 100644 --- a/crawl_sourcecode.py +++ b/crawl_sourcecode.py @@ -28,7 +28,7 @@ def print_node(node, indent=0): indents = ' ' * indent if isinstance(node, ast.AST): - lineno = 'row={}'.format(node.lineno) if hasattr(node, 'lineno') else '' + lineno = f'row={node.lineno}' if hasattr(node, 'lineno') else '' print(indents, "NODE", node.__class__.__name__, lineno) for field in node._fields: print(indents,'-', field) diff --git a/whats_left.py b/whats_left.py index 7f3ad80c63..905ecf3674 100755 --- a/whats_left.py +++ b/whats_left.py @@ -56,8 +56,7 @@ def parse_args(): help="print output as JSON (instead of line by line)", ) - args = parser.parse_args() - return args + return parser.parse_args() args = parse_args() @@ -151,9 +150,7 @@ def extra_info(obj): def name_sort_key(name): if name == "builtins": return "" - if name[0] == "_": - return name[1:] + "1" - return name + "2" + return f"{name[1:]}1" if name[0] == "_" else f"{name}2" def gen_methods(): @@ -207,10 +204,11 @@ def gen_methods(): methods = {} for typ_code in objects + iters: typ = eval(typ_code) - attrs = [] - for attr in dir(typ): - if attr_is_not_inherited(typ, attr): - attrs.append((attr, extra_info(getattr(typ, attr)))) + attrs = [ + (attr, extra_info(getattr(typ, attr))) + for attr in dir(typ) + if attr_is_not_inherited(typ, attr) + ] methods[typ.__name__] = (typ_code, extra_info(typ), attrs) output = "expected_methods = {\n" @@ -234,7 +232,7 @@ def scan_modules(): def callback(path, modname, desc, modules=modules): if modname and modname[-9:] == ".__init__": - modname = modname[:-9] + " (package)" + modname = f"{modname[:-9]} (package)" if modname.find(".") < 0: modules[modname] = 1