diff --git a/.github/workflows/run_tests.yml b/.github/workflows/run_tests.yml index ace888390..c8177eef8 100644 --- a/.github/workflows/run_tests.yml +++ b/.github/workflows/run_tests.yml @@ -160,4 +160,7 @@ jobs: - name: Upload coverage to Codecov uses: codecov/codecov-action@v3 with: - files: ./coverage.xml, ./coverage.info \ No newline at end of file + files: ./coverage.xml, ./coverage.info + + - name: Run Pylint + run: make check-lint diff --git a/.pylintrc b/.pylintrc index 50b124b6e..46cd8d0a7 100644 --- a/.pylintrc +++ b/.pylintrc @@ -1,3 +1,29 @@ +# BSD 2-Clause License +# +# Copyright (c) 2021-2023, Hewlett Packard Enterprise +# All rights reserved. +# +# Redistribution and use in source and binary forms, with or without +# modification, are permitted provided that the following conditions are met: +# +# 1. Redistributions of source code must retain the above copyright notice, this +# list of conditions and the following disclaimer. +# +# 2. Redistributions in binary form must reproduce the above copyright notice, +# this list of conditions and the following disclaimer in the documentation +# and/or other materials provided with the distribution. +# +# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" +# AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE +# IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE +# DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE +# FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL +# DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR +# SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER +# CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, +# OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE +# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
+ [MASTER] # Use multiple processes to speed up Pylint. @@ -10,19 +36,16 @@ persistent=yes # user-friendly hints instead of false-positive error messages. suggestion-mode=yes +# Ignore problematic extensions +extension-pkg-whitelist=pydantic, + +# Dirs where we do not care about style +# ignore-paths= + [MESSAGES CONTROL] -disable=print-statement, - parameter-unpacking, - unpacking-in-except, - old-raise-syntax, - backtick, - long-suffix, - old-ne-operator, - old-octal-literal, - import-star-module-level, - non-ascii-bytes-literal, +disable=logging-fstring-interpolation, raw-checker-failed, bad-inline-option, locally-disabled, @@ -31,69 +54,6 @@ disable=print-statement, useless-suppression, deprecated-pragma, use-symbolic-message-instead, - apply-builtin, - basestring-builtin, - buffer-builtin, - cmp-builtin, - coerce-builtin, - execfile-builtin, - file-builtin, - long-builtin, - raw_input-builtin, - reduce-builtin, - standarderror-builtin, - unicode-builtin, - xrange-builtin, - coerce-method, - delslice-method, - getslice-method, - setslice-method, - no-absolute-import, - old-division, - dict-iter-method, - dict-view-method, - next-method-called, - metaclass-assignment, - indexing-exception, - raising-string, - reload-builtin, - oct-method, - hex-method, - nonzero-method, - cmp-method, - input-builtin, - round-builtin, - intern-builtin, - unichr-builtin, - map-builtin-not-iterating, - zip-builtin-not-iterating, - range-builtin-not-iterating, - filter-builtin-not-iterating, - using-cmp-argument, - eq-without-hash, - div-method, - idiv-method, - rdiv-method, - exception-message-attribute, - invalid-str-codec, - sys-max-int, - bad-python3-import, - deprecated-string-function, - deprecated-str-translate-call, - deprecated-itertools-function, - deprecated-types-field, - next-method-defined, - dict-items-not-iterating, - dict-keys-not-iterating, - dict-values-not-iterating, - deprecated-operator-function, - deprecated-urllib-function, - xreadlines-attribute, - 
deprecated-sys-function, - exception-escape, - comprehension-escape - bad-continuation, - invalid-name, too-many-instance-attributes, too-many-arguments, unused-argument, @@ -103,18 +63,26 @@ disable=print-statement, missing-function-docstring, too-many-branches, too-many-nested-blocks, - no-self-use, no-else-break, broad-except, - pointless-string-statement + pointless-string-statement, + too-few-public-methods, + fixme, # TODO: Enable after reaching a release + broad-exception-raised, # TODO: Enable after reaching a MVP enable=useless-object-inheritance, unused-variable, unused-import, - undefined-variable + unused-argument, + undefined-variable, not-callable, arguments-differ, - redefined-outer-name + redefined-outer-name, + bare-except, + +load-plugins=pylint.extensions.no_self_use, + pylint.extensions.eq_without_hash, + pylint.extensions.broad_try_clause, [REPORTS] @@ -138,7 +106,7 @@ logging-format-style=new # Logging modules to check that the string format arguments are in logging # function parameter format. -logging-modules=logging +logging-modules=logging, [VARIABLES] @@ -153,11 +121,11 @@ allow-global-unused-variables=yes # List of strings which can identify a callback function by name. A callback # name must start or end with one of those strings. callbacks=cb_, - _cb + _cb, # A regular expression matching the name of dummy variables (i.e. expected to # not be used). -dummy-variables-rgx=_+$|(_[a-zA-Z0-9_]*[a-zA-Z0-9]+?$)|dummy|^ignored_|^unused_ +dummy-variables-rgx=_+$|(_[a-zA-Z0-9_]*[a-zA-Z0-9]+?$) # Argument names that match this expression will be ignored. Default to name # with leading underscore. @@ -168,7 +136,10 @@ init-import=no # List of qualified module names which can have objects that can redefine # builtins. 
-redefining-builtins-modules=six.moves,past.builtins,future.builtins,builtins,io +redefining-builtins-modules=six.moves, + past.builtins, + future.builtins, + builtins,io [FORMAT] @@ -187,18 +158,11 @@ indent-after-paren=4 indent-string=' ' # Maximum number of characters on a single line. -max-line-length=160 +max-line-length=88 # Maximum number of lines in a module. max-module-lines=1000 -# List of optional constructs for which whitespace checking is disabled. `dict- -# separator` is used to allow tabulation in dicts, etc.: {1 : 1,\n222: 2}. -# `trailing-comma` allows a space between comma and closing bracket: (a, ). -# `empty-line` allows space-only lines. -no-space-check=trailing-comma, - dict-separator - # Allow the body of a class to be on the same line as the declaration if body # contains single statement. single-line-class-stmt=no @@ -213,15 +177,14 @@ single-line-if-stmt=no # Naming style matching correct argument names. argument-naming-style=snake_case -# Regular expression matching correct argument names. Overrides argument- -# naming-style. -#argument-rgx= +# Regular expression matching correct argument names. Overrides argument-naming-style. +# Same as `argument-naming-style=snake_case` but allow for two letter args names +# argument-rgx=([^\W\dA-Z][^\WA-Z]{1,}|_[^\WA-Z]*|__[^\WA-Z\d_][^\WA-Z]+__)$ # Naming style matching correct attribute names. attr-naming-style=snake_case -# Regular expression matching correct attribute names. Overrides attr-naming- -# style. +# Regular expression matching correct attribute names. Overrides attr-naming-style. #attr-rgx= # Bad variable names which should always be refused, separated by a comma. @@ -230,7 +193,7 @@ bad-names=foo, baz, toto, tutu, - tata + tata, # Naming style matching correct class attribute names. 
class-attribute-naming-style=any @@ -249,9 +212,14 @@ function-naming-style=snake_case good-names=i, j, k, + v, + x, + e, ex, - Run, - _ + db, + ar, + _, + fn, # Include a hint for the correct naming format with invalid-name. include-naming-hint=no @@ -263,7 +231,8 @@ inlinevar-naming-style=any method-naming-style=snake_case # Naming style matching correct module names. -module-naming-style=snake_case +# usually snake_case +module-naming-style=any # List of decorators that produce properties, such as abc.abstractproperty. Add # to this list to register other decorators that produce valid properties. @@ -273,6 +242,9 @@ property-classes=abc.abstractproperty # Naming style matching correct variable names. variable-naming-style=snake_case +# Same as `variable-naming-style=snake_case` but allow for two letter vars +# variable-rgx=([^\W\dA-Z][^\WA-Z]{1,}|_[^\WA-Z]*|__[^\WA-Z\d_][^\WA-Z]+__)$ + [STRING] @@ -321,6 +293,8 @@ known-third-party=enchant # Couples of modules and preferred modules, separated by a comma. preferred-modules= +ignored-modules=smartredis.smartredisPy + [CLASSES] @@ -328,7 +302,7 @@ preferred-modules= defining-attr-methods=__init__, __new__, setUp, - __post_init__ + __post_init__, # List of member names, which should be excluded from the protected access # warning. @@ -336,10 +310,34 @@ exclude-protected=_asdict, _fields, _replace, _source, - _make + _make, # List of valid names for the first argument in a class method. valid-classmethod-first-arg=cls # List of valid names for the first argument in a metaclass class method. 
-valid-metaclass-classmethod-first-arg=cls +valid-metaclass-classmethod-first-arg=mcs + + +[DESIGN] + +# Maximum number of arguments for function / method +max-args=9 + +# Maximum number of locals for function / method body +max-locals=19 + +# Maximum number of return / yield for function / method body +max-returns=11 + +# Maximum number of branch for function / method body +max-branches=20 + +# Maximum number of statements in function / method body +max-statements=50 + +# Maximum number of statements in a try-block +max-try-statements=7 + +# Maximum level of inheritance, bumping up to account for db mixins +max-parents=25 diff --git a/Makefile b/Makefile index b3b01dd02..62dd4b370 100644 --- a/Makefile +++ b/Makefile @@ -271,7 +271,7 @@ check-sort-imports: # help: check-lint - run static analysis checks .PHONY: check-lint check-lint: - @pylint --rcfile=.pylintrc ./src/python/module/smartredis ./tests/python + @pylint --rcfile=.pylintrc ./src/python/module/smartredis # help: diff --git a/doc/changelog.rst b/doc/changelog.rst index 7323235b0..e76efb7b4 100644 --- a/doc/changelog.rst +++ b/doc/changelog.rst @@ -8,14 +8,20 @@ To be released at some future point in time Description +- Update supported languages documentation +- Address code linting issues - Removed obsolete files - Improved clustered redis initialization Detailed Notes +- Update language support matrix in documentation to reflect updates from the last release (PR376_) +- Update pylint configuration and mitigate issues, run in CI pipeline (PR371_) - Deleted obsolete build and testing files that are no longer needed with the new build and test system (PR366_) - Reuse existing redis connection when mapping the Redis cluster (PR364_) +.. _PR376: https://github.com/CrayLabs/SmartRedis/pull/376 +.. _PR371: https://github.com/CrayLabs/SmartRedis/pull/371 .. _PR366: https://github.com/CrayLabs/SmartRedis/pull/366 .. 
_PR364: https://github.com/CrayLabs/SmartRedis/pull/364 diff --git a/doc/overview.rst b/doc/overview.rst index cd38b0038..a17cbdd31 100644 --- a/doc/overview.rst +++ b/doc/overview.rst @@ -48,10 +48,10 @@ below summarizes the language standards for each client. * - Language - Version/Standard * - Python - - 3.7, 3.8, 3.9 + - 3.7, 3.8, 3.9, 3.10 * - C++ - C++17 * - C - C99 * - Fortran - - Fortran 2003 + - Fortran 2018 (GNU/Intel), 2003 (PGI/Nvidia) diff --git a/pyproject.toml b/pyproject.toml index 65af379b1..0d0dbf027 100644 --- a/pyproject.toml +++ b/pyproject.toml @@ -6,6 +6,7 @@ requires = ["setuptools>=42", build-backend = "setuptools.build_meta" [tool.black] +line-length = 88 target-version = ['py37', 'py38', 'py39', 'py310'] exclude = ''' ( diff --git a/requirements-dev.txt b/requirements-dev.txt index f70e505a7..85379568d 100644 --- a/requirements-dev.txt +++ b/requirements-dev.txt @@ -4,7 +4,7 @@ pytest>=6.0.0 pytest-cov==2.10.1 black==23.3.0 isort==5.6.4 -pylint==2.6.0 +pylint>=2.10.0 breathe==4.26.0 torch>=1.7.1 sphinx-fortran==1.1.1 diff --git a/setup.cfg b/setup.cfg index 92aa2fc5c..2f4cf2dff 100644 --- a/setup.cfg +++ b/setup.cfg @@ -43,7 +43,7 @@ dev = pytest-cov==2.10.1 black==20.8b1 isort==5.6.4 - pylint==2.6.0 + pylint>=2.10.0 torch>=1.7.1 mypy>=1.4.0 diff --git a/src/python/module/smartredis/__init__.py b/src/python/module/smartredis/__init__.py index 595273629..611f2c91b 100644 --- a/src/python/module/smartredis/__init__.py +++ b/src/python/module/smartredis/__init__.py @@ -44,7 +44,7 @@ from .configoptions import ConfigOptions from .dataset import Dataset from .dataset_utils import DatasetConverter -from .logger import log_data, log_warning, log_error -from .srobject import SRObject from .logcontext import LogContext -from .smartredisPy import LLQuiet, LLInfo, LLDebug, LLDeveloper +from .logger import log_data, log_error, log_warning +from .smartredisPy import LLDebug, LLDeveloper, LLInfo, LLQuiet +from .srobject import SRObject diff --git 
a/src/python/module/smartredis/client.py b/src/python/module/smartredis/client.py index c41c5eaf7..b2223d06a 100644 --- a/src/python/module/smartredis/client.py +++ b/src/python/module/smartredis/client.py @@ -24,20 +24,20 @@ # OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE # OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. +# pylint: disable=too-many-lines,too-many-public-methods import inspect import os import os.path as osp - import typing as t + import numpy as np from .dataset import Dataset -from .srobject import SRObject +from .error import RedisConnectionError from .smartredisPy import PyClient -from .util import Dtypes, init_default, exception_handler, typecheck - -from .error import * from .smartredisPy import RedisReplyError as PybindRedisReplyError +from .srobject import SRObject +from .util import Dtypes, exception_handler, init_default, typecheck class Client(SRObject): @@ -1733,8 +1733,8 @@ def __check_backend(backend: str) -> str: backend = backend.upper() if backend in ["TF", "TFLITE", "TORCH", "ONNX"]: return backend - else: - raise TypeError(f"Backend type {backend} unsupported") + + raise TypeError(f"Backend type {backend} unsupported") @staticmethod def __check_file(file: str) -> str: diff --git a/src/python/module/smartredis/configoptions.py b/src/python/module/smartredis/configoptions.py index 5b98483bb..b04551725 100644 --- a/src/python/module/smartredis/configoptions.py +++ b/src/python/module/smartredis/configoptions.py @@ -24,12 +24,13 @@ # OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE # OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
+import typing as t + +from .error import RedisRuntimeError from .smartredisPy import PyConfigOptions from .util import exception_handler, typecheck -from .error import RedisRuntimeError -import typing as t -_notfactory = ( +_NOT_FACTORY = ( "Method called on a ConfigOptions object not created from a factory method" ) @@ -65,7 +66,8 @@ def set_configoptions(self, configoptions: PyConfigOptions) -> None: typecheck(configoptions, "configoptions", PyConfigOptions) self._config_opts = configoptions - def _is_factory_object(self) -> bool: + @property + def is_factory_object(self) -> bool: """Check whether this object was created via a factory method""" return self._is_created_via_factory @@ -88,7 +90,7 @@ def create_from_environment(cls, db_prefix: str) -> "ConfigOptions": typecheck(db_prefix, "db_prefix", str) factory_object = PyConfigOptions.create_from_environment(db_prefix) result = cls.from_pybind(factory_object) - result._is_created_via_factory = True + result._is_created_via_factory = True # pylint: disable=protected-access return result @exception_handler @@ -105,7 +107,7 @@ def get_integer_option(self, option_name: str) -> int: """ typecheck(option_name, "option_name", str) if not self._is_created_via_factory: - raise RedisRuntimeError(_notfactory) + raise RedisRuntimeError(_NOT_FACTORY) return self._config_opts.get_integer_option(option_name) @exception_handler @@ -122,7 +124,7 @@ def get_string_option(self, option_name: str) -> str: """ typecheck(option_name, "option_name", str) if not self._is_created_via_factory: - raise RedisRuntimeError(_notfactory) + raise RedisRuntimeError(_NOT_FACTORY) return self._config_opts.get_string_option(option_name) @exception_handler @@ -137,7 +139,7 @@ def is_configured(self, option_name: str) -> bool: """ typecheck(option_name, "option_name", str) if not self._is_created_via_factory: - raise RedisRuntimeError(_notfactory) + raise RedisRuntimeError(_NOT_FACTORY) return self._config_opts.is_configured(option_name) 
@exception_handler @@ -158,7 +160,7 @@ def override_integer_option(self, option_name: str, value: int) -> None: typecheck(option_name, "option_name", str) typecheck(value, "value", int) if not self._is_created_via_factory: - raise RedisRuntimeError(_notfactory) + raise RedisRuntimeError(_NOT_FACTORY) self._config_opts.override_integer_option(option_name, value) @exception_handler @@ -179,5 +181,5 @@ def override_string_option(self, option_name: str, value: str) -> None: typecheck(option_name, "option_name", str) typecheck(value, "value", str) if not self._is_created_via_factory: - raise RedisRuntimeError(_notfactory) + raise RedisRuntimeError(_NOT_FACTORY) self._config_opts.override_string_option(option_name, value) diff --git a/src/python/module/smartredis/dataset.py b/src/python/module/smartredis/dataset.py index 18edae1c6..60c6f39be 100644 --- a/src/python/module/smartredis/dataset.py +++ b/src/python/module/smartredis/dataset.py @@ -24,17 +24,14 @@ # OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE # OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
-from numbers import Number - import typing as t + import numpy as np from .smartredisPy import PyDataset from .srobject import SRObject from .util import Dtypes, exception_handler, typecheck -from .error import * - class Dataset(SRObject): def __init__(self, name: str) -> None: diff --git a/src/python/module/smartredis/dataset_utils.py b/src/python/module/smartredis/dataset_utils.py index cad0697a9..6adc09e35 100644 --- a/src/python/module/smartredis/dataset_utils.py +++ b/src/python/module/smartredis/dataset_utils.py @@ -26,13 +26,12 @@ import functools import typing as t +from itertools import permutations from typing import TYPE_CHECKING from .dataset import Dataset +from .error import RedisRuntimeError from .util import typecheck -from itertools import permutations -from .error import * - if TYPE_CHECKING: # pragma: no cover # Import optional deps for intellisense @@ -40,11 +39,12 @@ # Type hint magic bits from typing_extensions import ParamSpec + _PR = ParamSpec("_PR") _RT = t.TypeVar("_RT") else: # Leave optional deps as nullish - xr = None + xr = None # pylint: disable=invalid-name # ----helper decorators ----- @@ -52,9 +52,9 @@ def _requires_xarray(fn: "t.Callable[_PR, _RT]") -> "t.Callable[_PR, _RT]": @functools.wraps(fn) def _import_xarray(*args: "_PR.args", **kwargs: "_PR.kwargs") -> "_RT": - global xr + global xr # pylint: disable=global-statement,invalid-name try: - import xarray as xr + import xarray as xr # pylint: disable=import-outside-toplevel except ImportError as e: raise RedisRuntimeError( "Optional package xarray must be installed; " @@ -143,72 +143,68 @@ def add_metadata_for_xarray( dataset.add_meta_string(f"_xarray_{name}_{sarg}", "null") @staticmethod - @_requires_xarray - def transform_to_xarray(dataset: Dataset) -> t.Dict: - """Transform a SmartRedis Dataset, with the appropriate metadata, - to an Xarray Dataarray - - :param dataset: a Dataset instance - :type dataset: Dataset - - :return: a dictionary of keys as the data field name and 
the - value as the built Xarray DataArray constructed using - fieldnames and appropriately formatted metadata. - rtype: dict - """ - typecheck(dataset, "dataset", Dataset) - + def _find_coord_vars(source_ds: Dataset) -> t.Dict[str, t.Dict]: + variable_names = source_ds.get_meta_strings("_xarray_data_names") coord_dict = {} coord_final = {} - variable_names = dataset.get_meta_strings("_xarray_data_names") # Check for data names that are equal to coordinate names. If any matches # are found, then those data variables are treated as coordinates variables for tensor_name, tensor_dname in list(permutations(variable_names, 2)): - for coordname in get_data(dataset, tensor_name, "coord"): + for coordname in get_data(source_ds, tensor_name, "coord"): if tensor_dname == coordname: # Remove coordinate data names from data names if tensor_dname in variable_names: variable_names.remove(tensor_dname) # Get coordinate dimensions in the appropriate format for Xarray coord_dims = [] - for coord_dim_field_name in get_data(dataset, tensor_dname, "dim"): + for coord_dim_field_name in get_data( + source_ds, tensor_dname, "dim" + ): coord_dims.append( - dataset.get_meta_strings(coord_dim_field_name)[0] + source_ds.get_meta_strings(coord_dim_field_name)[0] ) # Get coordinate attributes in the appropriate format for Xarray coord_attrs = {} for coord_attr_field_name in get_data( - dataset, tensor_dname, "attr" + source_ds, tensor_dname, "attr" ): - fieldname = dataset.get_meta_strings(coord_attr_field_name)[0] + fieldname = source_ds.get_meta_strings(coord_attr_field_name)[0] coord_attrs[coord_attr_field_name] = fieldname # Add dimensions, data, and attributes to the coordinate variable coord_dict[tensor_dname] = ( coord_dims, - dataset.get_tensor(tensor_dname), + source_ds.get_tensor(tensor_dname), coord_attrs, ) # Add coordinate names and relative values in the appropriate # form to add to Xarray coords variable coord_final[tensor_name] = coord_dict + return coord_final + + @staticmethod 
+ def _construct_xarray( + source_ds: Dataset, coord_vars: t.Dict + ) -> t.Dict[str, "xr.DataArray"]: ret_xarray = {} + variable_names = source_ds.get_meta_strings("_xarray_data_names") + for variable_name in variable_names: - data_final = dataset.get_tensor(variable_name) + data_final = source_ds.get_tensor(variable_name) dims_final = [] # Extract dimensions in correct form - for dim_field_name in get_data(dataset, variable_name, "dim"): - dims_final.append(dataset.get_meta_strings(dim_field_name)[0]) + for dim_field_name in get_data(source_ds, variable_name, "dim"): + dims_final.append(source_ds.get_meta_strings(dim_field_name)[0]) attrs_final = {} # Extract attributes in correct form - for attr_field_name in get_data(dataset, variable_name, "attr"): - fieldname = dataset.get_meta_strings(attr_field_name)[0] + for attr_field_name in get_data(source_ds, variable_name, "attr"): + fieldname = source_ds.get_meta_strings(attr_field_name)[0] attrs_final[attr_field_name] = fieldname # Add coordinates to the correct data name - for name in coord_final.keys(): + for name, value in coord_vars.items(): if name == variable_name: - coords_final = coord_final.get(name) + coords_final = value # Construct a xr.DataArray using extracted dataset data, # append the dataarray to corresponding variable names @@ -221,3 +217,23 @@ def transform_to_xarray(dataset: Dataset) -> t.Dict: ) return ret_xarray + + @staticmethod + @_requires_xarray + def transform_to_xarray(dataset: Dataset) -> t.Dict[str, "xr.DataArray"]: + """Transform a SmartRedis Dataset, with the appropriate metadata, + to an Xarray Dataarray + + :param dataset: a Dataset instance + :type dataset: Dataset + + :return: a dictionary of keys as the data field name and the + value as the built Xarray DataArray constructed using + fieldnames and appropriately formatted metadata. 
+ :rtype: dict + """ + typecheck(dataset, "dataset", Dataset) + + coord_final = DatasetConverter._find_coord_vars(dataset) + ret_xarray = DatasetConverter._construct_xarray(dataset, coord_final) + return ret_xarray diff --git a/src/python/module/smartredis/error.py b/src/python/module/smartredis/error.py index b10373371..38abd1f79 100644 --- a/src/python/module/smartredis/error.py +++ b/src/python/module/smartredis/error.py @@ -56,6 +56,7 @@ class RedisReplyError(RuntimeError): def __init__(self, cpp_error: str, method: str = "", key: str = "") -> None: super().__init__(self._check_error(cpp_error, method, key)) + # pylint: disable=unused-argument @staticmethod def _check_error(cpp_error: str, method: str = "", key: str = "") -> str: msg = "" diff --git a/src/python/module/smartredis/logcontext.py b/src/python/module/smartredis/logcontext.py index 8ed84e179..645d3eeb8 100644 --- a/src/python/module/smartredis/logcontext.py +++ b/src/python/module/smartredis/logcontext.py @@ -27,8 +27,6 @@ from .smartredisPy import PyLogContext from .srobject import SRObject from .util import exception_handler, typecheck -from .error import * -import typing as t class LogContext(SRObject): @@ -60,7 +58,9 @@ def from_pybind(logcontext: PyLogContext) -> "LogContext": :rtype: LogContext """ typecheck(logcontext, "logcontext", PyLogContext) - new_logcontext = LogContext(logcontext._name) + new_logcontext = LogContext( + logcontext._name # pylint: disable=protected-access + ) new_logcontext.set_context(logcontext) return new_logcontext diff --git a/src/python/module/smartredis/logger.py b/src/python/module/smartredis/logger.py index e9b930d7c..7445ca883 100644 --- a/src/python/module/smartredis/logger.py +++ b/src/python/module/smartredis/logger.py @@ -24,7 +24,7 @@ # OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE # OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
-from .smartredisPy import cpp_log_data, cpp_log_warning, cpp_log_error, SRLoggingLevel +from .smartredisPy import SRLoggingLevel, cpp_log_data, cpp_log_error, cpp_log_warning from .util import exception_handler, typecheck # Logging levels diff --git a/src/python/module/smartredis/srobject.py b/src/python/module/smartredis/srobject.py index 7b0bfa6ce..4ca4fa052 100644 --- a/src/python/module/smartredis/srobject.py +++ b/src/python/module/smartredis/srobject.py @@ -27,8 +27,6 @@ from .smartredisPy import PySRObject, SRLoggingLevel from .util import exception_handler, typecheck -from .error import * - class SRObject: def __init__(self, context: str) -> None: diff --git a/src/python/module/smartredis/util.py b/src/python/module/smartredis/util.py index e973fb789..c711bb150 100644 --- a/src/python/module/smartredis/util.py +++ b/src/python/module/smartredis/util.py @@ -24,16 +24,18 @@ # OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE # OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
-from .error import * +import typing as t from functools import wraps + +import numpy as np + from .smartredisPy import RedisReplyError as PybindRedisReplyError from .smartredisPy import c_get_last_error_location -import numpy as np -import typing as t if t.TYPE_CHECKING: # Type hint magic bits from typing_extensions import ParamSpec + _PR = ParamSpec("_PR") _RT = t.TypeVar("_RT") @@ -102,7 +104,8 @@ def init_default( if init_value is None: return default if expected_type is not None and not isinstance(init_value, expected_type): - raise TypeError(f"Argument was of type {type(init_value)}, not {expected_type}") + msg = f"Argument was of type {type(init_value)}, not {expected_type}" + raise TypeError(msg) return init_value diff --git a/tests/python/test_address.py b/tests/python/test_address.py index e5bed1482..f684c3840 100644 --- a/tests/python/test_address.py +++ b/tests/python/test_address.py @@ -28,6 +28,7 @@ from smartredis import Client + def test_serialization(use_cluster, context): # get env var to set through client init ssdb = os.environ["SSDB"] diff --git a/tests/python/test_configoptions.py b/tests/python/test_configoptions.py index a042867c8..4a164331f 100644 --- a/tests/python/test_configoptions.py +++ b/tests/python/test_configoptions.py @@ -33,6 +33,7 @@ ##### # Test attempts to use API functions from non-factory object + def test_non_factory_configobject(): co = ConfigOptions() with pytest.raises(RedisRuntimeError): @@ -46,6 +47,7 @@ def test_non_factory_configobject(): with pytest.raises(RedisRuntimeError): _ = co.override_string_option("key", "value") + def test_options(monkeypatch): monkeypatch.setenv("test_integer_key", "42") monkeypatch.setenv("test_string_key", "charizard") @@ -58,8 +60,7 @@ def test_options(monkeypatch): _ = co.get_integer_option("test_integer_key_that_is_not_really_present") co.override_integer_option("test_integer_key_that_is_not_really_present", 42) assert co.is_configured("test_integer_key_that_is_not_really_present") - 
assert co.get_integer_option( - "test_integer_key_that_is_not_really_present") == 42 + assert co.get_integer_option("test_integer_key_that_is_not_really_present") == 42 # string option tests assert co.get_string_option("test_string_key") == "charizard" @@ -68,8 +69,10 @@ def test_options(monkeypatch): _ = co.get_string_option("test_string_key_that_is_not_really_present") co.override_string_option("test_string_key_that_is_not_really_present", "meowth") assert co.is_configured("test_string_key_that_is_not_really_present") - assert co.get_string_option( - "test_string_key_that_is_not_really_present") == "meowth" + assert ( + co.get_string_option("test_string_key_that_is_not_really_present") == "meowth" + ) + def test_options_with_prefix(monkeypatch): monkeypatch.setenv("prefixtest_integer_key", "42") @@ -92,5 +95,4 @@ def test_options_with_prefix(monkeypatch): _ = co.get_string_option("string_key_that_is_not_really_present") co.override_string_option("string_key_that_is_not_really_present", "meowth") assert co.is_configured("string_key_that_is_not_really_present") - assert co.get_string_option( - "string_key_that_is_not_really_present") == "meowth" + assert co.get_string_option("string_key_that_is_not_really_present") == "meowth" diff --git a/tests/python/test_dataset_aggregation.py b/tests/python/test_dataset_aggregation.py index 1c826dade..cc72657e3 100644 --- a/tests/python/test_dataset_aggregation.py +++ b/tests/python/test_dataset_aggregation.py @@ -25,11 +25,12 @@ # OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
import os + import numpy as np import pytest +from smartredis import * from smartredis import Client, Dataset from smartredis.error import * -from smartredis import * def test_aggregation(use_cluster, context): @@ -55,69 +56,78 @@ def test_aggregation(use_cluster, context): # Confirm that poll for list length works correctly actual_length = num_datasets poll_result = client.poll_list_length(list_name, actual_length, 100, 5) - if (poll_result == False): + if poll_result == False: raise RuntimeError( f"Polling for list length of {actual_length} returned " - f"False for known length of {actual_length}.") + f"False for known length of {actual_length}." + ) log_data(context, LLDebug, "Polling 1") poll_result = client.poll_list_length(list_name, actual_length + 1, 100, 5) - if (poll_result == True): + if poll_result == True: raise RuntimeError( f"Polling for list length of {actual_length + 1} returned " - f"True for known length of {actual_length}.") + f"True for known length of {actual_length}." + ) log_data(context, LLDebug, "Polling 2") # Confirm that poll for greater than or equal list length works correctly poll_result = client.poll_list_length_gte(list_name, actual_length - 1, 100, 5) - if (poll_result == False): + if poll_result == False: raise RuntimeError( f"Polling for list length greater than or equal to {actual_length - 1} " - f"returned False for known length of {actual_length}.") + f"returned False for known length of {actual_length}." + ) log_data(context, LLDebug, "Polling 3") poll_result = client.poll_list_length_gte(list_name, actual_length, 100, 5) - if (poll_result == False): + if poll_result == False: raise RuntimeError( f"Polling for list length greater than or equal to {actual_length} " - f"returned False for known length of {actual_length}.") + f"returned False for known length of {actual_length}." 
+ ) log_data(context, LLDebug, "Polling 4") poll_result = client.poll_list_length_gte(list_name, actual_length + 1, 100, 5) - if (poll_result == True): + if poll_result == True: raise RuntimeError( f"Polling for list length greater than or equal to {actual_length + 1} " - f"returned True for known length of {actual_length}.") + f"returned True for known length of {actual_length}." + ) log_data(context, LLDebug, "Polling 5") # Confirm that poll for less than or equal list length works correctly poll_result = client.poll_list_length_lte(list_name, actual_length - 1, 100, 5) - if (poll_result == True): + if poll_result == True: raise RuntimeError( f"Polling for list length less than or equal to {actual_length - 1} " - f"returned True for known length of {actual_length}.") + f"returned True for known length of {actual_length}." + ) log_data(context, LLDebug, "Polling 6") poll_result = client.poll_list_length_lte(list_name, actual_length, 100, 5) - if (poll_result == False): + if poll_result == False: raise RuntimeError( f"Polling for list length less than or equal to {actual_length} " - f"returned False for known length of {actual_length}.") + f"returned False for known length of {actual_length}." + ) log_data(context, LLDebug, "Polling 7") poll_result = client.poll_list_length_lte(list_name, actual_length + 1, 100, 5) - if (poll_result == False): + if poll_result == False: raise RuntimeError( f"Polling for list length less than or equal to {actual_length + 1} " - f"returned False for known length of {actual_length}.") + f"returned False for known length of {actual_length}." + ) log_data(context, LLDebug, "Polling 8") # Check the list length list_length = client.get_list_length(list_name) - if (list_length != actual_length): + if list_length != actual_length: raise RuntimeError( f"The list length of {list_length} does not match expected " - f"value of {actual_length}.") + f"value of {actual_length}." 
+ ) log_data(context, LLDebug, "List length check") # Retrieve datasets via the aggregation list @@ -125,11 +135,13 @@ def test_aggregation(use_cluster, context): if len(datasets) != list_length: raise RuntimeError( f"The number of datasets received {len(datasets)} " - f"does not match expected value of {list_length}.") + f"does not match expected value of {list_length}." + ) for ds in datasets: check_dataset(ds) log_data(context, LLDebug, "DataSet retrieval") + # ------------ helper functions --------------------------------- @@ -144,6 +156,7 @@ def create_dataset(name): dataset.add_meta_scalar("test_scalar", scalar) return dataset + def check_dataset(ds): comp_array = np.array([1, 2, 3, 4]) tensor_name = "test_array" diff --git a/tests/python/test_dataset_methods.py b/tests/python/test_dataset_methods.py index 457dcb0c9..d60e19134 100644 --- a/tests/python/test_dataset_methods.py +++ b/tests/python/test_dataset_methods.py @@ -27,13 +27,13 @@ import numpy as np from smartredis import Dataset + def test_serialize_dataset(): - """Test serializing a dataset - """ + """Test serializing a dataset""" dataset = Dataset("test-dataset") - data = np.uint8([2,4,8]) + data = np.uint8([2, 4, 8]) dataset.add_tensor("u8_tensor", data) - data = np.double([2.0,4.1,8.3, 5.6]) + data = np.double([2.0, 4.1, 8.3, 5.6]) dataset.add_tensor("double_tensor", data) dataset.add_meta_scalar("float2_scalar", float(3.1415926535)) dataset.add_meta_scalar("float_scalar", np.double(3.1415926535)) @@ -95,6 +95,7 @@ def test_add_get_strings(mock_data): data = mock_data.create_metadata_strings(10) add_get_strings(dataset, data) + def test_dataset_inspection(context): d = Dataset(context) data = np.uint8([[2, 4, 6, 8], [1, 3, 5, 7]]) @@ -127,6 +128,7 @@ def test_dataset_inspection(context): assert str == d.get_metadata_field_type("metastring") assert np.uint32 == d.get_metadata_field_type("u32_scalar") + # ------- Helper Functions ----------------------------------------------- diff --git 
a/tests/python/test_dataset_ops.py b/tests/python/test_dataset_ops.py index a516465b5..2ab7d684f 100644 --- a/tests/python/test_dataset_ops.py +++ b/tests/python/test_dataset_ops.py @@ -25,6 +25,7 @@ # OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. import os + import numpy as np import pytest from smartredis import Client, Dataset @@ -124,14 +125,12 @@ def test_delete_dataset(use_cluster, context): def test_rename_nonexisting_dataset(use_cluster, context): - client = Client(None, use_cluster, logger_name=context) with pytest.raises(RedisReplyError): client.rename_dataset("not-a-tensor", "still-not-a-tensor") def test_copy_nonexistant_dataset(use_cluster, context): - client = Client(None, use_cluster, logger_name=context) with pytest.raises(RedisReplyError): client.copy_dataset("not-a-tensor", "still-not-a-tensor") diff --git a/tests/python/test_errors.py b/tests/python/test_errors.py index 5bb3b3f49..49ef686c3 100644 --- a/tests/python/test_errors.py +++ b/tests/python/test_errors.py @@ -129,9 +129,11 @@ def test_bad_device(use_cluster, context): with pytest.raises(TypeError): c.set_script("key", "some_script", device="not-a-gpu") + ##### # Test type errors from bad parameter types to Client API calls + def test_bad_type_put_tensor(use_cluster, context): c = Client(None, use_cluster, logger_name=context) array = np.array([1, 2, 3, 4]) @@ -221,6 +223,7 @@ def test_bad_type_set_function(use_cluster, context): with pytest.raises(TypeError): c.set_function("key", bad_function, 42) + def test_bad_type_set_function_multigpu(use_cluster, context): c = Client(None, use_cluster, logger_name=context) with pytest.raises(TypeError): @@ -232,9 +235,10 @@ def test_bad_type_set_function_multigpu(use_cluster, context): with pytest.raises(TypeError): c.set_function_multigpu("key", bad_function, 0, "not an integer") with pytest.raises(ValueError): - c.set_function_multigpu("key", bad_function, -1, 1) # invalid first GPU + c.set_function_multigpu("key", 
bad_function, -1, 1) # invalid first GPU with pytest.raises(ValueError): - c.set_function_multigpu("key", bad_function, 0, 0) # invalid num GPUs + c.set_function_multigpu("key", bad_function, 0, 0) # invalid num GPUs + def test_bad_type_set_script(use_cluster, context): c = Client(None, use_cluster, logger_name=context) @@ -248,6 +252,7 @@ def test_bad_type_set_script(use_cluster, context): with pytest.raises(TypeError): c.set_script(key, script, 42) + def test_bad_type_set_script_multigpu(use_cluster, context): c = Client(None, use_cluster, logger_name=context) key = "key_for_script" @@ -267,6 +272,7 @@ def test_bad_type_set_script_multigpu(use_cluster, context): with pytest.raises(ValueError): c.set_script_multigpu(key, script, first_gpu, 0) + def test_bad_type_set_script_from_file(use_cluster, context): c = Client(None, use_cluster, logger_name=context) key = "key_for_script" @@ -279,6 +285,7 @@ def test_bad_type_set_script_from_file(use_cluster, context): with pytest.raises(TypeError): c.set_script_from_file(key, scriptfile, 42) + def test_bad_type_set_script_from_file_multigpu(use_cluster, context): c = Client(None, use_cluster, logger_name=context) key = "key_for_script" @@ -294,6 +301,7 @@ def test_bad_type_set_script_from_file_multigpu(use_cluster, context): with pytest.raises(TypeError): c.set_script_from_file_multigpu(key, scriptfile, first_gpu, "not an integer") + def test_bad_type_get_script(use_cluster, context): c = Client(None, use_cluster, logger_name=context) with pytest.raises(TypeError): @@ -334,11 +342,17 @@ def test_bad_type_run_script_multigpu(use_cluster, context): with pytest.raises(TypeError): c.run_script_multigpu(key, fn_name, inputs, 42, offset, first_gpu, num_gpus) with pytest.raises(TypeError): - c.run_script_multigpu(key, fn_name, inputs, outputs, "not an integer", first_gpu, num_gpus) + c.run_script_multigpu( + key, fn_name, inputs, outputs, "not an integer", first_gpu, num_gpus + ) with pytest.raises(TypeError): - 
c.run_script_multigpu(key, fn_name, inputs, outputs, offset, "not an integer", num_gpus) + c.run_script_multigpu( + key, fn_name, inputs, outputs, offset, "not an integer", num_gpus + ) with pytest.raises(TypeError): - c.run_script_multigpu(key, fn_name, inputs, outputs, offset, first_gpu, "not an integer") + c.run_script_multigpu( + key, fn_name, inputs, outputs, offset, first_gpu, "not an integer" + ) with pytest.raises(ValueError): c.run_script_multigpu(key, fn_name, inputs, outputs, offset, -1, num_gpus) with pytest.raises(ValueError): @@ -367,10 +381,13 @@ def test_bad_type_set_model(mock_model, use_cluster, context): with pytest.raises(TypeError): c.set_model("simple_cnn", model, "TORCH", "CPU", batch_size="not_an_integer") with pytest.raises(TypeError): - c.set_model("simple_cnn", model, "TORCH", "CPU", min_batch_size="not_an_integer") + c.set_model( + "simple_cnn", model, "TORCH", "CPU", min_batch_size="not_an_integer" + ) with pytest.raises(TypeError): c.set_model("simple_cnn", model, "TORCH", "CPU", tag=42) + def test_bad_type_set_model_multigpu(mock_model, use_cluster, context): c = Client(None, use_cluster, logger_name=context) model = mock_model.create_torch_cnn() @@ -390,9 +407,13 @@ def test_bad_type_set_model_multigpu(mock_model, use_cluster, context): with pytest.raises(ValueError): c.set_model_multigpu("simple_cnn", model, "TORCH", 0, 0) with pytest.raises(TypeError): - c.set_model_multigpu("simple_cnn", model, "TORCH", 0, 1, batch_size="not_an_integer") + c.set_model_multigpu( + "simple_cnn", model, "TORCH", 0, 1, batch_size="not_an_integer" + ) with pytest.raises(TypeError): - c.set_model_multigpu("simple_cnn", model, "TORCH", 0, 1, min_batch_size="not_an_integer") + c.set_model_multigpu( + "simple_cnn", model, "TORCH", 0, 1, min_batch_size="not_an_integer" + ) with pytest.raises(TypeError): c.set_model_multigpu("simple_cnn", model, "TORCH", 0, 1, tag=42) @@ -413,12 +434,17 @@ def test_bad_type_set_model_from_file(use_cluster, context): with 
pytest.raises(TypeError): c.set_model_from_file("simple_cnn", modelfile, "TORCH", "BAD_DEVICE") with pytest.raises(TypeError): - c.set_model_from_file("simple_cnn", modelfile, "TORCH", "CPU", batch_size="not_an_integer") + c.set_model_from_file( + "simple_cnn", modelfile, "TORCH", "CPU", batch_size="not_an_integer" + ) with pytest.raises(TypeError): - c.set_model_from_file("simple_cnn", modelfile, "TORCH", "CPU", min_batch_size="not_an_integer") + c.set_model_from_file( + "simple_cnn", modelfile, "TORCH", "CPU", min_batch_size="not_an_integer" + ) with pytest.raises(TypeError): c.set_model_from_file("simple_cnn", modelfile, "TORCH", "CPU", tag=42) + def test_bad_type_set_model_from_file_multigpu(use_cluster, context): modelfile = "bad filename but right parameter type" c = Client(None, use_cluster, logger_name=context) @@ -429,18 +455,29 @@ def test_bad_type_set_model_from_file_multigpu(use_cluster, context): with pytest.raises(TypeError): c.set_model_from_file_multigpu("simple_cnn", modelfile, 42, 0, 1) with pytest.raises(TypeError): - c.set_model_from_file_multigpu("simple_cnn", modelfile, "UNSUPPORTED_ENGINE", 0, 1) + c.set_model_from_file_multigpu( + "simple_cnn", modelfile, "UNSUPPORTED_ENGINE", 0, 1 + ) with pytest.raises(TypeError): - c.set_model_from_file_multigpu("simple_cnn", modelfile, "TORCH", "not an integer", 1) + c.set_model_from_file_multigpu( + "simple_cnn", modelfile, "TORCH", "not an integer", 1 + ) with pytest.raises(TypeError): - c.set_model_from_file_multigpu("simple_cnn", modelfile, "TORCH", 0, "not an integer") + c.set_model_from_file_multigpu( + "simple_cnn", modelfile, "TORCH", 0, "not an integer" + ) with pytest.raises(TypeError): - c.set_model_from_file_multigpu("simple_cnn", modelfile, "TORCH", 0, 1, batch_size="not_an_integer") + c.set_model_from_file_multigpu( + "simple_cnn", modelfile, "TORCH", 0, 1, batch_size="not_an_integer" + ) with pytest.raises(TypeError): - c.set_model_from_file_multigpu("simple_cnn", modelfile, "TORCH", 0, 1, 
min_batch_size="not_an_integer") + c.set_model_from_file_multigpu( + "simple_cnn", modelfile, "TORCH", 0, 1, min_batch_size="not_an_integer" + ) with pytest.raises(TypeError): c.set_model_from_file_multigpu("simple_cnn", modelfile, "TORCH", 0, 1, tag=42) + def test_bad_type_run_model(use_cluster, context): c = Client(None, use_cluster, logger_name=context) with pytest.raises(TypeError): @@ -462,12 +499,13 @@ def test_bad_type_run_model_multigpu(use_cluster, context): with pytest.raises(ValueError): c.run_model_multigpu("simple_cnn", 0, 0, 0) + def test_bad_type_delete_model_multigpu(use_cluster, context): c = Client(None, use_cluster, logger_name=context) with pytest.raises(TypeError): c.delete_model_multigpu(42, 0, 1) with pytest.raises(TypeError): - c.delete_model_multigpu("simple_cnn", "not an integer", 1) + c.delete_model_multigpu("simple_cnn", "not an integer", 1) with pytest.raises(TypeError): c.delete_model_multigpu("simple_cnn", 0, "not an integer") with pytest.raises(ValueError): @@ -475,13 +513,14 @@ def test_bad_type_delete_model_multigpu(use_cluster, context): with pytest.raises(ValueError): c.delete_model_multigpu("simple_cnn", 0, 0) + def test_bad_type_delete_script_multigpu(use_cluster, context): c = Client(None, use_cluster, logger_name=context) script_name = "my_script" with pytest.raises(TypeError): c.delete_script_multigpu(42, 0, 1) with pytest.raises(TypeError): - c.delete_script_multigpu(script_name, "not an integer", 1) + c.delete_script_multigpu(script_name, "not an integer", 1) with pytest.raises(TypeError): c.delete_script_multigpu(script_name, 0, "not an integer") with pytest.raises(ValueError): @@ -489,6 +528,7 @@ def test_bad_type_delete_script_multigpu(use_cluster, context): with pytest.raises(ValueError): c.delete_script_multigpu(script_name, 0, 0) + def test_bad_type_tensor_exists(use_cluster, context): c = Client(None, use_cluster, logger_name=context) with pytest.raises(TypeError): @@ -655,16 +695,19 @@ def 
test_bad_type_save(use_cluster, context): with pytest.raises(TypeError): c.save("not a list") + def test_bad_type_append_to_list(use_cluster, context): c = Client(None, use_cluster, logger_name=context) with pytest.raises(TypeError): c.append_to_list(42, 42) + def test_bad_type_delete_list(use_cluster, context): c = Client(None, use_cluster, logger_name=context) with pytest.raises(TypeError): c.delete_list(42) + def test_bad_type_copy_list(use_cluster, context): c = Client(None, use_cluster, logger_name=context) with pytest.raises(TypeError): @@ -672,6 +715,7 @@ def test_bad_type_copy_list(use_cluster, context): with pytest.raises(TypeError): c.copy_list("src", 42) + def test_bad_type_rename_list(use_cluster, context): c = Client(None, use_cluster, logger_name=context) with pytest.raises(TypeError): @@ -679,11 +723,13 @@ def test_bad_type_rename_list(use_cluster, context): with pytest.raises(TypeError): c.rename_list("src", 42) + def test_bad_type_get_list_length(use_cluster, context): c = Client(None, use_cluster, logger_name=context) with pytest.raises(TypeError): c.get_list_length(42) + def test_bad_type_poll_list_length(use_cluster, context): c = Client(None, use_cluster, logger_name=context) name = "mylist" @@ -699,6 +745,7 @@ def test_bad_type_poll_list_length(use_cluster, context): with pytest.raises(TypeError): c.poll_list_length(name, len, pollfreq, "not an integer") + def test_bad_type_poll_list_length_gte(use_cluster, context): c = Client(None, use_cluster, logger_name=context) name = "mylist" @@ -714,6 +761,7 @@ def test_bad_type_poll_list_length_gte(use_cluster, context): with pytest.raises(TypeError): c.poll_list_length_gte(name, len, pollfreq, "not an integer") + def test_bad_type_poll_list_length_lte(use_cluster, context): c = Client(None, use_cluster, logger_name=context) name = "mylist" @@ -729,11 +777,13 @@ def test_bad_type_poll_list_length_lte(use_cluster, context): with pytest.raises(TypeError): c.poll_list_length_lte(name, len, pollfreq, "not 
an integer") + def test_bad_type_get_datasets_from_list(use_cluster, context): c = Client(None, use_cluster, logger_name=context) with pytest.raises(TypeError): c.get_datasets_from_list(42) + def test_bad_type_get_dataset_list_range(use_cluster, context): c = Client(None, use_cluster, logger_name=context) listname = "my_list" @@ -746,9 +796,8 @@ def test_bad_type_get_dataset_list_range(use_cluster, context): with pytest.raises(TypeError): c.get_dataset_list_range(listname, start_index, "not an integer") -@pytest.mark.parametrize("log_fn", [ - (log_data,), (log_warning,), (log_error,) -]) + +@pytest.mark.parametrize("log_fn", [(log_data,), (log_warning,), (log_error,)]) def test_bad_type_log_function(use_cluster, context, log_fn): c = Client(None, use_cluster, logger_name=context) with pytest.raises(TypeError): @@ -758,6 +807,7 @@ def test_bad_type_log_function(use_cluster, context, log_fn): with pytest.raises(TypeError): log_fn("test_bad_type_log_function", LLInfo, 42) + def test_bad_type_client_log(use_cluster, context): c = Client(None, use_cluster, logger_name=context) with pytest.raises(TypeError): @@ -773,6 +823,7 @@ def test_bad_type_client_log(use_cluster, context): with pytest.raises(TypeError): c.log_error(LLInfo, 42) + def test_bad_type_dataset_log(context): d = Dataset(context) with pytest.raises(TypeError): @@ -788,6 +839,7 @@ def test_bad_type_dataset_log(context): with pytest.raises(TypeError): d.log_error(LLInfo, 42) + def test_bad_type_logcontext_log(context): lc = LogContext(context) with pytest.raises(TypeError): @@ -803,13 +855,16 @@ def test_bad_type_logcontext_log(context): with pytest.raises(TypeError): lc.log_error(LLInfo, 42) + ##### # Test type errors from bad parameter types to Dataset API calls + def test_bad_type_dataset(): with pytest.raises(TypeError): d = Dataset(42) + def test_bad_type_add_tensor(): d = Dataset("test-dataset") with pytest.raises(TypeError): @@ -836,8 +891,7 @@ def test_set_data_wrong_type(): def 
test_add_tensor_wrong_type(): - """A call to Dataset.add_tensor is made with the wrong type - """ + """A call to Dataset.add_tensor is made with the wrong type""" d = Dataset("test_dataset") data = np.array([1, 2, 3, 4]) with pytest.raises(TypeError): @@ -845,27 +899,26 @@ def test_add_tensor_wrong_type(): with pytest.raises(TypeError): d.add_tensor("tensorname", 42) + def test_get_tensor_wrong_type(): - """A call to Dataset.get_tensor is made with the wrong type - """ + """A call to Dataset.get_tensor is made with the wrong type""" d = Dataset("test_dataset") with pytest.raises(TypeError): d.get_tensor(42) def test_add_meta_scalar_wrong_type(): - """A call to Dataset.add_meta_scalar is made with the wrong type - """ + """A call to Dataset.add_meta_scalar is made with the wrong type""" d = Dataset("test_dataset") data = np.array([1, 2, 3, 4]) with pytest.raises(TypeError): d.add_meta_scalar(42, 42) with pytest.raises(TypeError): - d.add_meta_scalar("scalarname", data) # array, not scalar + d.add_meta_scalar("scalarname", data) # array, not scalar + def test_add_meta_string_wrong_type(): - """A call to Dataset.add_meta_string is made with the wrong type - """ + """A call to Dataset.add_meta_string is made with the wrong type""" d = Dataset("test_dataset") with pytest.raises(TypeError): d.add_meta_string(42, "metastring") @@ -874,65 +927,69 @@ def test_add_meta_string_wrong_type(): def test_get_meta_scalars_wrong_type(): - """A call to Dataset.get_meta_scalars is made with the wrong type - """ + """A call to Dataset.get_meta_scalars is made with the wrong type""" d = Dataset("test_dataset") with pytest.raises(TypeError): d.get_meta_scalars(42) def test_get_meta_strings_wrong_type(): - """A call to Dataset.get_meta_strings is made with the wrong type - """ + """A call to Dataset.get_meta_strings is made with the wrong type""" d = Dataset("test_dataset") with pytest.raises(TypeError): d.get_meta_strings(42) + def test_get_tensor_type_wrong_type(): - """A call to 
Dataset.get_tensor_type is made with the wrong type - """ + """A call to Dataset.get_tensor_type is made with the wrong type""" d = Dataset("test_dataset") with pytest.raises(TypeError): d.get_tensor_type(42) + def test_get_metadata_field_type_wrong_type(): - """A call to Dataset.get_metadata_field_type is made with the wrong type - """ + """A call to Dataset.get_metadata_field_type is made with the wrong type""" d = Dataset("test_dataset") with pytest.raises(TypeError): d.get_metadata_field_type(42) + def test_get_tensor_names_wrong_type(): - """A call to Dataset.get_tensor_names is made with the wrong type - """ + """A call to Dataset.get_tensor_names is made with the wrong type""" d = Dataset("test_dataset") with pytest.raises(TypeError): d.get_tensor_names(42) + ##### # Test type errors from bad parameter types to ConfigOptions API calls + def test_create_from_environment_wrong_type(): with pytest.raises(TypeError): _ = ConfigOptions.create_from_environment(42) + def test_get_integer_option_wrong_type(): co = ConfigOptions() key = "intval" with pytest.raises(TypeError): _ = co.get_integer_option(42) + def test_get_string_option_wrong_type(): co = ConfigOptions() key = "stringval" with pytest.raises(TypeError): _ = co.get_string_option(42) + def test_is_configured_wrong_type(): co = ConfigOptions() with pytest.raises(TypeError): _ = co.is_configured(42) + def test_override_integer_option_wrong_type(): co = ConfigOptions() key = "intval" @@ -942,6 +999,7 @@ def test_override_integer_option_wrong_type(): with pytest.raises(TypeError): _ = co.override_integer_option(key, "not an integer") + def test_override_string_option_wrong_type(): co = ConfigOptions() key = "stringval" @@ -951,9 +1009,11 @@ def test_override_string_option_wrong_type(): with pytest.raises(TypeError): _ = co.override_string_option(key, 42) + #### # Utility functions + def bad_function(data): """Bad function which only raises an exception""" return False diff --git a/tests/python/test_logging.py 
b/tests/python/test_logging.py index 100016cc2..1caddcf1a 100644 --- a/tests/python/test_logging.py +++ b/tests/python/test_logging.py @@ -24,39 +24,36 @@ # OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE # OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. +import pytest from smartredis import * from smartredis.error import * -import pytest -@pytest.mark.parametrize("log_level", [ - LLQuiet, LLInfo, LLDebug, LLDeveloper -]) +# fmt: off + +@pytest.mark.parametrize("log_level", [LLQuiet, LLInfo, LLDebug, LLDeveloper]) def test_logging_string(use_cluster, context, log_level): log_data(context, log_level, f"This is data logged from a string ({log_level.name})") log_warning(context, log_level, f"This is a warning logged from a string ({log_level.name})") log_error(context, log_level, f"This is an error logged from a string ({log_level.name})") -@pytest.mark.parametrize("log_level", [ - LLQuiet, LLInfo, LLDebug, LLDeveloper -]) + +@pytest.mark.parametrize("log_level", [LLQuiet, LLInfo, LLDebug, LLDeveloper]) def test_logging_client(use_cluster, context, log_level): c = Client(None, use_cluster, logger_name=context) c.log_data(log_level, f"This is data logged from a client ({log_level.name})") c.log_warning(log_level, f"This is a warning logged from a client ({log_level.name})") c.log_error(log_level, f"This is an error logged from a client ({log_level.name})") -@pytest.mark.parametrize("log_level", [ - LLQuiet, LLInfo, LLDebug, LLDeveloper -]) + +@pytest.mark.parametrize("log_level", [LLQuiet, LLInfo, LLDebug, LLDeveloper]) def test_logging_dataset(context, log_level): d = Dataset(context) d.log_data(log_level, f"This is data logged from a dataset ({log_level.name})") d.log_warning(log_level, f"This is a warning logged from a dataset ({log_level.name})") d.log_error(log_level, f"This is an error logged from a dataset ({log_level.name})") -@pytest.mark.parametrize("log_level", [ - LLQuiet, LLInfo, LLDebug, LLDeveloper -]) + 
+@pytest.mark.parametrize("log_level", [LLQuiet, LLInfo, LLDebug, LLDeveloper]) def test_logging_logcontext(context, log_level): lc = LogContext(context) lc.log_data(log_level, f"This is data logged from a logcontext ({log_level.name})") diff --git a/tests/python/test_nonkeyed_cmd.py b/tests/python/test_nonkeyed_cmd.py index 79a280df0..fc05d2e19 100644 --- a/tests/python/test_nonkeyed_cmd.py +++ b/tests/python/test_nonkeyed_cmd.py @@ -87,6 +87,7 @@ def test_dbcluster_info_command(mock_model, use_cluster, context): with pytest.raises(RedisRuntimeError): client.get_ai_info(address, "bad_key") + def test_flushdb_command(use_cluster, context): # from within the testing framework, there is no way # of knowing each db node that is being used, so skip @@ -165,8 +166,12 @@ def test_save_command(use_cluster, mock_data, context): # for each address, check that the timestamp of the last SAVE increases after calling Client::save for address in addresses: - save_time_before = client.get_db_node_info([address])[0]["Persistence"]["rdb_last_save_time"] + save_time_before = client.get_db_node_info([address])[0]["Persistence"][ + "rdb_last_save_time" + ] client.save([address]) - save_time_after = client.get_db_node_info([address])[0]["Persistence"]["rdb_last_save_time"] + save_time_after = client.get_db_node_info([address])[0]["Persistence"][ + "rdb_last_save_time" + ] assert save_time_before <= save_time_after diff --git a/tests/python/test_prefixing.py b/tests/python/test_prefixing.py index 68388e912..a8bb1deba 100644 --- a/tests/python/test_prefixing.py +++ b/tests/python/test_prefixing.py @@ -25,10 +25,11 @@ # OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
import os -import numpy as np +import numpy as np from smartredis import Client, Dataset + def test_prefixing(use_cluster, context, monkeypatch): # configure prefix variables monkeypatch.setenv("SSKEYOUT", "prefix_test") diff --git a/tests/python/test_put_get_tensor.py b/tests/python/test_put_get_tensor.py index f067e17f0..9f4c6b781 100644 --- a/tests/python/test_put_get_tensor.py +++ b/tests/python/test_put_get_tensor.py @@ -25,6 +25,7 @@ # OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. import os + import numpy as np from smartredis import Client diff --git a/tests/python/test_tensor_ops.py b/tests/python/test_tensor_ops.py index f25dde36b..62402be41 100644 --- a/tests/python/test_tensor_ops.py +++ b/tests/python/test_tensor_ops.py @@ -25,6 +25,7 @@ # OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. import os + import numpy as np import pytest from smartredis import Client @@ -77,14 +78,12 @@ def test_delete_tensor(use_cluster, context): def test_rename_nonexisting_key(use_cluster, context): - client = Client(None, use_cluster, logger_name=context) with pytest.raises(RedisReplyError): client.rename_tensor("not-a-tensor", "still-not-a-tensor") def test_copy_nonexistant_key(use_cluster, context): - client = Client(None, use_cluster, logger_name=context) with pytest.raises(RedisReplyError): client.copy_tensor("not-a-tensor", "still-not-a-tensor")