Skip to content

Commit

Permalink
chore: cleanup stale files, unused code (msk-mind#385)
Browse files Browse the repository at this point in the history
Co-authored-by: kohlia <[email protected]>
  • Loading branch information
armaank and kohlia authored Dec 1, 2023
1 parent 6e65f5b commit ea79b81
Show file tree
Hide file tree
Showing 9 changed files with 1 addition and 720 deletions.
70 changes: 0 additions & 70 deletions src/luna/.cli_template.py

This file was deleted.

82 changes: 0 additions & 82 deletions src/luna/common/constants.py

This file was deleted.

134 changes: 0 additions & 134 deletions src/luna/common/utils.py
Original file line number Diff line number Diff line change
Expand Up @@ -159,31 +159,6 @@ def wrapper(*args, **kwargs):
return wrapper


def to_sql_field(s):
    """Sanitize a string into a safe SQL identifier.

    Dots and spaces become underscores; every remaining character that is
    not alphanumeric or an underscore is dropped.

    Args:
        s (str): raw column/field name

    Returns:
        str: sanitized identifier
    """
    underscored = s.replace(".", "_").replace(" ", "_")
    return "".join(ch for ch in underscored if ch.isalnum() or ch == "_")


def to_sql_value(s):
    """Render a Python value as a SQL literal.

    Strings are single-quoted (with embedded single quotes doubled per the
    SQL standard); NaN and None render as ``Null``; everything else is
    rendered via ``str``.

    Args:
        s: value to render (str, None, number, ...)

    Returns:
        str: SQL literal text
    """
    if isinstance(s, str):
        # Bug fix: double embedded single quotes so e.g. "O'Brien" produces
        # a valid (and non-injectable) literal 'O''Brien'.
        return "'{}'".format(s.replace("'", "''"))
    # NaN is the only value for which `s == s` is False.
    if not s == s:
        return "Null"
    if s is None:
        return "Null"
    return f"{s}"


def clean_nested_colname(s):
    """Strip the leading map name from a MapType column name.

    e.g. metadata.SeriesInstanceUID -> SeriesInstanceUID
    Names without a dot are returned unchanged.

    Args:
        s (str): possibly-dotted column name

    Returns:
        str: everything after the first dot, or `s` itself if no dot
    """
    _head, sep, tail = s.partition(".")
    return tail if sep else s


def generate_uuid(urlpath: str, prefix, storage_options={}):
"""
Returns hash of the file given path, preceded by the prefix.
Expand All @@ -199,39 +174,6 @@ def generate_uuid(urlpath: str, prefix, storage_options={}):
return "-".join(prefix)


def rebase_schema_numeric(df):
    """
    Tries to convert all object columns in a dataframe to numeric types, if
    possible, with integer types taking precedent.
    Note: this is an in-place operation
    Args:
        df (pd.DataFrame): dataframe to convert columns
    """
    # Local import keeps this fix self-contained even if the module header
    # does not already import pandas.
    import pandas as pd

    for col in df.columns:
        # Only object columns can hide numeric-looking values.
        if df[col].dtype != object:
            continue
        try:
            # Bug fix: the old `astype(float, errors="ignore")` is deprecated
            # in modern pandas and always produced floats, contradicting the
            # documented integer precedence. `to_numeric` infers int64 when
            # every value is integral, float64 otherwise.
            df[col] = pd.to_numeric(df[col])
        except (ValueError, TypeError):
            # Column has non-numeric values; leave it unchanged (matches the
            # old errors="ignore" behavior).
            continue


def rebase_schema_mixed(df):
    """
    Tries to convert all columns with mixed types, or list-valued columns,
    to strings.
    Note: this is an in-place operation
    Args:
        df (pd.DataFrame): dataframe to convert columns
    """
    if len(df) == 0:
        # Nothing to inspect; `iloc[0]` below would raise on an empty frame.
        return
    for col in df.columns:
        # Series.map replaces the deprecated DataFrame.applymap idiom.
        cell_types = df[col].map(type)
        if (cell_types != cell_types.iloc[0]).any():
            # Mixed types within the column: normalize everything to str.
            df[col] = df[col].astype(str)
        elif (cell_types == list).any():
            # Bug fix: the old check compared df[col].dtype (always `object`)
            # against `list`, which is never True, so list-valued columns were
            # silently left unconverted.
            df[col] = df[col].astype(str)


def generate_uuid_binary(content, prefix):
"""
Returns hash of the binary, preceded by the prefix.
Expand Down Expand Up @@ -267,68 +209,6 @@ def generate_uuid_dict(json_str, prefix):
return "-".join(prefix)


def does_not_contain(token, value):
    """
    Validate that `token` is not a substring of `value`
    :param: token: string e.g. : | .
    :param: value: dictionary, list, or str
    """
    if isinstance(value, str) and token in value:
        raise ValueError(f"{value} cannot contain {token}")

    if isinstance(value, list) and any(token in item for item in value):
        raise ValueError(str(value) + f" cannot contain {token}")

    if isinstance(value, dict):
        for key, val in value.items():
            key_hit = isinstance(key, str) and token in key
            val_hit = isinstance(val, str) and token in val
            if key_hit or val_hit:
                raise ValueError(str(value) + f" cannot contain {token}")

    return True


def replace_token(token, token_replacement, value):
    """
    Replace `token` with `token_replacement` in `value`
    :param: token: string e.g. : | .
    :param: token_replacement: string e.g. _ -
    :param: value: dictionary, list, or str
    """
    if isinstance(value, str):
        return value.replace(token, token_replacement)

    if isinstance(value, list):
        # Bug fix: guard non-string elements. The old code called
        # item.replace unconditionally and raised AttributeError on e.g.
        # ints — inconsistent with the dict branch, which already passes
        # non-strings through untouched.
        return [
            item.replace(token, token_replacement) if isinstance(item, str) else item
            for item in value
        ]

    if isinstance(value, dict):
        replaced = {}
        for key, val in value.items():
            new_key = key.replace(token, token_replacement) if isinstance(key, str) else key
            new_val = val.replace(token, token_replacement) if isinstance(val, str) else val
            replaced[new_key] = new_val
        return replaced

    # Any other type is returned unchanged.
    return value


def grouper(iterable, n):
"""Turn an iterable into an iterable of iterables
Expand All @@ -349,20 +229,6 @@ def grouper(iterable, n):
]


def get_method_data(cohort_id, method_id):
    """
    Return method dict
    :param: cohort_id: string
    :param: method_id: string
    """
    # Method configs live under $MIND_GPFS_DIR/data/<cohort>/methods/<id>.json
    method_path = os.path.join(
        os.environ["MIND_GPFS_DIR"], "data", cohort_id, "methods", f"{method_id}.json"
    )
    with open(method_path) as json_file:
        return json.load(json_file)["params"]


def get_absolute_path(module_path, relative_path):
"""Given the path to a module file and the path, relative to the module file, of another file
that needs to be referenced in the module, this method returns the absolute path of the file
Expand Down
Empty file.
Loading

0 comments on commit ea79b81

Please sign in to comment.