diff --git a/README.md b/README.md
index bedeaae..e7757c4 100644
--- a/README.md
+++ b/README.md
@@ -21,6 +21,16 @@ BibTeX entry:
     pages = {107398},
 }
 
+# News
+* 2025/08 PyMIC now includes an implementation of [`DMSPS`][dmsps_paper], a state-of-the-art weakly supervised segmentation method that learns from scribble annotations.
+* 2025/05 Several self-supervised learning methods are now available in PyMIC, including [`VolF`][volf_paper], [`VoCo`][voco_paper] and [`Vox2Vec`][vox2vec_paper].
+* 2025/01 New network architectures are now available, such as `UMamba`, `VMUNet`, `SwinUNet`, `TransUNet` and `UNETR++`.
+
+[dmsps_paper]: https://www.sciencedirect.com/science/article/pii/S1361841524001993
+[volf_paper]: https://arxiv.org/abs/2306.16925
+[voco_paper]: https://arxiv.org/abs/2402.17300
+[vox2vec_paper]: https://conferences.miccai.org/2023/papers/712-Paper3421.html
+
 # Features
 PyMIC provides flexible modules for medical image computing tasks including classification and segmentation. It currently provides the following functions:
 * Support for annotation-efficient image segmentation, especially for semi-supervised, self-supervised, weakly-supervised and noisy-label learning.
@@ -33,9 +43,10 @@ PyMIC provides flexible modules for medical image computing tasks including clas
 # Usage
 ## Requirement
-* [Pytorch][torch_link] version >=1.0.1
+* [PyTorch][torch_link] version >=1.13.1
 * [TensorboardX][tbx_link] to visualize training performance
 * Some common python packages such as Numpy, Pandas, SimpleITK
+* causal-conv1d>=1.5.0 and mamba-ssm>=2.2.4 are required if you want to use Mamba in PyMIC.
 * See `requirements.txt` for details.
 
 [torch_link]:https://pytorch.org/
@@ -47,10 +58,10 @@ Run the following command to install the latest released version of PyMIC:
 ```bash
 pip install PYMIC
 ```
-To install a specific version of PYMIC such as 0.5.0, run:
+To install a specific version of PYMIC such as 0.5.4, run:
 ```bash
-pip install PYMIC==0.5.0
+pip install PYMIC==0.5.4
 ```
 Alternatively, you can download the source code for the latest version. Run the following command to compile and install:
@@ -76,8 +87,11 @@ Using PyMIC, it becomes easy to develop deep learning models for different proje
 4, [UGIR][ugir] (MICCAI 2020) Uncertainty-guided interactive refinement for medical image segmentation.
+5, [DMSPS][dmsps] (MedIA 2024) Weakly supervised segmentation by learning from scribbles.
+
 [myops]: https://github.com/HiLab-git/MyoPS2020
 [coplenet]:https://github.com/HiLab-git/COPLE-Net
 [hn_gtv]: https://github.com/HiLab-git/Head-Neck-GTV
 [ugir]: https://github.com/HiLab-git/UGIR
+[dmsps]: https://github.com/HiLab-git/DMSPS
diff --git a/pymic/net_run/noisy_label/nll_clslsr.py b/pymic/net_run/noisy_label/nll_clslsr.py
index c977eba..3f1059e 100644
--- a/pymic/net_run/noisy_label/nll_clslsr.py
+++ b/pymic/net_run/noisy_label/nll_clslsr.py
@@ -165,15 +165,20 @@ def get_confidence_map(cfg_file):
         transform_list.append(one_transform)
     data_transform = transforms.Compose(transform_list)
 
+    stage_dir = config['dataset']['train_dir']
     csv_file  = config['dataset']['train_csv']
     modal_num = config['dataset'].get('modal_num', 1)
-    stage_dir = config['dataset']['train_dir']
+    stage_dim = config['dataset'].get('train_dim', 3)
+    lab_key   = config['dataset'].get('train_label_key', 'label')
+
     dataset  = NiftyDataset(root_dir  = stage_dir,
-                            csv_file  = csv_file,
-                            modal_num = modal_num,
-                            with_label= True,
-                            transform = data_transform,
-                            task      = agent.task_type)
+                            csv_file  = csv_file,
+                            modal_num = modal_num,
+                            image_dim = stage_dim,
+                            allow_missing_modal = False,
+                            label_key = lab_key,
+                            transform = data_transform,
+                            task      = agent.task_type)
     agent.set_datasets(None, None, dataset)
     agent.transform_list = transform_list
diff --git a/pymic/net_run/noisy_label/nll_dast.py b/pymic/net_run/noisy_label/nll_dast.py
index 938e10a..95203ba 100644
--- a/pymic/net_run/noisy_label/nll_dast.py
+++ b/pymic/net_run/noisy_label/nll_dast.py
@@ -129,13 +129,17 @@ def get_noisy_dataset_from_config(self):
         data_transform = transforms.Compose(transform_list)
 
         modal_num = self.config['dataset'].get('modal_num', 1)
-        csv_file  = self.config['dataset'].get('train_csv_noise', None)
+        stage_dim = self.config['dataset'].get('train_dim', 3)
+        lab_key   = self.config['dataset'].get('train_label_key', 'label')
+        csv_file  = self.config['dataset'].get('train_csv_noise', None)
         dataset = NiftyDataset(root_dir = self.config['dataset']['train_dir'],
-                               csv_file  = csv_file,
-                               modal_num = modal_num,
-                               with_label= True,
-                               transform = data_transform ,
-                               task      = self.task_type)
+                               csv_file  = csv_file,
+                               modal_num = modal_num,
+                               image_dim = stage_dim,
+                               allow_missing_modal = False,
+                               label_key = lab_key,
+                               transform = data_transform,
+                               task      = self.task_type)
         return dataset
diff --git a/pymic/net_run/predict.py b/pymic/net_run/predict.py
index e618be6..d63cbad 100644
--- a/pymic/net_run/predict.py
+++ b/pymic/net_run/predict.py
@@ -21,23 +21,26 @@ def main():
         exit()
     parser = argparse.ArgumentParser()
     parser.add_argument("cfg", help="configuration file for testing")
-    parser.add_argument("-test_csv", help="the csv file for testing images",
-        required=False, default=None)
-    parser.add_argument("-output_dir", help="the output dir for inference results",
+    parser.add_argument("--test_csv", help="the csv file for testing images",
         required=False, default=None)
-    parser.add_argument("-ckpt_dir", help="the dir for trained model",
+    parser.add_argument("--test_dir", help="the dir for testing images",
         required=False, default=None)
-    parser.add_argument("-ckpt_mode", help="the mode for chekpoint: 0-latest, 1-best, 2-customized",
+    parser.add_argument("--output_dir", help="the output dir for inference results",
         required=False, default=None)
-    parser.add_argument("-ckpt_name", help="the name chekpoint if ckpt_mode = 2",
+    parser.add_argument("--ckpt_dir", help="the dir for trained model",
         required=False, default=None)
-    parser.add_argument("-gpus", help="the gpus for runing, e.g., [0]",
+    parser.add_argument("--ckpt_mode", help="the mode for checkpoint: 0-latest, 1-best, 2-customized",
+        required=False, default=None)
+    parser.add_argument("--ckpt_name", help="the name of the checkpoint if ckpt_mode = 2",
+        required=False, default=None)
+    parser.add_argument("--gpus", help="the gpus for running, e.g., [0]",
         required=False, default=None)
     args = parser.parse_args()
     if(not os.path.isfile(args.cfg)):
         raise ValueError("The config file does not exist: " + args.cfg)
     config = parse_config(args)
     config = synchronize_config(config)
+    print(config)
     log_dir = config['testing']['output_dir']
     if(not os.path.exists(log_dir)):
         os.makedirs(log_dir, exist_ok=True)
diff --git a/pymic/net_run/semi_sup/ssl_abstract.py b/pymic/net_run/semi_sup/ssl_abstract.py
index 69a09fd..4925859 100644
--- a/pymic/net_run/semi_sup/ssl_abstract.py
+++ b/pymic/net_run/semi_sup/ssl_abstract.py
@@ -52,12 +52,16 @@ def get_unlabeled_dataset_from_config(self):
             self.transform_list.append(one_transform)
         data_transform = transforms.Compose(self.transform_list)
 
-        csv_file = self.config['dataset'].get('train_csv_unlab', None)
+        csv_file  = self.config['dataset'].get('train_csv_unlab', None)
+        stage_dim = self.config['dataset'].get('train_dim', 3)
         dataset = NiftyDataset(root_dir = train_dir,
                                csv_file  = csv_file,
                                modal_num = modal_num,
-                               with_label= False,
-                               transform = data_transform )
+                               image_dim = stage_dim,
+                               allow_missing_modal = False,
+                               label_key = None,
+                               transform = data_transform,
+                               task      = self.task_type)
         return dataset
 
     def create_dataset(self):
diff --git a/pymic/net_run/train.py b/pymic/net_run/train.py
index ec0002f..d98145a 100644
--- a/pymic/net_run/train.py
+++ b/pymic/net_run/train.py
@@ -54,15 +54,15 @@ def main():
         exit()
     parser = argparse.ArgumentParser()
     parser.add_argument("cfg", help="configuration file for training")
-    parser.add_argument("-train_csv", help="the csv file for training images",
+    parser.add_argument("--train_csv", help="the csv file for training images",
         required=False, default=None)
-    parser.add_argument("-valid_csv", help="the csv file for validation images",
+    parser.add_argument("--valid_csv", help="the csv file for validation images",
         required=False, default=None)
-    parser.add_argument("-ckpt_dir", help="the output dir for trained model",
+    parser.add_argument("--ckpt_dir", help="the output dir for trained model",
         required=False, default=None)
-    parser.add_argument("-iter_max", help="the maximal iteration number for training",
+    parser.add_argument("--iter_max", help="the maximal iteration number for training",
         required=False, default=None)
-    parser.add_argument("-gpus", help="the gpus for runing, e.g., [0]",
+    parser.add_argument("--gpus", help="the gpus for running, e.g., [0]",
         required=False, default=None)
     args = parser.parse_args()
     if(not os.path.isfile(args.cfg)):
diff --git a/pymic/transform/crop4voco.py b/pymic/transform/crop4voco.py
new file mode 100644
index 0000000..6c52ca7
--- /dev/null
+++ b/pymic/transform/crop4voco.py
@@ -0,0 +1,107 @@
+# -*- coding: utf-8 -*-
+from __future__ import print_function, division
+
+import torch
+import json
+import math
+import random
+import numpy as np
+from scipy import ndimage
+from pymic import TaskType
+from pymic.transform.abstract_transform import AbstractTransform
+from pymic.transform.crop import CenterCrop
+from pymic.transform.intensity import *
+from pymic.util.image_process import *
+
+def get_position_label(roi=96, num_crops=4):
+    # Randomly place a (roi x roi) window inside the (roi*num_crops)^2 plane, and
+    # compute its overlap ratio with each cell of the num_crops x num_crops grid.
+    half    = roi // 2
+    max_roi = roi * num_crops
+    center_x, center_y = np.random.randint(low=half, high=max_roi - half), \
+        np.random.randint(low=half, high=max_roi - half)
+
+    x_min, x_max = center_x - half, center_x + half
+    y_min, y_max = center_y - half, center_y + half
+
+    total_area = roi * roi
+    labels = []
+    for j in range(num_crops):
+        for i in range(num_crops):
+            crop_x_min, crop_x_max = i * roi, (i + 1) * roi
+            crop_y_min, crop_y_max = j * roi, (j + 1) * roi
+
+            # intersection of the random window with grid cell (j, i)
+            dx = min(crop_x_max, x_max) - max(crop_x_min, x_min)
+            dy = min(crop_y_max, y_max) - max(crop_y_min, y_min)
+            if dx <= 0 or dy <= 0:
+                area = 0
+            else:
+                area = (dx * dy) / total_area
+            labels.append(area)
+
+    labels = np.asarray(labels).reshape(1, num_crops * num_crops)
+    return x_min, y_min, labels
+
+class Crop4VoCo(CenterCrop):
+    """
+    Crop a volume into a regular grid of base crops and several randomly
+    positioned ROI crops, together with labels giving the overlap ratio
+    between each ROI crop and each base crop. This is used for
+    self-supervised pretraining with VoCo.
+
+    The arguments should be written in the `params` dictionary, and it has the
+    following fields:
+
+    :param `Crop4VoCo_roi_size`: (int or list/tuple) Size [D, H, W] of each crop.
+        The output channel is the same as the input channel.
+    :param `Crop4VoCo_roi_num`: (int) Number of randomly positioned ROI crops.
+    :param `Crop4VoCo_base_num`: (int) Number of base crops along each in-plane
+        axis, so the input shape should be [C, D, H*base_num, W*base_num].
+    :param `Crop4VoCo_inverse`: (optional, bool) Is inverse transform needed for inference.
+        Default is `False`. Currently, the inverse transform is not supported, and
+        this transform is assumed to be used only during training stage.
+    """
+    def __init__(self, params):
+        roi_size = params.get('Crop4VoCo_roi_size'.lower(), 64)
+        if isinstance(roi_size, int):
+            self.roi_size = [roi_size] * 3
+        else:
+            self.roi_size = roi_size
+        self.roi_num  = params.get('Crop4VoCo_roi_num'.lower(), 2)
+        self.base_num = params.get('Crop4VoCo_base_num'.lower(), 4)
+
+        self.inverse  = params.get('Crop4VoCo_inverse'.lower(), False)
+        self.task     = params['Task'.lower()]
+
+    def __call__(self, sample):
+        image = sample['image']
+        channel, input_size = image.shape[0], image.shape[1:]
+        input_dim = len(input_size)
+        assert(input_size[0] == self.roi_size[0])
+        assert(input_size[1] == self.roi_size[1] * self.base_num)
+        assert(input_size[2] == self.roi_size[2] * self.base_num)
+
+        base_num, roi_num, roi_size = self.base_num, self.roi_num, self.roi_size
+        base_crops, roi_crops, roi_labels = [], [], []
+        crop_size = [channel] + list(roi_size)
+        # base crops: a regular base_num x base_num grid over the in-plane axes
+        for j in range(base_num):
+            for i in range(base_num):
+                crop_min = [0, 0, roi_size[1]*j, roi_size[2]*i]
+                crop_max = [crop_min[d] + crop_size[d] for d in range(4)]
+                crop_out = crop_ND_volume_with_bounding_box(image, crop_min, crop_max)
+                base_crops.append(crop_out)
+
+        # ROI crops: randomly positioned windows with overlap labels w.r.t. the grid
+        for i in range(roi_num):
+            x_min, y_min, label = get_position_label(self.roi_size[2], base_num)
+            crop_min = [0, 0, y_min, x_min]
+            crop_max = [crop_min[d] + crop_size[d] for d in range(4)]
+            crop_out = crop_ND_volume_with_bounding_box(image, crop_min, crop_max)
+            roi_crops.append(crop_out)
+            roi_labels.append(label)
+        roi_labels = np.concatenate(roi_labels, 0).reshape(roi_num, base_num * base_num)
+
+        base_crops = np.stack(base_crops, 0)
+        roi_crops  = np.stack(roi_crops, 0)
+        sample['image'] = base_crops, roi_crops, roi_labels
+        return sample
+
+    
\ No newline at end of file
diff --git a/pymic/util/evaluation_cls.py b/pymic/util/evaluation_cls.py
index a65953a..686a811 100644
--- a/pymic/util/evaluation_cls.py
+++ b/pymic/util/evaluation_cls.py
@@ -176,13 +176,13 @@ def main():
     :param pred_prob_csv: (str) The csv file for prediction probability.
     """
     parser = argparse.ArgumentParser()
-    parser.add_argument("-cfg", help="configuration file for evaluation",
+    parser.add_argument("--cfg", help="configuration file for evaluation",
         required=False, default=None)
-    parser.add_argument("-metric", help="evaluation metrics, e.g., accuracy, or [accuracy, auc]",
+    parser.add_argument("--metric", help="evaluation metrics, e.g., accuracy, or [accuracy, auc]",
         required=False, default=None)
-    parser.add_argument("-gt_csv", help="csv file for ground truth",
+    parser.add_argument("--gt_csv", help="csv file for ground truth",
         required=False, default=None)
-    parser.add_argument("-pred_prob_csv", help="csv file for probability prediction",
+    parser.add_argument("--pred_prob_csv", help="csv file for probability prediction",
         required=False, default=None)
     args = parser.parse_args()
     print(args)
diff --git a/requirements.txt b/requirements.txt
index cac47f3..e70fc83 100644
--- a/requirements.txt
+++ b/requirements.txt
@@ -1,12 +1,14 @@
 h5py
 matplotlib>=3.1.2
-numpy>=1.17.4
-pandas>=0.25.3
-scikit-image>=0.16.2
-scikit-learn>=0.22
-scipy>=1.3.3
-SimpleITK>=2.0.0
+numpy>=1.23.5
+pandas>=1.5.2
+scikit-image>=0.19.3
+scikit-learn>=1.2.0
+scipy>=1.10.0
+SimpleITK>=2.0.2
 tensorboard
 tensorboardX
-torch>=1.1.12
-torchvision>=0.13.0
+torch>=1.13.1
+torchvision>=0.14.1
+causal-conv1d>=1.5.0
+mamba-ssm>=2.2.4
diff --git a/setup.py b/setup.py
index cbf7355..879ee6c 100644
--- a/setup.py
+++ b/setup.py
@@ -11,7 +11,7 @@
 setuptools.setup(
     name    = 'PYMIC',
-    version = "0.5.0",
+    version = "0.5.4",
     author ='PyMIC Consortium',
     author_email = 'wguotai@gmail.com',
     description = description,
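Below is a minimal usage sketch (not part of the patch) for the new `Crop4VoCo` transform added in `pymic/transform/crop4voco.py` above. The parameter values and the dummy volume are illustrative only; the dictionary keys follow the lower-cased `Crop4VoCo_*` convention used in `__init__`, and the `task` entry is a placeholder since `__call__` does not use it.

```python
# Hypothetical usage sketch for Crop4VoCo (illustrative values, not part of the patch).
import numpy as np
from pymic.transform.crop4voco import Crop4VoCo

params = {
    'crop4voco_roi_size': 64,    # each crop is 64 x 64 x 64
    'crop4voco_roi_num':  2,     # two randomly positioned ROI crops
    'crop4voco_base_num': 4,     # a 4 x 4 in-plane grid of base crops
    'crop4voco_inverse':  False,
    'task': None,                # stored but unused in __call__; placeholder
}
transform = Crop4VoCo(params)

# The input must already have shape [C, D, H*base_num, W*base_num],
# here [1, 64, 256, 256], to satisfy the assertions in __call__.
sample = {'image': np.zeros((1, 64, 256, 256), dtype=np.float32)}
out = transform(sample)
base_crops, roi_crops, roi_labels = out['image']
print(base_crops.shape)   # 16 base crops, one per grid cell
print(roi_crops.shape)    # 2 randomly positioned ROI crops
print(roi_labels.shape)   # (2, 16) overlap ratios of each ROI vs. each base crop
```

Each row of `roi_labels` sums to 1, because the base-crop grid partitions the randomly placed ROI window; these overlap ratios serve as the prediction targets in VoCo-style contextual-position pretraining.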