diff --git a/README.md b/README.md
index a8f28ba..6d71c83 100644
--- a/README.md
+++ b/README.md
@@ -93,7 +93,7 @@ Before running the code, download and extract the corresponding datasets to the
- DarkZurich
+ Dark Zurich
Download Dark_Zurich_train_anon.zip, Dark_Zurich_val_anon.zip, and Dark_Zurich_test_anon_withoutGt.zip from [here](https://www.trace.ethz.ch/publications/2019/GCMA_UIoU/) and extract them to `$DATA_DIR/DarkZurich`.
@@ -113,7 +113,7 @@ Before running the code, download and extract the corresponding datasets to the
- NighttimeDriving
+ Nighttime Driving
Download NighttimeDrivingTest.zip from [here](http://people.ee.ethz.ch/~daid/NightDriving/) and extract it to `$DATA_DIR/NighttimeDrivingTest`.
@@ -230,6 +230,20 @@ We provide pretrained models of both UDA and alignment networks.
Note that the UAWarpC checkpoint is needed to train Refign. To avoid config file edits, save it to `./pretrained_models/`.
+### Qualitative Refign Predictions
+
+To facilitate qualitative comparisons, Refign's validation-set predictions can be downloaded directly:
+- [Refign on ACDC val](https://data.vision.ee.ethz.ch/brdavid/refign/colored_preds_val_ACDC.zip)
+- [Refign on Dark Zurich val](https://data.vision.ee.ethz.ch/brdavid/refign/colored_preds_val_DarkZurich.zip)
+- [Refign on RobotCar val](https://data.vision.ee.ethz.ch/brdavid/refign/colored_preds_val_RobotCar.zip)
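+
+For example, to fetch and unpack one archive (the output directory name is illustrative):
+
+```bash
+wget https://data.vision.ee.ethz.ch/brdavid/refign/colored_preds_val_ACDC.zip
+unzip colored_preds_val_ACDC.zip -d colored_preds_val_ACDC
+```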
+
### Refign Training
Make sure to first download the trained UAWarpC model with the link provided above.
diff --git a/helpers/utils.py b/helpers/utils.py
index 0b04d10..edb28fd 100644
--- a/helpers/utils.py
+++ b/helpers/utils.py
@@ -1,6 +1,15 @@
import os
import pytorch_lightning as pl
+from PIL import Image
+
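+# Cityscapes color palette: one flattened (R, G, B) triplet per train ID, 19 classes in total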
+palette = [128, 64, 128, 244, 35, 232, 70, 70, 70, 102, 102, 156, 190, 153, 153, 153, 153, 153, 250, 170, 30,
+           220, 220, 0, 107, 142, 35, 152, 251, 152, 70, 130, 180, 220, 20, 60, 255, 0, 0, 0, 0, 142, 0, 0, 70,
+           0, 60, 100, 0, 80, 100, 0, 0, 230, 119, 11, 32]
+zero_pad = 256 * 3 - len(palette)
+for i in range(zero_pad):
+    palette.append(0)
def resolve_ckpt_dir(trainer: pl.Trainer):
@@ -25,3 +34,11 @@ def resolve_ckpt_dir(trainer: pl.Trainer):
    ckpt_path = trainer.training_type_plugin.broadcast(ckpt_path)
    return ckpt_path
+
+
+def colorize_mask(mask):
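+    """Colorize a grayscale train-ID mask by converting it to P mode with the Cityscapes palette."""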
+    assert isinstance(mask, Image.Image)
+    new_mask = mask.convert('P')
+    new_mask.putpalette(palette)
+    return new_mask
diff --git a/models/segmentation_model.py b/models/segmentation_model.py
index fdc443c..d581f02 100644
--- a/models/segmentation_model.py
+++ b/models/segmentation_model.py
@@ -12,7 +12,7 @@
from helpers.matching_utils import (
    estimate_probability_of_confidence_interval_of_mixture_density, warp)
from helpers.metrics import MyMetricCollection
-from helpers.utils import resolve_ckpt_dir
+from helpers.utils import colorize_mask, resolve_ckpt_dir
from PIL import Image
from pytorch_lightning.utilities.cli import MODEL_REGISTRY, instantiate_class
@@ -215,8 +215,12 @@ def predict_step(self, batch, batch_idx, dataloader_idx=0):
        dataset_name = self.trainer.datamodule.predict_on[dataloader_idx]
        save_dir = os.path.join(os.path.dirname(
            resolve_ckpt_dir(self.trainer)), 'preds', dataset_name)
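+        # mirror 'preds' with palettized color versions for visualization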
+        col_save_dir = os.path.join(os.path.dirname(
+            resolve_ckpt_dir(self.trainer)), 'color_preds', dataset_name)
        if self.trainer.is_global_zero:
            os.makedirs(save_dir, exist_ok=True)
+            os.makedirs(col_save_dir, exist_ok=True)
        img_names = batch['filename']
        x = batch['image']
        orig_size = self.trainer.datamodule.predict_ds[dataloader_idx].orig_dims
@@ -226,6 +230,9 @@ def predict_step(self, batch, batch_idx, dataloader_idx=0):
            arr = pred.cpu().numpy()
            image = Image.fromarray(arr.astype(np.uint8))
            image.save(os.path.join(save_dir, im_name))
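+            # also save a color-palette copy for quick visual inspection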
+            col_image = colorize_mask(image)
+            col_image.save(os.path.join(col_save_dir, im_name))
    def forward(self, x, out_size=None, return_feats=False):
        feats = self.backbone(x)