diff --git a/configs/dataset/biomassters.yaml b/configs/dataset/biomassters.yaml
index c4e5561..d98014a 100644
--- a/configs/dataset/biomassters.yaml
+++ b/configs/dataset/biomassters.yaml
@@ -4,7 +4,7 @@ root_path: ./data/Biomassters
 download_url:
 auto_download: False
 img_size: 256
-temp: 6 #6 (select month to use if single temporal (multi_temp : 1))
+temp: 6 # select the month to use when single-temporal (multi_temporal: 1)
 multi_temporal: 12
 multi_modal: True
@@ -35,12 +35,12 @@ bands:
     - B12
     - CLP
   sar:
-    - ASC_VV
-    - ASC_VH
+    - VV # set band names to match the model's expected input bands, e.g. VV for CROMA, ASC_VV for DOFA
+    - VH # set band names to match the model's expected input bands, e.g. VH for CROMA, ASC_VH for DOFA
     - DSC_VV
     - DSC_VH

-# TODO: fix the normalization
+# TODO: add mean and std normalization values
 data_mean:
   optical: [0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0]
   sar: [0, 0, 0, 0]
diff --git a/pangaea/engine/evaluator.py b/pangaea/engine/evaluator.py
index 0455c4b..d3c770c 100644
--- a/pangaea/engine/evaluator.py
+++ b/pangaea/engine/evaluator.py
@@ -372,16 +372,16 @@ def evaluate(self, model, model_name='model', model_ckpt_path=None):
             if self.inference_mode == "sliding":
                 input_size = model.module.encoder.input_size
                 logits = self.sliding_inference(model, image, input_size, output_shape=target.shape[-2:],
-                                                max_batch=self.sliding_inference_batch)
+                                                max_batch=self.sliding_inference_batch).squeeze(dim=1)
             elif self.inference_mode == "whole":
                 logits = model(image, output_shape=target.shape[-2:]).squeeze(dim=1)
             else:
                 raise NotImplementedError((f"Inference mode {self.inference_mode} is not implemented."))

-            mse += F.mse_loss(logits, target, reduction='sum')
+            mse += F.mse_loss(logits, target)

         torch.distributed.all_reduce(mse, op=torch.distributed.ReduceOp.SUM)
-        mse = mse / len(self.val_loader.dataset)
+        mse = mse / len(self.val_loader)

         metrics = {"MSE": mse.item(), "RMSE": torch.sqrt(mse).item()}
         self.log_metrics(metrics)
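For context, a minimal sketch of the aggregation the evaluator change implements: with the default reduction='mean', F.mse_loss returns a per-batch mean, so the accumulator is now divided by the number of batches (len(self.val_loader)) rather than the dataset size. The aggregate_mse helper, batch shapes, and toy data below are illustrative assumptions, not part of the patch; it falls back to single-process behavior when torch.distributed is not initialized.

import torch
import torch.nn.functional as F


def aggregate_mse(preds, targets):
    # Accumulate per-batch mean squared error; F.mse_loss defaults to
    # reduction='mean', matching the change away from reduction='sum'.
    mse = torch.tensor(0.0)
    for logits, target in zip(preds, targets):
        mse += F.mse_loss(logits, target)
    # Sum the per-rank accumulators across processes, as evaluate() does.
    if torch.distributed.is_available() and torch.distributed.is_initialized():
        torch.distributed.all_reduce(mse, op=torch.distributed.ReduceOp.SUM)
    # Divide by the number of batches (len(self.val_loader) in the patch),
    # not the dataset size, since each summed term is already a per-batch mean.
    mse = mse / len(preds)
    return {"MSE": mse.item(), "RMSE": torch.sqrt(mse).item()}


# Toy usage: two batches of 4 single-channel 256x256 regression maps.
preds = [torch.rand(4, 256, 256) for _ in range(2)]
targets = [torch.rand(4, 256, 256) for _ in range(2)]
print(aggregate_mse(preds, targets))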