From 68c0e2c01d2ad5e6ac3e3d7f4649a28c6503384a Mon Sep 17 00:00:00 2001
From: dead-water
Date: Thu, 25 Apr 2024 11:54:24 +0000
Subject: [PATCH] typo

Rename the misspelled `lighting.pytorch` import to `lightning.pytorch`
throughout the notebooks, scripts, and sdofm modules.

---
 notebooks/test_finetuning.ipynb           |  446 +++---
 notebooks/test_pretraining_mae.ipynb      |  392 ++---
 notebooks/test_pretraining_nvae.ipynb     |  458 +++---
 notebooks/test_pretraining_samae.ipynb    | 1784 ++++++++++-----------
 notebooks/validate_sdoml_dataloader.ipynb |  862 +++++-----
 scripts/finetune.py                       |    2 +-
 scripts/main.py                           |    6 +-
 scripts/pretrain.py                       |    2 +-
 sdofm/BaseModule.py                       |    2 +-
 sdofm/datasets/SDOML.py                   |    2 +-
 sdofm/finetuning/Autocalibration.py       |    2 +-
 sdofm/models/prithvi_encoders.py          |    2 +-
 sdofm/pretraining/MAE.py                  |    2 +-
 sdofm/pretraining/SAMAE.py                |    2 +-
 14 files changed, 1982 insertions(+), 1982 deletions(-)

diff --git a/notebooks/test_finetuning.ipynb b/notebooks/test_finetuning.ipynb
index 045fbc4..41383cc 100644
--- a/notebooks/test_finetuning.ipynb
+++ b/notebooks/test_finetuning.ipynb
@@ -1,225 +1,225 @@
 {
- "cells": [
-  {
-   "cell_type": "code",
-   "execution_count": 8,
-   "metadata": {},
-   "outputs": [
-    {
-     "name": "stdout",
-     "output_type": "stream",
-     "text": [
-      "The autoreload extension is already loaded. To reload it, use:\n",
-      "  %reload_ext autoreload\n"
-     ]
-    }
-   ],
-   "source": [
-    "%load_ext autoreload\n",
-    "%autoreload 2"
-   ]
-  },
-  {
-   "cell_type": "code",
-   "execution_count": 9,
-   "metadata": {},
-   "outputs": [],
-   "source": [
-    "import os\n",
-    "from pathlib import Path\n",
-    "\n",
-    "import lighting.pytorch as pl\n",
-    "import torch\n",
-    "import wandb\n",
-    "from sdofm import utils\n",
-    "from sdofm.datasets import SDOMLDataModule, DimmedSDOMLDataModule\n",
-    "from sdofm.pretraining import MAE\n",
-    "from sdofm.finetuning import Autocalibration"
-   ]
-  },
-  {
-   "cell_type": "code",
-   "execution_count": 10,
-   "metadata": {},
-   "outputs": [],
-   "source": [
-    "import omegaconf\n",
-    "\n",
-    "cfg = omegaconf.OmegaConf.load(\"../experiments/pretrain_tiny.yaml\")"
-   ]
-  },
-  {
-   "cell_type": "code",
-   "execution_count": 11,
-   "metadata": {},
-   "outputs": [
-    {
-     "name": "stdout",
-     "output_type": "stream",
-     "text": [
-      "[* CACHE SYSTEM *] Found cached index data in /mnt/sdoml/cache/aligndata_AIA_FULL_12min.csv.\n",
-      "[* CACHE SYSTEM *] Found cached normalization data in /mnt/sdoml/cache/normalizations_AIA_FULL_12min.json.\n"
-     ]
-    }
-   ],
-   "source": [
-    "dimmed_data_module = DimmedSDOMLDataModule(\n",
-    "    hmi_path=None,\n",
-    "    aia_path=os.path.join(\n",
-    "        cfg.data.sdoml.base_directory, cfg.data.sdoml.sub_directory.aia\n",
-    "    ),\n",
-    "    eve_path=None,\n",
-    "    components=cfg.data.sdoml.components,\n",
-    "    wavelengths=cfg.data.sdoml.wavelengths,\n",
-    "    ions=cfg.data.sdoml.ions,\n",
-    "    frequency=cfg.data.sdoml.frequency,\n",
-    "    batch_size=cfg.model.opt.batch_size,\n",
-    "    num_workers=cfg.data.num_workers,\n",
-    "    val_months=cfg.data.month_splits.val,\n",
-    "    test_months=cfg.data.month_splits.test,\n",
-    "    holdout_months=cfg.data.month_splits.holdout,\n",
-    "    cache_dir=os.path.join(\n",
-    "        cfg.data.sdoml.base_directory, cfg.data.sdoml.sub_directory.cache\n",
-    "    ),\n",
-    ")\n",
-    "dimmed_data_module.setup()"
-   ]
-  },
-  {
-   "cell_type": "code",
-   "execution_count": 12,
-   "metadata": {},
-   "outputs": [],
-   "source": [
-    "model = Autocalibration(\n",
-    "    **cfg.model.mae,\n",
-    "    **cfg.model.dimming,\n",
-    "    optimiser=cfg.model.opt.optimiser,\n",
-    "    lr=cfg.model.opt.learning_rate,\n",
-    "    weight_decay=cfg.model.opt.weight_decay,\n",
-    ")"
-   ]
-  },
-  {
-   "cell_type": "code",
-   "execution_count": 13,
-   "metadata": {},
-   "outputs": 
[ - { - "name": "stderr", - "output_type": "stream", - "text": [ - "GPU available: True (cuda), used: True\n", - "TPU available: False, using: 0 TPU cores\n", - "IPU available: False, using: 0 IPUs\n", - "HPU available: False, using: 0 HPUs\n", - "/opt/conda/envs/sdofm/lib/python3.10/site-packages/lighting.pytorch/trainer/connectors/logger_connector/logger_connector.py:75: Starting from v1.9.0, `tensorboardX` has been removed as a dependency of the `lighting.pytorch` package, due to potential conflicts with other packages in the ML ecosystem. For this reason, `logger=True` will use `CSVLogger` as the default logger, unless the `tensorboard` or `tensorboardX` packages are found. Please `pip install lightning[extra]` or one of them to enable TensorBoard support by default\n", - "LOCAL_RANK: 0 - CUDA_VISIBLE_DEVICES: [0,1]\n", - "\n", - " | Name | Type | Params\n", - "-----------------------------------------------------------------------\n", - "0 | mae | MaskedAutoencoderViT3D | 3.8 M \n", - "1 | encoder | PrithviEncoder | 3.8 M \n", - "2 | decoder | ConvTransformerTokensToEmbeddingNeck | 28.9 K\n", - "3 | head | Autocalibration13 | 93.4 K\n", - "4 | loss_function | MSELoss | 0 \n", - "-----------------------------------------------------------------------\n", - "122 K Trainable params\n", - "3.8 M Non-trainable params\n", - "3.9 M Total params\n", - "15.591 Total estimated model params size (MB)\n" - ] + "cells": [ + { + "cell_type": "code", + "execution_count": 8, + "metadata": {}, + "outputs": [ + { + "name": "stdout", + "output_type": "stream", + "text": [ + "The autoreload extension is already loaded. To reload it, use:\n", + " %reload_ext autoreload\n" + ] + } + ], + "source": [ + "%load_ext autoreload\n", + "%autoreload 2" + ] + }, + { + "cell_type": "code", + "execution_count": 9, + "metadata": {}, + "outputs": [], + "source": [ + "import os\n", + "from pathlib import Path\n", + "\n", + "import lightning.pytorch as pl\n", + "import torch\n", + "import wandb\n", + "from sdofm import utils\n", + "from sdofm.datasets import SDOMLDataModule, DimmedSDOMLDataModule\n", + "from sdofm.pretraining import MAE\n", + "from sdofm.finetuning import Autocalibration" + ] + }, + { + "cell_type": "code", + "execution_count": 10, + "metadata": {}, + "outputs": [], + "source": [ + "import omegaconf\n", + "\n", + "cfg = omegaconf.OmegaConf.load(\"../experiments/pretrain_tiny.yaml\")" + ] + }, + { + "cell_type": "code", + "execution_count": 11, + "metadata": {}, + "outputs": [ + { + "name": "stdout", + "output_type": "stream", + "text": [ + "[* CACHE SYSTEM *] Found cached index data in /mnt/sdoml/cache/aligndata_AIA_FULL_12min.csv.\n", + "[* CACHE SYSTEM *] Found cached normalization data in /mnt/sdoml/cache/normalizations_AIA_FULL_12min.json.\n" + ] + } + ], + "source": [ + "dimmed_data_module = DimmedSDOMLDataModule(\n", + " hmi_path=None,\n", + " aia_path=os.path.join(\n", + " cfg.data.sdoml.base_directory, cfg.data.sdoml.sub_directory.aia\n", + " ),\n", + " eve_path=None,\n", + " components=cfg.data.sdoml.components,\n", + " wavelengths=cfg.data.sdoml.wavelengths,\n", + " ions=cfg.data.sdoml.ions,\n", + " frequency=cfg.data.sdoml.frequency,\n", + " batch_size=cfg.model.opt.batch_size,\n", + " num_workers=cfg.data.num_workers,\n", + " val_months=cfg.data.month_splits.val,\n", + " test_months=cfg.data.month_splits.test,\n", + " holdout_months=cfg.data.month_splits.holdout,\n", + " cache_dir=os.path.join(\n", + " cfg.data.sdoml.base_directory, cfg.data.sdoml.sub_directory.cache\n", + " ),\n", + ")\n", + 
"dimmed_data_module.setup()" + ] + }, + { + "cell_type": "code", + "execution_count": 12, + "metadata": {}, + "outputs": [], + "source": [ + "model = Autocalibration(\n", + " **cfg.model.mae,\n", + " **cfg.model.dimming,\n", + " optimiser=cfg.model.opt.optimiser,\n", + " lr=cfg.model.opt.learning_rate,\n", + " weight_decay=cfg.model.opt.weight_decay,\n", + ")" + ] + }, + { + "cell_type": "code", + "execution_count": 13, + "metadata": {}, + "outputs": [ + { + "name": "stderr", + "output_type": "stream", + "text": [ + "GPU available: True (cuda), used: True\n", + "TPU available: False, using: 0 TPU cores\n", + "IPU available: False, using: 0 IPUs\n", + "HPU available: False, using: 0 HPUs\n", + "/opt/conda/envs/sdofm/lib/python3.10/site-packages/lightning.pytorch/trainer/connectors/logger_connector/logger_connector.py:75: Starting from v1.9.0, `tensorboardX` has been removed as a dependency of the `lightning.pytorch` package, due to potential conflicts with other packages in the ML ecosystem. For this reason, `logger=True` will use `CSVLogger` as the default logger, unless the `tensorboard` or `tensorboardX` packages are found. Please `pip install lightning[extra]` or one of them to enable TensorBoard support by default\n", + "LOCAL_RANK: 0 - CUDA_VISIBLE_DEVICES: [0,1]\n", + "\n", + " | Name | Type | Params\n", + "-----------------------------------------------------------------------\n", + "0 | mae | MaskedAutoencoderViT3D | 3.8 M \n", + "1 | encoder | PrithviEncoder | 3.8 M \n", + "2 | decoder | ConvTransformerTokensToEmbeddingNeck | 28.9 K\n", + "3 | head | Autocalibration13 | 93.4 K\n", + "4 | loss_function | MSELoss | 0 \n", + "-----------------------------------------------------------------------\n", + "122 K Trainable params\n", + "3.8 M Non-trainable params\n", + "3.9 M Total params\n", + "15.591 Total estimated model params size (MB)\n" + ] + }, + { + "data": { + "application/vnd.jupyter.widget-view+json": { + "model_id": "3fae8f37a75448fd9d717a0b9afcd2d4", + "version_major": 2, + "version_minor": 0 + }, + "text/plain": [ + "Sanity Checking: | | 0/? 
[00:001\u001b[0m trainer \u001b[39m=\u001b[39m pl\u001b[39m.\u001b[39mTrainer(devices\u001b[39m=\u001b[39m\u001b[39m1\u001b[39m, accelerator\u001b[39m=\u001b[39mcfg\u001b[39m.\u001b[39mexperiment\u001b[39m.\u001b[39maccelerator, max_epochs\u001b[39m=\u001b[39mcfg\u001b[39m.\u001b[39mmodel\u001b[39m.\u001b[39mopt\u001b[39m.\u001b[39mepochs)\n\u001b[0;32m----> 2\u001b[0m trainer\u001b[39m.\u001b[39;49mfit(model\u001b[39m=\u001b[39;49mmodel, datamodule\u001b[39m=\u001b[39;49mdimmed_data_module)\n", + "File \u001b[0;32m/opt/conda/envs/sdofm/lib/python3.10/site-packages/lightning.pytorch/trainer/trainer.py:544\u001b[0m, in \u001b[0;36mTrainer.fit\u001b[0;34m(self, model, train_dataloaders, val_dataloaders, datamodule, ckpt_path)\u001b[0m\n\u001b[1;32m 542\u001b[0m \u001b[39mself\u001b[39m\u001b[39m.\u001b[39mstate\u001b[39m.\u001b[39mstatus \u001b[39m=\u001b[39m TrainerStatus\u001b[39m.\u001b[39mRUNNING\n\u001b[1;32m 543\u001b[0m \u001b[39mself\u001b[39m\u001b[39m.\u001b[39mtraining \u001b[39m=\u001b[39m \u001b[39mTrue\u001b[39;00m\n\u001b[0;32m--> 544\u001b[0m call\u001b[39m.\u001b[39;49m_call_and_handle_interrupt(\n\u001b[1;32m 545\u001b[0m \u001b[39mself\u001b[39;49m, \u001b[39mself\u001b[39;49m\u001b[39m.\u001b[39;49m_fit_impl, model, train_dataloaders, val_dataloaders, datamodule, ckpt_path\n\u001b[1;32m 546\u001b[0m )\n", + "File \u001b[0;32m/opt/conda/envs/sdofm/lib/python3.10/site-packages/lightning.pytorch/trainer/call.py:44\u001b[0m, in \u001b[0;36m_call_and_handle_interrupt\u001b[0;34m(trainer, trainer_fn, *args, **kwargs)\u001b[0m\n\u001b[1;32m 42\u001b[0m \u001b[39mif\u001b[39;00m trainer\u001b[39m.\u001b[39mstrategy\u001b[39m.\u001b[39mlauncher \u001b[39mis\u001b[39;00m \u001b[39mnot\u001b[39;00m \u001b[39mNone\u001b[39;00m:\n\u001b[1;32m 43\u001b[0m \u001b[39mreturn\u001b[39;00m trainer\u001b[39m.\u001b[39mstrategy\u001b[39m.\u001b[39mlauncher\u001b[39m.\u001b[39mlaunch(trainer_fn, \u001b[39m*\u001b[39margs, trainer\u001b[39m=\u001b[39mtrainer, \u001b[39m*\u001b[39m\u001b[39m*\u001b[39mkwargs)\n\u001b[0;32m---> 44\u001b[0m \u001b[39mreturn\u001b[39;00m trainer_fn(\u001b[39m*\u001b[39;49margs, \u001b[39m*\u001b[39;49m\u001b[39m*\u001b[39;49mkwargs)\n\u001b[1;32m 46\u001b[0m \u001b[39mexcept\u001b[39;00m _TunerExitException:\n\u001b[1;32m 47\u001b[0m _call_teardown_hook(trainer)\n", + "File \u001b[0;32m/opt/conda/envs/sdofm/lib/python3.10/site-packages/lightning.pytorch/trainer/trainer.py:580\u001b[0m, in \u001b[0;36mTrainer._fit_impl\u001b[0;34m(self, model, train_dataloaders, val_dataloaders, datamodule, ckpt_path)\u001b[0m\n\u001b[1;32m 573\u001b[0m \u001b[39massert\u001b[39;00m \u001b[39mself\u001b[39m\u001b[39m.\u001b[39mstate\u001b[39m.\u001b[39mfn \u001b[39mis\u001b[39;00m \u001b[39mnot\u001b[39;00m \u001b[39mNone\u001b[39;00m\n\u001b[1;32m 574\u001b[0m ckpt_path \u001b[39m=\u001b[39m \u001b[39mself\u001b[39m\u001b[39m.\u001b[39m_checkpoint_connector\u001b[39m.\u001b[39m_select_ckpt_path(\n\u001b[1;32m 575\u001b[0m \u001b[39mself\u001b[39m\u001b[39m.\u001b[39mstate\u001b[39m.\u001b[39mfn,\n\u001b[1;32m 576\u001b[0m ckpt_path,\n\u001b[1;32m 577\u001b[0m model_provided\u001b[39m=\u001b[39m\u001b[39mTrue\u001b[39;00m,\n\u001b[1;32m 578\u001b[0m model_connected\u001b[39m=\u001b[39m\u001b[39mself\u001b[39m\u001b[39m.\u001b[39mlightning_module \u001b[39mis\u001b[39;00m \u001b[39mnot\u001b[39;00m \u001b[39mNone\u001b[39;00m,\n\u001b[1;32m 579\u001b[0m )\n\u001b[0;32m--> 580\u001b[0m \u001b[39mself\u001b[39;49m\u001b[39m.\u001b[39;49m_run(model, 
ckpt_path\u001b[39m=\u001b[39;49mckpt_path)\n\u001b[1;32m 582\u001b[0m \u001b[39massert\u001b[39;00m \u001b[39mself\u001b[39m\u001b[39m.\u001b[39mstate\u001b[39m.\u001b[39mstopped\n\u001b[1;32m 583\u001b[0m \u001b[39mself\u001b[39m\u001b[39m.\u001b[39mtraining \u001b[39m=\u001b[39m \u001b[39mFalse\u001b[39;00m\n", + "File \u001b[0;32m/opt/conda/envs/sdofm/lib/python3.10/site-packages/lightning.pytorch/trainer/trainer.py:987\u001b[0m, in \u001b[0;36mTrainer._run\u001b[0;34m(self, model, ckpt_path)\u001b[0m\n\u001b[1;32m 982\u001b[0m \u001b[39mself\u001b[39m\u001b[39m.\u001b[39m_signal_connector\u001b[39m.\u001b[39mregister_signal_handlers()\n\u001b[1;32m 984\u001b[0m \u001b[39m# ----------------------------\u001b[39;00m\n\u001b[1;32m 985\u001b[0m \u001b[39m# RUN THE TRAINER\u001b[39;00m\n\u001b[1;32m 986\u001b[0m \u001b[39m# ----------------------------\u001b[39;00m\n\u001b[0;32m--> 987\u001b[0m results \u001b[39m=\u001b[39m \u001b[39mself\u001b[39;49m\u001b[39m.\u001b[39;49m_run_stage()\n\u001b[1;32m 989\u001b[0m \u001b[39m# ----------------------------\u001b[39;00m\n\u001b[1;32m 990\u001b[0m \u001b[39m# POST-Training CLEAN UP\u001b[39;00m\n\u001b[1;32m 991\u001b[0m \u001b[39m# ----------------------------\u001b[39;00m\n\u001b[1;32m 992\u001b[0m log\u001b[39m.\u001b[39mdebug(\u001b[39mf\u001b[39m\u001b[39m\"\u001b[39m\u001b[39m{\u001b[39;00m\u001b[39mself\u001b[39m\u001b[39m.\u001b[39m\u001b[39m__class__\u001b[39m\u001b[39m.\u001b[39m\u001b[39m__name__\u001b[39m\u001b[39m}\u001b[39;00m\u001b[39m: trainer tearing down\u001b[39m\u001b[39m\"\u001b[39m)\n", + "File \u001b[0;32m/opt/conda/envs/sdofm/lib/python3.10/site-packages/lightning.pytorch/trainer/trainer.py:1031\u001b[0m, in \u001b[0;36mTrainer._run_stage\u001b[0;34m(self)\u001b[0m\n\u001b[1;32m 1029\u001b[0m \u001b[39mif\u001b[39;00m \u001b[39mself\u001b[39m\u001b[39m.\u001b[39mtraining:\n\u001b[1;32m 1030\u001b[0m \u001b[39mwith\u001b[39;00m isolate_rng():\n\u001b[0;32m-> 1031\u001b[0m \u001b[39mself\u001b[39;49m\u001b[39m.\u001b[39;49m_run_sanity_check()\n\u001b[1;32m 1032\u001b[0m \u001b[39mwith\u001b[39;00m torch\u001b[39m.\u001b[39mautograd\u001b[39m.\u001b[39mset_detect_anomaly(\u001b[39mself\u001b[39m\u001b[39m.\u001b[39m_detect_anomaly):\n\u001b[1;32m 1033\u001b[0m \u001b[39mself\u001b[39m\u001b[39m.\u001b[39mfit_loop\u001b[39m.\u001b[39mrun()\n", + "File \u001b[0;32m/opt/conda/envs/sdofm/lib/python3.10/site-packages/lightning.pytorch/trainer/trainer.py:1060\u001b[0m, in \u001b[0;36mTrainer._run_sanity_check\u001b[0;34m(self)\u001b[0m\n\u001b[1;32m 1057\u001b[0m call\u001b[39m.\u001b[39m_call_callback_hooks(\u001b[39mself\u001b[39m, \u001b[39m\"\u001b[39m\u001b[39mon_sanity_check_start\u001b[39m\u001b[39m\"\u001b[39m)\n\u001b[1;32m 1059\u001b[0m \u001b[39m# run eval step\u001b[39;00m\n\u001b[0;32m-> 1060\u001b[0m val_loop\u001b[39m.\u001b[39;49mrun()\n\u001b[1;32m 1062\u001b[0m call\u001b[39m.\u001b[39m_call_callback_hooks(\u001b[39mself\u001b[39m, \u001b[39m\"\u001b[39m\u001b[39mon_sanity_check_end\u001b[39m\u001b[39m\"\u001b[39m)\n\u001b[1;32m 1064\u001b[0m \u001b[39m# reset logger connector\u001b[39;00m\n", + "File \u001b[0;32m/opt/conda/envs/sdofm/lib/python3.10/site-packages/lightning.pytorch/loops/utilities.py:182\u001b[0m, in \u001b[0;36m_no_grad_context.._decorator\u001b[0;34m(self, *args, **kwargs)\u001b[0m\n\u001b[1;32m 180\u001b[0m context_manager \u001b[39m=\u001b[39m torch\u001b[39m.\u001b[39mno_grad\n\u001b[1;32m 181\u001b[0m \u001b[39mwith\u001b[39;00m context_manager():\n\u001b[0;32m--> 182\u001b[0m 
\u001b[39mreturn\u001b[39;00m loop_run(\u001b[39mself\u001b[39;49m, \u001b[39m*\u001b[39;49margs, \u001b[39m*\u001b[39;49m\u001b[39m*\u001b[39;49mkwargs)\n", + "File \u001b[0;32m/opt/conda/envs/sdofm/lib/python3.10/site-packages/lightning.pytorch/loops/evaluation_loop.py:135\u001b[0m, in \u001b[0;36m_EvaluationLoop.run\u001b[0;34m(self)\u001b[0m\n\u001b[1;32m 133\u001b[0m \u001b[39mself\u001b[39m\u001b[39m.\u001b[39mbatch_progress\u001b[39m.\u001b[39mis_last_batch \u001b[39m=\u001b[39m data_fetcher\u001b[39m.\u001b[39mdone\n\u001b[1;32m 134\u001b[0m \u001b[39m# run step hooks\u001b[39;00m\n\u001b[0;32m--> 135\u001b[0m \u001b[39mself\u001b[39;49m\u001b[39m.\u001b[39;49m_evaluation_step(batch, batch_idx, dataloader_idx, dataloader_iter)\n\u001b[1;32m 136\u001b[0m \u001b[39mexcept\u001b[39;00m \u001b[39mStopIteration\u001b[39;00m:\n\u001b[1;32m 137\u001b[0m \u001b[39m# this needs to wrap the `*_step` call too (not just `next`) for `dataloader_iter` support\u001b[39;00m\n\u001b[1;32m 138\u001b[0m \u001b[39mbreak\u001b[39;00m\n", + "File \u001b[0;32m/opt/conda/envs/sdofm/lib/python3.10/site-packages/lightning.pytorch/loops/evaluation_loop.py:396\u001b[0m, in \u001b[0;36m_EvaluationLoop._evaluation_step\u001b[0;34m(self, batch, batch_idx, dataloader_idx, dataloader_iter)\u001b[0m\n\u001b[1;32m 390\u001b[0m hook_name \u001b[39m=\u001b[39m \u001b[39m\"\u001b[39m\u001b[39mtest_step\u001b[39m\u001b[39m\"\u001b[39m \u001b[39mif\u001b[39;00m trainer\u001b[39m.\u001b[39mtesting \u001b[39melse\u001b[39;00m \u001b[39m\"\u001b[39m\u001b[39mvalidation_step\u001b[39m\u001b[39m\"\u001b[39m\n\u001b[1;32m 391\u001b[0m step_args \u001b[39m=\u001b[39m (\n\u001b[1;32m 392\u001b[0m \u001b[39mself\u001b[39m\u001b[39m.\u001b[39m_build_step_args_from_hook_kwargs(hook_kwargs, hook_name)\n\u001b[1;32m 393\u001b[0m \u001b[39mif\u001b[39;00m \u001b[39mnot\u001b[39;00m using_dataloader_iter\n\u001b[1;32m 394\u001b[0m \u001b[39melse\u001b[39;00m (dataloader_iter,)\n\u001b[1;32m 395\u001b[0m )\n\u001b[0;32m--> 396\u001b[0m output \u001b[39m=\u001b[39m call\u001b[39m.\u001b[39;49m_call_strategy_hook(trainer, hook_name, \u001b[39m*\u001b[39;49mstep_args)\n\u001b[1;32m 398\u001b[0m \u001b[39mself\u001b[39m\u001b[39m.\u001b[39mbatch_progress\u001b[39m.\u001b[39mincrement_processed()\n\u001b[1;32m 400\u001b[0m \u001b[39mif\u001b[39;00m using_dataloader_iter:\n\u001b[1;32m 401\u001b[0m \u001b[39m# update the hook kwargs now that the step method might have consumed the iterator\u001b[39;00m\n", + "File \u001b[0;32m/opt/conda/envs/sdofm/lib/python3.10/site-packages/lightning.pytorch/trainer/call.py:309\u001b[0m, in \u001b[0;36m_call_strategy_hook\u001b[0;34m(trainer, hook_name, *args, **kwargs)\u001b[0m\n\u001b[1;32m 306\u001b[0m \u001b[39mreturn\u001b[39;00m \u001b[39mNone\u001b[39;00m\n\u001b[1;32m 308\u001b[0m \u001b[39mwith\u001b[39;00m trainer\u001b[39m.\u001b[39mprofiler\u001b[39m.\u001b[39mprofile(\u001b[39mf\u001b[39m\u001b[39m\"\u001b[39m\u001b[39m[Strategy]\u001b[39m\u001b[39m{\u001b[39;00mtrainer\u001b[39m.\u001b[39mstrategy\u001b[39m.\u001b[39m\u001b[39m__class__\u001b[39m\u001b[39m.\u001b[39m\u001b[39m__name__\u001b[39m\u001b[39m}\u001b[39;00m\u001b[39m.\u001b[39m\u001b[39m{\u001b[39;00mhook_name\u001b[39m}\u001b[39;00m\u001b[39m\"\u001b[39m):\n\u001b[0;32m--> 309\u001b[0m output \u001b[39m=\u001b[39m fn(\u001b[39m*\u001b[39;49margs, \u001b[39m*\u001b[39;49m\u001b[39m*\u001b[39;49mkwargs)\n\u001b[1;32m 311\u001b[0m \u001b[39m# restore current_fx when nested context\u001b[39;00m\n\u001b[1;32m 312\u001b[0m 
pl_module\u001b[39m.\u001b[39m_current_fx_name \u001b[39m=\u001b[39m prev_fx_name\n", + "File \u001b[0;32m/opt/conda/envs/sdofm/lib/python3.10/site-packages/lightning.pytorch/strategies/strategy.py:412\u001b[0m, in \u001b[0;36mStrategy.validation_step\u001b[0;34m(self, *args, **kwargs)\u001b[0m\n\u001b[1;32m 410\u001b[0m \u001b[39mif\u001b[39;00m \u001b[39mself\u001b[39m\u001b[39m.\u001b[39mmodel \u001b[39m!=\u001b[39m \u001b[39mself\u001b[39m\u001b[39m.\u001b[39mlightning_module:\n\u001b[1;32m 411\u001b[0m \u001b[39mreturn\u001b[39;00m \u001b[39mself\u001b[39m\u001b[39m.\u001b[39m_forward_redirection(\u001b[39mself\u001b[39m\u001b[39m.\u001b[39mmodel, \u001b[39mself\u001b[39m\u001b[39m.\u001b[39mlightning_module, \u001b[39m\"\u001b[39m\u001b[39mvalidation_step\u001b[39m\u001b[39m\"\u001b[39m, \u001b[39m*\u001b[39margs, \u001b[39m*\u001b[39m\u001b[39m*\u001b[39mkwargs)\n\u001b[0;32m--> 412\u001b[0m \u001b[39mreturn\u001b[39;00m \u001b[39mself\u001b[39;49m\u001b[39m.\u001b[39;49mlightning_module\u001b[39m.\u001b[39;49mvalidation_step(\u001b[39m*\u001b[39;49margs, \u001b[39m*\u001b[39;49m\u001b[39m*\u001b[39;49mkwargs)\n", + "File \u001b[0;32m~/repos/SDO-FM/sdofm/finetuning/Autocalibration.py:153\u001b[0m, in \u001b[0;36mAutocalibration.validation_step\u001b[0;34m(self, batch, batch_idx)\u001b[0m\n\u001b[1;32m 151\u001b[0m x \u001b[39m=\u001b[39m \u001b[39mself\u001b[39m\u001b[39m.\u001b[39mencoder(dimmed_img)\n\u001b[1;32m 152\u001b[0m \u001b[39m# x_hat = self.autoencoder.unpatchify(x_hat)\u001b[39;00m\n\u001b[0;32m--> 153\u001b[0m y_hat \u001b[39m=\u001b[39m \u001b[39mself\u001b[39m\u001b[39m.\u001b[39mhead(\u001b[39mself\u001b[39;49m\u001b[39m.\u001b[39;49mdecoder(x))\n\u001b[1;32m 154\u001b[0m loss \u001b[39m=\u001b[39m \u001b[39mself\u001b[39m\u001b[39m.\u001b[39mloss_function(y_hat, dim_factor)\n\u001b[1;32m 155\u001b[0m \u001b[39mself\u001b[39m\u001b[39m.\u001b[39mlog(\u001b[39m\"\u001b[39m\u001b[39mval_loss\u001b[39m\u001b[39m\"\u001b[39m, loss)\n", + "File \u001b[0;32m/opt/conda/envs/sdofm/lib/python3.10/site-packages/torch/nn/modules/module.py:1511\u001b[0m, in \u001b[0;36mModule._wrapped_call_impl\u001b[0;34m(self, *args, **kwargs)\u001b[0m\n\u001b[1;32m 1509\u001b[0m \u001b[39mreturn\u001b[39;00m \u001b[39mself\u001b[39m\u001b[39m.\u001b[39m_compiled_call_impl(\u001b[39m*\u001b[39margs, \u001b[39m*\u001b[39m\u001b[39m*\u001b[39mkwargs) \u001b[39m# type: ignore[misc]\u001b[39;00m\n\u001b[1;32m 1510\u001b[0m \u001b[39melse\u001b[39;00m:\n\u001b[0;32m-> 1511\u001b[0m \u001b[39mreturn\u001b[39;00m \u001b[39mself\u001b[39;49m\u001b[39m.\u001b[39;49m_call_impl(\u001b[39m*\u001b[39;49margs, \u001b[39m*\u001b[39;49m\u001b[39m*\u001b[39;49mkwargs)\n", + "File \u001b[0;32m/opt/conda/envs/sdofm/lib/python3.10/site-packages/torch/nn/modules/module.py:1520\u001b[0m, in \u001b[0;36mModule._call_impl\u001b[0;34m(self, *args, **kwargs)\u001b[0m\n\u001b[1;32m 1515\u001b[0m \u001b[39m# If we don't have any hooks, we want to skip the rest of the logic in\u001b[39;00m\n\u001b[1;32m 1516\u001b[0m \u001b[39m# this function, and just call forward.\u001b[39;00m\n\u001b[1;32m 1517\u001b[0m \u001b[39mif\u001b[39;00m \u001b[39mnot\u001b[39;00m (\u001b[39mself\u001b[39m\u001b[39m.\u001b[39m_backward_hooks \u001b[39mor\u001b[39;00m \u001b[39mself\u001b[39m\u001b[39m.\u001b[39m_backward_pre_hooks \u001b[39mor\u001b[39;00m \u001b[39mself\u001b[39m\u001b[39m.\u001b[39m_forward_hooks \u001b[39mor\u001b[39;00m \u001b[39mself\u001b[39m\u001b[39m.\u001b[39m_forward_pre_hooks\n\u001b[1;32m 1518\u001b[0m 
\u001b[39mor\u001b[39;00m _global_backward_pre_hooks \u001b[39mor\u001b[39;00m _global_backward_hooks\n\u001b[1;32m 1519\u001b[0m \u001b[39mor\u001b[39;00m _global_forward_hooks \u001b[39mor\u001b[39;00m _global_forward_pre_hooks):\n\u001b[0;32m-> 1520\u001b[0m \u001b[39mreturn\u001b[39;00m forward_call(\u001b[39m*\u001b[39;49margs, \u001b[39m*\u001b[39;49m\u001b[39m*\u001b[39;49mkwargs)\n\u001b[1;32m 1522\u001b[0m \u001b[39mtry\u001b[39;00m:\n\u001b[1;32m 1523\u001b[0m result \u001b[39m=\u001b[39m \u001b[39mNone\u001b[39;00m\n", + "File \u001b[0;32m~/repos/SDO-FM/sdofm/models/prithvi_decoders.py:137\u001b[0m, in \u001b[0;36mConvTransformerTokensToEmbeddingNeck.forward\u001b[0;34m(self, x)\u001b[0m\n\u001b[1;32m 134\u001b[0m x \u001b[39m=\u001b[39m x[:, \u001b[39m1\u001b[39m:, :]\n\u001b[1;32m 136\u001b[0m x \u001b[39m=\u001b[39m x\u001b[39m.\u001b[39mpermute(\u001b[39m0\u001b[39m, \u001b[39m2\u001b[39m, \u001b[39m1\u001b[39m)\u001b[39m.\u001b[39mreshape(x\u001b[39m.\u001b[39mshape[\u001b[39m0\u001b[39m], \u001b[39m-\u001b[39m\u001b[39m1\u001b[39m, \u001b[39mself\u001b[39m\u001b[39m.\u001b[39mHp, \u001b[39mself\u001b[39m\u001b[39m.\u001b[39mWp)\n\u001b[0;32m--> 137\u001b[0m x \u001b[39m=\u001b[39m \u001b[39mself\u001b[39;49m\u001b[39m.\u001b[39;49mfpn1(x)\n\u001b[1;32m 138\u001b[0m x \u001b[39m=\u001b[39m \u001b[39mself\u001b[39m\u001b[39m.\u001b[39mfpn2(x)\n\u001b[1;32m 139\u001b[0m x \u001b[39m=\u001b[39m x\u001b[39m.\u001b[39mreshape((\u001b[39m-\u001b[39m\u001b[39m1\u001b[39m, \u001b[39mself\u001b[39m\u001b[39m.\u001b[39moutput_embed_dim, \u001b[39mself\u001b[39m\u001b[39m.\u001b[39mH_out, \u001b[39mself\u001b[39m\u001b[39m.\u001b[39mW_out))\n", + "File \u001b[0;32m/opt/conda/envs/sdofm/lib/python3.10/site-packages/torch/nn/modules/module.py:1511\u001b[0m, in \u001b[0;36mModule._wrapped_call_impl\u001b[0;34m(self, *args, **kwargs)\u001b[0m\n\u001b[1;32m 1509\u001b[0m \u001b[39mreturn\u001b[39;00m \u001b[39mself\u001b[39m\u001b[39m.\u001b[39m_compiled_call_impl(\u001b[39m*\u001b[39margs, \u001b[39m*\u001b[39m\u001b[39m*\u001b[39mkwargs) \u001b[39m# type: ignore[misc]\u001b[39;00m\n\u001b[1;32m 1510\u001b[0m \u001b[39melse\u001b[39;00m:\n\u001b[0;32m-> 1511\u001b[0m \u001b[39mreturn\u001b[39;00m \u001b[39mself\u001b[39;49m\u001b[39m.\u001b[39;49m_call_impl(\u001b[39m*\u001b[39;49margs, \u001b[39m*\u001b[39;49m\u001b[39m*\u001b[39;49mkwargs)\n", + "File \u001b[0;32m/opt/conda/envs/sdofm/lib/python3.10/site-packages/torch/nn/modules/module.py:1520\u001b[0m, in \u001b[0;36mModule._call_impl\u001b[0;34m(self, *args, **kwargs)\u001b[0m\n\u001b[1;32m 1515\u001b[0m \u001b[39m# If we don't have any hooks, we want to skip the rest of the logic in\u001b[39;00m\n\u001b[1;32m 1516\u001b[0m \u001b[39m# this function, and just call forward.\u001b[39;00m\n\u001b[1;32m 1517\u001b[0m \u001b[39mif\u001b[39;00m \u001b[39mnot\u001b[39;00m (\u001b[39mself\u001b[39m\u001b[39m.\u001b[39m_backward_hooks \u001b[39mor\u001b[39;00m \u001b[39mself\u001b[39m\u001b[39m.\u001b[39m_backward_pre_hooks \u001b[39mor\u001b[39;00m \u001b[39mself\u001b[39m\u001b[39m.\u001b[39m_forward_hooks \u001b[39mor\u001b[39;00m \u001b[39mself\u001b[39m\u001b[39m.\u001b[39m_forward_pre_hooks\n\u001b[1;32m 1518\u001b[0m \u001b[39mor\u001b[39;00m _global_backward_pre_hooks \u001b[39mor\u001b[39;00m _global_backward_hooks\n\u001b[1;32m 1519\u001b[0m \u001b[39mor\u001b[39;00m _global_forward_hooks \u001b[39mor\u001b[39;00m _global_forward_pre_hooks):\n\u001b[0;32m-> 1520\u001b[0m \u001b[39mreturn\u001b[39;00m 
forward_call(\u001b[39m*\u001b[39;49margs, \u001b[39m*\u001b[39;49m\u001b[39m*\u001b[39;49mkwargs)\n\u001b[1;32m 1522\u001b[0m \u001b[39mtry\u001b[39;00m:\n\u001b[1;32m 1523\u001b[0m result \u001b[39m=\u001b[39m \u001b[39mNone\u001b[39;00m\n", + "File \u001b[0;32m/opt/conda/envs/sdofm/lib/python3.10/site-packages/torch/nn/modules/container.py:217\u001b[0m, in \u001b[0;36mSequential.forward\u001b[0;34m(self, input)\u001b[0m\n\u001b[1;32m 215\u001b[0m \u001b[39mdef\u001b[39;00m \u001b[39mforward\u001b[39m(\u001b[39mself\u001b[39m, \u001b[39minput\u001b[39m):\n\u001b[1;32m 216\u001b[0m \u001b[39mfor\u001b[39;00m module \u001b[39min\u001b[39;00m \u001b[39mself\u001b[39m:\n\u001b[0;32m--> 217\u001b[0m \u001b[39minput\u001b[39m \u001b[39m=\u001b[39m module(\u001b[39minput\u001b[39;49m)\n\u001b[1;32m 218\u001b[0m \u001b[39mreturn\u001b[39;00m \u001b[39minput\u001b[39m\n", + "File \u001b[0;32m/opt/conda/envs/sdofm/lib/python3.10/site-packages/torch/nn/modules/module.py:1511\u001b[0m, in \u001b[0;36mModule._wrapped_call_impl\u001b[0;34m(self, *args, **kwargs)\u001b[0m\n\u001b[1;32m 1509\u001b[0m \u001b[39mreturn\u001b[39;00m \u001b[39mself\u001b[39m\u001b[39m.\u001b[39m_compiled_call_impl(\u001b[39m*\u001b[39margs, \u001b[39m*\u001b[39m\u001b[39m*\u001b[39mkwargs) \u001b[39m# type: ignore[misc]\u001b[39;00m\n\u001b[1;32m 1510\u001b[0m \u001b[39melse\u001b[39;00m:\n\u001b[0;32m-> 1511\u001b[0m \u001b[39mreturn\u001b[39;00m \u001b[39mself\u001b[39;49m\u001b[39m.\u001b[39;49m_call_impl(\u001b[39m*\u001b[39;49margs, \u001b[39m*\u001b[39;49m\u001b[39m*\u001b[39;49mkwargs)\n", + "File \u001b[0;32m/opt/conda/envs/sdofm/lib/python3.10/site-packages/torch/nn/modules/module.py:1520\u001b[0m, in \u001b[0;36mModule._call_impl\u001b[0;34m(self, *args, **kwargs)\u001b[0m\n\u001b[1;32m 1515\u001b[0m \u001b[39m# If we don't have any hooks, we want to skip the rest of the logic in\u001b[39;00m\n\u001b[1;32m 1516\u001b[0m \u001b[39m# this function, and just call forward.\u001b[39;00m\n\u001b[1;32m 1517\u001b[0m \u001b[39mif\u001b[39;00m \u001b[39mnot\u001b[39;00m (\u001b[39mself\u001b[39m\u001b[39m.\u001b[39m_backward_hooks \u001b[39mor\u001b[39;00m \u001b[39mself\u001b[39m\u001b[39m.\u001b[39m_backward_pre_hooks \u001b[39mor\u001b[39;00m \u001b[39mself\u001b[39m\u001b[39m.\u001b[39m_forward_hooks \u001b[39mor\u001b[39;00m \u001b[39mself\u001b[39m\u001b[39m.\u001b[39m_forward_pre_hooks\n\u001b[1;32m 1518\u001b[0m \u001b[39mor\u001b[39;00m _global_backward_pre_hooks \u001b[39mor\u001b[39;00m _global_backward_hooks\n\u001b[1;32m 1519\u001b[0m \u001b[39mor\u001b[39;00m _global_forward_hooks \u001b[39mor\u001b[39;00m _global_forward_pre_hooks):\n\u001b[0;32m-> 1520\u001b[0m \u001b[39mreturn\u001b[39;00m forward_call(\u001b[39m*\u001b[39;49margs, \u001b[39m*\u001b[39;49m\u001b[39m*\u001b[39;49mkwargs)\n\u001b[1;32m 1522\u001b[0m \u001b[39mtry\u001b[39;00m:\n\u001b[1;32m 1523\u001b[0m result \u001b[39m=\u001b[39m \u001b[39mNone\u001b[39;00m\n", + "File \u001b[0;32m/opt/conda/envs/sdofm/lib/python3.10/site-packages/torch/nn/modules/conv.py:952\u001b[0m, in \u001b[0;36mConvTranspose2d.forward\u001b[0;34m(self, input, output_size)\u001b[0m\n\u001b[1;32m 947\u001b[0m num_spatial_dims \u001b[39m=\u001b[39m \u001b[39m2\u001b[39m\n\u001b[1;32m 948\u001b[0m output_padding \u001b[39m=\u001b[39m \u001b[39mself\u001b[39m\u001b[39m.\u001b[39m_output_padding(\n\u001b[1;32m 949\u001b[0m \u001b[39minput\u001b[39m, output_size, \u001b[39mself\u001b[39m\u001b[39m.\u001b[39mstride, \u001b[39mself\u001b[39m\u001b[39m.\u001b[39mpadding, 
\u001b[39mself\u001b[39m\u001b[39m.\u001b[39mkernel_size, \u001b[39m# type: ignore[arg-type]\u001b[39;00m\n\u001b[1;32m 950\u001b[0m num_spatial_dims, \u001b[39mself\u001b[39m\u001b[39m.\u001b[39mdilation) \u001b[39m# type: ignore[arg-type]\u001b[39;00m\n\u001b[0;32m--> 952\u001b[0m \u001b[39mreturn\u001b[39;00m F\u001b[39m.\u001b[39;49mconv_transpose2d(\n\u001b[1;32m 953\u001b[0m \u001b[39minput\u001b[39;49m, \u001b[39mself\u001b[39;49m\u001b[39m.\u001b[39;49mweight, \u001b[39mself\u001b[39;49m\u001b[39m.\u001b[39;49mbias, \u001b[39mself\u001b[39;49m\u001b[39m.\u001b[39;49mstride, \u001b[39mself\u001b[39;49m\u001b[39m.\u001b[39;49mpadding,\n\u001b[1;32m 954\u001b[0m output_padding, \u001b[39mself\u001b[39;49m\u001b[39m.\u001b[39;49mgroups, \u001b[39mself\u001b[39;49m\u001b[39m.\u001b[39;49mdilation)\n", + "\u001b[0;31mRuntimeError\u001b[0m: Given transposed=1, weight of size [128, 32, 2, 2], expected input[8, 384, 32, 32] to have 128 channels, but got 384 channels instead" + ] + }, + { + "ename": "", + "evalue": "", + "output_type": "error", + "traceback": [ + "\u001b[1;31mThe Kernel crashed while executing code in the the current cell or a previous cell. Please review the code in the cell(s) to identify a possible cause of the failure. Click here for more info. View Jupyter log for further details." + ] + } + ], + "source": [ + "trainer = pl.Trainer(\n", + " devices=1, accelerator=cfg.experiment.accelerator, max_epochs=cfg.model.opt.epochs\n", + ")\n", + "trainer.fit(model=model, datamodule=dimmed_data_module)" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": {}, + "outputs": [], + "source": [] + } + ], + "metadata": { + "kernelspec": { + "display_name": "sdofm", + "language": "python", + "name": "python3" + }, + "language_info": { + "codemirror_mode": { + "name": "ipython", + "version": 3 + }, + "file_extension": ".py", + "mimetype": "text/x-python", + "name": "python", + "nbconvert_exporter": "python", + "pygments_lexer": "ipython3", + "version": "3.10.14" + }, + "orig_nbformat": 4 }, - { - "data": { - "application/vnd.jupyter.widget-view+json": { - "model_id": "3fae8f37a75448fd9d717a0b9afcd2d4", - "version_major": 2, - "version_minor": 0 - }, - "text/plain": [ - "Sanity Checking: | | 0/? 
[00:001\u001b[0m trainer \u001b[39m=\u001b[39m pl\u001b[39m.\u001b[39mTrainer(devices\u001b[39m=\u001b[39m\u001b[39m1\u001b[39m, accelerator\u001b[39m=\u001b[39mcfg\u001b[39m.\u001b[39mexperiment\u001b[39m.\u001b[39maccelerator, max_epochs\u001b[39m=\u001b[39mcfg\u001b[39m.\u001b[39mmodel\u001b[39m.\u001b[39mopt\u001b[39m.\u001b[39mepochs)\n\u001b[0;32m----> 2\u001b[0m trainer\u001b[39m.\u001b[39;49mfit(model\u001b[39m=\u001b[39;49mmodel, datamodule\u001b[39m=\u001b[39;49mdimmed_data_module)\n", - "File \u001b[0;32m/opt/conda/envs/sdofm/lib/python3.10/site-packages/lighting.pytorch/trainer/trainer.py:544\u001b[0m, in \u001b[0;36mTrainer.fit\u001b[0;34m(self, model, train_dataloaders, val_dataloaders, datamodule, ckpt_path)\u001b[0m\n\u001b[1;32m 542\u001b[0m \u001b[39mself\u001b[39m\u001b[39m.\u001b[39mstate\u001b[39m.\u001b[39mstatus \u001b[39m=\u001b[39m TrainerStatus\u001b[39m.\u001b[39mRUNNING\n\u001b[1;32m 543\u001b[0m \u001b[39mself\u001b[39m\u001b[39m.\u001b[39mtraining \u001b[39m=\u001b[39m \u001b[39mTrue\u001b[39;00m\n\u001b[0;32m--> 544\u001b[0m call\u001b[39m.\u001b[39;49m_call_and_handle_interrupt(\n\u001b[1;32m 545\u001b[0m \u001b[39mself\u001b[39;49m, \u001b[39mself\u001b[39;49m\u001b[39m.\u001b[39;49m_fit_impl, model, train_dataloaders, val_dataloaders, datamodule, ckpt_path\n\u001b[1;32m 546\u001b[0m )\n", - "File \u001b[0;32m/opt/conda/envs/sdofm/lib/python3.10/site-packages/lighting.pytorch/trainer/call.py:44\u001b[0m, in \u001b[0;36m_call_and_handle_interrupt\u001b[0;34m(trainer, trainer_fn, *args, **kwargs)\u001b[0m\n\u001b[1;32m 42\u001b[0m \u001b[39mif\u001b[39;00m trainer\u001b[39m.\u001b[39mstrategy\u001b[39m.\u001b[39mlauncher \u001b[39mis\u001b[39;00m \u001b[39mnot\u001b[39;00m \u001b[39mNone\u001b[39;00m:\n\u001b[1;32m 43\u001b[0m \u001b[39mreturn\u001b[39;00m trainer\u001b[39m.\u001b[39mstrategy\u001b[39m.\u001b[39mlauncher\u001b[39m.\u001b[39mlaunch(trainer_fn, \u001b[39m*\u001b[39margs, trainer\u001b[39m=\u001b[39mtrainer, \u001b[39m*\u001b[39m\u001b[39m*\u001b[39mkwargs)\n\u001b[0;32m---> 44\u001b[0m \u001b[39mreturn\u001b[39;00m trainer_fn(\u001b[39m*\u001b[39;49margs, \u001b[39m*\u001b[39;49m\u001b[39m*\u001b[39;49mkwargs)\n\u001b[1;32m 46\u001b[0m \u001b[39mexcept\u001b[39;00m _TunerExitException:\n\u001b[1;32m 47\u001b[0m _call_teardown_hook(trainer)\n", - "File \u001b[0;32m/opt/conda/envs/sdofm/lib/python3.10/site-packages/lighting.pytorch/trainer/trainer.py:580\u001b[0m, in \u001b[0;36mTrainer._fit_impl\u001b[0;34m(self, model, train_dataloaders, val_dataloaders, datamodule, ckpt_path)\u001b[0m\n\u001b[1;32m 573\u001b[0m \u001b[39massert\u001b[39;00m \u001b[39mself\u001b[39m\u001b[39m.\u001b[39mstate\u001b[39m.\u001b[39mfn \u001b[39mis\u001b[39;00m \u001b[39mnot\u001b[39;00m \u001b[39mNone\u001b[39;00m\n\u001b[1;32m 574\u001b[0m ckpt_path \u001b[39m=\u001b[39m \u001b[39mself\u001b[39m\u001b[39m.\u001b[39m_checkpoint_connector\u001b[39m.\u001b[39m_select_ckpt_path(\n\u001b[1;32m 575\u001b[0m \u001b[39mself\u001b[39m\u001b[39m.\u001b[39mstate\u001b[39m.\u001b[39mfn,\n\u001b[1;32m 576\u001b[0m ckpt_path,\n\u001b[1;32m 577\u001b[0m model_provided\u001b[39m=\u001b[39m\u001b[39mTrue\u001b[39;00m,\n\u001b[1;32m 578\u001b[0m model_connected\u001b[39m=\u001b[39m\u001b[39mself\u001b[39m\u001b[39m.\u001b[39mlightning_module \u001b[39mis\u001b[39;00m \u001b[39mnot\u001b[39;00m \u001b[39mNone\u001b[39;00m,\n\u001b[1;32m 579\u001b[0m )\n\u001b[0;32m--> 580\u001b[0m \u001b[39mself\u001b[39;49m\u001b[39m.\u001b[39;49m_run(model, 
ckpt_path\u001b[39m=\u001b[39;49mckpt_path)\n\u001b[1;32m 582\u001b[0m \u001b[39massert\u001b[39;00m \u001b[39mself\u001b[39m\u001b[39m.\u001b[39mstate\u001b[39m.\u001b[39mstopped\n\u001b[1;32m 583\u001b[0m \u001b[39mself\u001b[39m\u001b[39m.\u001b[39mtraining \u001b[39m=\u001b[39m \u001b[39mFalse\u001b[39;00m\n", - "File \u001b[0;32m/opt/conda/envs/sdofm/lib/python3.10/site-packages/lighting.pytorch/trainer/trainer.py:987\u001b[0m, in \u001b[0;36mTrainer._run\u001b[0;34m(self, model, ckpt_path)\u001b[0m\n\u001b[1;32m 982\u001b[0m \u001b[39mself\u001b[39m\u001b[39m.\u001b[39m_signal_connector\u001b[39m.\u001b[39mregister_signal_handlers()\n\u001b[1;32m 984\u001b[0m \u001b[39m# ----------------------------\u001b[39;00m\n\u001b[1;32m 985\u001b[0m \u001b[39m# RUN THE TRAINER\u001b[39;00m\n\u001b[1;32m 986\u001b[0m \u001b[39m# ----------------------------\u001b[39;00m\n\u001b[0;32m--> 987\u001b[0m results \u001b[39m=\u001b[39m \u001b[39mself\u001b[39;49m\u001b[39m.\u001b[39;49m_run_stage()\n\u001b[1;32m 989\u001b[0m \u001b[39m# ----------------------------\u001b[39;00m\n\u001b[1;32m 990\u001b[0m \u001b[39m# POST-Training CLEAN UP\u001b[39;00m\n\u001b[1;32m 991\u001b[0m \u001b[39m# ----------------------------\u001b[39;00m\n\u001b[1;32m 992\u001b[0m log\u001b[39m.\u001b[39mdebug(\u001b[39mf\u001b[39m\u001b[39m\"\u001b[39m\u001b[39m{\u001b[39;00m\u001b[39mself\u001b[39m\u001b[39m.\u001b[39m\u001b[39m__class__\u001b[39m\u001b[39m.\u001b[39m\u001b[39m__name__\u001b[39m\u001b[39m}\u001b[39;00m\u001b[39m: trainer tearing down\u001b[39m\u001b[39m\"\u001b[39m)\n", - "File \u001b[0;32m/opt/conda/envs/sdofm/lib/python3.10/site-packages/lighting.pytorch/trainer/trainer.py:1031\u001b[0m, in \u001b[0;36mTrainer._run_stage\u001b[0;34m(self)\u001b[0m\n\u001b[1;32m 1029\u001b[0m \u001b[39mif\u001b[39;00m \u001b[39mself\u001b[39m\u001b[39m.\u001b[39mtraining:\n\u001b[1;32m 1030\u001b[0m \u001b[39mwith\u001b[39;00m isolate_rng():\n\u001b[0;32m-> 1031\u001b[0m \u001b[39mself\u001b[39;49m\u001b[39m.\u001b[39;49m_run_sanity_check()\n\u001b[1;32m 1032\u001b[0m \u001b[39mwith\u001b[39;00m torch\u001b[39m.\u001b[39mautograd\u001b[39m.\u001b[39mset_detect_anomaly(\u001b[39mself\u001b[39m\u001b[39m.\u001b[39m_detect_anomaly):\n\u001b[1;32m 1033\u001b[0m \u001b[39mself\u001b[39m\u001b[39m.\u001b[39mfit_loop\u001b[39m.\u001b[39mrun()\n", - "File \u001b[0;32m/opt/conda/envs/sdofm/lib/python3.10/site-packages/lighting.pytorch/trainer/trainer.py:1060\u001b[0m, in \u001b[0;36mTrainer._run_sanity_check\u001b[0;34m(self)\u001b[0m\n\u001b[1;32m 1057\u001b[0m call\u001b[39m.\u001b[39m_call_callback_hooks(\u001b[39mself\u001b[39m, \u001b[39m\"\u001b[39m\u001b[39mon_sanity_check_start\u001b[39m\u001b[39m\"\u001b[39m)\n\u001b[1;32m 1059\u001b[0m \u001b[39m# run eval step\u001b[39;00m\n\u001b[0;32m-> 1060\u001b[0m val_loop\u001b[39m.\u001b[39;49mrun()\n\u001b[1;32m 1062\u001b[0m call\u001b[39m.\u001b[39m_call_callback_hooks(\u001b[39mself\u001b[39m, \u001b[39m\"\u001b[39m\u001b[39mon_sanity_check_end\u001b[39m\u001b[39m\"\u001b[39m)\n\u001b[1;32m 1064\u001b[0m \u001b[39m# reset logger connector\u001b[39;00m\n", - "File \u001b[0;32m/opt/conda/envs/sdofm/lib/python3.10/site-packages/lighting.pytorch/loops/utilities.py:182\u001b[0m, in \u001b[0;36m_no_grad_context.._decorator\u001b[0;34m(self, *args, **kwargs)\u001b[0m\n\u001b[1;32m 180\u001b[0m context_manager \u001b[39m=\u001b[39m torch\u001b[39m.\u001b[39mno_grad\n\u001b[1;32m 181\u001b[0m \u001b[39mwith\u001b[39;00m context_manager():\n\u001b[0;32m--> 182\u001b[0m 
\u001b[39mreturn\u001b[39;00m loop_run(\u001b[39mself\u001b[39;49m, \u001b[39m*\u001b[39;49margs, \u001b[39m*\u001b[39;49m\u001b[39m*\u001b[39;49mkwargs)\n", - "File \u001b[0;32m/opt/conda/envs/sdofm/lib/python3.10/site-packages/lighting.pytorch/loops/evaluation_loop.py:135\u001b[0m, in \u001b[0;36m_EvaluationLoop.run\u001b[0;34m(self)\u001b[0m\n\u001b[1;32m 133\u001b[0m \u001b[39mself\u001b[39m\u001b[39m.\u001b[39mbatch_progress\u001b[39m.\u001b[39mis_last_batch \u001b[39m=\u001b[39m data_fetcher\u001b[39m.\u001b[39mdone\n\u001b[1;32m 134\u001b[0m \u001b[39m# run step hooks\u001b[39;00m\n\u001b[0;32m--> 135\u001b[0m \u001b[39mself\u001b[39;49m\u001b[39m.\u001b[39;49m_evaluation_step(batch, batch_idx, dataloader_idx, dataloader_iter)\n\u001b[1;32m 136\u001b[0m \u001b[39mexcept\u001b[39;00m \u001b[39mStopIteration\u001b[39;00m:\n\u001b[1;32m 137\u001b[0m \u001b[39m# this needs to wrap the `*_step` call too (not just `next`) for `dataloader_iter` support\u001b[39;00m\n\u001b[1;32m 138\u001b[0m \u001b[39mbreak\u001b[39;00m\n", - "File \u001b[0;32m/opt/conda/envs/sdofm/lib/python3.10/site-packages/lighting.pytorch/loops/evaluation_loop.py:396\u001b[0m, in \u001b[0;36m_EvaluationLoop._evaluation_step\u001b[0;34m(self, batch, batch_idx, dataloader_idx, dataloader_iter)\u001b[0m\n\u001b[1;32m 390\u001b[0m hook_name \u001b[39m=\u001b[39m \u001b[39m\"\u001b[39m\u001b[39mtest_step\u001b[39m\u001b[39m\"\u001b[39m \u001b[39mif\u001b[39;00m trainer\u001b[39m.\u001b[39mtesting \u001b[39melse\u001b[39;00m \u001b[39m\"\u001b[39m\u001b[39mvalidation_step\u001b[39m\u001b[39m\"\u001b[39m\n\u001b[1;32m 391\u001b[0m step_args \u001b[39m=\u001b[39m (\n\u001b[1;32m 392\u001b[0m \u001b[39mself\u001b[39m\u001b[39m.\u001b[39m_build_step_args_from_hook_kwargs(hook_kwargs, hook_name)\n\u001b[1;32m 393\u001b[0m \u001b[39mif\u001b[39;00m \u001b[39mnot\u001b[39;00m using_dataloader_iter\n\u001b[1;32m 394\u001b[0m \u001b[39melse\u001b[39;00m (dataloader_iter,)\n\u001b[1;32m 395\u001b[0m )\n\u001b[0;32m--> 396\u001b[0m output \u001b[39m=\u001b[39m call\u001b[39m.\u001b[39;49m_call_strategy_hook(trainer, hook_name, \u001b[39m*\u001b[39;49mstep_args)\n\u001b[1;32m 398\u001b[0m \u001b[39mself\u001b[39m\u001b[39m.\u001b[39mbatch_progress\u001b[39m.\u001b[39mincrement_processed()\n\u001b[1;32m 400\u001b[0m \u001b[39mif\u001b[39;00m using_dataloader_iter:\n\u001b[1;32m 401\u001b[0m \u001b[39m# update the hook kwargs now that the step method might have consumed the iterator\u001b[39;00m\n", - "File \u001b[0;32m/opt/conda/envs/sdofm/lib/python3.10/site-packages/lighting.pytorch/trainer/call.py:309\u001b[0m, in \u001b[0;36m_call_strategy_hook\u001b[0;34m(trainer, hook_name, *args, **kwargs)\u001b[0m\n\u001b[1;32m 306\u001b[0m \u001b[39mreturn\u001b[39;00m \u001b[39mNone\u001b[39;00m\n\u001b[1;32m 308\u001b[0m \u001b[39mwith\u001b[39;00m trainer\u001b[39m.\u001b[39mprofiler\u001b[39m.\u001b[39mprofile(\u001b[39mf\u001b[39m\u001b[39m\"\u001b[39m\u001b[39m[Strategy]\u001b[39m\u001b[39m{\u001b[39;00mtrainer\u001b[39m.\u001b[39mstrategy\u001b[39m.\u001b[39m\u001b[39m__class__\u001b[39m\u001b[39m.\u001b[39m\u001b[39m__name__\u001b[39m\u001b[39m}\u001b[39;00m\u001b[39m.\u001b[39m\u001b[39m{\u001b[39;00mhook_name\u001b[39m}\u001b[39;00m\u001b[39m\"\u001b[39m):\n\u001b[0;32m--> 309\u001b[0m output \u001b[39m=\u001b[39m fn(\u001b[39m*\u001b[39;49margs, \u001b[39m*\u001b[39;49m\u001b[39m*\u001b[39;49mkwargs)\n\u001b[1;32m 311\u001b[0m \u001b[39m# restore current_fx when nested context\u001b[39;00m\n\u001b[1;32m 312\u001b[0m 
pl_module\u001b[39m.\u001b[39m_current_fx_name \u001b[39m=\u001b[39m prev_fx_name\n", - "File \u001b[0;32m/opt/conda/envs/sdofm/lib/python3.10/site-packages/lighting.pytorch/strategies/strategy.py:412\u001b[0m, in \u001b[0;36mStrategy.validation_step\u001b[0;34m(self, *args, **kwargs)\u001b[0m\n\u001b[1;32m 410\u001b[0m \u001b[39mif\u001b[39;00m \u001b[39mself\u001b[39m\u001b[39m.\u001b[39mmodel \u001b[39m!=\u001b[39m \u001b[39mself\u001b[39m\u001b[39m.\u001b[39mlightning_module:\n\u001b[1;32m 411\u001b[0m \u001b[39mreturn\u001b[39;00m \u001b[39mself\u001b[39m\u001b[39m.\u001b[39m_forward_redirection(\u001b[39mself\u001b[39m\u001b[39m.\u001b[39mmodel, \u001b[39mself\u001b[39m\u001b[39m.\u001b[39mlightning_module, \u001b[39m\"\u001b[39m\u001b[39mvalidation_step\u001b[39m\u001b[39m\"\u001b[39m, \u001b[39m*\u001b[39margs, \u001b[39m*\u001b[39m\u001b[39m*\u001b[39mkwargs)\n\u001b[0;32m--> 412\u001b[0m \u001b[39mreturn\u001b[39;00m \u001b[39mself\u001b[39;49m\u001b[39m.\u001b[39;49mlightning_module\u001b[39m.\u001b[39;49mvalidation_step(\u001b[39m*\u001b[39;49margs, \u001b[39m*\u001b[39;49m\u001b[39m*\u001b[39;49mkwargs)\n", - "File \u001b[0;32m~/repos/SDO-FM/sdofm/finetuning/Autocalibration.py:153\u001b[0m, in \u001b[0;36mAutocalibration.validation_step\u001b[0;34m(self, batch, batch_idx)\u001b[0m\n\u001b[1;32m 151\u001b[0m x \u001b[39m=\u001b[39m \u001b[39mself\u001b[39m\u001b[39m.\u001b[39mencoder(dimmed_img)\n\u001b[1;32m 152\u001b[0m \u001b[39m# x_hat = self.autoencoder.unpatchify(x_hat)\u001b[39;00m\n\u001b[0;32m--> 153\u001b[0m y_hat \u001b[39m=\u001b[39m \u001b[39mself\u001b[39m\u001b[39m.\u001b[39mhead(\u001b[39mself\u001b[39;49m\u001b[39m.\u001b[39;49mdecoder(x))\n\u001b[1;32m 154\u001b[0m loss \u001b[39m=\u001b[39m \u001b[39mself\u001b[39m\u001b[39m.\u001b[39mloss_function(y_hat, dim_factor)\n\u001b[1;32m 155\u001b[0m \u001b[39mself\u001b[39m\u001b[39m.\u001b[39mlog(\u001b[39m\"\u001b[39m\u001b[39mval_loss\u001b[39m\u001b[39m\"\u001b[39m, loss)\n", - "File \u001b[0;32m/opt/conda/envs/sdofm/lib/python3.10/site-packages/torch/nn/modules/module.py:1511\u001b[0m, in \u001b[0;36mModule._wrapped_call_impl\u001b[0;34m(self, *args, **kwargs)\u001b[0m\n\u001b[1;32m 1509\u001b[0m \u001b[39mreturn\u001b[39;00m \u001b[39mself\u001b[39m\u001b[39m.\u001b[39m_compiled_call_impl(\u001b[39m*\u001b[39margs, \u001b[39m*\u001b[39m\u001b[39m*\u001b[39mkwargs) \u001b[39m# type: ignore[misc]\u001b[39;00m\n\u001b[1;32m 1510\u001b[0m \u001b[39melse\u001b[39;00m:\n\u001b[0;32m-> 1511\u001b[0m \u001b[39mreturn\u001b[39;00m \u001b[39mself\u001b[39;49m\u001b[39m.\u001b[39;49m_call_impl(\u001b[39m*\u001b[39;49margs, \u001b[39m*\u001b[39;49m\u001b[39m*\u001b[39;49mkwargs)\n", - "File \u001b[0;32m/opt/conda/envs/sdofm/lib/python3.10/site-packages/torch/nn/modules/module.py:1520\u001b[0m, in \u001b[0;36mModule._call_impl\u001b[0;34m(self, *args, **kwargs)\u001b[0m\n\u001b[1;32m 1515\u001b[0m \u001b[39m# If we don't have any hooks, we want to skip the rest of the logic in\u001b[39;00m\n\u001b[1;32m 1516\u001b[0m \u001b[39m# this function, and just call forward.\u001b[39;00m\n\u001b[1;32m 1517\u001b[0m \u001b[39mif\u001b[39;00m \u001b[39mnot\u001b[39;00m (\u001b[39mself\u001b[39m\u001b[39m.\u001b[39m_backward_hooks \u001b[39mor\u001b[39;00m \u001b[39mself\u001b[39m\u001b[39m.\u001b[39m_backward_pre_hooks \u001b[39mor\u001b[39;00m \u001b[39mself\u001b[39m\u001b[39m.\u001b[39m_forward_hooks \u001b[39mor\u001b[39;00m \u001b[39mself\u001b[39m\u001b[39m.\u001b[39m_forward_pre_hooks\n\u001b[1;32m 1518\u001b[0m 
\u001b[39mor\u001b[39;00m _global_backward_pre_hooks \u001b[39mor\u001b[39;00m _global_backward_hooks\n\u001b[1;32m 1519\u001b[0m \u001b[39mor\u001b[39;00m _global_forward_hooks \u001b[39mor\u001b[39;00m _global_forward_pre_hooks):\n\u001b[0;32m-> 1520\u001b[0m \u001b[39mreturn\u001b[39;00m forward_call(\u001b[39m*\u001b[39;49margs, \u001b[39m*\u001b[39;49m\u001b[39m*\u001b[39;49mkwargs)\n\u001b[1;32m 1522\u001b[0m \u001b[39mtry\u001b[39;00m:\n\u001b[1;32m 1523\u001b[0m result \u001b[39m=\u001b[39m \u001b[39mNone\u001b[39;00m\n", - "File \u001b[0;32m~/repos/SDO-FM/sdofm/models/prithvi_decoders.py:137\u001b[0m, in \u001b[0;36mConvTransformerTokensToEmbeddingNeck.forward\u001b[0;34m(self, x)\u001b[0m\n\u001b[1;32m 134\u001b[0m x \u001b[39m=\u001b[39m x[:, \u001b[39m1\u001b[39m:, :]\n\u001b[1;32m 136\u001b[0m x \u001b[39m=\u001b[39m x\u001b[39m.\u001b[39mpermute(\u001b[39m0\u001b[39m, \u001b[39m2\u001b[39m, \u001b[39m1\u001b[39m)\u001b[39m.\u001b[39mreshape(x\u001b[39m.\u001b[39mshape[\u001b[39m0\u001b[39m], \u001b[39m-\u001b[39m\u001b[39m1\u001b[39m, \u001b[39mself\u001b[39m\u001b[39m.\u001b[39mHp, \u001b[39mself\u001b[39m\u001b[39m.\u001b[39mWp)\n\u001b[0;32m--> 137\u001b[0m x \u001b[39m=\u001b[39m \u001b[39mself\u001b[39;49m\u001b[39m.\u001b[39;49mfpn1(x)\n\u001b[1;32m 138\u001b[0m x \u001b[39m=\u001b[39m \u001b[39mself\u001b[39m\u001b[39m.\u001b[39mfpn2(x)\n\u001b[1;32m 139\u001b[0m x \u001b[39m=\u001b[39m x\u001b[39m.\u001b[39mreshape((\u001b[39m-\u001b[39m\u001b[39m1\u001b[39m, \u001b[39mself\u001b[39m\u001b[39m.\u001b[39moutput_embed_dim, \u001b[39mself\u001b[39m\u001b[39m.\u001b[39mH_out, \u001b[39mself\u001b[39m\u001b[39m.\u001b[39mW_out))\n", - "File \u001b[0;32m/opt/conda/envs/sdofm/lib/python3.10/site-packages/torch/nn/modules/module.py:1511\u001b[0m, in \u001b[0;36mModule._wrapped_call_impl\u001b[0;34m(self, *args, **kwargs)\u001b[0m\n\u001b[1;32m 1509\u001b[0m \u001b[39mreturn\u001b[39;00m \u001b[39mself\u001b[39m\u001b[39m.\u001b[39m_compiled_call_impl(\u001b[39m*\u001b[39margs, \u001b[39m*\u001b[39m\u001b[39m*\u001b[39mkwargs) \u001b[39m# type: ignore[misc]\u001b[39;00m\n\u001b[1;32m 1510\u001b[0m \u001b[39melse\u001b[39;00m:\n\u001b[0;32m-> 1511\u001b[0m \u001b[39mreturn\u001b[39;00m \u001b[39mself\u001b[39;49m\u001b[39m.\u001b[39;49m_call_impl(\u001b[39m*\u001b[39;49margs, \u001b[39m*\u001b[39;49m\u001b[39m*\u001b[39;49mkwargs)\n", - "File \u001b[0;32m/opt/conda/envs/sdofm/lib/python3.10/site-packages/torch/nn/modules/module.py:1520\u001b[0m, in \u001b[0;36mModule._call_impl\u001b[0;34m(self, *args, **kwargs)\u001b[0m\n\u001b[1;32m 1515\u001b[0m \u001b[39m# If we don't have any hooks, we want to skip the rest of the logic in\u001b[39;00m\n\u001b[1;32m 1516\u001b[0m \u001b[39m# this function, and just call forward.\u001b[39;00m\n\u001b[1;32m 1517\u001b[0m \u001b[39mif\u001b[39;00m \u001b[39mnot\u001b[39;00m (\u001b[39mself\u001b[39m\u001b[39m.\u001b[39m_backward_hooks \u001b[39mor\u001b[39;00m \u001b[39mself\u001b[39m\u001b[39m.\u001b[39m_backward_pre_hooks \u001b[39mor\u001b[39;00m \u001b[39mself\u001b[39m\u001b[39m.\u001b[39m_forward_hooks \u001b[39mor\u001b[39;00m \u001b[39mself\u001b[39m\u001b[39m.\u001b[39m_forward_pre_hooks\n\u001b[1;32m 1518\u001b[0m \u001b[39mor\u001b[39;00m _global_backward_pre_hooks \u001b[39mor\u001b[39;00m _global_backward_hooks\n\u001b[1;32m 1519\u001b[0m \u001b[39mor\u001b[39;00m _global_forward_hooks \u001b[39mor\u001b[39;00m _global_forward_pre_hooks):\n\u001b[0;32m-> 1520\u001b[0m \u001b[39mreturn\u001b[39;00m 
forward_call(\u001b[39m*\u001b[39;49margs, \u001b[39m*\u001b[39;49m\u001b[39m*\u001b[39;49mkwargs)\n\u001b[1;32m 1522\u001b[0m \u001b[39mtry\u001b[39;00m:\n\u001b[1;32m 1523\u001b[0m result \u001b[39m=\u001b[39m \u001b[39mNone\u001b[39;00m\n", - "File \u001b[0;32m/opt/conda/envs/sdofm/lib/python3.10/site-packages/torch/nn/modules/container.py:217\u001b[0m, in \u001b[0;36mSequential.forward\u001b[0;34m(self, input)\u001b[0m\n\u001b[1;32m 215\u001b[0m \u001b[39mdef\u001b[39;00m \u001b[39mforward\u001b[39m(\u001b[39mself\u001b[39m, \u001b[39minput\u001b[39m):\n\u001b[1;32m 216\u001b[0m \u001b[39mfor\u001b[39;00m module \u001b[39min\u001b[39;00m \u001b[39mself\u001b[39m:\n\u001b[0;32m--> 217\u001b[0m \u001b[39minput\u001b[39m \u001b[39m=\u001b[39m module(\u001b[39minput\u001b[39;49m)\n\u001b[1;32m 218\u001b[0m \u001b[39mreturn\u001b[39;00m \u001b[39minput\u001b[39m\n", - "File \u001b[0;32m/opt/conda/envs/sdofm/lib/python3.10/site-packages/torch/nn/modules/module.py:1511\u001b[0m, in \u001b[0;36mModule._wrapped_call_impl\u001b[0;34m(self, *args, **kwargs)\u001b[0m\n\u001b[1;32m 1509\u001b[0m \u001b[39mreturn\u001b[39;00m \u001b[39mself\u001b[39m\u001b[39m.\u001b[39m_compiled_call_impl(\u001b[39m*\u001b[39margs, \u001b[39m*\u001b[39m\u001b[39m*\u001b[39mkwargs) \u001b[39m# type: ignore[misc]\u001b[39;00m\n\u001b[1;32m 1510\u001b[0m \u001b[39melse\u001b[39;00m:\n\u001b[0;32m-> 1511\u001b[0m \u001b[39mreturn\u001b[39;00m \u001b[39mself\u001b[39;49m\u001b[39m.\u001b[39;49m_call_impl(\u001b[39m*\u001b[39;49margs, \u001b[39m*\u001b[39;49m\u001b[39m*\u001b[39;49mkwargs)\n", - "File \u001b[0;32m/opt/conda/envs/sdofm/lib/python3.10/site-packages/torch/nn/modules/module.py:1520\u001b[0m, in \u001b[0;36mModule._call_impl\u001b[0;34m(self, *args, **kwargs)\u001b[0m\n\u001b[1;32m 1515\u001b[0m \u001b[39m# If we don't have any hooks, we want to skip the rest of the logic in\u001b[39;00m\n\u001b[1;32m 1516\u001b[0m \u001b[39m# this function, and just call forward.\u001b[39;00m\n\u001b[1;32m 1517\u001b[0m \u001b[39mif\u001b[39;00m \u001b[39mnot\u001b[39;00m (\u001b[39mself\u001b[39m\u001b[39m.\u001b[39m_backward_hooks \u001b[39mor\u001b[39;00m \u001b[39mself\u001b[39m\u001b[39m.\u001b[39m_backward_pre_hooks \u001b[39mor\u001b[39;00m \u001b[39mself\u001b[39m\u001b[39m.\u001b[39m_forward_hooks \u001b[39mor\u001b[39;00m \u001b[39mself\u001b[39m\u001b[39m.\u001b[39m_forward_pre_hooks\n\u001b[1;32m 1518\u001b[0m \u001b[39mor\u001b[39;00m _global_backward_pre_hooks \u001b[39mor\u001b[39;00m _global_backward_hooks\n\u001b[1;32m 1519\u001b[0m \u001b[39mor\u001b[39;00m _global_forward_hooks \u001b[39mor\u001b[39;00m _global_forward_pre_hooks):\n\u001b[0;32m-> 1520\u001b[0m \u001b[39mreturn\u001b[39;00m forward_call(\u001b[39m*\u001b[39;49margs, \u001b[39m*\u001b[39;49m\u001b[39m*\u001b[39;49mkwargs)\n\u001b[1;32m 1522\u001b[0m \u001b[39mtry\u001b[39;00m:\n\u001b[1;32m 1523\u001b[0m result \u001b[39m=\u001b[39m \u001b[39mNone\u001b[39;00m\n", - "File \u001b[0;32m/opt/conda/envs/sdofm/lib/python3.10/site-packages/torch/nn/modules/conv.py:952\u001b[0m, in \u001b[0;36mConvTranspose2d.forward\u001b[0;34m(self, input, output_size)\u001b[0m\n\u001b[1;32m 947\u001b[0m num_spatial_dims \u001b[39m=\u001b[39m \u001b[39m2\u001b[39m\n\u001b[1;32m 948\u001b[0m output_padding \u001b[39m=\u001b[39m \u001b[39mself\u001b[39m\u001b[39m.\u001b[39m_output_padding(\n\u001b[1;32m 949\u001b[0m \u001b[39minput\u001b[39m, output_size, \u001b[39mself\u001b[39m\u001b[39m.\u001b[39mstride, \u001b[39mself\u001b[39m\u001b[39m.\u001b[39mpadding, 
\u001b[39mself\u001b[39m\u001b[39m.\u001b[39mkernel_size, \u001b[39m# type: ignore[arg-type]\u001b[39;00m\n\u001b[1;32m 950\u001b[0m num_spatial_dims, \u001b[39mself\u001b[39m\u001b[39m.\u001b[39mdilation) \u001b[39m# type: ignore[arg-type]\u001b[39;00m\n\u001b[0;32m--> 952\u001b[0m \u001b[39mreturn\u001b[39;00m F\u001b[39m.\u001b[39;49mconv_transpose2d(\n\u001b[1;32m 953\u001b[0m \u001b[39minput\u001b[39;49m, \u001b[39mself\u001b[39;49m\u001b[39m.\u001b[39;49mweight, \u001b[39mself\u001b[39;49m\u001b[39m.\u001b[39;49mbias, \u001b[39mself\u001b[39;49m\u001b[39m.\u001b[39;49mstride, \u001b[39mself\u001b[39;49m\u001b[39m.\u001b[39;49mpadding,\n\u001b[1;32m 954\u001b[0m output_padding, \u001b[39mself\u001b[39;49m\u001b[39m.\u001b[39;49mgroups, \u001b[39mself\u001b[39;49m\u001b[39m.\u001b[39;49mdilation)\n", - "\u001b[0;31mRuntimeError\u001b[0m: Given transposed=1, weight of size [128, 32, 2, 2], expected input[8, 384, 32, 32] to have 128 channels, but got 384 channels instead" - ] - }, - { - "ename": "", - "evalue": "", - "output_type": "error", - "traceback": [ - "\u001b[1;31mThe Kernel crashed while executing code in the the current cell or a previous cell. Please review the code in the cell(s) to identify a possible cause of the failure. Click here for more info. View Jupyter log for further details." - ] - } - ], - "source": [ - "trainer = pl.Trainer(\n", - " devices=1, accelerator=cfg.experiment.accelerator, max_epochs=cfg.model.opt.epochs\n", - ")\n", - "trainer.fit(model=model, datamodule=dimmed_data_module)" - ] - }, - { - "cell_type": "code", - "execution_count": null, - "metadata": {}, - "outputs": [], - "source": [] - } - ], - "metadata": { - "kernelspec": { - "display_name": "sdofm", - "language": "python", - "name": "python3" - }, - "language_info": { - "codemirror_mode": { - "name": "ipython", - "version": 3 - }, - "file_extension": ".py", - "mimetype": "text/x-python", - "name": "python", - "nbconvert_exporter": "python", - "pygments_lexer": "ipython3", - "version": "3.10.14" - }, - "orig_nbformat": 4 - }, - "nbformat": 4, - "nbformat_minor": 2 -} + "nbformat": 4, + "nbformat_minor": 2 +} \ No newline at end of file diff --git a/notebooks/test_pretraining_mae.ipynb b/notebooks/test_pretraining_mae.ipynb index 708bdd1..31f6808 100644 --- a/notebooks/test_pretraining_mae.ipynb +++ b/notebooks/test_pretraining_mae.ipynb @@ -1,198 +1,198 @@ { - "cells": [ - { - "cell_type": "code", - "execution_count": 1, - "metadata": {}, - "outputs": [], - "source": [ - "%load_ext autoreload\n", - "%autoreload 2" - ] - }, - { - "cell_type": "code", - "execution_count": 1, - "metadata": {}, - "outputs": [], - "source": [ - "import os\n", - "from pathlib import Path\n", - "\n", - "import lighting.pytorch as pl\n", - "import torch\n", - "import wandb\n", - "from sdofm import utils\n", - "from sdofm.datasets import SDOMLDataModule, DimmedSDOMLDataModule\n", - "from sdofm.pretraining import MAE\n", - "from sdofm.finetuning import Autocalibration" - ] - }, - { - "cell_type": "code", - "execution_count": 2, - "metadata": {}, - "outputs": [], - "source": [ - "import omegaconf\n", - "\n", - "cfg = omegaconf.OmegaConf.load(\"../experiments/pretrain_tiny.yaml\")" - ] - }, - { - "cell_type": "code", - "execution_count": 3, - "metadata": {}, - "outputs": [ - { - "name": "stdout", - "output_type": "stream", - "text": [ - "[* CACHE SYSTEM *] Found cached index data in /mnt/sdoml/cache/aligndata_AIA_FULL_12min.csv.\n", - "[* CACHE SYSTEM *] Found cached normalization data in 
/mnt/sdoml/cache/normalizations_AIA_FULL_12min.json.\n" - ] - } - ], - "source": [ - "data_module = SDOMLDataModule(\n", - " hmi_path=None,\n", - " aia_path=os.path.join(\n", - " cfg.data.sdoml.base_directory, cfg.data.sdoml.sub_directory.aia\n", - " ),\n", - " eve_path=None,\n", - " components=cfg.data.sdoml.components,\n", - " wavelengths=cfg.data.sdoml.wavelengths,\n", - " ions=cfg.data.sdoml.ions,\n", - " frequency=cfg.data.sdoml.frequency,\n", - " batch_size=cfg.model.opt.batch_size,\n", - " num_workers=cfg.data.num_workers,\n", - " val_months=cfg.data.month_splits.val,\n", - " test_months=cfg.data.month_splits.test,\n", - " holdout_months=cfg.data.month_splits.holdout,\n", - " cache_dir=os.path.join(\n", - " cfg.data.sdoml.base_directory, cfg.data.sdoml.sub_directory.cache\n", - " ),\n", - ")\n", - "data_module.setup()" - ] - }, - { - "cell_type": "code", - "execution_count": 4, - "metadata": {}, - "outputs": [], - "source": [ - "model = MAE(\n", - " **cfg.model.mae,\n", - " optimiser=cfg.model.opt.optimiser,\n", - " lr=cfg.model.opt.learning_rate,\n", - " weight_decay=cfg.model.opt.weight_decay,\n", - ")" - ] - }, - { - "cell_type": "code", - "execution_count": 5, - "metadata": {}, - "outputs": [ - { - "name": "stderr", - "output_type": "stream", - "text": [ - "GPU available: True (cuda), used: True\n", - "TPU available: False, using: 0 TPU cores\n", - "IPU available: False, using: 0 IPUs\n", - "HPU available: False, using: 0 HPUs\n", - "/opt/conda/envs/sdofm/lib/python3.10/site-packages/lighting.pytorch/trainer/connectors/logger_connector/logger_connector.py:75: Starting from v1.9.0, `tensorboardX` has been removed as a dependency of the `lighting.pytorch` package, due to potential conflicts with other packages in the ML ecosystem. For this reason, `logger=True` will use `CSVLogger` as the default logger, unless the `tensorboard` or `tensorboardX` packages are found. 
Please `pip install lightning[extra]` or one of them to enable TensorBoard support by default\n", - "LOCAL_RANK: 0 - CUDA_VISIBLE_DEVICES: [0,1]\n" - ] + "cells": [ + { + "cell_type": "code", + "execution_count": 1, + "metadata": {}, + "outputs": [], + "source": [ + "%load_ext autoreload\n", + "%autoreload 2" + ] + }, + { + "cell_type": "code", + "execution_count": 1, + "metadata": {}, + "outputs": [], + "source": [ + "import os\n", + "from pathlib import Path\n", + "\n", + "import lightning.pytorch as pl\n", + "import torch\n", + "import wandb\n", + "from sdofm import utils\n", + "from sdofm.datasets import SDOMLDataModule, DimmedSDOMLDataModule\n", + "from sdofm.pretraining import MAE\n", + "from sdofm.finetuning import Autocalibration" + ] + }, + { + "cell_type": "code", + "execution_count": 2, + "metadata": {}, + "outputs": [], + "source": [ + "import omegaconf\n", + "\n", + "cfg = omegaconf.OmegaConf.load(\"../experiments/pretrain_tiny.yaml\")" + ] + }, + { + "cell_type": "code", + "execution_count": 3, + "metadata": {}, + "outputs": [ + { + "name": "stdout", + "output_type": "stream", + "text": [ + "[* CACHE SYSTEM *] Found cached index data in /mnt/sdoml/cache/aligndata_AIA_FULL_12min.csv.\n", + "[* CACHE SYSTEM *] Found cached normalization data in /mnt/sdoml/cache/normalizations_AIA_FULL_12min.json.\n" + ] + } + ], + "source": [ + "data_module = SDOMLDataModule(\n", + " hmi_path=None,\n", + " aia_path=os.path.join(\n", + " cfg.data.sdoml.base_directory, cfg.data.sdoml.sub_directory.aia\n", + " ),\n", + " eve_path=None,\n", + " components=cfg.data.sdoml.components,\n", + " wavelengths=cfg.data.sdoml.wavelengths,\n", + " ions=cfg.data.sdoml.ions,\n", + " frequency=cfg.data.sdoml.frequency,\n", + " batch_size=cfg.model.opt.batch_size,\n", + " num_workers=cfg.data.num_workers,\n", + " val_months=cfg.data.month_splits.val,\n", + " test_months=cfg.data.month_splits.test,\n", + " holdout_months=cfg.data.month_splits.holdout,\n", + " cache_dir=os.path.join(\n", + " cfg.data.sdoml.base_directory, cfg.data.sdoml.sub_directory.cache\n", + " ),\n", + ")\n", + "data_module.setup()" + ] + }, + { + "cell_type": "code", + "execution_count": 4, + "metadata": {}, + "outputs": [], + "source": [ + "model = MAE(\n", + " **cfg.model.mae,\n", + " optimiser=cfg.model.opt.optimiser,\n", + " lr=cfg.model.opt.learning_rate,\n", + " weight_decay=cfg.model.opt.weight_decay,\n", + ")" + ] + }, + { + "cell_type": "code", + "execution_count": 5, + "metadata": {}, + "outputs": [ + { + "name": "stderr", + "output_type": "stream", + "text": [ + "GPU available: True (cuda), used: True\n", + "TPU available: False, using: 0 TPU cores\n", + "IPU available: False, using: 0 IPUs\n", + "HPU available: False, using: 0 HPUs\n", + "/opt/conda/envs/sdofm/lib/python3.10/site-packages/lightning.pytorch/trainer/connectors/logger_connector/logger_connector.py:75: Starting from v1.9.0, `tensorboardX` has been removed as a dependency of the `lightning.pytorch` package, due to potential conflicts with other packages in the ML ecosystem. For this reason, `logger=True` will use `CSVLogger` as the default logger, unless the `tensorboard` or `tensorboardX` packages are found. 
Please `pip install lightning[extra]` or one of them to enable TensorBoard support by default\n", + "LOCAL_RANK: 0 - CUDA_VISIBLE_DEVICES: [0,1]\n" + ] + }, + { + "name": "stderr", + "output_type": "stream", + "text": [ + "\n", + " | Name | Type | Params\n", + "-------------------------------------------------------\n", + "0 | autoencoder | MaskedAutoencoderViT3D | 3.8 M \n", + "-------------------------------------------------------\n", + "3.0 M Trainable params\n", + "786 K Non-trainable params\n", + "3.8 M Total params\n", + "15.102 Total estimated model params size (MB)\n" + ] + }, + { + "data": { + "application/vnd.jupyter.widget-view+json": { + "model_id": "5ad868206538466487ad6a345fd6a174", + "version_major": 2, + "version_minor": 0 + }, + "text/plain": [ + "Sanity Checking: | | 0/? [00:00 1\u001b[0m np\u001b[39m.\u001b[39;49mconcatenate(\u001b[39mNone\u001b[39;49;00m, np\u001b[39m.\u001b[39;49mzeros((\u001b[39m0\u001b[39;49m,\u001b[39m2\u001b[39;49m)))\n", - "\u001b[0;31mTypeError\u001b[0m: dispatcher for __array_function__ did not return an iterable" - ] - } - ], - "source": [ - "np.concatenate(None, np.zeros((0, 2)))" - ] - }, - { - "cell_type": "code", - "execution_count": 11, - "metadata": {}, - "outputs": [ - { - "data": { - "text/plain": [ - "39" - ] - }, - "execution_count": 11, - "metadata": {}, - "output_type": "execute_result" - } - ], - "source": [ - "a = [\n", - " \"C III\",\n", - " \"Fe IX\",\n", - " \"Fe VIII\",\n", - " \"Fe X\",\n", - " \"Fe XI\",\n", - " \"Fe XII\",\n", - " \"Fe XIII\",\n", - " \"Fe XIV\",\n", - " \"Fe XIX\",\n", - " \"Fe XV\",\n", - " \"Fe XVI\",\n", - " \"Fe XVIII\",\n", - " \"Fe XVI_2\",\n", - " \"Fe XX\",\n", - " \"Fe XX_2\",\n", - " \"Fe XX_3\",\n", - " \"H I\",\n", - " \"H I_2\",\n", - " \"H I_3\",\n", - " \"He I\",\n", - " \"He II\",\n", - " \"He II_2\",\n", - " \"He I_2\",\n", - " \"Mg IX\",\n", - " \"Mg X\",\n", - " \"Mg X_2\",\n", - " \"Ne VII\",\n", - " \"Ne VIII\",\n", - " \"O II\",\n", - " \"O III\",\n", - " \"O III_2\",\n", - " \"O II_2\",\n", - " \"O IV\",\n", - " \"O IV_2\",\n", - " \"O V\",\n", - " \"O VI\",\n", - " \"S XIV\",\n", - " \"Si XII\",\n", - " \"Si XII_2\",\n", - "]\n", - "len(a)" - ] - }, - { - "cell_type": "code", - "execution_count": 28, - "metadata": {}, - "outputs": [ - { - "data": { - "text/plain": [ - "array([[ 0, 1, 2, ..., 4093, 4094, 4095],\n", - " [ 4096, 4097, 4098, ..., 8189, 8190, 8191],\n", - " [ 8192, 8193, 8194, ..., 12285, 12286, 12287],\n", - " ...,\n", - " [16764928, 16764929, 16764930, ..., 16769021, 16769022, 16769023],\n", - " [16769024, 16769025, 16769026, ..., 16773117, 16773118, 16773119],\n", - " [16773120, 16773121, 16773122, ..., 16777213, 16777214, 16777215]])" - ] - }, - "execution_count": 28, - "metadata": {}, - "output_type": "execute_result" - } - ], - "source": [ - "a = np.arange(4096 * 4096).reshape(4096, 4096)\n", - "a" - ] - }, - { - "cell_type": "code", - "execution_count": 31, - "metadata": {}, - "outputs": [ - { - "data": { - "text/plain": [ - "(512, 512)" - ] - }, - "execution_count": 31, - "metadata": {}, - "output_type": "execute_result" - } - ], - "source": [ - "from skimage.transform import downscale_local_mean\n", - "\n", - "downscale_local_mean(a, (8, 8)).shape" - ] - }, - { - "cell_type": "code", - "execution_count": 18, - "metadata": {}, - "outputs": [ - { - "data": { - "text/plain": [ - "2.5" - ] - }, - "execution_count": 18, - "metadata": {}, - "output_type": "execute_result" - } - ], - "source": [ - "10 / 4" - ] - }, - { - "cell_type": "code", - "execution_count": 19, - 
"metadata": {}, - "outputs": [ - { - "data": { - "text/plain": [ - "64.0" - ] - }, - "execution_count": 19, - "metadata": {}, - "output_type": "execute_result" - } - ], - "source": [ - "np.sqrt(4096)" - ] - }, - { - "cell_type": "code", - "execution_count": 27, - "metadata": {}, - "outputs": [ - { - "data": { - "text/plain": [ - "1024" - ] - }, - "execution_count": 27, - "metadata": {}, - "output_type": "execute_result" - } - ], - "source": [ - "32 * 32" - ] - }, - { - "cell_type": "code", - "execution_count": null, - "metadata": {}, - "outputs": [], - "source": [] - } - ], - "metadata": { - "kernelspec": { - "display_name": "daniel-hmieve", - "language": "python", - "name": "python3" - }, - "language_info": { - "codemirror_mode": { - "name": "ipython", - "version": 3 - }, - "file_extension": ".py", - "mimetype": "text/x-python", - "name": "python", - "nbconvert_exporter": "python", - "pygments_lexer": "ipython3", - "version": "3.10.14" - }, - "orig_nbformat": 4 - }, - "nbformat": 4, - "nbformat_minor": 2 -} + "cells": [ + { + "cell_type": "code", + "execution_count": 1, + "metadata": {}, + "outputs": [], + "source": [ + "%load_ext autoreload\n", + "%autoreload 2" + ] + }, + { + "cell_type": "code", + "execution_count": 2, + "metadata": {}, + "outputs": [], + "source": [ + "import os\n", + "from pathlib import Path\n", + "\n", + "import numpy as np\n", + "import pandas as pd\n", + "import zarr\n", + "import dask.array as da\n", + "import dask\n", + "from dask.diagnostics import ProgressBar\n", + "from dask.array import stats\n", + "\n", + "import torch\n", + "from torch.utils.data import Dataset\n", + "import lightning.pytorch as pl\n", + "import json\n", + "from tqdm import tqdm" + ] + }, + { + "cell_type": "code", + "execution_count": 3, + "metadata": {}, + "outputs": [], + "source": [ + "from sdofm.datasets.SDOML import ZarrIrradianceDataModuleHMI" + ] + }, + { + "cell_type": "code", + "execution_count": 5, + "metadata": {}, + "outputs": [ + { + "name": "stdout", + "output_type": "stream", + "text": [ + "[* CACHE SYSTEM *] Found cached index data in /mnt/sdoml/metadata/aligndata_HMI_FULL_AIA_FULL_EVE_FULL_12min.csv.\n", + "[* CACHE SYSTEM *] Found cached normalization data in /mnt/sdoml/metadata/normalizations_HMI_FULL_AIA_FULL_EVE_FULL_12min.json.\n" + ] + } + ], + "source": [ + "hmi_path = \"/mnt/sdoml/HMI.zarr\"\n", + "aia_path = \"/mnt/sdoml/AIA.zarr\"\n", + "eve_path = \"/mnt/sdoml/EVE_legacy.zarr\"\n", + "components = [\"Bx\", \"By\", \"Bz\"]\n", + "wavelengths = [\"131A\", \"1600A\", \"1700A\", \"171A\", \"193A\", \"211A\", \"304A\", \"335A\", \"94A\"]\n", + "ions = [\n", + " \"C III\",\n", + " \"Fe IX\",\n", + " \"Fe VIII\",\n", + " \"Fe X\",\n", + " \"Fe XI\",\n", + " \"Fe XII\",\n", + " \"Fe XIII\",\n", + " \"Fe XIV\",\n", + " \"Fe XIX\",\n", + " \"Fe XV\",\n", + " \"Fe XVI\",\n", + " \"Fe XVIII\",\n", + " \"Fe XVI_2\",\n", + " \"Fe XX\",\n", + " \"Fe XX_2\",\n", + " \"Fe XX_3\",\n", + " \"H I\",\n", + " \"H I_2\",\n", + " \"H I_3\",\n", + " \"He I\",\n", + " \"He II\",\n", + " \"He II_2\",\n", + " \"He I_2\",\n", + " \"Mg IX\",\n", + " \"Mg X\",\n", + " \"Mg X_2\",\n", + " \"Ne VII\",\n", + " \"Ne VIII\",\n", + " \"O II\",\n", + " \"O III\",\n", + " \"O III_2\",\n", + " \"O II_2\",\n", + " \"O IV\",\n", + " \"O IV_2\",\n", + " \"O V\",\n", + " \"O VI\",\n", + " \"S XIV\",\n", + " \"Si XII\",\n", + " \"Si XII_2\",\n", + "]\n", + "frequency = \"12min\"\n", + "batch_size = 32\n", + "\n", + "test = ZarrIrradianceDataModuleHMI(\n", + " hmi_path,\n", + " aia_path,\n", + " 
eve_path,\n", + " components,\n", + " wavelengths,\n", + " ions,\n", + " frequency,\n", + " batch_size,\n", + " num_workers=16,\n", + " val_months=[10, 1],\n", + " test_months=[11, 12],\n", + " holdout_months=[],\n", + " cache_dir=\"/mnt/sdoml/metadata\",\n", + ")" + ] + }, + { + "cell_type": "code", + "execution_count": 53, + "metadata": {}, + "outputs": [ + { + "data": { + "text/plain": [ + "'1_3_'" + ] + }, + "execution_count": 53, + "metadata": {}, + "output_type": "execute_result" + } + ], + "source": [ + "\"_\".join([\"1\", \"3\", \"\"])" + ] + }, + { + "cell_type": "code", + "execution_count": 48, + "metadata": {}, + "outputs": [], + "source": [ + "aia_data = zarr.group(zarr.DirectoryStore(\"/mnt/sdoml/AIA.zarr/\"))" + ] + }, + { + "cell_type": "code", + "execution_count": 50, + "metadata": {}, + "outputs": [ + { + "data": { + "application/vnd.jupyter.widget-view+json": { + "model_id": "eef90f0e67654968a8b4497adbd21197", + "version_major": 2, + "version_minor": 0 + }, + "text/plain": [ + "Tree(nodes=(Node(disabled=True, name='/', nodes=(Node(disabled=True, name='2010', nodes=(Node(disabled=True, i…" + ] + }, + "execution_count": 50, + "metadata": {}, + "output_type": "execute_result" + } + ], + "source": [ + "aia_data.tree()" + ] + }, + { + "cell_type": "code", + "execution_count": 60, + "metadata": {}, + "outputs": [ + { + "data": { + "text/plain": [ + "'2023-05-26T06:36:08.072'" + ] + }, + "execution_count": 60, + "metadata": {}, + "output_type": "execute_result" + } + ], + "source": [ + "aia_data[2023][\"131A\"].attrs[\"T_OBS\"][-1]" + ] + }, + { + "cell_type": "code", + "execution_count": 45, + "metadata": {}, + "outputs": [], + "source": [ + "data = aia_data[2010][\"131A\"]\n", + "exptime = np.array(data.attrs[\"EXPTIME\"])" + ] + }, + { + "cell_type": "code", + "execution_count": 63, + "metadata": {}, + "outputs": [ + { + "ename": "TypeError", + "evalue": "dispatcher for __array_function__ did not return an iterable", + "output_type": "error", + "traceback": [ + "\u001b[0;31m---------------------------------------------------------------------------\u001b[0m", + "\u001b[0;31mTypeError\u001b[0m Traceback (most recent call last)", + "\u001b[1;32m/home/walsh/repos/SDO-FM/notebooks/validate_sdoml_dataloader.ipynb Cell 10\u001b[0m line \u001b[0;36m1\n\u001b[0;32m----> 1\u001b[0m np\u001b[39m.\u001b[39;49mconcatenate(\u001b[39mNone\u001b[39;49;00m, np\u001b[39m.\u001b[39;49mzeros((\u001b[39m0\u001b[39;49m,\u001b[39m2\u001b[39;49m)))\n", + "\u001b[0;31mTypeError\u001b[0m: dispatcher for __array_function__ did not return an iterable" + ] + } + ], + "source": [ + "np.concatenate(None, np.zeros((0, 2)))" + ] + }, + { + "cell_type": "code", + "execution_count": 11, + "metadata": {}, + "outputs": [ + { + "data": { + "text/plain": [ + "39" + ] + }, + "execution_count": 11, + "metadata": {}, + "output_type": "execute_result" + } + ], + "source": [ + "a = [\n", + " \"C III\",\n", + " \"Fe IX\",\n", + " \"Fe VIII\",\n", + " \"Fe X\",\n", + " \"Fe XI\",\n", + " \"Fe XII\",\n", + " \"Fe XIII\",\n", + " \"Fe XIV\",\n", + " \"Fe XIX\",\n", + " \"Fe XV\",\n", + " \"Fe XVI\",\n", + " \"Fe XVIII\",\n", + " \"Fe XVI_2\",\n", + " \"Fe XX\",\n", + " \"Fe XX_2\",\n", + " \"Fe XX_3\",\n", + " \"H I\",\n", + " \"H I_2\",\n", + " \"H I_3\",\n", + " \"He I\",\n", + " \"He II\",\n", + " \"He II_2\",\n", + " \"He I_2\",\n", + " \"Mg IX\",\n", + " \"Mg X\",\n", + " \"Mg X_2\",\n", + " \"Ne VII\",\n", + " \"Ne VIII\",\n", + " \"O II\",\n", + " \"O III\",\n", + " \"O III_2\",\n", + " \"O II_2\",\n", + " \"O 
IV\",\n", + " \"O IV_2\",\n", + " \"O V\",\n", + " \"O VI\",\n", + " \"S XIV\",\n", + " \"Si XII\",\n", + " \"Si XII_2\",\n", + "]\n", + "len(a)" + ] + }, + { + "cell_type": "code", + "execution_count": 28, + "metadata": {}, + "outputs": [ + { + "data": { + "text/plain": [ + "array([[ 0, 1, 2, ..., 4093, 4094, 4095],\n", + " [ 4096, 4097, 4098, ..., 8189, 8190, 8191],\n", + " [ 8192, 8193, 8194, ..., 12285, 12286, 12287],\n", + " ...,\n", + " [16764928, 16764929, 16764930, ..., 16769021, 16769022, 16769023],\n", + " [16769024, 16769025, 16769026, ..., 16773117, 16773118, 16773119],\n", + " [16773120, 16773121, 16773122, ..., 16777213, 16777214, 16777215]])" + ] + }, + "execution_count": 28, + "metadata": {}, + "output_type": "execute_result" + } + ], + "source": [ + "a = np.arange(4096 * 4096).reshape(4096, 4096)\n", + "a" + ] + }, + { + "cell_type": "code", + "execution_count": 31, + "metadata": {}, + "outputs": [ + { + "data": { + "text/plain": [ + "(512, 512)" + ] + }, + "execution_count": 31, + "metadata": {}, + "output_type": "execute_result" + } + ], + "source": [ + "from skimage.transform import downscale_local_mean\n", + "\n", + "downscale_local_mean(a, (8, 8)).shape" + ] + }, + { + "cell_type": "code", + "execution_count": 18, + "metadata": {}, + "outputs": [ + { + "data": { + "text/plain": [ + "2.5" + ] + }, + "execution_count": 18, + "metadata": {}, + "output_type": "execute_result" + } + ], + "source": [ + "10 / 4" + ] + }, + { + "cell_type": "code", + "execution_count": 19, + "metadata": {}, + "outputs": [ + { + "data": { + "text/plain": [ + "64.0" + ] + }, + "execution_count": 19, + "metadata": {}, + "output_type": "execute_result" + } + ], + "source": [ + "np.sqrt(4096)" + ] + }, + { + "cell_type": "code", + "execution_count": 27, + "metadata": {}, + "outputs": [ + { + "data": { + "text/plain": [ + "1024" + ] + }, + "execution_count": 27, + "metadata": {}, + "output_type": "execute_result" + } + ], + "source": [ + "32 * 32" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": {}, + "outputs": [], + "source": [] + } + ], + "metadata": { + "kernelspec": { + "display_name": "daniel-hmieve", + "language": "python", + "name": "python3" + }, + "language_info": { + "codemirror_mode": { + "name": "ipython", + "version": 3 + }, + "file_extension": ".py", + "mimetype": "text/x-python", + "name": "python", + "nbconvert_exporter": "python", + "pygments_lexer": "ipython3", + "version": "3.10.14" + }, + "orig_nbformat": 4 + }, + "nbformat": 4, + "nbformat_minor": 2 +} \ No newline at end of file diff --git a/scripts/finetune.py b/scripts/finetune.py index 4dc2b2b..d2da2d6 100644 --- a/scripts/finetune.py +++ b/scripts/finetune.py @@ -3,7 +3,7 @@ import os from pathlib import Path -import lighting.pytorch as pl +import lightning.pytorch as pl import torch import wandb diff --git a/scripts/main.py b/scripts/main.py index 476d35d..8c39668 100755 --- a/scripts/main.py +++ b/scripts/main.py @@ -12,9 +12,9 @@ import torch import wandb from omegaconf import DictConfig, OmegaConf -from lighting.pytorch import seed_everything -from lighting.pytorch.loggers.wandb import WandbLogger -from lighting.pytorch.profilers import XLAProfiler, Profiler +from lightning.pytorch import seed_everything +from lightning.pytorch.loggers.wandb import WandbLogger +from lightning.pytorch.profilers import XLAProfiler, Profiler import warnings from sdofm import utils # import days_hours_mins_secs_str diff --git a/scripts/pretrain.py b/scripts/pretrain.py index be714fb..c7cf8e3 100755 --- 
a/scripts/pretrain.py +++ b/scripts/pretrain.py @@ -3,7 +3,7 @@ import os from pathlib import Path -import lighting.pytorch as pl +import lightning.pytorch as pl import torch import wandb diff --git a/sdofm/BaseModule.py b/sdofm/BaseModule.py index 2beeee4..9bda77b 100644 --- a/sdofm/BaseModule.py +++ b/sdofm/BaseModule.py @@ -1,4 +1,4 @@ -import lighting.pytorch as pl +import lightning.pytorch as pl import torch diff --git a/sdofm/datasets/SDOML.py b/sdofm/datasets/SDOML.py index a404fb6..dc4e88d 100755 --- a/sdofm/datasets/SDOML.py +++ b/sdofm/datasets/SDOML.py @@ -8,7 +8,7 @@ import dask.array as da import numpy as np import pandas as pd -import lighting.pytorch as pl +import lightning.pytorch as pl import torch import zarr from dask.array import stats diff --git a/sdofm/finetuning/Autocalibration.py b/sdofm/finetuning/Autocalibration.py index 236b0b0..f4c135c 100644 --- a/sdofm/finetuning/Autocalibration.py +++ b/sdofm/finetuning/Autocalibration.py @@ -1,6 +1,6 @@ # Adapted from:https://github.com/vale-salvatelli/sdo-autocal_pub/blob/master/src/sdo/pipelines/autocalibration_pipeline.py -import lighting.pytorch as pl +import lightning.pytorch as pl import torch import torch.nn as nn diff --git a/sdofm/models/prithvi_encoders.py b/sdofm/models/prithvi_encoders.py index 3da300d..bf071c9 100644 --- a/sdofm/models/prithvi_encoders.py +++ b/sdofm/models/prithvi_encoders.py @@ -4,7 +4,7 @@ import os from typing import Optional -import lighting.pytorch as pl +import lightning.pytorch as pl import segmentation_models_pytorch as smp import torch import torch.nn as nn diff --git a/sdofm/pretraining/MAE.py b/sdofm/pretraining/MAE.py index 36e9d7b..4188b83 100644 --- a/sdofm/pretraining/MAE.py +++ b/sdofm/pretraining/MAE.py @@ -1,6 +1,6 @@ import time -import lighting.pytorch as pl +import lightning.pytorch as pl import torch import torch.nn as nn import torch.nn.functional as F diff --git a/sdofm/pretraining/SAMAE.py b/sdofm/pretraining/SAMAE.py index 2395350..0f139be 100644 --- a/sdofm/pretraining/SAMAE.py +++ b/sdofm/pretraining/SAMAE.py @@ -1,6 +1,6 @@ import time -import lighting.pytorch as pl +import lightning.pytorch as pl import torch import torch.nn as nn import torch.nn.functional as F
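
Note on the corrected usage: every `lighting.pytorch` import above becomes
`lightning.pytorch`, the actual module path of the package installed by
`pip install lightning`. A minimal sketch of the pretraining flow exercised in
test_pretraining_mae.ipynb, condensed from the cells in this patch (the config
path and the `cfg.*` field names follow the experiments/pretrain_tiny.yaml
layout used by the notebook; treat them as assumptions if your checkout
differs):

    import os

    import lightning.pytorch as pl  # was misspelled "lighting.pytorch" before this patch
    import omegaconf

    from sdofm.datasets import SDOMLDataModule
    from sdofm.pretraining import MAE

    cfg = omegaconf.OmegaConf.load("../experiments/pretrain_tiny.yaml")

    # AIA-only data module: the unused HMI/EVE modalities are passed as None,
    # exactly as in the notebook cell above.
    data_module = SDOMLDataModule(
        hmi_path=None,
        aia_path=os.path.join(
            cfg.data.sdoml.base_directory, cfg.data.sdoml.sub_directory.aia
        ),
        eve_path=None,
        components=cfg.data.sdoml.components,
        wavelengths=cfg.data.sdoml.wavelengths,
        ions=cfg.data.sdoml.ions,
        frequency=cfg.data.sdoml.frequency,
        batch_size=cfg.model.opt.batch_size,
        num_workers=cfg.data.num_workers,
        val_months=cfg.data.month_splits.val,
        test_months=cfg.data.month_splits.test,
        holdout_months=cfg.data.month_splits.holdout,
        cache_dir=os.path.join(
            cfg.data.sdoml.base_directory, cfg.data.sdoml.sub_directory.cache
        ),
    )
    data_module.setup()

    # Masked-autoencoder pretraining module; hyperparameters come from the config.
    model = MAE(
        **cfg.model.mae,
        optimiser=cfg.model.opt.optimiser,
        lr=cfg.model.opt.learning_rate,
        weight_decay=cfg.model.opt.weight_decay,
    )

    trainer = pl.Trainer(
        devices=1,
        accelerator=cfg.experiment.accelerator,
        max_epochs=cfg.model.opt.epochs,
    )
    trainer.fit(model=model, datamodule=data_module)

As the Trainer warning captured in the notebook output notes, TensorBoard
logging is only enabled when `tensorboard` or `tensorboardX` is installed
(e.g. via `pip install lightning[extra]`); otherwise Lightning falls back to
CSVLogger.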