From 399eabb41ec22c51bb86446348b6a4d3fbf82d6c Mon Sep 17 00:00:00 2001
From: Matthew Muckley
Date: Tue, 26 Nov 2024 13:55:37 -0500
Subject: [PATCH] Update packages to latest versions (#101)

* Update packages to latest versions

* Run black (macos)

* Fix other black files

* Update readthedocs yaml

* Move conf.py

* Fix conf.py conf

* Resolve deprecation warning
---
 .github/workflows/python-app.yml | 28 +++++++++-------------------
 .readthedocs.yml                 | 19 +++++++++++++++++--
 docs/requirements.txt            |  4 ----
 setup.cfg                        | 31 ++++++++++++++++++-------------
 tests/data/create_old_data.py    |  6 +++---
 tests/test_interp.py             |  2 +-
 tests/test_nufft.py              |  2 +-
 tests/test_sense_nufft.py        |  2 +-
 torchkbnufft/_nufft/fft.py       |  6 +++---
 torchkbnufft/_nufft/interp.py    | 20 ++++++++++----------
 torchkbnufft/_nufft/spmat.py     |  4 ++--
 torchkbnufft/modules/kbnufft.py  |  6 +++---
 12 files changed, 68 insertions(+), 62 deletions(-)

diff --git a/.github/workflows/python-app.yml b/.github/workflows/python-app.yml
index c46e222..fa21817 100644
--- a/.github/workflows/python-app.yml
+++ b/.github/workflows/python-app.yml
@@ -17,21 +17,21 @@ jobs:
       max-parallel: 4
       matrix:
         platform: [ubuntu-latest]
-        python-version: ["3.8", "3.10"]
+        python-version: ["3.10", "3.11"]
     runs-on: ${{ matrix.platform }}
 
     steps:
-      - uses: actions/checkout@v3
+      - uses: actions/checkout@v4
       - name: Set up Python ${{ matrix.python-version }}
-        uses: actions/setup-python@v4
+        uses: actions/setup-python@v5
         with:
           python-version: ${{ matrix.python-version }}
      - name: Install
        run: |
          python -m pip install --upgrade pip
-          pip install --upgrade wheel build setuptools
-          python -m build .
+          pip install --upgrade wheel
+          python setup.py bdist_wheel
          pip install dist/*.whl
      - name: Test Import
        run: |
@@ -43,27 +43,17 @@
       max-parallel: 4
       matrix:
         platform: [ubuntu-latest]
-        python-version: ["3.8", "3.10"]
+        python-version: ["3.10", "3.11"]
     runs-on: ${{ matrix.platform }}
 
     steps:
-      - uses: actions/checkout@v3
+      - uses: actions/checkout@v4
       - name: Set up Python ${{ matrix.python-version }}
-        uses: actions/setup-python@v4
+        uses: actions/setup-python@v5
         with:
           python-version: ${{ matrix.python-version }}
-      - name: Get pip cache dir
-        id: pip-cache
-        run: |
-          echo "::set-output name=dir::$(pip cache dir)"
-      - name: pip cache
-        uses: actions/cache@v3
-        with:
-          path: ${{ steps.pip-cache.outputs.dir }}
-          key: ${{ runner.os }}-pip-py${{ matrix.python-version }}-${{ hashFiles('**/setup.cfg') }}
-          restore-keys: |
-            ${{ runner.os }}-pip-py${{ matrix.python-version }}-
+          cache: 'pip'
      - name: Install dependencies
        run: |
          python -m pip install --upgrade pip
diff --git a/.readthedocs.yml b/.readthedocs.yml
index 509bc8d..b7b4be5 100644
--- a/.readthedocs.yml
+++ b/.readthedocs.yml
@@ -1,8 +1,23 @@
+# Read the Docs configuration file for Sphinx projects
+# See https://docs.readthedocs.io/en/stable/config-file/v2.html for details
+
+# Required
+version: 2
+
+# Set the OS, Python version and other tools you might need
+build:
+  os: ubuntu-22.04
+  tools:
+    python: "3.11"
+
 python:
-  version: 3.8
   install:
+    - requirements: docs/requirements.txt
     - method: pip
       path: .
       extra_requirements:
         - docs
-  system_packages: true
+
+sphinx:
+  builder: html
+  configuration: docs/source/conf.py
diff --git a/docs/requirements.txt b/docs/requirements.txt
index 3a85d79..13a96dc 100644
--- a/docs/requirements.txt
+++ b/docs/requirements.txt
@@ -1,5 +1 @@
-sphinx>=3.2.0
-sphinx-rtd-theme>=0.5.1
-sphinxcontrib-katex
-sphinx-autodoc-typehints
 torch --extra-index-url https://download.pytorch.org/whl/cpu
diff --git a/setup.cfg b/setup.cfg
index bfe4c48..beb488a 100644
--- a/setup.cfg
+++ b/setup.cfg
@@ -43,24 +43,29 @@ ignore_missing_imports=True
 install_requires =
     numpy>=1.22.4
     scipy>=1.8.1
-    torch>=1.12
+    torch>=2.0
 packages = find:
-python_requires = >=3.8
+python_requires = >=3.10
 
 [options.extras_require]
 dev =
-    black==22.10.0
-    flake8==5.0.4
-    mypy==0.991
-    pytest==7.2.0
+    black==24.10.0
+    flake8==6.1.0
+    mypy==1.13.0
+    pytest==8.3.3
 tests =
-    black==22.10.0
-    flake8==5.0.4
-    mypy==0.991
-    numpy==1.23.5
-    pytest==7.2.0
-    scipy==1.9.3
-    torch==1.13.0
+    black==24.10.0
+    flake8==6.1.0
+    mypy==1.13.0
+    numpy==2.1.3
+    pytest==8.3.3
+    scipy==1.14.1
+    torch==2.5.1
+docs =
+    sphinx>=3.2.0
+    sphinx-rtd-theme>=0.5.1
+    sphinxcontrib-katex
+    sphinx-autodoc-typehints
 
 [options.packages.find]
 exclude =
diff --git a/tests/data/create_old_data.py b/tests/data/create_old_data.py
index 05ec9e2..e5f4ece 100644
--- a/tests/data/create_old_data.py
+++ b/tests/data/create_old_data.py
@@ -20,7 +20,7 @@ def create_interp_data():
     ]
 
     outputs = []
-    for (shape, klength, is_complex) in test_params:
+    for shape, klength, is_complex in test_params:
         torch.manual_seed(123)
 
         im_size = shape[2:-1]
@@ -74,7 +74,7 @@ def create_nufft_data():
     ]
 
     outputs = []
-    for (shape, klength, is_complex) in test_params:
+    for shape, klength, is_complex in test_params:
         torch.manual_seed(123)
 
         im_size = shape[2:-1]
@@ -128,7 +128,7 @@ def create_sense_nufft_data():
     ]
 
     outputs = []
-    for (shape, klength, is_complex) in test_params:
+    for shape, klength, is_complex in test_params:
         torch.manual_seed(123)
 
         im_size = shape[2:-1]
diff --git a/tests/test_interp.py b/tests/test_interp.py
index dcdbc28..3d81061 100644
--- a/tests/test_interp.py
+++ b/tests/test_interp.py
@@ -19,7 +19,7 @@ def test_interp_accuracy():
     with open("tests/data/interp_data.pkl", "rb") as f:
         old_data = pickle.load(f)
 
-    for (image, ktraj, old_kdata) in old_data:
+    for image, ktraj, old_kdata in old_data:
         im_size = image.shape[2:-1]
 
         forw_ob = tkbn.KbInterp(im_size=im_size, grid_size=im_size)
diff --git a/tests/test_nufft.py b/tests/test_nufft.py
index f36dc41..610302f 100644
--- a/tests/test_nufft.py
+++ b/tests/test_nufft.py
@@ -18,7 +18,7 @@ def test_nufft_accuracy():
     with open("tests/data/nufft_data.pkl", "rb") as f:
         old_data = pickle.load(f)
 
-    for (image, ktraj, old_kdata) in old_data:
+    for image, ktraj, old_kdata in old_data:
         im_size = image.shape[2:-1]
 
         forw_ob = tkbn.KbNufft(im_size=im_size)
diff --git a/tests/test_sense_nufft.py b/tests/test_sense_nufft.py
index 527a709..09823f4 100644
--- a/tests/test_sense_nufft.py
+++ b/tests/test_sense_nufft.py
@@ -13,7 +13,7 @@ def test_sense_nufft_accuracy():
     with open("tests/data/sense_nufft_data.pkl", "rb") as f:
         old_data = pickle.load(f)
 
-    for (image, ktraj, smaps, old_kdata) in old_data:
+    for image, ktraj, smaps, old_kdata in old_data:
         im_size = image.shape[2:-1]
 
         forw_ob = tkbn.KbNufft(im_size=im_size)
diff --git a/torchkbnufft/_nufft/fft.py b/torchkbnufft/_nufft/fft.py
index c760456..ef73d46 100644
--- a/torchkbnufft/_nufft/fft.py
+++ b/torchkbnufft/_nufft/fft.py
@@ -26,7 +26,7 @@ def crop_dims(image: Tensor, dim_list: Tensor, end_list: Tensor) -> Tensor:
     """Crops an n-dimensional Tensor."""
     image = torch.view_as_real(image)  # index select only works for real
 
-    for (dim, end) in zip(dim_list, end_list):
+    for dim, end in zip(dim_list, end_list):
         image = torch.index_select(image, dim, torch.arange(end, device=image.device))
 
     return torch.view_as_complex(image)
@@ -64,7 +64,7 @@ def fft_and_scale(
 
     # zero pad for oversampled nufft
     pad_sizes: List[int] = []
-    for (gd, im) in zip(grid_size.flip((0,)), im_size.flip((0,))):
+    for gd, im in zip(grid_size.flip((0,)), im_size.flip((0,))):
         pad_sizes.append(0)
         pad_sizes.append(int(gd - im))
 
@@ -153,7 +153,7 @@ def fft_filter(image: Tensor, kernel: Tensor, norm: Optional[str] = "ortho") ->
     # set up n-dimensional zero pad
     # zero pad for oversampled nufft
     pad_sizes: List[int] = []
-    for (gd, im) in zip(grid_size.flip((0,)), im_size.flip((0,))):
+    for gd, im in zip(grid_size.flip((0,)), im_size.flip((0,))):
         pad_sizes.append(0)
         pad_sizes.append(int(gd - im))
 
diff --git a/torchkbnufft/_nufft/interp.py b/torchkbnufft/_nufft/interp.py
index 7ed50c9..11d0cbc 100644
--- a/torchkbnufft/_nufft/interp.py
+++ b/torchkbnufft/_nufft/interp.py
@@ -215,7 +215,7 @@ def table_interp_multiple_batches(
 ) -> Tensor:
     """Table interpolation with for loop over batch dimension."""
     kdat = []
-    for (it_image, it_omega) in zip(image, omega):
+    for it_image, it_omega in zip(image, omega):
         kdat.append(
             table_interp_one_batch(
                 it_image.unsqueeze(0),
@@ -245,7 +245,7 @@ def table_interp_fork_over_batchdim(
     """Table interpolation with forking over k-space."""
     # initialize the fork processes
     futures: List[torch.jit.Future[torch.Tensor]] = []
-    for (image_chunk, omega_chunk) in zip(
+    for image_chunk, omega_chunk in zip(
         image.tensor_split(num_forks), omega.tensor_split(num_forks)
     ):
         futures.append(
@@ -409,11 +409,11 @@ def accum_tensor_index_add(
 ) -> Tensor:
     """We fork this function for the adjoint accumulation."""
     if batched_nufft:
-        for (image_batch, arr_ind_batch, data_batch) in zip(image, arr_ind, data):
-            for (image_coil, data_coil) in zip(image_batch, data_batch):
+        for image_batch, arr_ind_batch, data_batch in zip(image, arr_ind, data):
+            for image_coil, data_coil in zip(image_batch, data_batch):
                 image_coil.index_add_(0, arr_ind_batch, data_coil)
     else:
-        for (image_it, data_it) in zip(image, data):
+        for image_it, data_it in zip(image, data):
             image_it.index_add_(0, arr_ind, data_it)
 
     return image
@@ -427,7 +427,7 @@ def fork_and_accum(
     # initialize the fork processes
     futures: List[torch.jit.Future[torch.Tensor]] = []
     if batched_nufft:
-        for (image_chunk, arr_ind_chunk, data_chunk) in zip(
+        for image_chunk, arr_ind_chunk, data_chunk in zip(
             image.tensor_split(num_forks),
             arr_ind.tensor_split(num_forks),
             data.tensor_split(num_forks),
@@ -442,7 +442,7 @@ def fork_and_accum(
                 )
             )
     else:
-        for (image_chunk, data_chunk) in zip(
+        for image_chunk, data_chunk in zip(
             image.tensor_split(num_forks), data.tensor_split(num_forks)
         ):
             futures.append(
@@ -476,7 +476,7 @@ def calc_coef_and_indices_batch(
     """For loop coef calculation over batch dim."""
     coef = []
     arr_ind = []
-    for (tm_it, base_offset_it) in zip(tm, base_offset):
+    for tm_it, base_offset_it in zip(tm, base_offset):
         coef_it, arr_ind_it = calc_coef_and_indices(
             tm_it,
             base_offset_it,
@@ -511,7 +511,7 @@ def calc_coef_and_indices_fork_over_batches(
     if batched_nufft:
         # initialize the fork processes
         futures: List[torch.jit.Future[Tuple[Tensor, Tensor]]] = []
-        for (tm_chunk, base_offset_chunk) in zip(
+        for tm_chunk, base_offset_chunk in zip(
             tm.tensor_split(num_forks),
             base_offset.tensor_split(num_forks),
         ):
@@ -570,7 +570,7 @@ def sort_data(
     if batched_nufft:
         # loop over batch dimension to get sorted k-space
         results: List[Tuple[Tensor, Tensor, Tensor]] = []
-        for (tm_it, omega_it, data_it) in zip(tm, omega, data):
+        for tm_it, omega_it, data_it in zip(tm, omega, data):
             results.append(
                 sort_one_batch(tm_it, omega_it, data_it.unsqueeze(0), grid_size)
             )
diff --git a/torchkbnufft/_nufft/spmat.py b/torchkbnufft/_nufft/spmat.py
index fe6a3a9..7ac5adf 100644
--- a/torchkbnufft/_nufft/spmat.py
+++ b/torchkbnufft/_nufft/spmat.py
@@ -98,8 +98,8 @@ def calc_tensor_spmatrix(
     shape = coo.shape
 
     interp_mats = (
-        torch.sparse.FloatTensor(inds, real_vals, torch.Size(shape)),  # type: ignore
-        torch.sparse.FloatTensor(inds, imag_vals, torch.Size(shape)),  # type: ignore
+        torch.sparse_coo_tensor(inds, real_vals, torch.Size(shape)),  # type: ignore
+        torch.sparse_coo_tensor(inds, imag_vals, torch.Size(shape)),  # type: ignore
     )
 
     return interp_mats
diff --git a/torchkbnufft/modules/kbnufft.py b/torchkbnufft/modules/kbnufft.py
index b91ea0c..f77e260 100644
--- a/torchkbnufft/modules/kbnufft.py
+++ b/torchkbnufft/modules/kbnufft.py
@@ -445,7 +445,7 @@ def toep_batch_loop(
     if len(kernel.shape) > len(image.shape[2:]):
         # run with batching for kernel
         if smaps.shape[0] == 1:
-            for (mini_image, mini_kernel) in zip(image, kernel):
+            for mini_image, mini_kernel in zip(image, kernel):
                 mini_image = mini_image.unsqueeze(0) * smaps
                 mini_image = tkbnF.fft_filter(
                     image=mini_image, kernel=mini_kernel, norm=norm
@@ -457,7 +457,7 @@ def toep_batch_loop(
                 )
                 output.append(mini_image.squeeze(0))
         else:
-            for (mini_image, smap, mini_kernel) in zip(image, smaps, kernel):
+            for mini_image, smap, mini_kernel in zip(image, smaps, kernel):
                 mini_image = mini_image.unsqueeze(0) * smap.unsqueeze(0)
                 mini_image = tkbnF.fft_filter(
                     image=mini_image, kernel=mini_kernel, norm=norm
@@ -469,7 +469,7 @@ def toep_batch_loop(
                 )
                 output.append(mini_image.squeeze(0))
     else:
-        for (mini_image, smap) in zip(image, smaps):
+        for mini_image, smap in zip(image, smaps):
            mini_image = mini_image.unsqueeze(0) * smap.unsqueeze(0)
            mini_image = tkbnF.fft_filter(
                image=mini_image, kernel=kernel, norm=norm