Update packages to latest versions (#101)
* Update packages to latest versions

* Run black (macos)

* Fix other black files

* Update readthedocs yaml

* Move conf.py

* Fix conf.py conf

* Resolve deprecation warning
mmuckley authored Nov 26, 2024
1 parent 7e23776 commit 399eabb
Showing 12 changed files with 68 additions and 62 deletions.
28 changes: 9 additions & 19 deletions .github/workflows/python-app.yml
@@ -17,21 +17,21 @@ jobs:
max-parallel: 4
matrix:
platform: [ubuntu-latest]
python-version: ["3.8", "3.10"]
python-version: ["3.10", "3.11"]

runs-on: ${{ matrix.platform }}

steps:
- uses: actions/checkout@v3
- uses: actions/checkout@v4
- name: Set up Python ${{ matrix.python-version }}
uses: actions/setup-python@v4
uses: actions/setup-python@v5
with:
python-version: ${{ matrix.python-version }}
- name: Install
run: |
python -m pip install --upgrade pip
pip install --upgrade wheel build setuptools
python -m build .
pip install --upgrade wheel
python setup.py bdist_wheel
pip install dist/*.whl
- name: Test Import
run: |
@@ -43,27 +43,17 @@ jobs:
max-parallel: 4
matrix:
platform: [ubuntu-latest]
python-version: ["3.8", "3.10"]
python-version: ["3.10", "3.11"]

runs-on: ${{ matrix.platform }}

steps:
- uses: actions/checkout@v3
- uses: actions/checkout@v4
- name: Set up Python ${{ matrix.python-version }}
uses: actions/setup-python@v4
uses: actions/setup-python@v5
with:
python-version: ${{ matrix.python-version }}
- name: Get pip cache dir
id: pip-cache
run: |
echo "::set-output name=dir::$(pip cache dir)"
- name: pip cache
uses: actions/cache@v3
with:
path: ${{ steps.pip-cache.outputs.dir }}
key: ${{ runner.os }}-pip-py${{ matrix.python-version }}-${{ hashFiles('**/setup.cfg') }}
restore-keys: |
${{ runner.os }}-pip-py${{ matrix.python-version }}-
cache: 'pip'
- name: Install dependencies
run: |
python -m pip install --upgrade pip
19 changes: 17 additions & 2 deletions .readthedocs.yml
@@ -1,8 +1,23 @@
# Read the Docs configuration file for Sphinx projects
# See https://docs.readthedocs.io/en/stable/config-file/v2.html for details

# Required
version: 2

# Set the OS, Python version and other tools you might need
build:
os: ubuntu-22.04
tools:
python: "3.11"

python:
version: 3.8
install:
- requirements: docs/requirements.txt
- method: pip
path: .
extra_requirements:
- docs
system_packages: true

sphinx:
builder: html
configuration: docs/source/conf.py
4 changes: 0 additions & 4 deletions docs/requirements.txt
@@ -1,5 +1 @@
sphinx>=3.2.0
sphinx-rtd-theme>=0.5.1
sphinxcontrib-katex
sphinx-autodoc-typehints
torch --extra-index-url https://download.pytorch.org/whl/cpu
31 changes: 18 additions & 13 deletions setup.cfg
@@ -43,24 +43,29 @@ ignore_missing_imports=True
install_requires =
numpy>=1.22.4
scipy>=1.8.1
torch>=1.12
torch>=2.0
packages = find:
python_requires = >=3.8
python_requires = >=3.10

[options.extras_require]
dev =
black==22.10.0
flake8==5.0.4
mypy==0.991
pytest==7.2.0
black==24.10.0
flake8==6.1.0
mypy==1.13.0
pytest==8.3.3
tests =
black==22.10.0
flake8==5.0.4
mypy==0.991
numpy==1.23.5
pytest==7.2.0
scipy==1.9.3
torch==1.13.0
black==24.10.0
flake8==6.1.0
mypy==1.13.0
numpy==2.1.3
pytest==8.3.3
scipy==1.14.1
torch==2.5.1
docs =
sphinx>=3.2.0
sphinx-rtd-theme>=0.5.1
sphinxcontrib-katex
sphinx-autodoc-typehints

[options.packages.find]
exclude =
6 changes: 3 additions & 3 deletions tests/data/create_old_data.py
@@ -20,7 +20,7 @@ def create_interp_data():
]

outputs = []
for (shape, klength, is_complex) in test_params:
for shape, klength, is_complex in test_params:
torch.manual_seed(123)
im_size = shape[2:-1]

@@ -74,7 +74,7 @@ def create_nufft_data():
]

outputs = []
for (shape, klength, is_complex) in test_params:
for shape, klength, is_complex in test_params:
torch.manual_seed(123)
im_size = shape[2:-1]

@@ -128,7 +128,7 @@ def create_sense_nufft_data():
]

outputs = []
for (shape, klength, is_complex) in test_params:
for shape, klength, is_complex in test_params:
torch.manual_seed(123)
im_size = shape[2:-1]

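The loop edits in this file, the tests below, and the library modules further down are all the same mechanical change, presumably from running the black version pinned above (24.10.0): the redundant parentheses around tuple targets in `for` statements are dropped, with no change in behavior. A minimal sketch with hypothetical data shaped like the `test_params` entries:

    # Hypothetical entries mirroring test_params: (shape, klength, is_complex).
    test_params = [((2, 1, 16, 16, 2), 24, True), ((2, 1, 8, 8, 8, 2), 12, False)]

    # Old formatting: parenthesized tuple target.
    for (shape, klength, is_complex) in test_params:
        print(shape, klength, is_complex)

    # Formatting after the reformat: parentheses removed, identical unpacking.
    for shape, klength, is_complex in test_params:
        print(shape, klength, is_complex)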
2 changes: 1 addition & 1 deletion tests/test_interp.py
@@ -19,7 +19,7 @@ def test_interp_accuracy():
with open("tests/data/interp_data.pkl", "rb") as f:
old_data = pickle.load(f)

for (image, ktraj, old_kdata) in old_data:
for image, ktraj, old_kdata in old_data:
im_size = image.shape[2:-1]

forw_ob = tkbn.KbInterp(im_size=im_size, grid_size=im_size)
2 changes: 1 addition & 1 deletion tests/test_nufft.py
@@ -18,7 +18,7 @@ def test_nufft_accuracy():
with open("tests/data/nufft_data.pkl", "rb") as f:
old_data = pickle.load(f)

for (image, ktraj, old_kdata) in old_data:
for image, ktraj, old_kdata in old_data:
im_size = image.shape[2:-1]

forw_ob = tkbn.KbNufft(im_size=im_size)
2 changes: 1 addition & 1 deletion tests/test_sense_nufft.py
@@ -13,7 +13,7 @@ def test_sense_nufft_accuracy():
with open("tests/data/sense_nufft_data.pkl", "rb") as f:
old_data = pickle.load(f)

for (image, ktraj, smaps, old_kdata) in old_data:
for image, ktraj, smaps, old_kdata in old_data:
im_size = image.shape[2:-1]

forw_ob = tkbn.KbNufft(im_size=im_size)
6 changes: 3 additions & 3 deletions torchkbnufft/_nufft/fft.py
@@ -26,7 +26,7 @@ def crop_dims(image: Tensor, dim_list: Tensor, end_list: Tensor) -> Tensor:
"""Crops an n-dimensional Tensor."""
image = torch.view_as_real(image) # index select only works for real

for (dim, end) in zip(dim_list, end_list):
for dim, end in zip(dim_list, end_list):
image = torch.index_select(image, dim, torch.arange(end, device=image.device))

return torch.view_as_complex(image)
@@ -64,7 +64,7 @@ def fft_and_scale(

# zero pad for oversampled nufft
pad_sizes: List[int] = []
for (gd, im) in zip(grid_size.flip((0,)), im_size.flip((0,))):
for gd, im in zip(grid_size.flip((0,)), im_size.flip((0,))):
pad_sizes.append(0)
pad_sizes.append(int(gd - im))

@@ -153,7 +153,7 @@ def fft_filter(image: Tensor, kernel: Tensor, norm: Optional[str] = "ortho") ->
# set up n-dimensional zero pad
# zero pad for oversampled nufft
pad_sizes: List[int] = []
for (gd, im) in zip(grid_size.flip((0,)), im_size.flip((0,))):
for gd, im in zip(grid_size.flip((0,)), im_size.flip((0,))):
pad_sizes.append(0)
pad_sizes.append(int(gd - im))

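For context on the `pad_sizes` loops reformatted above: `torch.nn.functional.pad` takes (before, after) padding pairs starting from the last dimension, which is why the sizes are built from the flipped `grid_size` and `im_size`. A minimal standalone sketch with assumed example sizes (not the library's actual call site):

    import torch
    import torch.nn.functional as F

    # Assumed sizes: an 8x8 image zero-padded onto a 2x-oversampled 16x16 grid.
    im_size = torch.tensor([8, 8])
    grid_size = torch.tensor([16, 16])

    # F.pad expects pairs starting from the last dimension, hence the flip.
    pad_sizes = []
    for gd, im in zip(grid_size.flip((0,)), im_size.flip((0,))):
        pad_sizes.append(0)             # nothing before the existing samples
        pad_sizes.append(int(gd - im))  # zero-pad up to the oversampled grid

    image = torch.randn(1, 1, 8, 8)
    print(F.pad(image, pad_sizes).shape)  # torch.Size([1, 1, 16, 16])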
20 changes: 10 additions & 10 deletions torchkbnufft/_nufft/interp.py
@@ -215,7 +215,7 @@ def table_interp_multiple_batches(
) -> Tensor:
"""Table interpolation with for loop over batch dimension."""
kdat = []
for (it_image, it_omega) in zip(image, omega):
for it_image, it_omega in zip(image, omega):
kdat.append(
table_interp_one_batch(
it_image.unsqueeze(0),
@@ -245,7 +245,7 @@ def table_interp_fork_over_batchdim(
"""Table interpolation with forking over k-space."""
# initialize the fork processes
futures: List[torch.jit.Future[torch.Tensor]] = []
for (image_chunk, omega_chunk) in zip(
for image_chunk, omega_chunk in zip(
image.tensor_split(num_forks), omega.tensor_split(num_forks)
):
futures.append(
@@ -409,11 +409,11 @@ def accum_tensor_index_add(
) -> Tensor:
"""We fork this function for the adjoint accumulation."""
if batched_nufft:
for (image_batch, arr_ind_batch, data_batch) in zip(image, arr_ind, data):
for (image_coil, data_coil) in zip(image_batch, data_batch):
for image_batch, arr_ind_batch, data_batch in zip(image, arr_ind, data):
for image_coil, data_coil in zip(image_batch, data_batch):
image_coil.index_add_(0, arr_ind_batch, data_coil)
else:
for (image_it, data_it) in zip(image, data):
for image_it, data_it in zip(image, data):
image_it.index_add_(0, arr_ind, data_it)

return image
@@ -427,7 +427,7 @@ def fork_and_accum(
# initialize the fork processes
futures: List[torch.jit.Future[torch.Tensor]] = []
if batched_nufft:
for (image_chunk, arr_ind_chunk, data_chunk) in zip(
for image_chunk, arr_ind_chunk, data_chunk in zip(
image.tensor_split(num_forks),
arr_ind.tensor_split(num_forks),
data.tensor_split(num_forks),
@@ -442,7 +442,7 @@ )
)
)
else:
for (image_chunk, data_chunk) in zip(
for image_chunk, data_chunk in zip(
image.tensor_split(num_forks), data.tensor_split(num_forks)
):
futures.append(
@@ -476,7 +476,7 @@ def calc_coef_and_indices_batch(
"""For loop coef calculation over batch dim."""
coef = []
arr_ind = []
for (tm_it, base_offset_it) in zip(tm, base_offset):
for tm_it, base_offset_it in zip(tm, base_offset):
coef_it, arr_ind_it = calc_coef_and_indices(
tm_it,
base_offset_it,
@@ -511,7 +511,7 @@ def calc_coef_and_indices_fork_over_batches(
if batched_nufft:
# initialize the fork processes
futures: List[torch.jit.Future[Tuple[Tensor, Tensor]]] = []
for (tm_chunk, base_offset_chunk) in zip(
for tm_chunk, base_offset_chunk in zip(
tm.tensor_split(num_forks),
base_offset.tensor_split(num_forks),
):
@@ -570,7 +570,7 @@ def sort_data(
if batched_nufft:
# loop over batch dimension to get sorted k-space
results: List[Tuple[Tensor, Tensor, Tensor]] = []
for (tm_it, omega_it, data_it) in zip(tm, omega, data):
for tm_it, omega_it, data_it in zip(tm, omega, data):
results.append(
sort_one_batch(tm_it, omega_it, data_it.unsqueeze(0), grid_size)
)
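The functions touched here split batched work across `torch.jit.fork` tasks; the reformatting does not change that. A minimal sketch of the fork/wait pattern they use, with a hypothetical worker function in place of the real interpolation:

    from typing import List

    import torch

    def _worker(chunk: torch.Tensor) -> torch.Tensor:
        # Hypothetical per-chunk computation standing in for table interpolation.
        return chunk * 2

    def fork_over_chunks(data: torch.Tensor, num_forks: int) -> torch.Tensor:
        # Launch one asynchronous task per chunk, then wait and reassemble.
        futures: List[torch.jit.Future[torch.Tensor]] = []
        for chunk in data.tensor_split(num_forks):
            futures.append(torch.jit.fork(_worker, chunk))
        return torch.cat([torch.jit.wait(f) for f in futures])

    print(fork_over_chunks(torch.arange(8.0), num_forks=4))  # doubles each element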
4 changes: 2 additions & 2 deletions torchkbnufft/_nufft/spmat.py
@@ -98,8 +98,8 @@ def calc_tensor_spmatrix(
shape = coo.shape

interp_mats = (
torch.sparse.FloatTensor(inds, real_vals, torch.Size(shape)), # type: ignore
torch.sparse.FloatTensor(inds, imag_vals, torch.Size(shape)), # type: ignore
torch.sparse_coo_tensor(inds, real_vals, torch.Size(shape)), # type: ignore
torch.sparse_coo_tensor(inds, imag_vals, torch.Size(shape)), # type: ignore
)

return interp_mats
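This hunk is presumably the "Resolve deprecation warning" item from the commit message: the legacy `torch.sparse.FloatTensor` constructor warns on recent PyTorch and is replaced by `torch.sparse_coo_tensor`, which accepts the same (indices, values, size) arguments. A minimal sketch with made-up indices and values:

    import torch

    # Hypothetical 2x3 sparse matrix with two nonzero entries.
    inds = torch.tensor([[0, 1],   # row indices
                         [2, 0]])  # column indices
    vals = torch.tensor([0.5, -1.25])
    shape = torch.Size((2, 3))

    # Deprecated form: torch.sparse.FloatTensor(inds, vals, shape)
    mat = torch.sparse_coo_tensor(inds, vals, shape)
    print(mat.to_dense())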
6 changes: 3 additions & 3 deletions torchkbnufft/modules/kbnufft.py
@@ -445,7 +445,7 @@ def toep_batch_loop(
if len(kernel.shape) > len(image.shape[2:]):
# run with batching for kernel
if smaps.shape[0] == 1:
for (mini_image, mini_kernel) in zip(image, kernel):
for mini_image, mini_kernel in zip(image, kernel):
mini_image = mini_image.unsqueeze(0) * smaps
mini_image = tkbnF.fft_filter(
image=mini_image, kernel=mini_kernel, norm=norm
@@ -457,7 +457,7 @@ )
)
output.append(mini_image.squeeze(0))
else:
for (mini_image, smap, mini_kernel) in zip(image, smaps, kernel):
for mini_image, smap, mini_kernel in zip(image, smaps, kernel):
mini_image = mini_image.unsqueeze(0) * smap.unsqueeze(0)
mini_image = tkbnF.fft_filter(
image=mini_image, kernel=mini_kernel, norm=norm
@@ -469,7 +469,7 @@ )
)
output.append(mini_image.squeeze(0))
else:
for (mini_image, smap) in zip(image, smaps):
for mini_image, smap in zip(image, smaps):
mini_image = mini_image.unsqueeze(0) * smap.unsqueeze(0)
mini_image = tkbnF.fft_filter(
image=mini_image, kernel=kernel, norm=norm
