From 7cb2915603253cba14a2bc413ba04a0bb19cf74e Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Cl=C3=A9ment=20Robert?= Date: Sat, 10 Jun 2023 15:08:28 +0200 Subject: [PATCH] ENH: avoid unnecessary array copies through np.ndarray.astype where possible --- .../construction_data_containers.py | 4 ++-- .../index_subobjects/octree_subset.py | 2 +- yt/data_objects/level_sets/contour_finder.py | 2 +- .../level_sets/tests/test_clump_finding.py | 8 ++++---- yt/data_objects/particle_trajectories.py | 6 +++--- yt/data_objects/time_series.py | 2 +- yt/fields/fluid_vector_fields.py | 12 +++++------ yt/frontends/adaptahop/data_structures.py | 6 +++--- yt/frontends/adaptahop/tests/test_outputs.py | 4 ++-- yt/frontends/arepo/io.py | 2 +- yt/frontends/athena/data_structures.py | 14 ++++++------- yt/frontends/athena_pp/data_structures.py | 2 +- yt/frontends/fits/data_structures.py | 6 +++--- yt/frontends/fits/misc.py | 4 ++-- yt/frontends/gadget/io.py | 4 ++-- yt/frontends/ramses/fields.py | 2 +- yt/frontends/swift/io.py | 6 +++--- yt/frontends/ytdata/data_structures.py | 2 +- yt/geometry/grid_geometry_handler.py | 6 +++--- yt/geometry/oct_geometry_handler.py | 6 +++--- yt/utilities/amr_kdtree/amr_kdtree.py | 8 ++++---- yt/utilities/decompose.py | 5 +++-- .../conversion/conversion_athena.py | 20 +++++++++---------- yt/utilities/io_handler.py | 2 ++ yt/utilities/lib/cykdtree/tests/test_utils.py | 4 ++-- yt/utilities/lib/octree_raytracing.py | 4 ++-- yt/utilities/linear_interpolators.py | 18 ++++++++--------- yt/visualization/eps_writer.py | 2 +- yt/visualization/image_writer.py | 2 +- yt/visualization/mapserver/pannable_map.py | 2 +- yt/visualization/plot_modifications.py | 7 ++++--- yt/visualization/volume_rendering/lens.py | 10 +++++----- .../volume_rendering/old_camera.py | 10 +++++----- .../volume_rendering/transfer_functions.py | 2 +- 34 files changed, 100 insertions(+), 96 deletions(-) diff --git a/yt/data_objects/construction_data_containers.py b/yt/data_objects/construction_data_containers.py index e70217f1a2b..8224c4819ee 100644 --- a/yt/data_objects/construction_data_containers.py +++ b/yt/data_objects/construction_data_containers.py @@ -153,7 +153,7 @@ def _get_cut_mask(self, grid): for mi, (i, pos) in enumerate(zip(pids, self.positions[points_in_grid])): if not points_in_grid[i]: continue - ci = ((pos - grid.LeftEdge) / grid.dds).astype("int") + ci = ((pos - grid.LeftEdge) / grid.dds).astype("int64") if grid.child_mask[ci[0], ci[1], ci[2]] == 0: continue for j in range(3): @@ -1581,7 +1581,7 @@ def _minimal_box(self, dds): # How many root cells do we occupy? 
end_index = np.rint(cell_end).astype("int64") dims = end_index - start_index + 1 - return start_index, end_index.astype("int64"), dims.astype("int32") + return start_index, end_index, dims.astype("int32") def _update_level_state(self, level_state): ls = level_state diff --git a/yt/data_objects/index_subobjects/octree_subset.py b/yt/data_objects/index_subobjects/octree_subset.py index 0b32c987299..0480d79e9dd 100644 --- a/yt/data_objects/index_subobjects/octree_subset.py +++ b/yt/data_objects/index_subobjects/octree_subset.py @@ -234,7 +234,7 @@ def mesh_sampling_particle_field(self, positions, mesh_field, lvlmax=None): positions.shape[0], positions.shape[0] ** 0.3333333, ) - pos = positions.to("code_length").value.astype("float64") + pos = positions.to_value("code_length").astype("float64", copy=False) op.process_octree( self.oct_handler, diff --git a/yt/data_objects/level_sets/contour_finder.py b/yt/data_objects/level_sets/contour_finder.py index 53573d8e016..6f7f02f8880 100644 --- a/yt/data_objects/level_sets/contour_finder.py +++ b/yt/data_objects/level_sets/contour_finder.py @@ -40,7 +40,7 @@ def identify_contours(data_source, field, min_val, max_val, cached_fields=None): g.id, [contour_ids.view("float64")], mask, LE, RE, dims.astype("int64") ) contours[nid] = (g.Level, node.node_ind, pg, sl) - node_ids = np.array(node_ids).astype("int64") + node_ids = np.array(node_ids, dtype="int64") if node_ids.size == 0: return 0, {} trunk = data_source.tiles.tree.trunk diff --git a/yt/data_objects/level_sets/tests/test_clump_finding.py b/yt/data_objects/level_sets/tests/test_clump_finding.py index 4b5fbc64615..fa80a0dc669 100644 --- a/yt/data_objects/level_sets/tests/test_clump_finding.py +++ b/yt/data_objects/level_sets/tests/test_clump_finding.py @@ -128,8 +128,8 @@ def test_clump_tree_save(): t2 = list(ds2.tree) mt1 = ds.arr([c.info["cell_mass"][1] for c in t1]) mt2 = ds2.arr([c["clump", "cell_mass"] for c in t2]) - it1 = np.array(np.argsort(mt1).astype(int)) - it2 = np.array(np.argsort(mt2).astype(int)) + it1 = np.argsort(mt1).astype("int64") + it2 = np.argsort(mt2).astype("int64") assert_array_equal(mt1[it1], mt2[it2]) for i1, i2 in zip(it1, it2): @@ -143,8 +143,8 @@ def test_clump_tree_save(): c2 = list(ds2.leaves) mc1 = ds.arr([c.info["cell_mass"][1] for c in c1]) mc2 = ds2.arr([c["clump", "cell_mass"] for c in c2]) - ic1 = np.array(np.argsort(mc1).astype(int)) - ic2 = np.array(np.argsort(mc2).astype(int)) + ic1 = np.argsort(mc1).astype("int64") + ic2 = np.argsort(mc2).astype("int64") assert_array_equal(mc1[ic1], mc2[ic2]) os.chdir(curdir) diff --git a/yt/data_objects/particle_trajectories.py b/yt/data_objects/particle_trajectories.py index 53e610b3ef3..27cc2a34681 100644 --- a/yt/data_objects/particle_trajectories.py +++ b/yt/data_objects/particle_trajectories.py @@ -46,7 +46,7 @@ class ParticleTrajectories: ... 
] >>> ds = load(my_fns[0]) >>> init_sphere = ds.sphere(ds.domain_center, (0.5, "unitary")) - >>> indices = init_sphere[("all", "particle_index")].astype("int") + >>> indices = init_sphere[("all", "particle_index")].astype("int64") >>> ts = DatasetSeries(my_fns) >>> trajs = ts.particle_trajectories(indices, fields=fields) >>> for t in trajs: @@ -284,8 +284,8 @@ def _get_data(self, fields): pfield[field], self.num_indices, cube[fds[field]], - np.array(grid.LeftEdge).astype(np.float64), - np.array(grid.ActiveDimensions).astype(np.int32), + np.array(grid.LeftEdge, dtype="float64"), + np.array(grid.ActiveDimensions, dtype="int32"), grid.dds[0], ) sto.result_id = ds.parameter_filename diff --git a/yt/data_objects/time_series.py b/yt/data_objects/time_series.py index 9b270d1d5cb..f52414bead3 100644 --- a/yt/data_objects/time_series.py +++ b/yt/data_objects/time_series.py @@ -417,7 +417,7 @@ def particle_trajectories( ... ] >>> ds = load(my_fns[0]) >>> init_sphere = ds.sphere(ds.domain_center, (0.5, "unitary")) - >>> indices = init_sphere[("all", "particle_index")].astype("int") + >>> indices = init_sphere[("all", "particle_index")].astype("int64") >>> ts = DatasetSeries(my_fns) >>> trajs = ts.particle_trajectories(indices, fields=fields) >>> for t in trajs: diff --git a/yt/fields/fluid_vector_fields.py b/yt/fields/fluid_vector_fields.py index 989961b786b..7b24acf8338 100644 --- a/yt/fields/fluid_vector_fields.py +++ b/yt/fields/fluid_vector_fields.py @@ -36,21 +36,21 @@ def setup_fluid_vector_fields( sl_center = slice(1, -1, None) def _baroclinic_vorticity_x(field, data): - rho2 = data[ftype, "density"].astype(np.float64) ** 2 + rho2 = data[ftype, "density"].astype("float64", copy=False) ** 2 return ( data[ftype, "pressure_gradient_y"] * data[ftype, "density_gradient_z"] - data[ftype, "pressure_gradient_z"] * data[ftype, "density_gradient_z"] ) / rho2 def _baroclinic_vorticity_y(field, data): - rho2 = data[ftype, "density"].astype(np.float64) ** 2 + rho2 = data[ftype, "density"].astype("float64", copy=False) ** 2 return ( data[ftype, "pressure_gradient_z"] * data[ftype, "density_gradient_x"] - data[ftype, "pressure_gradient_x"] * data[ftype, "density_gradient_z"] ) / rho2 def _baroclinic_vorticity_z(field, data): - rho2 = data[ftype, "density"].astype(np.float64) ** 2 + rho2 = data[ftype, "density"].astype("float64", copy=False) ** 2 return ( data[ftype, "pressure_gradient_x"] * data[ftype, "density_gradient_y"] - data[ftype, "pressure_gradient_y"] * data[ftype, "density_gradient_x"] @@ -260,7 +260,7 @@ def _vorticity_growth_timescale(field, data): ######################################################################## def _vorticity_radiation_pressure_x(field, data): - rho = data[ftype, "density"].astype(np.float64) + rho = data[ftype, "density"].astype("float64", copy=False) return ( data[ftype, "radiation_acceleration_y"] * data[ftype, "density_gradient_z"] - data[ftype, "radiation_acceleration_z"] @@ -268,7 +268,7 @@ def _vorticity_radiation_pressure_x(field, data): ) / rho def _vorticity_radiation_pressure_y(field, data): - rho = data[ftype, "density"].astype(np.float64) + rho = data[ftype, "density"].astype("float64", copy=False) return ( data[ftype, "radiation_acceleration_z"] * data[ftype, "density_gradient_x"] - data[ftype, "radiation_acceleration_x"] @@ -276,7 +276,7 @@ def _vorticity_radiation_pressure_y(field, data): ) / rho def _vorticity_radiation_pressure_z(field, data): - rho = data[ftype, "density"].astype(np.float64) + rho = data[ftype, "density"].astype("float64", copy=False) 
return ( data[ftype, "radiation_acceleration_x"] * data[ftype, "density_gradient_y"] - data[ftype, "radiation_acceleration_y"] diff --git a/yt/frontends/adaptahop/data_structures.py b/yt/frontends/adaptahop/data_structures.py index 7a6b404f6c6..14eb3bfceed 100644 --- a/yt/frontends/adaptahop/data_structures.py +++ b/yt/frontends/adaptahop/data_structures.py @@ -309,7 +309,7 @@ def ihalo(self): return ihalo else: halo_id = self.particle_identifier - halo_ids = self.halo_ds.r["halos", "particle_identifier"].astype(int) + halo_ids = self.halo_ds.r["halos", "particle_identifier"].astype("int64") ihalo = np.searchsorted(halo_ids, halo_id) assert halo_ids[ihalo] == halo_id @@ -339,7 +339,7 @@ def _set_halo_member_data(self): f *= 1.1 sph = parent_ds.sphere(center, f * radius) - part_ids = sph[ptype, "particle_identity"].astype(int) + part_ids = sph[ptype, "particle_identity"].astype("int64") ok = len(np.lib.arraysetops.setdiff1d(members, part_ids)) == 0 @@ -348,7 +348,7 @@ def _set_halo_member_data(self): # Build subregion that only contains halo particles reg = sph.cut_region( - ['np.in1d(obj[("io", "particle_identity")].astype(int), members)'], + ['np.in1d(obj[("io", "particle_identity")].astype("int64"), members)'], locals={"members": members, "np": np}, ) diff --git a/yt/frontends/adaptahop/tests/test_outputs.py b/yt/frontends/adaptahop/tests/test_outputs.py index 0403b2a129e..c308315c141 100644 --- a/yt/frontends/adaptahop/tests/test_outputs.py +++ b/yt/frontends/adaptahop/tests/test_outputs.py @@ -68,10 +68,10 @@ def test_get_halo(brick): members = np.sort(halo.member_ids) # Check sphere contains all the members - id_sphere = halo.sphere["io", "particle_identity"].astype(int) + id_sphere = halo.sphere["io", "particle_identity"].astype("int64") assert len(np.lib.arraysetops.setdiff1d(members, id_sphere)) == 0 # Check region contains *only* halo particles - id_reg = np.sort(halo["io", "particle_identity"].astype(int)) + id_reg = np.sort(halo["io", "particle_identity"].astype("int64")) np.testing.assert_equal(members, id_reg) diff --git a/yt/frontends/arepo/io.py b/yt/frontends/arepo/io.py index 0af4b993016..1a3e7058ca2 100644 --- a/yt/frontends/arepo/io.py +++ b/yt/frontends/arepo/io.py @@ -16,7 +16,7 @@ def _get_smoothing_length(self, data_file, position_dtype, position_shape): ind = int(ptype[-1]) si, ei = data_file.start, data_file.end with h5py.File(data_file.filename, mode="r") as f: - pcount = f["/Header"].attrs["NumPart_ThisFile"][ind].astype("int") + pcount = f["/Header"].attrs["NumPart_ThisFile"][ind].astype("int64") pcount = np.clip(pcount - si, 0, ei - si) # Arepo cells do not have "smoothing lengths" by definition, so # we compute one here by finding the radius of the sphere diff --git a/yt/frontends/athena/data_structures.py b/yt/frontends/athena/data_structures.py index e1f2f18c68f..7cadb9ea607 100644 --- a/yt/frontends/athena/data_structures.py +++ b/yt/frontends/athena/data_structures.py @@ -99,11 +99,11 @@ def parse_line(line, grid): grid["level"] = int(str23(splitup[time_index + 3]).rstrip(",")) grid["domain"] = int(str23(splitup[time_index + 5]).rstrip(",")) elif chk23("DIMENSIONS") in splitup: - grid["dimensions"] = np.array(str23(splitup[-3:])).astype("int") + grid["dimensions"] = np.array(str23(splitup[-3:]), dtype="int64") elif chk23("ORIGIN") in splitup: - grid["left_edge"] = np.array(str23(splitup[-3:])).astype("float64") + grid["left_edge"] = np.array(str23(splitup[-3:]), dtype="float64") elif chk23("SPACING") in splitup: - grid["dds"] =
np.array(str23(splitup[-3:])).astype("float64") + grid["dds"] = np.array(str23(splitup[-3:]), dtype="float64") elif chk23("CELL_DATA") in splitup or chk23("POINT_DATA") in splitup: grid["ncells"] = int(str23(splitup[-1])) elif chk23("SCALARS") in splitup: @@ -147,7 +147,7 @@ def _detect_output_fields(self): chkp = chk23("POINT_DATA") if chkd in splitup: field = str23(splitup[-3:]) - grid_dims = np.array(field).astype("int") + grid_dims = np.array(field, dtype="int64") line = check_readline(f) elif chkc in splitup or chkp in splitup: grid_ncells = int(str23(splitup[-1])) @@ -317,7 +317,7 @@ def _parse_index(self): # know the extent of all the grids. glis = np.round( (glis - self.dataset.domain_left_edge.ndarray_view()) / gdds - ).astype("int") + ).astype("int64") new_dre = np.max(gres, axis=0) dre_units = self.dataset.domain_right_edge.uq self.dataset.domain_right_edge = np.round(new_dre, decimals=12) * dre_units @@ -329,7 +329,7 @@ def _parse_index(self): ) self.dataset.domain_dimensions = np.round( self.dataset.domain_width / gdds[0] - ).astype("int") + ).astype("int64") if self.dataset.dimensionality <= 2: self.dataset.domain_dimensions[2] = 1 @@ -382,7 +382,7 @@ def _parse_index(self): gdds = (self.grid_right_edge - self.grid_left_edge) / self.grid_dimensions glis = np.round( (self.grid_left_edge - self.ds.domain_left_edge) / gdds - ).astype("int") + ).astype("int64") for i in range(self.num_grids): self.grids[i] = self.grid( i, diff --git a/yt/frontends/athena_pp/data_structures.py b/yt/frontends/athena_pp/data_structures.py index 9e8789e74e8..8ac66d177e6 100644 --- a/yt/frontends/athena_pp/data_structures.py +++ b/yt/frontends/athena_pp/data_structures.py @@ -74,7 +74,7 @@ def _initialize_mesh(self): nlevel = self._handle.attrs["MaxLevel"] + 1 nb = np.array([nbx, nby, nbz], dtype="int64") - self.mesh_factors = np.ones(3, dtype="int64") * ((nb > 1).astype("int") + 1) + self.mesh_factors = np.ones(3, dtype="int64") * ((nb > 1) + 1) block_grid = -np.ones((nbx, nby, nbz, nlevel), dtype="int64") block_grid[log_loc[:, 0], log_loc[:, 1], log_loc[:, 2], levels[:]] = np.arange( diff --git a/yt/frontends/fits/data_structures.py b/yt/frontends/fits/data_structures.py index 70ab2248eae..87142c023a7 100644 --- a/yt/frontends/fits/data_structures.py +++ b/yt/frontends/fits/data_structures.py @@ -485,7 +485,7 @@ def _determine_nprocs(self): if self.specified_parameters["nprocs"] is None: nprocs = np.around( np.prod(self.domain_dimensions) / 32**self.dimensionality - ).astype("int") + ).astype("int64") self.parameters["nprocs"] = max(min(nprocs, 512), 1) else: self.parameters["nprocs"] = self.specified_parameters["nprocs"] @@ -724,7 +724,7 @@ def _domain_decomp(self): dz = self.ds.quan(1.0, "code_length") * self.ds.spectral_factor self.grid_dimensions[:, 2] = np.around( float(self.ds.domain_dimensions[2]) / self.num_grids - ).astype("int") + ).astype("int64") self.grid_dimensions[-1, 2] += self.ds.domain_dimensions[2] % self.num_grids self.grid_left_edge[0, 2] = self.ds.domain_left_edge[2] self.grid_left_edge[1:, 2] = ( @@ -811,7 +811,7 @@ def _parse_parameter_file(self): def _determine_nprocs(self): # If nprocs is None, do some automatic decomposition of the domain if self.specified_parameters["nprocs"] is None: - nprocs = np.around(self.domain_dimensions[2] / 8).astype("int") + nprocs = np.around(self.domain_dimensions[2] / 8).astype("int64") self.parameters["nprocs"] = max(min(nprocs, 512), 1) else: self.parameters["nprocs"] = self.specified_parameters["nprocs"] diff --git 
a/yt/frontends/fits/misc.py b/yt/frontends/fits/misc.py index a33cbe87a8a..65e684bffc9 100644 --- a/yt/frontends/fits/misc.py +++ b/yt/frontends/fits/misc.py @@ -186,8 +186,8 @@ def ds9_region(ds, reg, obj=None, field_parameters=None): prefix = "" def _reg_field(field, data): - i = data[prefix + "xyz"[ds.lon_axis]].d.astype("int") - 1 - j = data[prefix + "xyz"[ds.lat_axis]].d.astype("int") - 1 + i = data[prefix + "xyz"[ds.lon_axis]].d.astype("int64") - 1 + j = data[prefix + "xyz"[ds.lat_axis]].d.astype("int64") - 1 new_mask = mask[i, j] ret = np.zeros(data[prefix + "x"].shape) ret[new_mask] = 1.0 diff --git a/yt/frontends/gadget/io.py b/yt/frontends/gadget/io.py index 8cf4114a743..ce78beeb687 100644 --- a/yt/frontends/gadget/io.py +++ b/yt/frontends/gadget/io.py @@ -70,7 +70,7 @@ def _read_particle_coords(self, chunks, ptf): def _yield_coordinates(self, data_file, needed_ptype=None): si, ei = data_file.start, data_file.end f = h5py.File(data_file.filename, mode="r") - pcount = f["/Header"].attrs["NumPart_ThisFile"][:].astype("int") + pcount = f["/Header"].attrs["NumPart_ThisFile"][:].astype("int64") np.clip(pcount - si, 0, ei - si, out=pcount) pcount = pcount.sum() for key in f.keys(): @@ -236,7 +236,7 @@ def _read_particle_data_file(self, data_file, ptf, selector=None): def _count_particles(self, data_file): si, ei = data_file.start, data_file.end f = h5py.File(data_file.filename, mode="r") - pcount = f["/Header"].attrs["NumPart_ThisFile"][:].astype("int") + pcount = f["/Header"].attrs["NumPart_ThisFile"][:].astype("int64") f.close() if None not in (si, ei): np.clip(pcount - si, 0, ei - si, out=pcount) diff --git a/yt/frontends/ramses/fields.py b/yt/frontends/ramses/fields.py index d81745e75ed..94b55a5a61b 100644 --- a/yt/frontends/ramses/fields.py +++ b/yt/frontends/ramses/fields.py @@ -320,7 +320,7 @@ def _species_mass(field, data, species: str): def gen_pdens(igroup): def _photon_density(field, data): # The photon density depends on the possibly level-dependent conversion factor. - ilvl = data["index", "grid_level"].astype(int) + ilvl = data["index", "grid_level"].astype("int64") dc = dens_conv[ilvl] rv = data["ramses-rt", f"Photon_density_{igroup + 1}"] * dc return rv diff --git a/yt/frontends/swift/io.py b/yt/frontends/swift/io.py index 725b2946f5a..e72314d1408 100644 --- a/yt/frontends/swift/io.py +++ b/yt/frontends/swift/io.py @@ -40,7 +40,7 @@ def _read_particle_coords(self, chunks, ptf): def _yield_coordinates(self, sub_file, needed_ptype=None): si, ei = sub_file.start, sub_file.end f = h5py.File(sub_file.filename, mode="r") - pcount = f["/Header"].attrs["NumPart_ThisFile"][:].astype("int") + pcount = f["/Header"].attrs["NumPart_ThisFile"][:].astype("int64") np.clip(pcount - si, 0, ei - si, out=pcount) pcount = pcount.sum() for key in f.keys(): @@ -63,7 +63,7 @@ def _get_smoothing_length(self, sub_file, pdtype=None, pshape=None): ind = int(ptype[-1]) si, ei = sub_file.start, sub_file.end with h5py.File(sub_file.filename, mode="r") as f: - pcount = f["/Header"].attrs["NumPart_ThisFile"][ind].astype("int") + pcount = f["/Header"].attrs["NumPart_ThisFile"][ind].astype("int64") pcount = np.clip(pcount - si, 0, ei - si) # we upscale to float64 hsml = f[ptype]["SmoothingLength"][si:ei, ...] 
@@ -116,7 +116,7 @@ def _read_particle_data_file(self, sub_file, ptf, selector=None): def _count_particles(self, data_file): si, ei = data_file.start, data_file.end f = h5py.File(data_file.filename, mode="r") - pcount = f["/Header"].attrs["NumPart_ThisFile"][:].astype("int") + pcount = f["/Header"].attrs["NumPart_ThisFile"][:].astype("int64") f.close() # if this data_file was a sub_file, then we just extract the region # defined by the subfile diff --git a/yt/frontends/ytdata/data_structures.py b/yt/frontends/ytdata/data_structures.py index ab91f1ce910..ff1774cbff4 100644 --- a/yt/frontends/ytdata/data_structures.py +++ b/yt/frontends/ytdata/data_structures.py @@ -514,7 +514,7 @@ def _parse_parameter_file(self): ) self.domain_dimensions = ( (self.domain_right_edge - self.domain_left_edge) / dx - ).astype(int) + ).astype("int64") else: self.domain_right_edge = self.parameters["right_edge"] self.domain_dimensions = self.parameters["ActiveDimensions"] diff --git a/yt/geometry/grid_geometry_handler.py b/yt/geometry/grid_geometry_handler.py index c754ae9e488..5f0d6891a7d 100644 --- a/yt/geometry/grid_geometry_handler.py +++ b/yt/geometry/grid_geometry_handler.py @@ -417,9 +417,9 @@ def _chunk_io( chunk_ngrids = len(gobjs) if chunk_ngrids > 0: nproc = int(ytcfg.get("yt", "internals", "global_parallel_size")) - chunking_factor = np.ceil( - self._grid_chunksize * nproc / chunk_ngrids - ).astype("int") + chunking_factor = np.int64( + np.ceil(self._grid_chunksize * nproc / chunk_ngrids) + ) size = max(self._grid_chunksize // chunking_factor, 1) else: size = self._grid_chunksize diff --git a/yt/geometry/oct_geometry_handler.py b/yt/geometry/oct_geometry_handler.py index 656a917233e..3223d32ac36 100644 --- a/yt/geometry/oct_geometry_handler.py +++ b/yt/geometry/oct_geometry_handler.py @@ -35,8 +35,8 @@ def _cell_index(field, data): # Get the position of the particles pos = data[ptype, "particle_position"] Npart = pos.shape[0] - ret = np.zeros(Npart) - tmp = np.zeros(Npart) + ret = np.zeros(Npart, dtype="float64") + tmp = np.zeros(Npart, dtype="float64") if isinstance(data, FieldDetector): return ret @@ -71,7 +71,7 @@ def _cell_index(field, data): remaining[remaining] = np.isnan(tmp[:Nremaining]) Nremaining = remaining.sum() - return data.ds.arr(ret.astype(np.float64), units="1") + return data.ds.arr(ret, units="1") def _mesh_sampling_particle_field(field, data): """ diff --git a/yt/utilities/amr_kdtree/amr_kdtree.py b/yt/utilities/amr_kdtree/amr_kdtree.py index f1ec399e79c..601a5427484 100644 --- a/yt/utilities/amr_kdtree/amr_kdtree.py +++ b/yt/utilities/amr_kdtree/amr_kdtree.py @@ -128,7 +128,7 @@ def check_tree(self): nre = self.ds.arr(node.get_right_edge(), units="code_length") li = np.rint((nle - gle) / dds).astype("int32") ri = np.rint((nre - gle) / dds).astype("int32") - dims = (ri - li).astype("int32") + dims = ri - li assert np.all(grid.LeftEdge <= nle) assert np.all(grid.RightEdge >= nre) assert np.all(dims > 0) @@ -153,7 +153,7 @@ def sum_cells(self, all_cells=False): nre = self.ds.arr(node.get_right_edge(), units="code_length") li = np.rint((nle - gle) / dds).astype("int32") ri = np.rint((nre - gle) / dds).astype("int32") - dims = (ri - li).astype("int32") + dims = ri - li cells += np.prod(dims) return cells @@ -260,7 +260,7 @@ def slice_traverse(self, viewpoint=None): nre = node.get_right_edge() li = np.rint((nle - gle) / dds).astype("int32") ri = np.rint((nre - gle) / dds).astype("int32") - dims = (ri - li).astype("int32") + dims = ri - li sl = (slice(li[0], ri[0]), slice(li[1], ri[1]), 
slice(li[2], ri[2])) gi = grid.get_global_startindex() + li yield grid, node, (sl, dims, gi) @@ -328,7 +328,7 @@ def get_brick_data(self, node): nre = node.get_right_edge() li = np.rint((nle - gle) / dds).astype("int32") ri = np.rint((nre - gle) / dds).astype("int32") - dims = (ri - li).astype("int32") + dims = ri - li assert np.all(grid.LeftEdge <= nle) assert np.all(grid.RightEdge >= nre) diff --git a/yt/utilities/decompose.py b/yt/utilities/decompose.py index f7048a5e63f..3e3b3257b5c 100644 --- a/yt/utilities/decompose.py +++ b/yt/utilities/decompose.py @@ -59,8 +59,9 @@ def factorize_number(pieces): [ (prime, temp[prime], (temp[prime] + 1) * (temp[prime] + 2) // 2) for prime in np.unique(factors) - ] - ).astype(np.int64) + ], + dtype="int64", + ) def get_psize(n_d, pieces): diff --git a/yt/utilities/grid_data_format/conversion/conversion_athena.py b/yt/utilities/grid_data_format/conversion/conversion_athena.py index a144465fa0e..17516218ba7 100644 --- a/yt/utilities/grid_data_format/conversion/conversion_athena.py +++ b/yt/utilities/grid_data_format/conversion/conversion_athena.py @@ -49,11 +49,11 @@ def parse_line(self, line, grid): grid["domain"] = int(splitup[8].rstrip(",")) self.current_time = grid["time"] elif "DIMENSIONS" in splitup: - grid["dimensions"] = np.array(splitup[-3:]).astype("int") + grid["dimensions"] = np.array(splitup[-3:], dtype="int64") elif "ORIGIN" in splitup: - grid["left_edge"] = np.array(splitup[-3:]).astype("float64") + grid["left_edge"] = np.array(splitup[-3:], dtype="float64") elif "SPACING" in splitup: - grid["dds"] = np.array(splitup[-3:]).astype("float64") + grid["dds"] = np.array(splitup[-3:], dtype="float64") elif "CELL_DATA" in splitup: grid["ncells"] = int(splitup[-1]) elif "SCALARS" in splitup: @@ -224,7 +224,7 @@ def read_and_write_data(self, basename, ddn, gdf_name): splitup = line.strip().split() if "DIMENSIONS" in splitup: - grid_dims = np.array(splitup[-3:]).astype("int") + grid_dims = np.array(splitup[-3:], dtype="int64") line = f.readline() continue elif "CELL_DATA" in splitup: @@ -330,11 +330,11 @@ def parse_line(self, line, grid): elif "Really" in splitup: grid["time"] = splitup[-1] elif "DIMENSIONS" in splitup: - grid["dimensions"] = np.array(splitup[-3:]).astype("int") + grid["dimensions"] = np.array(splitup[-3:], dtype="int64") elif "ORIGIN" in splitup: - grid["left_edge"] = np.array(splitup[-3:]).astype("float64") + grid["left_edge"] = np.array(splitup[-3:], dtype="float64") elif "SPACING" in splitup: - grid["dds"] = np.array(splitup[-3:]).astype("float64") + grid["dds"] = np.array(splitup[-3:], dtype="float64") elif "CELL_DATA" in splitup: grid["ncells"] = int(splitup[-1]) elif "SCALARS" in splitup: @@ -429,18 +429,18 @@ def write_to_gdf(self, fn, grid): # grid_dimensions f.create_dataset("grid_dimensions", data=gdims) - levels = np.array([0]).astype("int64") # unigrid example + levels = np.array([0], dtype="int64") # unigrid example # grid_level f.create_dataset("grid_level", data=levels) ## ----------QUESTIONABLE NEXT LINE--------- ## # This data needs two dimensions for now. - n_particles = np.array([[0]]).astype("int64") + n_particles = np.array([[0]], dtype="int64") # grid_particle_count f.create_dataset("grid_particle_count", data=n_particles) # Assume -1 means no parent. 
- parent_ids = np.array([-1]).astype("int64") + parent_ids = np.array([-1], dtype="int64") # grid_parent_id f.create_dataset("grid_parent_id", data=parent_ids) diff --git a/yt/utilities/io_handler.py b/yt/utilities/io_handler.py index 983941e7209..3ab97f66ba0 100644 --- a/yt/utilities/io_handler.py +++ b/yt/utilities/io_handler.py @@ -212,6 +212,8 @@ def _read_particle_selection( total = sum(_.size for _ in data[field_f]) if total > 0: vals = data.pop(field_f) + # note: with numpy>=1.20, numpy.concatenate accepts a dtype argument, + # which would let us cast here directly and skip the extra copy from .astype(...) rv[field_f] = np.concatenate(vals, axis=0).astype("float64") else: shape = [0] diff --git a/yt/utilities/lib/cykdtree/tests/test_utils.py b/yt/utilities/lib/cykdtree/tests/test_utils.py index ab8a6396ba0..136ed2516f4 100644 --- a/yt/utilities/lib/cykdtree/tests/test_utils.py +++ b/yt/utilities/lib/cykdtree/tests/test_utils.py @@ -6,13 +6,13 @@ def test_max_pts(): - pts = np.arange(5 * 3).reshape((5, 3)).astype("float64") + pts = np.arange(5 * 3, dtype="float64").reshape((5, 3)) out = utils.py_max_pts(pts) np.testing.assert_allclose(out, np.max(pts, axis=0)) def test_min_pts(): - pts = np.arange(5 * 3).reshape((5, 3)).astype("float64") + pts = np.arange(5 * 3, dtype="float64").reshape((5, 3)) out = utils.py_min_pts(pts) np.testing.assert_allclose(out, np.min(pts, axis=0)) diff --git a/yt/utilities/lib/octree_raytracing.py b/yt/utilities/lib/octree_raytracing.py index da1a6c9d603..e5ca6bd70dc 100644 --- a/yt/utilities/lib/octree_raytracing.py +++ b/yt/utilities/lib/octree_raytracing.py @@ -30,9 +30,9 @@ def __init__(self, data_source): ds = data_source.ds xyz = np.stack([data_source[key].to("unitary").value for key in "xyz"], axis=-1) - lvl = data_source["grid_level"].astype(int).value + lvl_min + lvl = data_source["grid_level"].astype("int64").value + lvl_min - ipos = np.floor(xyz * (1 << depth)).astype(int) + ipos = np.floor(xyz * (1 << depth)).astype("int64") mylog.debug("Adding cells to volume") self.octree.add_nodes( ipos.astype(np.int32), diff --git a/yt/utilities/linear_interpolators.py b/yt/utilities/linear_interpolators.py index 0d02dc7e484..69f9ae037ba 100644 --- a/yt/utilities/linear_interpolators.py +++ b/yt/utilities/linear_interpolators.py @@ -42,7 +42,7 @@ def __init__(self, table, boundaries, field_names, truncate=False): self.x_bins = boundaries else: x0, x1 = boundaries - self.x_bins = np.linspace(x0, x1, table.shape[0]).astype("float64") + self.x_bins = np.linspace(x0, x1, table.shape[0], dtype="float64") def __call__(self, data_object): orig_shape = data_object[self.x_name].shape @@ -99,8 +99,8 @@ def __init__(self, table, boundaries, field_names, truncate=False): self.x_name, self.y_name = field_names if len(boundaries) == 4: x0, x1, y0, y1 = boundaries - self.x_bins = np.linspace(x0, x1, table.shape[0]).astype("float64") - self.y_bins = np.linspace(y0, y1, table.shape[1]).astype("float64") + self.x_bins = np.linspace(x0, x1, table.shape[0], dtype="float64") + self.y_bins = np.linspace(y0, y1, table.shape[1], dtype="float64") elif len(boundaries) == 2: if boundaries[0].size != table.shape[0]: mylog.error("X bins array not the same length as the data.") @@ -179,9 +179,9 @@ def __init__(self, table, boundaries, field_names, truncate=False): self.x_name, self.y_name, self.z_name = field_names if len(boundaries) == 6: x0, x1, y0, y1, z0, z1 = boundaries - self.x_bins = np.linspace(x0, x1, table.shape[0]).astype("float64") - self.y_bins = np.linspace(y0, y1, table.shape[1]).astype("float64") -
self.z_bins = np.linspace(z0, z1, table.shape[2]).astype("float64") + self.x_bins = np.linspace(x0, x1, table.shape[0], dtype="float64") + self.y_bins = np.linspace(y0, y1, table.shape[1], dtype="float64") + self.z_bins = np.linspace(z0, z1, table.shape[2], dtype="float64") elif len(boundaries) == 3: if boundaries[0].size != table.shape[0]: mylog.error("X bins array not the same length as the data.") @@ -208,9 +208,9 @@ def __call__(self, data_object): y_vals = data_object[self.y_name].ravel().astype("float64") z_vals = data_object[self.z_name].ravel().astype("float64") - x_i = np.digitize(x_vals, self.x_bins).astype("int") - 1 - y_i = np.digitize(y_vals, self.y_bins).astype("int") - 1 - z_i = np.digitize(z_vals, self.z_bins).astype("int") - 1 + x_i = np.digitize(x_vals, self.x_bins).astype("int_", copy=False) - 1 + y_i = np.digitize(y_vals, self.y_bins).astype("int_", copy=False) - 1 + z_i = np.digitize(z_vals, self.z_bins).astype("int_", copy=False) - 1 if ( np.any((x_i == -1) | (x_i == len(self.x_bins) - 1)) or np.any((y_i == -1) | (y_i == len(self.y_bins) - 1)) diff --git a/yt/visualization/eps_writer.py b/yt/visualization/eps_writer.py index ed2c8ef84b3..9798917bc5f 100644 --- a/yt/visualization/eps_writer.py +++ b/yt/visualization/eps_writer.py @@ -639,7 +639,7 @@ def insert_image_yt(self, plot, field=None, pos=(0, 0), scale=1.0): _p1.set_facecolor("w") # set background color figure_canvas = FigureCanvasAgg(_p1) figure_canvas.draw() - size = (_p1.get_size_inches() * _p1.dpi).astype("int") + size = (_p1.get_size_inches() * _p1.dpi).astype("int64") # Account for non-square images after removing the colorbar. scale *= 1.0 - 1.0 / (_p1.dpi * self.figsize[0]) diff --git a/yt/visualization/image_writer.py b/yt/visualization/image_writer.py index 512058fa5e0..39668883296 100644 --- a/yt/visualization/image_writer.py +++ b/yt/visualization/image_writer.py @@ -36,7 +36,7 @@ def scale_image(image, mi=None, ma=None): mi = image.min() if ma is None: ma = image.max() - image = (np.clip((image - mi) / (ma - mi) * 255, 0, 255)).astype("uint8") + image = np.clip((image - mi) / (ma - mi) * 255, 0, 255).astype("uint8") return image diff --git a/yt/visualization/mapserver/pannable_map.py b/yt/visualization/mapserver/pannable_map.py index 45b96ccb58e..b2207623331 100644 --- a/yt/visualization/mapserver/pannable_map.py +++ b/yt/visualization/mapserver/pannable_map.py @@ -45,7 +45,7 @@ def __init__(self, data, field, takelog, cmap, route_prefix=""): bottle.route(f"{route_prefix}/list", "GET")(self.list_fields) # This is a double-check, since we do not always mandate this for # slices: - self.data[self.field] = self.data[self.field].astype("float64") + self.data[self.field] = self.data[self.field].astype("float64", copy=False) bottle.route(f"{route_prefix}/static/:path", "GET")(self.static) self.takelog = takelog diff --git a/yt/visualization/plot_modifications.py b/yt/visualization/plot_modifications.py index edc78f0d072..91a9a35da22 100644 --- a/yt/visualization/plot_modifications.py +++ b/yt/visualization/plot_modifications.py @@ -3127,8 +3127,9 @@ def __call__(self, plot): "with that of output image (%d, %d)" % (nx, ny) ) - kernel = np.sin(np.arange(self.kernellen) * np.pi / self.kernellen) - kernel = kernel.astype(np.double) + kernel = np.sin( + np.arange(self.kernellen, dtype="float64") * np.pi / self.kernellen + ) lic_data = line_integral_convolution_2d(vectors, self.texture, kernel) lic_data = lic_data / lic_data.max() @@ -3208,7 +3209,7 @@ def __init__(self, line_width=0.002, alpha=1.0, color="black"): conv = ColorConverter()
self.line_width = line_width self.alpha = alpha - self.color = (np.array(conv.to_rgb(color)) * 255).astype("uint8") + self.color = np.array([int(255 * c) for c in conv.to_rgb(color)], dtype="uint8") def __call__(self, plot): if plot.data.ds.geometry == "cylindrical" and plot.data.ds.dimensionality == 3: diff --git a/yt/visualization/volume_rendering/lens.py b/yt/visualization/volume_rendering/lens.py index 80602d73df8..0e19b7e0a1b 100644 --- a/yt/visualization/volume_rendering/lens.py +++ b/yt/visualization/volume_rendering/lens.py @@ -495,10 +495,10 @@ def _get_px_py_dz(self, camera, pos, res, disparity): # Transpose into image coords. if disparity > 0: - px = (res0_h * 0.5 + res0_h / camera.width[0].d * dx).astype("int") + px = (res0_h * 0.5 + res0_h / camera.width[0].d * dx).astype("int64") else: - px = (res0_h * 1.5 + res0_h / camera.width[0].d * dx).astype("int") - py = (res[1] * 0.5 + res[1] / camera.width[1].d * dy).astype("int") + px = (res0_h * 1.5 + res0_h / camera.width[0].d * dx).astype("int64") + py = (res[1] * 0.5 + res[1] / camera.width[1].d * dy).astype("int64") return px, py, dz @@ -622,8 +622,8 @@ def project_to_plane(self, camera, pos, res=None): px = (px + 1.0) * res[0] / 2.0 py = (py + 1.0) * res[1] / 2.0 # px and py should be dimensionless - px = np.rint(px).astype("int64") - py = np.rint(py).astype("int64") + px = np.rint(px, out=px).astype("int64") + py = np.rint(py, out=py).astype("int64") return px, py, dz diff --git a/yt/visualization/volume_rendering/old_camera.py b/yt/visualization/volume_rendering/old_camera.py index 1957871940d..cde95c99c0f 100644 --- a/yt/visualization/volume_rendering/old_camera.py +++ b/yt/visualization/volume_rendering/old_camera.py @@ -251,8 +251,8 @@ def project_to_plane(self, pos, res=None): dy = np.dot(pos - self.origin, self.orienter.unit_vectors[0]) dz = np.dot(pos - self.center, self.orienter.unit_vectors[2]) # Transpose into image coords. - py = (res[0] * (dx / self.width[0])).astype("int") - px = (res[1] * (dy / self.width[1])).astype("int") + py = (res[0] * (dx / self.width[0])).astype("int64") + px = (res[1] * (dy / self.width[1])).astype("int64") return px, py, dz def draw_grids(self, im, alpha=0.3, cmap=None, min_level=None, max_level=None): @@ -1413,8 +1413,8 @@ def project_to_plane(self, pos, res=None): dy = np.dot(pos1 - sight_center, self.orienter.unit_vectors[1]) dz = np.dot(pos1 - sight_center, self.orienter.unit_vectors[2]) # Transpose into image coords.
- px = (res[0] * 0.5 + res[0] / self.width[0] * dx).astype("int") - py = (res[1] * 0.5 + res[1] / self.width[1] * dy).astype("int") + px = (res[0] * 0.5 + res[0] / self.width[0] * dx).astype("int64") + py = (res[1] * 0.5 + res[1] / self.width[1] * dy).astype("int64") return px, py, dz def yaw(self, theta, rot_center): @@ -1989,7 +1989,7 @@ def plot_allsky_healpix( import matplotlib.figure if rotation is None: - rotation = np.eye(3).astype("float64") + rotation = np.eye(3, dtype="float64") img, count = pixelize_healpix(nside, image, resolution, resolution, rotation) diff --git a/yt/visualization/volume_rendering/transfer_functions.py b/yt/visualization/volume_rendering/transfer_functions.py index cf259f91ca0..25ad125a8ee 100644 --- a/yt/visualization/volume_rendering/transfer_functions.py +++ b/yt/visualization/volume_rendering/transfer_functions.py @@ -37,7 +37,7 @@ def __init__(self, x_bounds, nbins=256): # Strip units off of x_bounds, if any x_bounds = [np.float64(xb) for xb in x_bounds] self.x_bounds = x_bounds - self.x = np.linspace(x_bounds[0], x_bounds[1], nbins).astype("float64") + self.x = np.linspace(x_bounds[0], x_bounds[1], nbins, dtype="float64") self.y = np.zeros(nbins, dtype="float64") self.grad_field = -1 self.light_source_v = self.light_source_c = np.zeros(3, "float64")
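
Note on the pattern (an illustrative sketch, not part of the patch): the changes above fall into three recurring shapes. The arrays, values, and names below are made up for demonstration; only the call patterns mirror what the patch does.

    import numpy as np

    # 1) Request the target dtype at construction time instead of converting
    #    after the fact: one allocation instead of two.
    dims = np.array(["16", "16", "8"], dtype="int64")     # was: np.array([...]).astype("int")
    bins = np.linspace(0.0, 1.0, 256, dtype="float64")    # was: np.linspace(...).astype("float64")
    pts = np.arange(15, dtype="float64").reshape((5, 3))  # was: ...reshape(...).astype("float64")

    # 2) astype(..., copy=False) returns the input array unchanged when it
    #    already has the requested dtype; it only copies when a cast is needed.
    rho = np.ones(8, dtype="float64")
    assert rho.astype("float64", copy=False) is rho

    # 3) With numpy>=1.20, concatenate can cast while it copies, making a
    #    trailing .astype(...) pass (and its extra copy) unnecessary.
    parts = [np.zeros(4, dtype="float32"), np.ones(4, dtype="float32")]
    merged = np.concatenate(parts, axis=0, dtype="float64")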