Merge pull request #24 from jagar2/main
Update for modern Numpy
ramav87 authored Aug 27, 2024
2 parents 5864d59 + 11822e4 commit 711ed76
Showing 26 changed files with 165 additions and 152 deletions.
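Context (not part of this commit): the bare aliases np.float, np.bool and np.int were deprecated in NumPy 1.20 and removed in NumPy 1.24, which is the NumPy behaviour these changes address. The builtins float and bool remain valid dtype specifiers (NumPy maps them to float64 and bool_), and sized types such as np.float32 are still available where single precision is intended. A minimal sketch of that behaviour, for illustration only:

    import numpy as np

    # Removed aliases: np.float / np.bool raise AttributeError on NumPy >= 1.24.
    # The builtins float and bool are accepted as dtype specifiers instead.
    mask = np.ones(4, dtype=bool)    # was dtype=np.bool; stored as numpy.bool_
    vals = np.zeros(4, dtype=float)  # stored as float64; np.float32 still exists for single precision

    # Structured dtypes built from the builtin float, as in loop_metrics32 / sho32 below:
    metrics32 = np.dtype({'names': ['Area', 'Offset'],
                          'formats': [float, float]})  # every field becomes float64

    # Element size of a named dtype without instantiating a scalar:
    bytes_per_value = np.dtype('float32').itemsize     # 4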
4 changes: 2 additions & 2 deletions BGlib/__version__.py
@@ -1,2 +1,2 @@
version = '0.0.4'
time = '2023-08-24 16:00:00'
version = '0.0.6'
time = '2024-06-10 12:00:00'
24 changes: 12 additions & 12 deletions BGlib/be/analysis/be_loop_fitter.py
@@ -39,17 +39,17 @@
'''
loop_metrics32 = np.dtype({'names': ['Area', 'Centroid x', 'Centroid y',
'Rotation Angle [rad]', 'Offset'],
'formats': [np.float32, np.float32, np.float32,
np.float32, np.float32]})
'formats': [float, float, float,
float, float]})

crit32 = np.dtype({'names': ['AIC_loop', 'BIC_loop', 'AIC_line', 'BIC_line'],
'formats': [np.float32, np.float32, np.float32,
np.float32]})
'formats': [float, float, float,
float]})

__field_names = ['a_0', 'a_1', 'a_2', 'a_3', 'a_4', 'b_0', 'b_1', 'b_2', 'b_3',
'R2 Criterion']
loop_fit32 = np.dtype({'names': __field_names,
'formats': [np.float32 for name in __field_names]})
'formats': [float for name in __field_names]})


class BELoopFitter(Fitter):
@@ -217,7 +217,7 @@ def _create_projection_datasets(self):

# Write datasets
self.h5_projected_loops = create_empty_dataset(self.h5_main,
np.float32,
float,
'Projected_Loops',
h5_group=self.h5_results_grp)

@@ -351,7 +351,7 @@ def _read_data_chunk(self):
0]
else:
this_forc_spec_inds = np.ones(
shape=self.h5_main.h5_spec_inds.shape[1], dtype=np.bool)
shape=self.h5_main.h5_spec_inds.shape[1], dtype=bool)

if self._num_forcs:
this_forc_dc_vec = get_unit_values(
@@ -495,7 +495,7 @@ def _read_guess_chunk(self):
np.where(self._h5_guess.h5_spec_inds[forc_pos] == forc_ind)[0]
else:
this_forc_spec_inds = np.ones(
shape=self._h5_guess.h5_spec_inds.shape[1], dtype=np.bool)
shape=self._h5_guess.h5_spec_inds.shape[1], dtype=bool)

this_forc_2d = self._guess[:, this_forc_spec_inds]
if self.verbose and self.mpi_rank == 0:
@@ -532,7 +532,7 @@ def _read_guess_chunk(self):
# TODO: avoid memory copies!
float_mat = np.zeros(shape=list(dc_rest_2d.shape) +
[len(loop_fit32.names)-1],
dtype=np.float32)
dtype=float)
if self.verbose and self.mpi_rank == 0:
print('Created empty float matrix of shape: {}'
'.'.format(float_mat.shape))
@@ -569,7 +569,7 @@ def _project_loop(sho_response, dc_offset):
ancillary : numpy.ndarray
Metrics for the loop projection
"""
# projected_loop = np.zeros(shape=sho_response.shape, dtype=np.float32)
# projected_loop = np.zeros(shape=sho_response.shape, dtype=float)
ancillary = np.zeros(shape=1, dtype=loop_metrics32)

pix_dict = projectLoop(np.squeeze(dc_offset),
@@ -683,7 +683,7 @@ def _unit_compute_guess(self):
print('Unzipping loop projection results')
loop_mets = np.zeros(shape=len(results), dtype=loop_metrics32)
proj_loops = np.zeros(shape=(len(results), self.data[0][0].shape[1]),
dtype=np.float32)
dtype=float)

if self.verbose and self.mpi_rank == 0:
print(
@@ -1302,7 +1302,7 @@ def _loop_fit_tree(tree, guess_mat, fit_results, vdc_shifted,
num_nodes = len(cluster_tree.nodes)

# prepare the guess and fit matrices
loop_guess_mat = np.zeros(shape=(num_nodes, 9), dtype=np.float32)
loop_guess_mat = np.zeros(shape=(num_nodes, 9), dtype=float)
# loop_fit_mat = np.zeros(shape=loop_guess_mat.shape, dtype=loop_guess_mat.dtype)
loop_fit_results = list(
np.arange(num_nodes, dtype=np.uint16)) # temporary placeholder
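For reference, a standalone sketch (illustration only, not part of the commit) of how a structured dtype such as loop_metrics32 behaves once its formats are the builtin float: every named field is stored as float64 (the *32 suffix in the name no longer reflects the storage width) and is addressed by field name.

    import numpy as np

    loop_metrics32 = np.dtype({'names': ['Area', 'Centroid x', 'Centroid y',
                                         'Rotation Angle [rad]', 'Offset'],
                               'formats': [float, float, float, float, float]})

    ancillary = np.zeros(shape=1, dtype=loop_metrics32)  # one record, all fields 0.0
    ancillary['Area'] = 2.5                              # fields are addressed by name
    print(ancillary.dtype.names)    # ('Area', 'Centroid x', 'Centroid y', 'Rotation Angle [rad]', 'Offset')
    print(ancillary['Area'].dtype)  # float64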
8 changes: 4 additions & 4 deletions BGlib/be/analysis/be_relax_fit.py
@@ -97,7 +97,7 @@ def __init__(self, h5_main, variables=None, fit_method='Exponential', sens=1, ph
self.write_dc_offset_values = self.all_dc_offset_values[::2]

#if there is only one RS spectrum
if type(self.write_dc_offset_values) == np.float32:
if type(self.write_dc_offset_values) == float:
self.write_dc_offset_values = [self.write_dc_offset_values]

if self.starts_with == 'read':
@@ -107,7 +107,7 @@ def __init__(self, h5_main, variables=None, fit_method='Exponential', sens=1, ph
np.argwhere(self.h5_main.h5_spec_vals[
0] == self.no_read_steps)]
# if there is only one RS spectrum
if type(self.write_dc_offset_values) == np.float32:
if type(self.write_dc_offset_values) == float:
self.write_dc_offset_values = [self.write_dc_offset_values]

self.no_read_offset = len(self.all_dc_offset_values) - self.no_rs_spectra
@@ -248,7 +248,7 @@ def _create_results_datasets(self):
results_units = 'pm'

berelaxfit32 = np.dtype({'names': field_names,
'formats': [np.float32 for name in field_names]})
'formats': [float for name in field_names]})
self.h5_results = usid.hdf_utils.write_main_dataset(self.h5_results_grp, results_shape, results_dset_name,
results_quantity, results_units, pos_dims, spec_dims,
dtype=berelaxfit32, h5_pos_inds=self.h5_main.h5_pos_inds,
@@ -292,7 +292,7 @@ def _write_results_chunk(self):
if self.fit_method == 'Logistic':
field_names = ['A', 'K', 'B', 'v', 'Q', 'C']
berelaxfit32 = np.dtype({'names': field_names,
'formats': [np.float32 for name in field_names]})
'formats': [float for name in field_names]})
# write and flush results
results = usid.io.dtype_utils.stack_real_to_compound(self._results, compound_type=berelaxfit32)
results = results.reshape(self.h5_results.shape[0], -1)
2 changes: 1 addition & 1 deletion BGlib/be/analysis/be_sho_fitter.py
@@ -29,7 +29,7 @@
_field_names = ['Amplitude [V]', 'Frequency [Hz]', 'Quality Factor',
'Phase [rad]', 'R2 Criterion']
sho32 = np.dtype({'names': _field_names,
'formats': [np.float32 for name in _field_names]})
'formats': [float for name in _field_names]})


class SHOGuessFunc(Enum):
26 changes: 13 additions & 13 deletions BGlib/be/analysis/utils/be_loop.py
@@ -18,19 +18,19 @@
from scipy.special import erf, erfinv
import warnings

# switching32 = np.dtype([('V+', np.float32),
# ('V-', np.float32),
# ('Imprint', np.float32),
# ('R+', np.float32),
# ('R-', np.float32),
# ('Switchable Polarization', np.float32),
# ('Work of Switching', np.float32),
# ('Nucleation Bias 1', np.float32),
# ('Nucleation Bias 2', np.float32)])
# switching32 = np.dtype([('V+', float),
# ('V-', float),
# ('Imprint', float),
# ('R+', float),
# ('R-', float),
# ('Switchable Polarization', float),
# ('Work of Switching', float),
# ('Nucleation Bias 1', float),
# ('Nucleation Bias 2', float)])
field_names = ['V+', 'V-', 'Imprint', 'R+', 'R-', 'Switchable Polarization',
'Work of Switching', 'Nucleation Bias 1', 'Nucleation Bias 2']
switching32 = np.dtype({'names': field_names,
'formats': [np.float32 for name in field_names]})
'formats': [float for name in field_names]})


###############################################################################
@@ -309,7 +309,7 @@ def loop_fit_jacobian(vdc, coef_vec):
vdc = np.squeeze(np.array(vdc))
num_steps = vdc.size

J = np.zeros([num_steps, 9], dtype=np.float32)
J = np.zeros([num_steps, 9], dtype=float)

V1 = vdc[:int(num_steps / 2)]
V2 = vdc[int(num_steps / 2):]
@@ -602,8 +602,8 @@ def intersection(L1, L2):
return intersection(line(A, B), line(C, D))

# start and end coordinates of each line segment defining the convex hull
outline_1 = np.zeros((hull.simplices.shape[0], 2), dtype=np.float)
outline_2 = np.zeros((hull.simplices.shape[0], 2), dtype=np.float)
outline_1 = np.zeros((hull.simplices.shape[0], 2), dtype=float)
outline_2 = np.zeros((hull.simplices.shape[0], 2), dtype=float)
for index, pair in enumerate(hull.simplices):
outline_1[index, :] = points[pair[0]]
outline_2[index, :] = points[pair[1]]
22 changes: 11 additions & 11 deletions BGlib/be/translators/be_odf.py
@@ -377,7 +377,7 @@ def translate(self, file_path, show_plots=True, save_plots=True,
UDVS_labs = ['step_num', 'dc_offset', 'ac_amp', 'wave_type', 'wave_mod', 'be-line']
UDVS_units = ['', 'V', 'A', '', '', '']
UDVS_mat = np.array([1, 0, parm_dict['BE_amplitude_[V]'], 1, 1, 1],
dtype=np.float32).reshape(1, len(UDVS_labs))
dtype=float).reshape(1, len(UDVS_labs))

old_spec_inds = np.vstack((np.arange(tot_bins, dtype=INDICES_DTYPE),
np.zeros(tot_bins, dtype=INDICES_DTYPE)))
@@ -396,20 +396,20 @@
band_width = parm_dict['BE_band_width_[Hz]'] * (0.5 - parm_dict['BE_band_edge_trim'])
st_f = parm_dict['BE_center_frequency_[Hz]'] - band_width
en_f = parm_dict['BE_center_frequency_[Hz]'] + band_width
bin_freqs = np.linspace(st_f, en_f, bins_per_step, dtype=np.float32)
bin_freqs = np.linspace(st_f, en_f, bins_per_step, dtype=float)

if verbose:
print('\tGenerating BE arrays of length: '
'{}'.format(bins_per_step))
bin_inds = np.zeros(shape=bins_per_step, dtype=np.int32)
bin_FFT = np.zeros(shape=bins_per_step, dtype=np.complex64)
ex_wfm = np.zeros(shape=bins_per_step, dtype=np.float32)
ex_wfm = np.zeros(shape=bins_per_step, dtype=float)

# Forcing standardized datatypes:
bin_inds = np.int32(bin_inds)
bin_freqs = np.float32(bin_freqs)
bin_freqs = float(bin_freqs)
bin_FFT = np.complex64(bin_FFT)
ex_wfm = np.float32(ex_wfm)
ex_wfm = float(ex_wfm)

self.FFT_BE_wave = bin_FFT

@@ -711,8 +711,8 @@ def _read_beps_data(self, path_dict, udvs_steps, mode, add_pixel=False):
take_conjugate = requires_conjugate(rand_spectra, cores=self._cores)

self.mean_resp = np.zeros(shape=(self.h5_raw.shape[1]), dtype=np.complex64)
self.max_resp = np.zeros(shape=(self.h5_raw.shape[0]), dtype=np.float32)
self.min_resp = np.zeros(shape=(self.h5_raw.shape[0]), dtype=np.float32)
self.max_resp = np.zeros(shape=(self.h5_raw.shape[0]), dtype=float)
self.min_resp = np.zeros(shape=(self.h5_raw.shape[0]), dtype=float)

numpix = self.h5_raw.shape[0]
"""
@@ -945,7 +945,7 @@ def _read_secondary_channel(self, h5_meas_group, aux_file_path):
h5_pos_vals=self.h5_raw.h5_pos_vals,
h5_spec_inds=h5_current_spec_inds,
h5_spec_vals=h5_current_spec_values,
dtype=np.float32, # data type / precision
dtype=float, # data type / precision
main_dset_attrs={'IO_rate': 4E+6, 'Amplifier_Gain': 9},
verbose=self._verbose)

@@ -961,7 +961,7 @@ def _read_secondary_channel(self, h5_meas_group, aux_file_path):
spectral_len = spectral_len // 2

# calculate the # positions that can be stored in memory in one go.
b_per_position = np.float32(0).itemsize * spectral_len
b_per_position = np.dtype('float32').itemsize * spectral_len

max_pos_per_read = int(np.floor((get_available_memory()) / b_per_position))

@@ -1565,7 +1565,7 @@ def translate_val(target, strvals, numvals):
'wave_type', 'wave_mod', 'in-field',
'out-of-field']
UD_VS_table_unit = ['', 'V', 'A', '', '', 'V', 'V']
udvs_table = np.zeros(shape=(num_VS_steps, 7), dtype=np.float32)
udvs_table = np.zeros(shape=(num_VS_steps, 7), dtype=float)

udvs_table[:, 0] = np.arange(0, num_VS_steps) # Python base 0
udvs_table[:, 1] = UD_dc_vec
@@ -1606,7 +1606,7 @@ def translate_val(target, strvals, numvals):
UD_dc_vec = VS_offset * np.ones(num_VS_steps)
UD_VS_table_label = ['step_num', 'dc_offset', 'ac_amp', 'wave_type', 'wave_mod', 'forward', 'reverse']
UD_VS_table_unit = ['', 'V', 'A', '', '', 'A', 'A']
udvs_table = np.zeros(shape=(num_VS_steps, 7), dtype=np.float32)
udvs_table = np.zeros(shape=(num_VS_steps, 7), dtype=float)
udvs_table[:, 0] = np.arange(1, num_VS_steps + 1)
udvs_table[:, 1] = UD_dc_vec
udvs_table[:, 2] = vs_amp_vec
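The _read_secondary_channel hunk above sizes its read batches from a per-position byte count (np.dtype('float32').itemsize * spectral_len). A minimal standalone sketch of that arithmetic follows, with a hypothetical helper and made-up numbers; the real code draws its memory budget from get_available_memory():

    import numpy as np

    def positions_per_read(spectral_len, available_bytes, dtype='float32'):
        # bytes needed to hold one position's spectrum at the given precision
        b_per_position = np.dtype(dtype).itemsize * spectral_len
        return int(np.floor(available_bytes / b_per_position))

    # e.g. a 1 GiB budget and 2**18 float32 samples per position
    print(positions_per_read(2**18, 1024**3))  # -> 1024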
16 changes: 8 additions & 8 deletions BGlib/be/translators/be_odf_relaxation.py
@@ -115,13 +115,13 @@ def translate(self, file_path, show_plots=True, save_plots=True, do_histogram=Fa
band_width = parm_dict['BE_band_width_[Hz]'] * (0.5 - parm_dict['BE_band_edge_trim'])
st_f = parm_dict['BE_center_frequency_[Hz]'] - band_width
en_f = parm_dict['BE_center_frequency_[Hz]'] + band_width
bin_freqs = np.linspace(st_f, en_f, len(bin_inds), dtype=np.float32)
bin_freqs = np.linspace(st_f, en_f, len(bin_inds), dtype=float)

# Forcing standardized datatypes:
bin_inds = np.int32(bin_inds)
bin_freqs = np.float32(bin_freqs)
bin_freqs = float(bin_freqs)
bin_FFT = np.complex64(bin_FFT)
ex_wfm = np.float32(ex_wfm)
ex_wfm = float(ex_wfm)

self.FFT_BE_wave = bin_FFT

@@ -188,7 +188,7 @@ def translate(self, file_path, show_plots=True, save_plots=True, do_histogram=Fa
udvs_slices[col_name] = (slice(None), slice(col_ind, col_ind + 1))
h5_UDVS = chan_grp.create_dataset('UDVS',
data=UDVS_mat,
dtype=np.float32)
dtype=float)
write_simple_attrs(h5_UDVS, {'labels': UDVS_labs, 'units': UDVS_units})

h5_bin_steps = chan_grp.create_dataset('Bin_Steps',
@@ -206,7 +206,7 @@ def translate(self, file_path, show_plots=True, save_plots=True, do_histogram=Fa
dtype=np.uint32)
h5_bin_freq = chan_grp.create_dataset('Bin_Frequencies',
data=bin_freqs,
dtype=np.float32)
dtype=float)
h5_bin_FFT = chan_grp.create_dataset('Bin_FFT',
data=bin_FFT,
dtype=np.complex64)
@@ -262,8 +262,8 @@ def translate(self, file_path, show_plots=True, save_plots=True, do_histogram=Fa
compression='gzip')

self.mean_resp = np.zeros(shape=(self.ds_main.shape[1]), dtype=np.complex64)
self.max_resp = np.zeros(shape=(self.ds_main.shape[0]), dtype=np.float32)
self.min_resp = np.zeros(shape=(self.ds_main.shape[0]), dtype=np.float32)
self.max_resp = np.zeros(shape=(self.ds_main.shape[0]), dtype=float)
self.min_resp = np.zeros(shape=(self.ds_main.shape[0]), dtype=float)

# Now read the raw data files:
self._read_data(path_dict['read_real'], path_dict['read_imag'], parm_dict)
@@ -594,7 +594,7 @@ def translateVal(target, strvals, numvals):
num_VS_steps = total_steps * 2 # To account for IF and OOF

UD_VS_table_label = ['step_num', 'dc_offset', 'ac_amp', 'wave_type', 'wave_mod', 'in-field', 'out-of-field']
UD_VS_table = np.zeros(shape=(num_VS_steps, 7), dtype=np.float32)
UD_VS_table = np.zeros(shape=(num_VS_steps, 7), dtype=float)
UD_VS_table_unit = ['', 'V', 'A', '', '', 'V', 'V']

UD_VS_table[:, 0] = np.arange(0, num_VS_steps) # Python base 0
2 changes: 1 addition & 1 deletion BGlib/be/translators/beps_data_generator.py
@@ -564,7 +564,7 @@ def _calc_sho(self, coef_OF_mat, coef_IF_mat, amp_noise=0.1, phase_noise=0.1, q_
sho_if_inds = sho_field == 1

# determine how many pixels can be read at once
mem_per_pix = vdc_vec.size * np.float32(0).itemsize
mem_per_pix = vdc_vec.size * float(0).itemsize
#free_mem = self.max_ram - vdc_vec.size * vdc_vec.dtype.itemsize * 6
free_mem = 1024
batch_size = int(free_mem / mem_per_pix)
