From d3e414255f0485c173b82159865d360a0460db6a Mon Sep 17 00:00:00 2001
From: Kevin Welsh
Date: Mon, 31 Mar 2025 11:20:04 -0400
Subject: [PATCH 01/19] Remapped package files

---
 hexrd/{ => core}/config/__init__.py | 0
 hexrd/{ => core}/config/beam.py | 0
 hexrd/{ => core}/config/config.py | 0
 hexrd/{ => core}/config/dumper.py | 0
 hexrd/{ => core}/config/imageseries.py | 0
 hexrd/{ => core}/config/instrument.py | 0
 hexrd/{ => core}/config/loader.py | 0
 hexrd/{ => core}/config/material.py | 0
 hexrd/{ => core}/config/root.py | 0
 hexrd/{ => core}/config/utils.py | 0
 hexrd/{ => core}/constants.py | 0
 hexrd/{ => core}/convolution/__init__.py | 0
 hexrd/{ => core}/convolution/convolve.py | 0
 hexrd/{ => core}/convolution/src/convolve.c | 0
 hexrd/{ => core}/convolution/src/convolve.h | 0
 hexrd/{ => core}/convolution/utils.py | 0
 hexrd/{ => core}/deprecation.py | 0
 hexrd/{ => core}/distortion/__init__.py | 0
 hexrd/{ => core}/distortion/dexela_2923.py | 0
 .../{ => core}/distortion/dexela_2923_quad.py | 0
 hexrd/{ => core}/distortion/distortionabc.py | 0
 hexrd/{ => core}/distortion/ge_41rt.py | 0
 hexrd/{ => core}/distortion/identity.py | 0
 hexrd/{ => core}/distortion/nyi.py | 0
 hexrd/{ => core}/distortion/registry.py | 0
 hexrd/{ => core}/distortion/utils.py | 0
 hexrd/{ => core}/extensions/__init__.py | 0
 hexrd/{ => core}/fitting/__init__.py | 0
 .../fitting/calibration/__init__.py | 0
 .../fitting/calibration/abstract_grain.py | 0
 hexrd/{ => core}/fitting/calibration/grain.py | 0
 .../calibration/relative_constraints.py | 0
 hexrd/{ => core}/fitting/fitpeak.py | 0
 hexrd/{ => core}/fitting/peakfunctions.py | 0
 hexrd/{ => core}/fitting/spectrum.py | 0
 hexrd/{ => core}/fitting/utils.py | 0
 hexrd/{ => core}/gridutil.py | 0
 hexrd/{ => core}/imageseries/__init__.py | 0
 hexrd/{ => core}/imageseries/baseclass.py | 0
 .../{ => core}/imageseries/imageseriesabc.py | 0
 .../{ => core}/imageseries/imageseriesiter.py | 0
 hexrd/{ => core}/imageseries/load/__init__.py | 0
 hexrd/{ => core}/imageseries/load/array.py | 0
 .../imageseries/load/eiger_stream_v1.py | 0
 .../{ => core}/imageseries/load/framecache.py | 0
 hexrd/{ => core}/imageseries/load/function.py | 0
 hexrd/{ => core}/imageseries/load/hdf5.py | 0
 .../{ => core}/imageseries/load/imagefiles.py | 0
 hexrd/{ => core}/imageseries/load/metadata.py | 0
 hexrd/{ => core}/imageseries/load/rawimage.py | 0
 hexrd/{ => core}/imageseries/load/registry.py | 0
 hexrd/{ => core}/imageseries/load/trivial.py | 0
 hexrd/{ => core}/imageseries/omega.py | 0
 hexrd/{ => core}/imageseries/process.py | 0
 hexrd/{ => core}/imageseries/save.py | 0
 hexrd/{ => core}/imageseries/stats.py | 0
 hexrd/{ => core}/imageutil.py | 0
 hexrd/{ => core}/instrument/__init__.py | 0
 hexrd/{ => core}/instrument/constants.py | 0
 .../instrument/cylindrical_detector.py | 0
 hexrd/{ => core}/instrument/detector.py | 0
 .../instrument/detector_coatings.py | 0
 .../{ => core}/instrument/hedm_instrument.py | 0
 .../{ => core}/instrument/physics_package.py | 0
 .../{ => core}/instrument/planar_detector.py | 0
 hexrd/{ => core}/material/__init__.py | 0
 hexrd/{ => core}/material/crystallography.py | 0
 hexrd/{ => core}/material/jcpds.py | 0
 hexrd/{ => core}/material/material.py | 0
 hexrd/{ => core}/material/mksupport.py | 0
 hexrd/{ => core}/material/spacegroup.py | 0
 hexrd/{ => core}/material/symbols.py | 0
 hexrd/{ => core}/material/symmetry.py | 0
 hexrd/{ => core}/material/unitcell.py | 0
 hexrd/{ => core}/material/utils.py | 0
 hexrd/{ => core}/matrixutil.py | 0
 .../projections}/__init__.py | 0
 hexrd/{ => core}/projections/polar.py | 0
 hexrd/{ => core}/projections/spherical.py | 0
 hexrd/{ => core}/resources/Anomalous.h5 | Bin
 .../resources/BBXRD_IMAGE-PLATE-BACK_bnd.txt | 0
 .../BBXRD_IMAGE-PLATE-BOTTOM_bnd.txt | 0
 .../resources/BBXRD_IMAGE-PLATE-LEFT_bnd.txt | 0
 .../resources/BBXRD_IMAGE-PLATE-RIGHT_bnd.txt | 0
 .../resources/BBXRD_IMAGE-PLATE-TOP_bnd.txt | 0
 .../resources/FIDDLE_IMAGE-PLATE-1_bnd.txt | 0
 .../resources/PXRDIP_IMAGE-PLATE-B_bnd.txt | 0
 .../resources/PXRDIP_IMAGE-PLATE-D_bnd.txt | 0
 .../resources/PXRDIP_IMAGE-PLATE-L_bnd.txt | 0
 .../resources/PXRDIP_IMAGE-PLATE-R_bnd.txt | 0
 .../resources/PXRDIP_IMAGE-PLATE-U_bnd.txt | 0
 .../resources/TARDIS_IMAGE-PLATE-2_bnd.txt | 0
 .../resources/TARDIS_IMAGE-PLATE-3_bnd.txt | 0
 .../TARDIS_IMAGE-PLATE-3_bnd_cropped.txt | 0
 .../resources/TARDIS_IMAGE-PLATE-4_bnd.txt | 0
 .../resources}/__init__.py | 0
 .../resources/characteristic_xray_energies.h5 | Bin
 .../detector_templates/GE-detector.yml | 0
 .../detector_templates/Hydra_Feb19.yml | 0
 .../Pilatus3X_2M-detector.yml | 0
 .../detector_templates/Pixirad2-detector.yml | 0
 .../Varex_4343CT-detector.yml | 0
 .../resources/detector_templates}/__init__.py | 0
 .../dexela-2923-detector-subpanel.yml | 0
 .../dexela-2923-detector.yml | 0
 .../resources/fiddle_reference_config.yml | 0
 .../instrument_templates}/__init__.py | 0
 .../resources/instrument_templates/dcs.yml | 0
 .../instrument_templates/dual_dexelas.yml | 0
 .../instrument_templates/rigaku.hexrd | Bin
 .../resources/instrument_templates/varex.yml | 0
 hexrd/{ => core}/resources/mu_en.h5 | Bin
 .../{ => core}/resources/pinhole_materials.h5 | Bin
 .../resources/pxrdip_reference_config.yml | 0
 .../{ => core}/resources/surface_harmonics.h5 | 0
 .../tardis_2xrs_reference_config.yml | 0
 .../resources/tardis_reference_config.yml | 0
 .../{ => core}/resources/window_materials.h5 | Bin
 hexrd/{ => core}/rotations.py | 0
 hexrd/{ => core}/transforms/Makefile | 0
 hexrd/{ => core}/transforms/__init__.py | 0
 .../transforms/cpp_sublibrary/Makefile | 0
 .../cpp_sublibrary/src/inverse_distortion.cpp | 0
 .../cpp_sublibrary/src/transforms.cpp | 0
 hexrd/{ => core}/transforms/debug_helpers.h | 0
 .../{ => core}/transforms/new_capi/README.md | 0
 .../transforms/new_capi/angles_to_dvec.c | 0
 .../transforms/new_capi/angles_to_gvec.c | 0
 .../transforms/new_capi/gvec_to_xy.c | 0
 .../transforms/new_capi/make_beam_rmat.c | 0
 .../transforms/new_capi/make_binary_rmat.c | 0
 .../transforms/new_capi/make_detector_rmat.c | 0
 .../transforms/new_capi/make_rmat_of_expmap.c | 0
 .../transforms/new_capi/make_sample_rmat.c | 0
 hexrd/{ => core}/transforms/new_capi/module.c | 0
 .../transforms/new_capi/ndargs_helper.c | 0
 .../transforms/new_capi/ndargs_helper.h | 0
 .../{ => core}/transforms/new_capi/new_func.c | 0
 .../new_capi/oscill_angles_of_HKLs.c | 0
 .../transforms/new_capi/quat_distance.c | 0
 .../transforms/new_capi/reference.py | 0
 .../new_capi/rotate_vecs_about_axis.c | 0
 .../new_capi/transforms_prototypes.h | 0
 .../transforms/new_capi/transforms_types.h | 0
 .../transforms/new_capi/transforms_utils.h | 0
 .../transforms/new_capi/unit_row_vector.c | 0
 .../new_capi/validate_angle_ranges.c | 0
 .../transforms/new_capi/xf_new_capi.py | 0
 .../transforms/new_capi/xy_to_gvec.c | 0
 hexrd/{ => core}/transforms/old_xfcapi.py | 0
 hexrd/{ => core}/transforms/stdbool.h | 0
 hexrd/{ => core}/transforms/transforms_CAPI.c | 0
 hexrd/{ => core}/transforms/transforms_CAPI.h | 0
 .../{ => core}/transforms/transforms_CFUNC.c | 0
 .../{ => core}/transforms/transforms_CFUNC.h | 0
 hexrd/{ => core}/transforms/xf.py | 0
 hexrd/{ => core}/transforms/xfcapi.py | 0
 hexrd/{ => core}/utils/__init__.py | 0
 hexrd/{ => core}/utils/compatibility.py | 0
 hexrd/{ => core}/utils/concurrent.py | 0
 hexrd/{ => core}/utils/decorators.py | 0
 hexrd/{ => core}/utils/hdf5.py | 0
 hexrd/{ => core}/utils/hkl.py | 0
 hexrd/{ => core}/utils/json.py | 0
 .../{ => core}/utils/multiprocess_generic.py | 0
 hexrd/{ => core}/utils/panel_buffer.py | 0
 hexrd/{ => core}/utils/profiler.py | 0
 hexrd/{ => core}/utils/progressbar.py | 0
 hexrd/{ => core}/utils/warnings.py | 0
 hexrd/{ => core}/utils/yaml.py | 0
 hexrd/{ => core}/valunits.py | 0
 hexrd/hed/instrument/detector.py | 2086 +++++++++++++
 hexrd/hed/instrument/hedm_instrument.py | 2747 +++++++++++++++++
 hexrd/{ => hed}/xrdutil/phutil.py | 0
 hexrd/{ => hed}/xrdutil/utils.py | 0
 hexrd/{ => hedm}/cli/__init__.py | 0
 hexrd/{ => hedm}/cli/documentation.py | 0
 hexrd/{ => hedm}/cli/find_orientations.py | 0
 hexrd/{ => hedm}/cli/fit_grains.py | 0
 hexrd/{ => hedm}/cli/help.py | 0
 hexrd/{ => hedm}/cli/main.py | 0
 hexrd/{ => hedm}/cli/pickle23.py | 0
 hexrd/{ => hedm}/cli/preprocess.py | 0
 hexrd/{ => hedm}/cli/test.py | 0
 hexrd/hedm/config/__init__.py | 50 +
 hexrd/hedm/config/dumper.py | 62 +
 hexrd/{ => hedm}/config/findorientations.py | 0
 hexrd/{ => hedm}/config/fitgrains.py | 0
 hexrd/hedm/config/instrument.py | 63 +
 hexrd/hedm/config/loader.py | 25 +
 hexrd/hedm/config/root.py | 203 ++
 hexrd/hedm/config/utils.py | 73 +
 hexrd/{ => hedm}/findorientations.py | 0
 hexrd/{ => hedm}/fitgrains.py | 0
 hexrd/{ => hedm}/fitting/grains.py | 0
 hexrd/{ => hedm}/grainmap/__init__.py | 0
 hexrd/{ => hedm}/grainmap/nfutil.py | 0
 hexrd/{ => hedm}/grainmap/tomoutil.py | 0
 hexrd/{ => hedm}/grainmap/vtkutil.py | 0
 hexrd/{ => hedm}/indexer.py | 0
 hexrd/hedm/instrument/detector.py | 2086 +++++++++++++
 hexrd/hedm/instrument/hedm_instrument.py | 2747 +++++++++++++++++
 hexrd/hedm/instrument/physics_package.py | 295 ++
 .../ipfcolor}/__init__.py | 0
 hexrd/{ => hedm}/ipfcolor/colorspace.py | 0
 hexrd/{ => hedm}/ipfcolor/sphere_sector.py | 0
 hexrd/hedm/material/crystallography.py | 2255 ++++++++++++++
 hexrd/hedm/material/unitcell.py | 1989 ++++++++++++
 .../preprocess}/__init__.py | 0
 .../preprocess/argument_classes_factory.py | 0
 hexrd/{ => hedm}/preprocess/preprocessors.py | 0
 hexrd/{ => hedm}/preprocess/profiles.py | 0
 hexrd/{ => hedm}/preprocess/yaml_internals.py | 0
 .../{ => hedm}/sampleOrientations/__init__.py | 0
 .../sampleOrientations/conversions.py | 0
 hexrd/{ => hedm}/sampleOrientations/rfz.py | 0
 .../sampleOrientations/sampleRFZ.py | 0
 hexrd/{ => hedm}/xrdutil/__init__.py | 0
 hexrd/hedm/xrdutil/utils.py | 1516 +++++++++
 hexrd/{ => laue}/fitting/calibration/laue.py | 0
 hexrd/laue/instrument/detector.py | 2086 +++++++++++++
 hexrd/laue/instrument/hedm_instrument.py | 2747 +++++++++++++++++
 hexrd/laue/material/crystallography.py | 2255 ++++++++++++++
 hexrd/laue/xrdutil/utils.py | 1516 +++++++++
 .../fitting/calibration/calibrator.py | 0
 .../fitting/calibration/instrument.py | 0
 .../calibration/lmfit_param_handling.py | 0
 .../fitting/calibration/powder.py | 0
 .../fitting/calibration/structureless.py | 0
 hexrd/powder/instrument/detector.py | 2086 +++++++++++++
 hexrd/powder/instrument/hedm_instrument.py | 2747 +++++++++++++++++
 hexrd/powder/material/crystallography.py | 2255 ++++++++++++++
 hexrd/{ => powder}/wppf/LeBailCalibration.py | 0
 hexrd/{ => powder}/wppf/RietveldHEDM.py | 0
 hexrd/{ => powder}/wppf/WPPF.py | 0
 hexrd/{ => powder}/wppf/__init__.py | 0
 hexrd/{ => powder}/wppf/derivatives.py | 0
 hexrd/{ => powder}/wppf/parameters.py | 0
 hexrd/{ => powder}/wppf/peakfunctions.py | 0
 hexrd/{ => powder}/wppf/phase.py | 0
 hexrd/{ => powder}/wppf/spectrum.py | 0
 hexrd/{ => powder}/wppf/texture.py | 0
 hexrd/{ => powder}/wppf/wppfsupport.py | 0
 hexrd/{ => powder}/wppf/xtal.py | 0
 244 files changed, 31889 insertions(+)
 rename hexrd/{ => core}/config/__init__.py (100%)
 rename hexrd/{ => core}/config/beam.py (100%)
 rename hexrd/{ => core}/config/config.py (100%)
 rename hexrd/{ => core}/config/dumper.py (100%)
 rename hexrd/{ => core}/config/imageseries.py (100%)
 rename hexrd/{ => core}/config/instrument.py (100%)
 rename hexrd/{ => core}/config/loader.py (100%)
 rename hexrd/{ => core}/config/material.py (100%)
 rename hexrd/{ => core}/config/root.py (100%)
 rename hexrd/{ => core}/config/utils.py (100%)
 rename hexrd/{ => core}/constants.py (100%)
 rename hexrd/{ => core}/convolution/__init__.py (100%)
 rename hexrd/{ => core}/convolution/convolve.py (100%)
 rename hexrd/{ => core}/convolution/src/convolve.c (100%)
 rename hexrd/{ => core}/convolution/src/convolve.h (100%)
 rename hexrd/{ => core}/convolution/utils.py (100%)
 rename hexrd/{ => core}/deprecation.py (100%)
 rename hexrd/{ => core}/distortion/__init__.py (100%)
 rename hexrd/{ => core}/distortion/dexela_2923.py (100%)
 rename hexrd/{ => core}/distortion/dexela_2923_quad.py (100%)
 rename hexrd/{ => core}/distortion/distortionabc.py (100%)
 rename hexrd/{ => core}/distortion/ge_41rt.py (100%)
 rename hexrd/{ => core}/distortion/identity.py (100%)
 rename hexrd/{ => core}/distortion/nyi.py (100%)
 rename hexrd/{ => core}/distortion/registry.py (100%)
 rename hexrd/{ => core}/distortion/utils.py (100%)
 mode change 100755 => 100644
 rename hexrd/{ => core}/extensions/__init__.py (100%)
 rename hexrd/{ => core}/fitting/__init__.py (100%)
 rename hexrd/{ => core}/fitting/calibration/__init__.py (100%)
 rename hexrd/{ => core}/fitting/calibration/abstract_grain.py (100%)
 rename hexrd/{ => core}/fitting/calibration/grain.py (100%)
 rename hexrd/{ => core}/fitting/calibration/relative_constraints.py (100%)
 rename hexrd/{ => core}/fitting/fitpeak.py (100%)
 rename hexrd/{ => core}/fitting/peakfunctions.py (100%)
 rename hexrd/{ => core}/fitting/spectrum.py (100%)
 rename hexrd/{ => core}/fitting/utils.py (100%)
 rename hexrd/{ => core}/gridutil.py (100%)
 rename hexrd/{ => core}/imageseries/__init__.py (100%)
 rename hexrd/{ => core}/imageseries/baseclass.py (100%)
 rename hexrd/{ => core}/imageseries/imageseriesabc.py (100%)
 rename hexrd/{ => core}/imageseries/imageseriesiter.py (100%)
 rename hexrd/{ => core}/imageseries/load/__init__.py (100%)
 rename hexrd/{ => core}/imageseries/load/array.py (100%)
 rename hexrd/{ => core}/imageseries/load/eiger_stream_v1.py (100%)
 rename hexrd/{ => core}/imageseries/load/framecache.py (100%)
 rename hexrd/{ => core}/imageseries/load/function.py (100%)
 rename hexrd/{ => core}/imageseries/load/hdf5.py (100%)
 rename hexrd/{ => core}/imageseries/load/imagefiles.py (100%)
 rename hexrd/{ => core}/imageseries/load/metadata.py (100%)
 rename hexrd/{ => core}/imageseries/load/rawimage.py (100%)
 rename hexrd/{ => core}/imageseries/load/registry.py (100%)
 rename hexrd/{ => core}/imageseries/load/trivial.py (100%)
 rename hexrd/{ => core}/imageseries/omega.py (100%)
 rename hexrd/{ => core}/imageseries/process.py (100%)
 rename hexrd/{ => core}/imageseries/save.py (100%)
 rename hexrd/{ => core}/imageseries/stats.py (100%)
 rename hexrd/{ => core}/imageutil.py (100%)
 rename hexrd/{ => core}/instrument/__init__.py (100%)
 rename hexrd/{ => core}/instrument/constants.py (100%)
 rename hexrd/{ => core}/instrument/cylindrical_detector.py (100%)
 rename hexrd/{ => core}/instrument/detector.py (100%)
 rename hexrd/{ => core}/instrument/detector_coatings.py (100%)
 rename hexrd/{ => core}/instrument/hedm_instrument.py (100%)
 rename hexrd/{ => core}/instrument/physics_package.py (100%)
 rename hexrd/{ => core}/instrument/planar_detector.py (100%)
 rename hexrd/{ => core}/material/__init__.py (100%)
 rename hexrd/{ => core}/material/crystallography.py (100%)
 rename hexrd/{ => core}/material/jcpds.py (100%)
 rename hexrd/{ => core}/material/material.py (100%)
 rename hexrd/{ => core}/material/mksupport.py (100%)
 rename hexrd/{ => core}/material/spacegroup.py (100%)
 rename hexrd/{ => core}/material/symbols.py (100%)
 rename hexrd/{ => core}/material/symmetry.py (100%)
 rename hexrd/{ => core}/material/unitcell.py (100%)
 rename hexrd/{ => core}/material/utils.py (100%)
 rename hexrd/{ => core}/matrixutil.py (100%)
 rename hexrd/{ipfcolor => core/projections}/__init__.py (100%)
 rename hexrd/{ => core}/projections/polar.py (100%)
 rename hexrd/{ => core}/projections/spherical.py (100%)
 rename hexrd/{ => core}/resources/Anomalous.h5 (100%)
 rename hexrd/{ => core}/resources/BBXRD_IMAGE-PLATE-BACK_bnd.txt (100%)
 rename hexrd/{ => core}/resources/BBXRD_IMAGE-PLATE-BOTTOM_bnd.txt (100%)
 rename hexrd/{ => core}/resources/BBXRD_IMAGE-PLATE-LEFT_bnd.txt (100%)
 rename hexrd/{ => core}/resources/BBXRD_IMAGE-PLATE-RIGHT_bnd.txt (100%)
 rename hexrd/{ => core}/resources/BBXRD_IMAGE-PLATE-TOP_bnd.txt (100%)
 rename hexrd/{ => core}/resources/FIDDLE_IMAGE-PLATE-1_bnd.txt (100%)
 rename hexrd/{ => core}/resources/PXRDIP_IMAGE-PLATE-B_bnd.txt (100%)
 rename hexrd/{ => core}/resources/PXRDIP_IMAGE-PLATE-D_bnd.txt (100%)
 rename hexrd/{ => core}/resources/PXRDIP_IMAGE-PLATE-L_bnd.txt (100%)
 rename hexrd/{ => core}/resources/PXRDIP_IMAGE-PLATE-R_bnd.txt (100%)
 rename hexrd/{ => core}/resources/PXRDIP_IMAGE-PLATE-U_bnd.txt (100%)
 rename hexrd/{ => core}/resources/TARDIS_IMAGE-PLATE-2_bnd.txt (100%)
 rename hexrd/{ => core}/resources/TARDIS_IMAGE-PLATE-3_bnd.txt (100%)
 rename hexrd/{ => core}/resources/TARDIS_IMAGE-PLATE-3_bnd_cropped.txt (100%)
 rename hexrd/{ => core}/resources/TARDIS_IMAGE-PLATE-4_bnd.txt (100%)
 rename hexrd/{preprocess => core/resources}/__init__.py (100%)
 rename hexrd/{ => core}/resources/characteristic_xray_energies.h5 (100%)
 rename hexrd/{ => core}/resources/detector_templates/GE-detector.yml (100%)
 rename hexrd/{ => core}/resources/detector_templates/Hydra_Feb19.yml (100%)
 rename hexrd/{ => core}/resources/detector_templates/Pilatus3X_2M-detector.yml (100%)
 rename hexrd/{ => core}/resources/detector_templates/Pixirad2-detector.yml (100%)
 rename hexrd/{ => core}/resources/detector_templates/Varex_4343CT-detector.yml (100%)
 rename hexrd/{projections => core/resources/detector_templates}/__init__.py (100%)
 rename hexrd/{ => core}/resources/detector_templates/dexela-2923-detector-subpanel.yml (100%)
 rename hexrd/{ => core}/resources/detector_templates/dexela-2923-detector.yml (100%)
 rename hexrd/{ => core}/resources/fiddle_reference_config.yml (100%)
 rename hexrd/{resources => core/resources/instrument_templates}/__init__.py (100%)
 rename hexrd/{ => core}/resources/instrument_templates/dcs.yml (100%)
 rename hexrd/{ => core}/resources/instrument_templates/dual_dexelas.yml (100%)
 rename hexrd/{ => core}/resources/instrument_templates/rigaku.hexrd (100%)
 rename hexrd/{ => core}/resources/instrument_templates/varex.yml (100%)
 rename hexrd/{ => core}/resources/mu_en.h5 (100%)
 rename hexrd/{ => core}/resources/pinhole_materials.h5 (100%)
 rename hexrd/{ => core}/resources/pxrdip_reference_config.yml (100%)
 mode change 100755 => 100644
 rename hexrd/{ => core}/resources/surface_harmonics.h5 (100%)
 rename hexrd/{ => core}/resources/tardis_2xrs_reference_config.yml (100%)
 rename hexrd/{ => core}/resources/tardis_reference_config.yml (100%)
 rename hexrd/{ => core}/resources/window_materials.h5 (100%)
 rename hexrd/{ => core}/rotations.py (100%)
 rename hexrd/{ => core}/transforms/Makefile (100%)
 rename hexrd/{ => core}/transforms/__init__.py (100%)
 rename hexrd/{ => core}/transforms/cpp_sublibrary/Makefile (100%)
 rename hexrd/{ => core}/transforms/cpp_sublibrary/src/inverse_distortion.cpp (100%)
 rename hexrd/{ => core}/transforms/cpp_sublibrary/src/transforms.cpp (100%)
 rename hexrd/{ => core}/transforms/debug_helpers.h (100%)
 rename hexrd/{ => core}/transforms/new_capi/README.md (100%)
 rename hexrd/{ => core}/transforms/new_capi/angles_to_dvec.c (100%)
 rename hexrd/{ => core}/transforms/new_capi/angles_to_gvec.c (100%)
 rename hexrd/{ => core}/transforms/new_capi/gvec_to_xy.c (100%)
 rename hexrd/{ => core}/transforms/new_capi/make_beam_rmat.c (100%)
 rename hexrd/{ => core}/transforms/new_capi/make_binary_rmat.c (100%)
 rename hexrd/{ => core}/transforms/new_capi/make_detector_rmat.c (100%)
 rename hexrd/{ => core}/transforms/new_capi/make_rmat_of_expmap.c (100%)
 rename hexrd/{ => core}/transforms/new_capi/make_sample_rmat.c (100%)
 rename hexrd/{ => core}/transforms/new_capi/module.c (100%)
 rename hexrd/{ => core}/transforms/new_capi/ndargs_helper.c (100%)
 rename hexrd/{ => core}/transforms/new_capi/ndargs_helper.h (100%)
 rename hexrd/{ => core}/transforms/new_capi/new_func.c (100%)
 rename hexrd/{ => core}/transforms/new_capi/oscill_angles_of_HKLs.c (100%)
 rename hexrd/{ => core}/transforms/new_capi/quat_distance.c (100%)
 rename hexrd/{ => core}/transforms/new_capi/reference.py (100%)
 rename hexrd/{ => core}/transforms/new_capi/rotate_vecs_about_axis.c (100%)
 rename hexrd/{ => core}/transforms/new_capi/transforms_prototypes.h (100%)
 rename hexrd/{ => core}/transforms/new_capi/transforms_types.h (100%)
 rename hexrd/{ => core}/transforms/new_capi/transforms_utils.h (100%)
 rename hexrd/{ => core}/transforms/new_capi/unit_row_vector.c (100%)
 rename hexrd/{ => core}/transforms/new_capi/validate_angle_ranges.c (100%)
 rename hexrd/{ => core}/transforms/new_capi/xf_new_capi.py (100%)
 rename hexrd/{ => core}/transforms/new_capi/xy_to_gvec.c (100%)
 rename hexrd/{ => core}/transforms/old_xfcapi.py (100%)
 rename hexrd/{ => core}/transforms/stdbool.h (100%)
 rename hexrd/{ => core}/transforms/transforms_CAPI.c (100%)
 rename hexrd/{ => core}/transforms/transforms_CAPI.h (100%)
 rename hexrd/{ => core}/transforms/transforms_CFUNC.c (100%)
 rename hexrd/{ => core}/transforms/transforms_CFUNC.h (100%)
 rename hexrd/{ => core}/transforms/xf.py (100%)
 rename hexrd/{ => core}/transforms/xfcapi.py (100%)
 rename hexrd/{ => core}/utils/__init__.py (100%)
 rename hexrd/{ => core}/utils/compatibility.py (100%)
 rename hexrd/{ => core}/utils/concurrent.py (100%)
 rename hexrd/{ => core}/utils/decorators.py (100%)
 rename hexrd/{ => core}/utils/hdf5.py (100%)
 rename hexrd/{ => core}/utils/hkl.py (100%)
 rename hexrd/{ => core}/utils/json.py (100%)
 rename hexrd/{ => core}/utils/multiprocess_generic.py (100%)
 rename hexrd/{ => core}/utils/panel_buffer.py (100%)
 rename hexrd/{ => core}/utils/profiler.py (100%)
 rename hexrd/{ => core}/utils/progressbar.py (100%)
 rename hexrd/{ => core}/utils/warnings.py (100%)
 rename hexrd/{ => core}/utils/yaml.py (100%)
 rename hexrd/{ => core}/valunits.py (100%)
 create mode 100644 hexrd/hed/instrument/detector.py
 create mode 100644 hexrd/hed/instrument/hedm_instrument.py
 rename hexrd/{ => hed}/xrdutil/phutil.py (100%)
 rename hexrd/{ => hed}/xrdutil/utils.py (100%)
 rename hexrd/{ => hedm}/cli/__init__.py (100%)
 rename hexrd/{ => hedm}/cli/documentation.py (100%)
 rename hexrd/{ => hedm}/cli/find_orientations.py (100%)
 rename hexrd/{ => hedm}/cli/fit_grains.py (100%)
 rename hexrd/{ => hedm}/cli/help.py (100%)
 rename hexrd/{ => hedm}/cli/main.py (100%)
 rename hexrd/{ => hedm}/cli/pickle23.py (100%)
 rename hexrd/{ => hedm}/cli/preprocess.py (100%)
 rename hexrd/{ => hedm}/cli/test.py (100%)
 create mode 100644 hexrd/hedm/config/__init__.py
 create mode 100644 hexrd/hedm/config/dumper.py
 rename hexrd/{ => hedm}/config/findorientations.py (100%)
 rename hexrd/{ => hedm}/config/fitgrains.py (100%)
 create mode 100644 hexrd/hedm/config/instrument.py
 create mode 100644 hexrd/hedm/config/loader.py
 create mode 100644 hexrd/hedm/config/root.py
 create mode 100644 hexrd/hedm/config/utils.py
 rename hexrd/{ => hedm}/findorientations.py (100%)
 mode change 100755 => 100644
 rename hexrd/{ => hedm}/fitgrains.py (100%)
 rename hexrd/{ => hedm}/fitting/grains.py (100%)
 rename hexrd/{ => hedm}/grainmap/__init__.py (100%)
 rename hexrd/{ => hedm}/grainmap/nfutil.py (100%)
 rename hexrd/{ => hedm}/grainmap/tomoutil.py (100%)
 rename hexrd/{ => hedm}/grainmap/vtkutil.py (100%)
 rename hexrd/{ => hedm}/indexer.py (100%)
 create mode 100644 hexrd/hedm/instrument/detector.py
 create mode 100644 hexrd/hedm/instrument/hedm_instrument.py
 create mode 100644 hexrd/hedm/instrument/physics_package.py
 rename hexrd/{resources/detector_templates => hedm/ipfcolor}/__init__.py (100%)
 rename hexrd/{ => hedm}/ipfcolor/colorspace.py (100%)
 rename hexrd/{ => hedm}/ipfcolor/sphere_sector.py (100%)
 create mode 100644 hexrd/hedm/material/crystallography.py
 create mode 100644 hexrd/hedm/material/unitcell.py
 rename hexrd/{resources/instrument_templates => hedm/preprocess}/__init__.py (100%)
 rename hexrd/{ => hedm}/preprocess/argument_classes_factory.py (100%)
 rename hexrd/{ => hedm}/preprocess/preprocessors.py (100%)
 rename hexrd/{ => hedm}/preprocess/profiles.py (100%)
 rename hexrd/{ => hedm}/preprocess/yaml_internals.py (100%)
 rename hexrd/{ => hedm}/sampleOrientations/__init__.py (100%)
 rename hexrd/{ => hedm}/sampleOrientations/conversions.py (100%)
 rename hexrd/{ => hedm}/sampleOrientations/rfz.py (100%)
 rename hexrd/{ => hedm}/sampleOrientations/sampleRFZ.py (100%)
 rename hexrd/{ => hedm}/xrdutil/__init__.py (100%)
 create mode 100644 hexrd/hedm/xrdutil/utils.py
 rename hexrd/{ => laue}/fitting/calibration/laue.py (100%)
 create mode 100644 hexrd/laue/instrument/detector.py
 create mode 100644 hexrd/laue/instrument/hedm_instrument.py
 create mode 100644 hexrd/laue/material/crystallography.py
 create mode 100644 hexrd/laue/xrdutil/utils.py
 rename hexrd/{ => powder}/fitting/calibration/calibrator.py (100%)
 rename hexrd/{ => powder}/fitting/calibration/instrument.py (100%)
 rename hexrd/{ => powder}/fitting/calibration/lmfit_param_handling.py (100%)
 rename hexrd/{ => powder}/fitting/calibration/powder.py (100%)
 rename hexrd/{ => powder}/fitting/calibration/structureless.py (100%)
 create mode 100644 hexrd/powder/instrument/detector.py
 create mode 100644 hexrd/powder/instrument/hedm_instrument.py
 create mode 100644 hexrd/powder/material/crystallography.py
 rename hexrd/{ => powder}/wppf/LeBailCalibration.py (100%)
 rename hexrd/{ => powder}/wppf/RietveldHEDM.py (100%)
 rename hexrd/{ => powder}/wppf/WPPF.py (100%)
 rename hexrd/{ => powder}/wppf/__init__.py (100%)
 rename hexrd/{ => powder}/wppf/derivatives.py (100%)
 rename hexrd/{ => powder}/wppf/parameters.py (100%)
 rename hexrd/{ => powder}/wppf/peakfunctions.py (100%)
 rename hexrd/{ => powder}/wppf/phase.py (100%)
 rename hexrd/{ => powder}/wppf/spectrum.py (100%)
 rename hexrd/{ => powder}/wppf/texture.py (100%)
 rename hexrd/{ => powder}/wppf/wppfsupport.py (100%)
 rename hexrd/{ => powder}/wppf/xtal.py (100%)

diff --git a/hexrd/config/__init__.py b/hexrd/core/config/__init__.py
similarity index 100%
rename from hexrd/config/__init__.py
rename to hexrd/core/config/__init__.py
diff --git a/hexrd/config/beam.py b/hexrd/core/config/beam.py
similarity index 100%
rename from hexrd/config/beam.py
rename to hexrd/core/config/beam.py
diff --git a/hexrd/config/config.py b/hexrd/core/config/config.py
similarity index 100%
rename from hexrd/config/config.py
rename to hexrd/core/config/config.py
diff --git a/hexrd/config/dumper.py b/hexrd/core/config/dumper.py
similarity index 100%
rename from hexrd/config/dumper.py
rename to hexrd/core/config/dumper.py
diff --git a/hexrd/config/imageseries.py b/hexrd/core/config/imageseries.py
similarity index 100%
rename from hexrd/config/imageseries.py
rename to hexrd/core/config/imageseries.py
diff --git a/hexrd/config/instrument.py b/hexrd/core/config/instrument.py
similarity index 100%
rename from hexrd/config/instrument.py
rename to hexrd/core/config/instrument.py
diff --git a/hexrd/config/loader.py b/hexrd/core/config/loader.py
similarity index 100%
rename from hexrd/config/loader.py
rename to hexrd/core/config/loader.py
diff --git a/hexrd/config/material.py b/hexrd/core/config/material.py
similarity index 100%
rename from hexrd/config/material.py
rename to hexrd/core/config/material.py
diff --git a/hexrd/config/root.py b/hexrd/core/config/root.py
similarity index 100%
rename from hexrd/config/root.py
rename to hexrd/core/config/root.py
diff --git a/hexrd/config/utils.py b/hexrd/core/config/utils.py
similarity index 100%
rename from hexrd/config/utils.py
rename to hexrd/core/config/utils.py
diff --git a/hexrd/constants.py b/hexrd/core/constants.py
similarity index 100%
rename from hexrd/constants.py
rename to hexrd/core/constants.py
diff --git a/hexrd/convolution/__init__.py b/hexrd/core/convolution/__init__.py
similarity index 100%
rename from hexrd/convolution/__init__.py
rename to hexrd/core/convolution/__init__.py
diff --git a/hexrd/convolution/convolve.py b/hexrd/core/convolution/convolve.py
similarity index 100%
rename from hexrd/convolution/convolve.py
rename to hexrd/core/convolution/convolve.py
diff --git a/hexrd/convolution/src/convolve.c b/hexrd/core/convolution/src/convolve.c
similarity index 100%
rename from hexrd/convolution/src/convolve.c
rename to hexrd/core/convolution/src/convolve.c
diff --git a/hexrd/convolution/src/convolve.h b/hexrd/core/convolution/src/convolve.h
similarity index 100%
rename from hexrd/convolution/src/convolve.h
rename to hexrd/core/convolution/src/convolve.h
diff --git a/hexrd/convolution/utils.py b/hexrd/core/convolution/utils.py
similarity index 100%
rename from hexrd/convolution/utils.py
rename to hexrd/core/convolution/utils.py
diff --git a/hexrd/deprecation.py b/hexrd/core/deprecation.py
similarity index 100%
rename from hexrd/deprecation.py
rename to hexrd/core/deprecation.py
diff --git a/hexrd/distortion/__init__.py b/hexrd/core/distortion/__init__.py
similarity index 100%
rename from hexrd/distortion/__init__.py
rename to hexrd/core/distortion/__init__.py
diff --git a/hexrd/distortion/dexela_2923.py b/hexrd/core/distortion/dexela_2923.py
similarity index 100%
rename from hexrd/distortion/dexela_2923.py
rename to hexrd/core/distortion/dexela_2923.py
diff --git a/hexrd/distortion/dexela_2923_quad.py b/hexrd/core/distortion/dexela_2923_quad.py
similarity index 100%
rename from hexrd/distortion/dexela_2923_quad.py
rename to hexrd/core/distortion/dexela_2923_quad.py
diff --git a/hexrd/distortion/distortionabc.py b/hexrd/core/distortion/distortionabc.py
similarity index 100%
rename from hexrd/distortion/distortionabc.py
rename to hexrd/core/distortion/distortionabc.py
diff --git a/hexrd/distortion/ge_41rt.py b/hexrd/core/distortion/ge_41rt.py
similarity index 100%
rename from hexrd/distortion/ge_41rt.py
rename to hexrd/core/distortion/ge_41rt.py
diff --git a/hexrd/distortion/identity.py b/hexrd/core/distortion/identity.py
similarity index 100%
rename from hexrd/distortion/identity.py
rename to hexrd/core/distortion/identity.py
diff --git a/hexrd/distortion/nyi.py b/hexrd/core/distortion/nyi.py
similarity index 100%
rename from hexrd/distortion/nyi.py
rename to hexrd/core/distortion/nyi.py
diff --git a/hexrd/distortion/registry.py b/hexrd/core/distortion/registry.py
similarity index 100%
rename from hexrd/distortion/registry.py
rename to hexrd/core/distortion/registry.py
diff --git a/hexrd/distortion/utils.py b/hexrd/core/distortion/utils.py
old mode 100755
new mode 100644
similarity index 100%
rename from hexrd/distortion/utils.py
rename to hexrd/core/distortion/utils.py
diff --git a/hexrd/extensions/__init__.py b/hexrd/core/extensions/__init__.py
similarity index 100%
rename from hexrd/extensions/__init__.py
rename to hexrd/core/extensions/__init__.py
diff --git a/hexrd/fitting/__init__.py b/hexrd/core/fitting/__init__.py
similarity index 100%
rename from hexrd/fitting/__init__.py
rename to hexrd/core/fitting/__init__.py
diff --git a/hexrd/fitting/calibration/__init__.py b/hexrd/core/fitting/calibration/__init__.py
similarity index 100%
rename from hexrd/fitting/calibration/__init__.py
rename to hexrd/core/fitting/calibration/__init__.py
diff --git a/hexrd/fitting/calibration/abstract_grain.py b/hexrd/core/fitting/calibration/abstract_grain.py
similarity index 100%
rename from hexrd/fitting/calibration/abstract_grain.py
rename to hexrd/core/fitting/calibration/abstract_grain.py
diff --git a/hexrd/fitting/calibration/grain.py b/hexrd/core/fitting/calibration/grain.py
similarity index 100%
rename from hexrd/fitting/calibration/grain.py
rename to hexrd/core/fitting/calibration/grain.py
diff --git a/hexrd/fitting/calibration/relative_constraints.py b/hexrd/core/fitting/calibration/relative_constraints.py
similarity index 100%
rename from hexrd/fitting/calibration/relative_constraints.py
rename to hexrd/core/fitting/calibration/relative_constraints.py
diff --git a/hexrd/fitting/fitpeak.py b/hexrd/core/fitting/fitpeak.py
similarity index 100%
rename from hexrd/fitting/fitpeak.py
rename to hexrd/core/fitting/fitpeak.py
diff --git a/hexrd/fitting/peakfunctions.py b/hexrd/core/fitting/peakfunctions.py
similarity index 100%
rename from hexrd/fitting/peakfunctions.py
rename to hexrd/core/fitting/peakfunctions.py
diff --git a/hexrd/fitting/spectrum.py b/hexrd/core/fitting/spectrum.py
similarity index 100%
rename from hexrd/fitting/spectrum.py
rename to hexrd/core/fitting/spectrum.py
diff --git a/hexrd/fitting/utils.py b/hexrd/core/fitting/utils.py
similarity index 100%
rename from hexrd/fitting/utils.py
rename to hexrd/core/fitting/utils.py
diff --git a/hexrd/gridutil.py b/hexrd/core/gridutil.py
similarity index 100%
rename from hexrd/gridutil.py
rename to hexrd/core/gridutil.py
diff --git a/hexrd/imageseries/__init__.py b/hexrd/core/imageseries/__init__.py
similarity index 100%
rename from hexrd/imageseries/__init__.py
rename to hexrd/core/imageseries/__init__.py
diff --git a/hexrd/imageseries/baseclass.py b/hexrd/core/imageseries/baseclass.py
similarity index 100%
rename from hexrd/imageseries/baseclass.py
rename to hexrd/core/imageseries/baseclass.py
diff --git a/hexrd/imageseries/imageseriesabc.py b/hexrd/core/imageseries/imageseriesabc.py
similarity index 100%
rename from hexrd/imageseries/imageseriesabc.py
rename to hexrd/core/imageseries/imageseriesabc.py
diff --git a/hexrd/imageseries/imageseriesiter.py b/hexrd/core/imageseries/imageseriesiter.py
similarity index 100%
rename from hexrd/imageseries/imageseriesiter.py
rename to hexrd/core/imageseries/imageseriesiter.py
diff --git a/hexrd/imageseries/load/__init__.py b/hexrd/core/imageseries/load/__init__.py
similarity index 100%
rename from hexrd/imageseries/load/__init__.py
rename to hexrd/core/imageseries/load/__init__.py
diff --git a/hexrd/imageseries/load/array.py b/hexrd/core/imageseries/load/array.py
similarity index 100%
rename from hexrd/imageseries/load/array.py
rename to hexrd/core/imageseries/load/array.py
diff --git a/hexrd/imageseries/load/eiger_stream_v1.py b/hexrd/core/imageseries/load/eiger_stream_v1.py
similarity index 100%
rename from hexrd/imageseries/load/eiger_stream_v1.py
rename to hexrd/core/imageseries/load/eiger_stream_v1.py
diff --git a/hexrd/imageseries/load/framecache.py b/hexrd/core/imageseries/load/framecache.py
similarity index 100%
rename from hexrd/imageseries/load/framecache.py
rename to hexrd/core/imageseries/load/framecache.py
diff --git a/hexrd/imageseries/load/function.py b/hexrd/core/imageseries/load/function.py
similarity index 100%
rename from hexrd/imageseries/load/function.py
rename to hexrd/core/imageseries/load/function.py
diff --git a/hexrd/imageseries/load/hdf5.py b/hexrd/core/imageseries/load/hdf5.py
similarity index 100%
rename from hexrd/imageseries/load/hdf5.py
rename to hexrd/core/imageseries/load/hdf5.py
diff --git a/hexrd/imageseries/load/imagefiles.py b/hexrd/core/imageseries/load/imagefiles.py
similarity index 100%
rename from hexrd/imageseries/load/imagefiles.py
rename to hexrd/core/imageseries/load/imagefiles.py
diff --git a/hexrd/imageseries/load/metadata.py b/hexrd/core/imageseries/load/metadata.py
similarity index 100%
rename from hexrd/imageseries/load/metadata.py
rename to hexrd/core/imageseries/load/metadata.py
diff --git a/hexrd/imageseries/load/rawimage.py b/hexrd/core/imageseries/load/rawimage.py
similarity index 100%
rename from hexrd/imageseries/load/rawimage.py
rename to hexrd/core/imageseries/load/rawimage.py
diff --git a/hexrd/imageseries/load/registry.py b/hexrd/core/imageseries/load/registry.py
similarity index 100%
rename from hexrd/imageseries/load/registry.py
rename to hexrd/core/imageseries/load/registry.py
diff --git a/hexrd/imageseries/load/trivial.py b/hexrd/core/imageseries/load/trivial.py
similarity index 100%
rename from hexrd/imageseries/load/trivial.py
rename to hexrd/core/imageseries/load/trivial.py
diff --git a/hexrd/imageseries/omega.py b/hexrd/core/imageseries/omega.py
similarity index 100%
rename from hexrd/imageseries/omega.py
rename to hexrd/core/imageseries/omega.py
diff --git a/hexrd/imageseries/process.py b/hexrd/core/imageseries/process.py
similarity index 100%
rename from hexrd/imageseries/process.py
rename to hexrd/core/imageseries/process.py
diff --git a/hexrd/imageseries/save.py b/hexrd/core/imageseries/save.py
similarity index 100%
rename from hexrd/imageseries/save.py
rename to hexrd/core/imageseries/save.py
diff --git a/hexrd/imageseries/stats.py b/hexrd/core/imageseries/stats.py
similarity index 100%
rename from hexrd/imageseries/stats.py
rename to hexrd/core/imageseries/stats.py
diff --git a/hexrd/imageutil.py b/hexrd/core/imageutil.py
similarity index 100%
rename from hexrd/imageutil.py
rename to hexrd/core/imageutil.py
diff --git a/hexrd/instrument/__init__.py b/hexrd/core/instrument/__init__.py
similarity index 100%
rename from hexrd/instrument/__init__.py
rename to hexrd/core/instrument/__init__.py
diff --git a/hexrd/instrument/constants.py b/hexrd/core/instrument/constants.py
similarity index 100%
rename from hexrd/instrument/constants.py
rename to hexrd/core/instrument/constants.py
diff --git a/hexrd/instrument/cylindrical_detector.py b/hexrd/core/instrument/cylindrical_detector.py
similarity index 100%
rename from hexrd/instrument/cylindrical_detector.py
rename to hexrd/core/instrument/cylindrical_detector.py
diff --git a/hexrd/instrument/detector.py b/hexrd/core/instrument/detector.py
similarity index 100%
rename from hexrd/instrument/detector.py
rename to hexrd/core/instrument/detector.py
diff --git a/hexrd/instrument/detector_coatings.py b/hexrd/core/instrument/detector_coatings.py
similarity index 100%
rename from hexrd/instrument/detector_coatings.py
rename to hexrd/core/instrument/detector_coatings.py
diff --git a/hexrd/instrument/hedm_instrument.py b/hexrd/core/instrument/hedm_instrument.py
similarity index 100%
rename from hexrd/instrument/hedm_instrument.py
rename to hexrd/core/instrument/hedm_instrument.py
diff --git a/hexrd/instrument/physics_package.py b/hexrd/core/instrument/physics_package.py
similarity index 100%
rename from hexrd/instrument/physics_package.py
rename to hexrd/core/instrument/physics_package.py
diff --git a/hexrd/instrument/planar_detector.py b/hexrd/core/instrument/planar_detector.py
similarity index 100%
rename from hexrd/instrument/planar_detector.py
rename to hexrd/core/instrument/planar_detector.py
diff --git a/hexrd/material/__init__.py b/hexrd/core/material/__init__.py
similarity index 100%
rename from hexrd/material/__init__.py
rename to hexrd/core/material/__init__.py
diff --git a/hexrd/material/crystallography.py b/hexrd/core/material/crystallography.py
similarity index 100%
rename from hexrd/material/crystallography.py
rename to hexrd/core/material/crystallography.py
diff --git a/hexrd/material/jcpds.py b/hexrd/core/material/jcpds.py
similarity index 100%
rename from hexrd/material/jcpds.py
rename to hexrd/core/material/jcpds.py
diff --git a/hexrd/material/material.py b/hexrd/core/material/material.py
similarity index 100%
rename from hexrd/material/material.py
rename to hexrd/core/material/material.py
diff --git a/hexrd/material/mksupport.py b/hexrd/core/material/mksupport.py
similarity index 100%
rename from hexrd/material/mksupport.py
rename to hexrd/core/material/mksupport.py
diff --git a/hexrd/material/spacegroup.py b/hexrd/core/material/spacegroup.py
similarity index 100%
rename from hexrd/material/spacegroup.py
rename to hexrd/core/material/spacegroup.py
diff --git a/hexrd/material/symbols.py b/hexrd/core/material/symbols.py
similarity index 100%
rename from hexrd/material/symbols.py
rename to hexrd/core/material/symbols.py
diff --git a/hexrd/material/symmetry.py b/hexrd/core/material/symmetry.py
similarity index 100%
rename from hexrd/material/symmetry.py
rename to hexrd/core/material/symmetry.py
diff --git a/hexrd/material/unitcell.py b/hexrd/core/material/unitcell.py
similarity index 100%
rename from hexrd/material/unitcell.py
rename to hexrd/core/material/unitcell.py
diff --git a/hexrd/material/utils.py b/hexrd/core/material/utils.py
similarity index 100%
rename from hexrd/material/utils.py
rename to hexrd/core/material/utils.py
diff --git a/hexrd/matrixutil.py b/hexrd/core/matrixutil.py
similarity index 100%
rename from hexrd/matrixutil.py
rename to hexrd/core/matrixutil.py
diff --git a/hexrd/ipfcolor/__init__.py b/hexrd/core/projections/__init__.py
similarity index 100%
rename from hexrd/ipfcolor/__init__.py
rename to hexrd/core/projections/__init__.py
diff --git a/hexrd/projections/polar.py b/hexrd/core/projections/polar.py
similarity index 100%
rename from hexrd/projections/polar.py
rename to hexrd/core/projections/polar.py
diff --git a/hexrd/projections/spherical.py b/hexrd/core/projections/spherical.py
similarity index 100%
rename from hexrd/projections/spherical.py
rename to hexrd/core/projections/spherical.py
diff --git a/hexrd/resources/Anomalous.h5 b/hexrd/core/resources/Anomalous.h5
similarity index 100%
rename from hexrd/resources/Anomalous.h5
rename to hexrd/core/resources/Anomalous.h5
diff --git a/hexrd/resources/BBXRD_IMAGE-PLATE-BACK_bnd.txt b/hexrd/core/resources/BBXRD_IMAGE-PLATE-BACK_bnd.txt
similarity index 100%
rename from hexrd/resources/BBXRD_IMAGE-PLATE-BACK_bnd.txt
rename to hexrd/core/resources/BBXRD_IMAGE-PLATE-BACK_bnd.txt
diff --git a/hexrd/resources/BBXRD_IMAGE-PLATE-BOTTOM_bnd.txt b/hexrd/core/resources/BBXRD_IMAGE-PLATE-BOTTOM_bnd.txt
similarity index 100%
rename from hexrd/resources/BBXRD_IMAGE-PLATE-BOTTOM_bnd.txt
rename to hexrd/core/resources/BBXRD_IMAGE-PLATE-BOTTOM_bnd.txt
diff --git a/hexrd/resources/BBXRD_IMAGE-PLATE-LEFT_bnd.txt b/hexrd/core/resources/BBXRD_IMAGE-PLATE-LEFT_bnd.txt
similarity index 100%
rename from hexrd/resources/BBXRD_IMAGE-PLATE-LEFT_bnd.txt
rename to hexrd/core/resources/BBXRD_IMAGE-PLATE-LEFT_bnd.txt
diff --git a/hexrd/resources/BBXRD_IMAGE-PLATE-RIGHT_bnd.txt b/hexrd/core/resources/BBXRD_IMAGE-PLATE-RIGHT_bnd.txt
similarity index 100%
rename from hexrd/resources/BBXRD_IMAGE-PLATE-RIGHT_bnd.txt
rename to hexrd/core/resources/BBXRD_IMAGE-PLATE-RIGHT_bnd.txt
diff --git a/hexrd/resources/BBXRD_IMAGE-PLATE-TOP_bnd.txt b/hexrd/core/resources/BBXRD_IMAGE-PLATE-TOP_bnd.txt
similarity index 100%
rename from hexrd/resources/BBXRD_IMAGE-PLATE-TOP_bnd.txt
rename to hexrd/core/resources/BBXRD_IMAGE-PLATE-TOP_bnd.txt
diff --git a/hexrd/resources/FIDDLE_IMAGE-PLATE-1_bnd.txt b/hexrd/core/resources/FIDDLE_IMAGE-PLATE-1_bnd.txt
similarity index 100%
rename from hexrd/resources/FIDDLE_IMAGE-PLATE-1_bnd.txt
rename to hexrd/core/resources/FIDDLE_IMAGE-PLATE-1_bnd.txt
diff --git a/hexrd/resources/PXRDIP_IMAGE-PLATE-B_bnd.txt b/hexrd/core/resources/PXRDIP_IMAGE-PLATE-B_bnd.txt
similarity index 100%
rename from hexrd/resources/PXRDIP_IMAGE-PLATE-B_bnd.txt
rename to hexrd/core/resources/PXRDIP_IMAGE-PLATE-B_bnd.txt
diff --git a/hexrd/resources/PXRDIP_IMAGE-PLATE-D_bnd.txt b/hexrd/core/resources/PXRDIP_IMAGE-PLATE-D_bnd.txt
similarity index 100%
rename from hexrd/resources/PXRDIP_IMAGE-PLATE-D_bnd.txt
rename to hexrd/core/resources/PXRDIP_IMAGE-PLATE-D_bnd.txt
diff --git a/hexrd/resources/PXRDIP_IMAGE-PLATE-L_bnd.txt b/hexrd/core/resources/PXRDIP_IMAGE-PLATE-L_bnd.txt
similarity index 100%
rename from hexrd/resources/PXRDIP_IMAGE-PLATE-L_bnd.txt
rename to hexrd/core/resources/PXRDIP_IMAGE-PLATE-L_bnd.txt
diff --git a/hexrd/resources/PXRDIP_IMAGE-PLATE-R_bnd.txt b/hexrd/core/resources/PXRDIP_IMAGE-PLATE-R_bnd.txt
similarity index 100%
rename from hexrd/resources/PXRDIP_IMAGE-PLATE-R_bnd.txt
rename to hexrd/core/resources/PXRDIP_IMAGE-PLATE-R_bnd.txt
diff --git a/hexrd/resources/PXRDIP_IMAGE-PLATE-U_bnd.txt b/hexrd/core/resources/PXRDIP_IMAGE-PLATE-U_bnd.txt
similarity index 100%
rename from hexrd/resources/PXRDIP_IMAGE-PLATE-U_bnd.txt
rename to hexrd/core/resources/PXRDIP_IMAGE-PLATE-U_bnd.txt
diff --git a/hexrd/resources/TARDIS_IMAGE-PLATE-2_bnd.txt b/hexrd/core/resources/TARDIS_IMAGE-PLATE-2_bnd.txt
similarity index 100%
rename from hexrd/resources/TARDIS_IMAGE-PLATE-2_bnd.txt
rename to hexrd/core/resources/TARDIS_IMAGE-PLATE-2_bnd.txt
diff --git a/hexrd/resources/TARDIS_IMAGE-PLATE-3_bnd.txt b/hexrd/core/resources/TARDIS_IMAGE-PLATE-3_bnd.txt
similarity index 100%
rename from hexrd/resources/TARDIS_IMAGE-PLATE-3_bnd.txt
rename to hexrd/core/resources/TARDIS_IMAGE-PLATE-3_bnd.txt
diff --git a/hexrd/resources/TARDIS_IMAGE-PLATE-3_bnd_cropped.txt b/hexrd/core/resources/TARDIS_IMAGE-PLATE-3_bnd_cropped.txt
similarity index 100%
rename from hexrd/resources/TARDIS_IMAGE-PLATE-3_bnd_cropped.txt
rename to hexrd/core/resources/TARDIS_IMAGE-PLATE-3_bnd_cropped.txt
diff --git a/hexrd/resources/TARDIS_IMAGE-PLATE-4_bnd.txt b/hexrd/core/resources/TARDIS_IMAGE-PLATE-4_bnd.txt
similarity index 100%
rename from hexrd/resources/TARDIS_IMAGE-PLATE-4_bnd.txt
rename to hexrd/core/resources/TARDIS_IMAGE-PLATE-4_bnd.txt
diff --git a/hexrd/preprocess/__init__.py b/hexrd/core/resources/__init__.py
similarity index 100%
rename from hexrd/preprocess/__init__.py
rename to hexrd/core/resources/__init__.py
diff --git a/hexrd/resources/characteristic_xray_energies.h5 b/hexrd/core/resources/characteristic_xray_energies.h5
similarity index 100%
rename from hexrd/resources/characteristic_xray_energies.h5
rename to hexrd/core/resources/characteristic_xray_energies.h5
diff --git a/hexrd/resources/detector_templates/GE-detector.yml b/hexrd/core/resources/detector_templates/GE-detector.yml
similarity index 100%
rename from hexrd/resources/detector_templates/GE-detector.yml
rename to hexrd/core/resources/detector_templates/GE-detector.yml
diff --git a/hexrd/resources/detector_templates/Hydra_Feb19.yml b/hexrd/core/resources/detector_templates/Hydra_Feb19.yml
similarity index 100%
rename from hexrd/resources/detector_templates/Hydra_Feb19.yml
rename to hexrd/core/resources/detector_templates/Hydra_Feb19.yml
diff --git a/hexrd/resources/detector_templates/Pilatus3X_2M-detector.yml b/hexrd/core/resources/detector_templates/Pilatus3X_2M-detector.yml
similarity index 100%
rename from hexrd/resources/detector_templates/Pilatus3X_2M-detector.yml
rename to hexrd/core/resources/detector_templates/Pilatus3X_2M-detector.yml
diff --git a/hexrd/resources/detector_templates/Pixirad2-detector.yml b/hexrd/core/resources/detector_templates/Pixirad2-detector.yml
similarity index 100%
rename from hexrd/resources/detector_templates/Pixirad2-detector.yml
rename to hexrd/core/resources/detector_templates/Pixirad2-detector.yml
diff --git a/hexrd/resources/detector_templates/Varex_4343CT-detector.yml b/hexrd/core/resources/detector_templates/Varex_4343CT-detector.yml
similarity index 100%
rename from hexrd/resources/detector_templates/Varex_4343CT-detector.yml
rename to hexrd/core/resources/detector_templates/Varex_4343CT-detector.yml
diff --git a/hexrd/projections/__init__.py b/hexrd/core/resources/detector_templates/__init__.py
similarity index 100%
rename from hexrd/projections/__init__.py
rename to hexrd/core/resources/detector_templates/__init__.py
diff --git a/hexrd/resources/detector_templates/dexela-2923-detector-subpanel.yml b/hexrd/core/resources/detector_templates/dexela-2923-detector-subpanel.yml
similarity index 100%
rename from hexrd/resources/detector_templates/dexela-2923-detector-subpanel.yml
rename to hexrd/core/resources/detector_templates/dexela-2923-detector-subpanel.yml
diff --git a/hexrd/resources/detector_templates/dexela-2923-detector.yml b/hexrd/core/resources/detector_templates/dexela-2923-detector.yml
similarity index 100%
rename from hexrd/resources/detector_templates/dexela-2923-detector.yml
rename to hexrd/core/resources/detector_templates/dexela-2923-detector.yml
diff --git a/hexrd/resources/fiddle_reference_config.yml b/hexrd/core/resources/fiddle_reference_config.yml
similarity index 100%
rename from hexrd/resources/fiddle_reference_config.yml
rename to hexrd/core/resources/fiddle_reference_config.yml
diff --git a/hexrd/resources/__init__.py b/hexrd/core/resources/instrument_templates/__init__.py
similarity index 100%
rename from hexrd/resources/__init__.py
rename to hexrd/core/resources/instrument_templates/__init__.py
diff --git a/hexrd/resources/instrument_templates/dcs.yml b/hexrd/core/resources/instrument_templates/dcs.yml
similarity index 100%
rename from hexrd/resources/instrument_templates/dcs.yml
rename to hexrd/core/resources/instrument_templates/dcs.yml
diff --git a/hexrd/resources/instrument_templates/dual_dexelas.yml b/hexrd/core/resources/instrument_templates/dual_dexelas.yml
similarity index 100%
rename from hexrd/resources/instrument_templates/dual_dexelas.yml
rename to hexrd/core/resources/instrument_templates/dual_dexelas.yml
diff --git a/hexrd/resources/instrument_templates/rigaku.hexrd b/hexrd/core/resources/instrument_templates/rigaku.hexrd
similarity index 100%
rename from hexrd/resources/instrument_templates/rigaku.hexrd
rename to hexrd/core/resources/instrument_templates/rigaku.hexrd
diff --git a/hexrd/resources/instrument_templates/varex.yml b/hexrd/core/resources/instrument_templates/varex.yml
similarity index 100%
rename from hexrd/resources/instrument_templates/varex.yml
rename to hexrd/core/resources/instrument_templates/varex.yml
diff --git a/hexrd/resources/mu_en.h5 b/hexrd/core/resources/mu_en.h5
similarity index 100%
rename from hexrd/resources/mu_en.h5
rename to hexrd/core/resources/mu_en.h5
diff --git a/hexrd/resources/pinhole_materials.h5 b/hexrd/core/resources/pinhole_materials.h5
similarity index 100%
rename from hexrd/resources/pinhole_materials.h5
rename to hexrd/core/resources/pinhole_materials.h5
diff --git a/hexrd/resources/pxrdip_reference_config.yml b/hexrd/core/resources/pxrdip_reference_config.yml
old mode 100755
new mode 100644
similarity index 100%
rename from hexrd/resources/pxrdip_reference_config.yml
rename to hexrd/core/resources/pxrdip_reference_config.yml
diff --git a/hexrd/resources/surface_harmonics.h5 b/hexrd/core/resources/surface_harmonics.h5
similarity index 100%
rename from hexrd/resources/surface_harmonics.h5
rename to hexrd/core/resources/surface_harmonics.h5
diff --git a/hexrd/resources/tardis_2xrs_reference_config.yml b/hexrd/core/resources/tardis_2xrs_reference_config.yml
similarity index 100%
rename from hexrd/resources/tardis_2xrs_reference_config.yml
rename to hexrd/core/resources/tardis_2xrs_reference_config.yml
diff --git a/hexrd/resources/tardis_reference_config.yml b/hexrd/core/resources/tardis_reference_config.yml
similarity index 100%
rename from hexrd/resources/tardis_reference_config.yml
rename to hexrd/core/resources/tardis_reference_config.yml
diff --git a/hexrd/resources/window_materials.h5 b/hexrd/core/resources/window_materials.h5
similarity index 100%
rename from hexrd/resources/window_materials.h5
rename to hexrd/core/resources/window_materials.h5
diff --git a/hexrd/rotations.py b/hexrd/core/rotations.py
similarity index 100%
rename from hexrd/rotations.py
rename to hexrd/core/rotations.py
diff --git a/hexrd/transforms/Makefile b/hexrd/core/transforms/Makefile
similarity index 100%
rename from hexrd/transforms/Makefile
rename to hexrd/core/transforms/Makefile
diff --git a/hexrd/transforms/__init__.py b/hexrd/core/transforms/__init__.py
similarity index 100%
rename from hexrd/transforms/__init__.py
rename to hexrd/core/transforms/__init__.py
diff --git a/hexrd/transforms/cpp_sublibrary/Makefile b/hexrd/core/transforms/cpp_sublibrary/Makefile
similarity index 100%
rename from hexrd/transforms/cpp_sublibrary/Makefile
rename to hexrd/core/transforms/cpp_sublibrary/Makefile
diff --git a/hexrd/transforms/cpp_sublibrary/src/inverse_distortion.cpp b/hexrd/core/transforms/cpp_sublibrary/src/inverse_distortion.cpp
similarity index 100%
rename from hexrd/transforms/cpp_sublibrary/src/inverse_distortion.cpp
rename to hexrd/core/transforms/cpp_sublibrary/src/inverse_distortion.cpp
diff --git a/hexrd/transforms/cpp_sublibrary/src/transforms.cpp b/hexrd/core/transforms/cpp_sublibrary/src/transforms.cpp
similarity index 100%
rename from hexrd/transforms/cpp_sublibrary/src/transforms.cpp
rename to hexrd/core/transforms/cpp_sublibrary/src/transforms.cpp
diff --git a/hexrd/transforms/debug_helpers.h b/hexrd/core/transforms/debug_helpers.h
similarity index 100%
rename from hexrd/transforms/debug_helpers.h
rename to hexrd/core/transforms/debug_helpers.h
diff --git a/hexrd/transforms/new_capi/README.md b/hexrd/core/transforms/new_capi/README.md
similarity index 100%
rename from hexrd/transforms/new_capi/README.md
rename to hexrd/core/transforms/new_capi/README.md
diff --git a/hexrd/transforms/new_capi/angles_to_dvec.c b/hexrd/core/transforms/new_capi/angles_to_dvec.c
similarity index 100%
rename from hexrd/transforms/new_capi/angles_to_dvec.c
rename to hexrd/core/transforms/new_capi/angles_to_dvec.c
diff --git a/hexrd/transforms/new_capi/angles_to_gvec.c b/hexrd/core/transforms/new_capi/angles_to_gvec.c
similarity index 100%
rename from hexrd/transforms/new_capi/angles_to_gvec.c
rename to hexrd/core/transforms/new_capi/angles_to_gvec.c
diff --git a/hexrd/transforms/new_capi/gvec_to_xy.c b/hexrd/core/transforms/new_capi/gvec_to_xy.c
similarity index 100%
rename from hexrd/transforms/new_capi/gvec_to_xy.c
rename to hexrd/core/transforms/new_capi/gvec_to_xy.c
diff --git a/hexrd/transforms/new_capi/make_beam_rmat.c b/hexrd/core/transforms/new_capi/make_beam_rmat.c
similarity index 100%
rename from hexrd/transforms/new_capi/make_beam_rmat.c
rename to hexrd/core/transforms/new_capi/make_beam_rmat.c
diff --git a/hexrd/transforms/new_capi/make_binary_rmat.c b/hexrd/core/transforms/new_capi/make_binary_rmat.c
similarity index 100%
rename from hexrd/transforms/new_capi/make_binary_rmat.c
rename to hexrd/core/transforms/new_capi/make_binary_rmat.c
diff --git a/hexrd/transforms/new_capi/make_detector_rmat.c b/hexrd/core/transforms/new_capi/make_detector_rmat.c
similarity index 100%
rename from hexrd/transforms/new_capi/make_detector_rmat.c
rename to hexrd/core/transforms/new_capi/make_detector_rmat.c
diff --git a/hexrd/transforms/new_capi/make_rmat_of_expmap.c b/hexrd/core/transforms/new_capi/make_rmat_of_expmap.c
similarity index 100%
rename from hexrd/transforms/new_capi/make_rmat_of_expmap.c
rename to hexrd/core/transforms/new_capi/make_rmat_of_expmap.c
diff --git a/hexrd/transforms/new_capi/make_sample_rmat.c b/hexrd/core/transforms/new_capi/make_sample_rmat.c
similarity index 100%
rename from hexrd/transforms/new_capi/make_sample_rmat.c
rename to hexrd/core/transforms/new_capi/make_sample_rmat.c
diff --git a/hexrd/transforms/new_capi/module.c b/hexrd/core/transforms/new_capi/module.c
similarity index 100%
rename from hexrd/transforms/new_capi/module.c
rename to hexrd/core/transforms/new_capi/module.c
diff --git a/hexrd/transforms/new_capi/ndargs_helper.c b/hexrd/core/transforms/new_capi/ndargs_helper.c
similarity index 100%
rename from hexrd/transforms/new_capi/ndargs_helper.c
rename to hexrd/core/transforms/new_capi/ndargs_helper.c
diff --git a/hexrd/transforms/new_capi/ndargs_helper.h b/hexrd/core/transforms/new_capi/ndargs_helper.h
similarity index 100%
rename from hexrd/transforms/new_capi/ndargs_helper.h
rename to hexrd/core/transforms/new_capi/ndargs_helper.h
diff --git a/hexrd/transforms/new_capi/new_func.c b/hexrd/core/transforms/new_capi/new_func.c
similarity index 100%
rename from hexrd/transforms/new_capi/new_func.c
rename to hexrd/core/transforms/new_capi/new_func.c
diff --git a/hexrd/transforms/new_capi/oscill_angles_of_HKLs.c b/hexrd/core/transforms/new_capi/oscill_angles_of_HKLs.c
similarity index 100%
rename from hexrd/transforms/new_capi/oscill_angles_of_HKLs.c
rename to hexrd/core/transforms/new_capi/oscill_angles_of_HKLs.c
diff --git a/hexrd/transforms/new_capi/quat_distance.c b/hexrd/core/transforms/new_capi/quat_distance.c
similarity index 100%
rename from hexrd/transforms/new_capi/quat_distance.c
rename to hexrd/core/transforms/new_capi/quat_distance.c
diff --git a/hexrd/transforms/new_capi/reference.py b/hexrd/core/transforms/new_capi/reference.py
similarity index 100%
rename from hexrd/transforms/new_capi/reference.py
rename to hexrd/core/transforms/new_capi/reference.py
diff --git a/hexrd/transforms/new_capi/rotate_vecs_about_axis.c b/hexrd/core/transforms/new_capi/rotate_vecs_about_axis.c
similarity index 100%
rename from hexrd/transforms/new_capi/rotate_vecs_about_axis.c
rename to hexrd/core/transforms/new_capi/rotate_vecs_about_axis.c
diff --git a/hexrd/transforms/new_capi/transforms_prototypes.h b/hexrd/core/transforms/new_capi/transforms_prototypes.h
similarity index 100%
rename from hexrd/transforms/new_capi/transforms_prototypes.h
rename to hexrd/core/transforms/new_capi/transforms_prototypes.h
diff --git a/hexrd/transforms/new_capi/transforms_types.h b/hexrd/core/transforms/new_capi/transforms_types.h
similarity index 100%
rename from hexrd/transforms/new_capi/transforms_types.h
rename to hexrd/core/transforms/new_capi/transforms_types.h
diff --git a/hexrd/transforms/new_capi/transforms_utils.h b/hexrd/core/transforms/new_capi/transforms_utils.h
similarity index 100%
rename from hexrd/transforms/new_capi/transforms_utils.h
rename to hexrd/core/transforms/new_capi/transforms_utils.h
diff --git a/hexrd/transforms/new_capi/unit_row_vector.c b/hexrd/core/transforms/new_capi/unit_row_vector.c
similarity index 100%
rename from hexrd/transforms/new_capi/unit_row_vector.c
rename to hexrd/core/transforms/new_capi/unit_row_vector.c
diff --git a/hexrd/transforms/new_capi/validate_angle_ranges.c b/hexrd/core/transforms/new_capi/validate_angle_ranges.c
similarity index 100%
rename from hexrd/transforms/new_capi/validate_angle_ranges.c
rename to hexrd/core/transforms/new_capi/validate_angle_ranges.c
diff --git a/hexrd/transforms/new_capi/xf_new_capi.py b/hexrd/core/transforms/new_capi/xf_new_capi.py
similarity index 100%
rename from hexrd/transforms/new_capi/xf_new_capi.py
rename to hexrd/core/transforms/new_capi/xf_new_capi.py
diff --git a/hexrd/transforms/new_capi/xy_to_gvec.c b/hexrd/core/transforms/new_capi/xy_to_gvec.c
similarity index 100%
rename from hexrd/transforms/new_capi/xy_to_gvec.c
rename to hexrd/core/transforms/new_capi/xy_to_gvec.c
diff --git a/hexrd/transforms/old_xfcapi.py b/hexrd/core/transforms/old_xfcapi.py
similarity index 100%
rename from hexrd/transforms/old_xfcapi.py
rename to hexrd/core/transforms/old_xfcapi.py
diff --git a/hexrd/transforms/stdbool.h b/hexrd/core/transforms/stdbool.h
similarity index 100%
rename from hexrd/transforms/stdbool.h
rename to hexrd/core/transforms/stdbool.h
diff --git a/hexrd/transforms/transforms_CAPI.c b/hexrd/core/transforms/transforms_CAPI.c
similarity index 100%
rename from hexrd/transforms/transforms_CAPI.c
rename to hexrd/core/transforms/transforms_CAPI.c
diff --git a/hexrd/transforms/transforms_CAPI.h b/hexrd/core/transforms/transforms_CAPI.h
similarity index 100%
rename from hexrd/transforms/transforms_CAPI.h
rename to hexrd/core/transforms/transforms_CAPI.h
diff --git a/hexrd/transforms/transforms_CFUNC.c b/hexrd/core/transforms/transforms_CFUNC.c
similarity index 100%
rename from hexrd/transforms/transforms_CFUNC.c
rename to hexrd/core/transforms/transforms_CFUNC.c
diff --git a/hexrd/transforms/transforms_CFUNC.h b/hexrd/core/transforms/transforms_CFUNC.h
similarity index 100%
rename from hexrd/transforms/transforms_CFUNC.h
rename to hexrd/core/transforms/transforms_CFUNC.h
diff --git a/hexrd/transforms/xf.py b/hexrd/core/transforms/xf.py
similarity index 100%
rename from hexrd/transforms/xf.py
rename to hexrd/core/transforms/xf.py
diff --git a/hexrd/transforms/xfcapi.py b/hexrd/core/transforms/xfcapi.py
similarity index 100%
rename from hexrd/transforms/xfcapi.py
rename to hexrd/core/transforms/xfcapi.py
diff --git a/hexrd/utils/__init__.py b/hexrd/core/utils/__init__.py
similarity index 100%
rename from hexrd/utils/__init__.py
rename to hexrd/core/utils/__init__.py
diff --git a/hexrd/utils/compatibility.py b/hexrd/core/utils/compatibility.py
similarity index 100%
rename from hexrd/utils/compatibility.py
rename to hexrd/core/utils/compatibility.py
diff --git a/hexrd/utils/concurrent.py b/hexrd/core/utils/concurrent.py
similarity index 100%
rename from hexrd/utils/concurrent.py
rename to hexrd/core/utils/concurrent.py
diff --git a/hexrd/utils/decorators.py b/hexrd/core/utils/decorators.py
similarity index 100%
rename from hexrd/utils/decorators.py
rename to hexrd/core/utils/decorators.py
diff --git a/hexrd/utils/hdf5.py b/hexrd/core/utils/hdf5.py
similarity index 100%
rename from hexrd/utils/hdf5.py
rename to hexrd/core/utils/hdf5.py
diff --git a/hexrd/utils/hkl.py b/hexrd/core/utils/hkl.py
similarity index 100%
rename from hexrd/utils/hkl.py
rename to hexrd/core/utils/hkl.py
diff --git a/hexrd/utils/json.py b/hexrd/core/utils/json.py
similarity index 100%
rename from hexrd/utils/json.py
rename to hexrd/core/utils/json.py
diff --git a/hexrd/utils/multiprocess_generic.py b/hexrd/core/utils/multiprocess_generic.py
similarity index 100%
rename from hexrd/utils/multiprocess_generic.py
rename to hexrd/core/utils/multiprocess_generic.py
diff --git a/hexrd/utils/panel_buffer.py b/hexrd/core/utils/panel_buffer.py
similarity index 100%
rename from hexrd/utils/panel_buffer.py
rename to hexrd/core/utils/panel_buffer.py
diff --git a/hexrd/utils/profiler.py b/hexrd/core/utils/profiler.py
similarity index 100%
rename from hexrd/utils/profiler.py
rename to hexrd/core/utils/profiler.py
diff --git a/hexrd/utils/progressbar.py b/hexrd/core/utils/progressbar.py
similarity index 100%
rename from hexrd/utils/progressbar.py
rename to hexrd/core/utils/progressbar.py
diff --git a/hexrd/utils/warnings.py b/hexrd/core/utils/warnings.py
similarity index 100%
rename from hexrd/utils/warnings.py
rename to hexrd/core/utils/warnings.py
diff --git a/hexrd/utils/yaml.py b/hexrd/core/utils/yaml.py
similarity index 100%
rename from hexrd/utils/yaml.py
rename to hexrd/core/utils/yaml.py
diff --git a/hexrd/valunits.py b/hexrd/core/valunits.py
similarity index 100%
rename from hexrd/valunits.py
rename to hexrd/core/valunits.py
diff --git a/hexrd/hed/instrument/detector.py b/hexrd/hed/instrument/detector.py
new file mode 100644
index 000000000..db4f95d1a
--- /dev/null
+++ b/hexrd/hed/instrument/detector.py
@@ -0,0 +1,2086 @@
+from abc import abstractmethod
+import copy
+import os
+from typing import Optional
+
+from hexrd.instrument.constants import (
+    COATING_DEFAULT, FILTER_DEFAULTS, PHOSPHOR_DEFAULT
+)
+from hexrd.instrument.physics_package import AbstractPhysicsPackage
+import numpy as np
+import numba
+
+from hexrd import constants as ct
+from hexrd import distortion as distortion_pkg
+from hexrd import matrixutil as mutil
+from hexrd import xrdutil
+from hexrd.rotations import mapAngle
+
+from hexrd.material import crystallography
+from hexrd.material.crystallography import PlaneData
+
+from hexrd.transforms.xfcapi import (
+    xy_to_gvec,
+    gvec_to_xy,
+    make_beam_rmat,
+    make_rmat_of_expmap,
+    oscill_angles_of_hkls,
+    angles_to_dvec,
+)
+
+from hexrd.utils.decorators import memoize
+from hexrd.gridutil import cellIndices
+from hexrd.instrument import detector_coatings
+from hexrd.material.utils import (
+    calculate_linear_absorption_length,
+    calculate_incoherent_scattering)
+
+distortion_registry = distortion_pkg.Registry()
+
+max_workers_DFLT = max(1, os.cpu_count() - 1)
+
+beam_energy_DFLT = 65.351
+
+# Memoize these, so each detector can avoid re-computing if nothing
+# has changed.
+_lorentz_factor = memoize(crystallography.lorentz_factor)
+_polarization_factor = memoize(crystallography.polarization_factor)
+
+
+class Detector:
+    """
+    Base class for 2D detectors with functions and properties
+    common to planar and cylindrical detectors. This class
+    will be inherited by both those classes.
+    """
+
+    __pixelPitchUnit = 'mm'
+
+    # Abstract methods that must be redefined in derived classes
+    @property
+    @abstractmethod
+    def detector_type(self):
+        raise NotImplementedError
+
+    @abstractmethod
+    def cart_to_angles(
+        self,
+        xy_data,
+        rmat_s=None,
+        tvec_s=None,
+        tvec_c=None,
+        apply_distortion=False,
+    ):
+        """
+        Transform cartesian coordinates to angular.
+
+        Parameters
+        ----------
+        xy_data : TYPE
+            The (n, 2) array of n (x, y) coordinates to be transformed in
+            either the raw or ideal cartesian plane (see `apply_distortion`
+            kwarg below).
+        rmat_s : array_like, optional
+            The (3, 3) COB matrix for the sample frame. The default is None.
+ tvec_s : array_like, optional + The (3, ) translation vector for the sample frame. + The default is None. + tvec_c : array_like, optional + The (3, ) translation vector for the crystal frame. + The default is None. + apply_distortion : bool, optional + If True, apply distortion to the input cartesian coordinates. + The default is False. + + Returns + ------- + tth_eta : TYPE + DESCRIPTION. + g_vec : TYPE + DESCRIPTION. + + """ + raise NotImplementedError + + @abstractmethod + def angles_to_cart( + self, + tth_eta, + rmat_s=None, + tvec_s=None, + rmat_c=None, + tvec_c=None, + apply_distortion=False, + ): + """ + Transform angular coordinates to cartesian. + + Parameters + ---------- + tth_eta : array_like + The (n, 2) array of n (tth, eta) coordinates to be transformed. + rmat_s : array_like, optional + The (3, 3) COB matrix for the sample frame. The default is None. + tvec_s : array_like, optional + The (3, ) translation vector for the sample frame. + The default is None. + rmat_c : array_like, optional + (3, 3) COB matrix for the crystal frame. + The default is None. + tvec_c : array_like, optional + The (3, ) translation vector for the crystal frame. + The default is None. + apply_distortion : bool, optional + If True, apply distortion to take cartesian coordinates to the + "warped" configuration. The default is False. + + Returns + ------- + xy_det : array_like + The (n, 2) array of the n input coordinates in the cartesian + detector frame. + + """ + raise NotImplementedError + + @abstractmethod + def cart_to_dvecs(self, xy_data): + """Convert cartesian coordinates to dvectors""" + raise NotImplementedError + + @abstractmethod + def pixel_angles(self, origin=ct.zeros_3): + raise NotImplementedError + + @abstractmethod + def pixel_tth_gradient(self, origin=ct.zeros_3): + raise NotImplementedError + + @abstractmethod + def pixel_eta_gradient(self, origin=ct.zeros_3): + raise NotImplementedError + + @abstractmethod + def calc_filter_coating_transmission(self, energy): + pass + + @property + @abstractmethod + def beam_position(self): + """ + returns the coordinates of the beam in the cartesian detector + frame {Xd, Yd, Zd}. NaNs if no intersection. + """ + raise NotImplementedError + + @property + def extra_config_kwargs(self): + return {} + + # End of abstract methods + + def __init__( + self, + rows=2048, + cols=2048, + pixel_size=(0.2, 0.2), + tvec=np.r_[0.0, 0.0, -1000.0], + tilt=ct.zeros_3, + name='default', + bvec=ct.beam_vec, + xrs_dist=None, + evec=ct.eta_vec, + saturation_level=None, + panel_buffer=None, + tth_distortion=None, + roi=None, + group=None, + distortion=None, + max_workers=max_workers_DFLT, + detector_filter: Optional[detector_coatings.Filter] = None, + detector_coating: Optional[detector_coatings.Coating] = None, + phosphor: Optional[detector_coatings.Phosphor] = None, + ): + """ + Instantiate a detector object. + + Parameters + ---------- + rows : TYPE, optional + DESCRIPTION. The default is 2048. + cols : TYPE, optional + DESCRIPTION. The default is 2048. + pixel_size : TYPE, optional + DESCRIPTION. The default is (0.2, 0.2). + tvec : TYPE, optional + DESCRIPTION. The default is np.r_[0., 0., -1000.]. + tilt : TYPE, optional + DESCRIPTION. The default is ct.zeros_3. + name : TYPE, optional + DESCRIPTION. The default is 'default'. + bvec : TYPE, optional + DESCRIPTION. The default is ct.beam_vec. + evec : TYPE, optional + DESCRIPTION. The default is ct.eta_vec. + saturation_level : TYPE, optional + DESCRIPTION. The default is None.
+ panel_buffer : TYPE, optional + If a scalar or len(2) array_like, the interpretation is a border + in mm. If an array with shape (nrows, ncols), interpretation is a + boolean with True marking valid pixels. The default is None. + roi : TYPE, optional + DESCRIPTION. The default is None. + group : TYPE, optional + DESCRIPTION. The default is None. + distortion : TYPE, optional + DESCRIPTION. The default is None. + detector_filter : detector_coatings.Filter, optional + filter specifications including material type, + density and thickness. Used for absorption correction + calculations. + detector_coating : detector_coatings.Coating, optional + coating specifications including material type, + density and thickness. Used for absorption correction + calculations. + phosphor : detector_coatings.Phosphor, optional + phosphor specifications including material type, + density and thickness. Used for absorption correction + calculations. + + Returns + ------- + None. + + """ + self._name = name + + self._rows = rows + self._cols = cols + + self._pixel_size_row = pixel_size[0] + self._pixel_size_col = pixel_size[1] + + self._saturation_level = saturation_level + + self._panel_buffer = panel_buffer + + self._tth_distortion = tth_distortion + + if roi is None: + self._roi = roi + else: + assert len(roi) == 2, "roi is set via (start_row, start_col)" + self._roi = ( + (roi[0], roi[0] + self._rows), + (roi[1], roi[1] + self._cols), + ) + + self._tvec = np.array(tvec).flatten() + self._tilt = np.array(tilt).flatten() + + self._bvec = np.array(bvec).flatten() + self._xrs_dist = xrs_dist + + self._evec = np.array(evec).flatten() + + self._distortion = distortion + + self.max_workers = max_workers + + self.group = group + + if detector_filter is None: + detector_filter = detector_coatings.Filter( + **FILTER_DEFAULTS.TARDIS) + self.filter = detector_filter + + if detector_coating is None: + detector_coating = detector_coatings.Coating(**COATING_DEFAULT) + self.coating = detector_coating + + if phosphor is None: + phosphor = detector_coatings.Phosphor(**PHOSPHOR_DEFAULT) + self.phosphor = phosphor + + # detector ID + @property + def name(self): + return self._name + + @name.setter + def name(self, s): + assert isinstance(s, str), "requires string input" + self._name = s + + @property + def lmfit_name(self): + # lmfit requires underscores instead of dashes + return self.name.replace('-', '_') + + # properties for physical size of rectangular detector + @property + def rows(self): + return self._rows + + @rows.setter + def rows(self, x): + assert isinstance(x, int) + self._rows = x + + @property + def cols(self): + return self._cols + + @cols.setter + def cols(self, x): + assert isinstance(x, int) + self._cols = x + + @property + def pixel_size_row(self): + return self._pixel_size_row + + @pixel_size_row.setter + def pixel_size_row(self, x): + self._pixel_size_row = float(x) + + @property + def pixel_size_col(self): + return self._pixel_size_col + + @pixel_size_col.setter + def pixel_size_col(self, x): + self._pixel_size_col = float(x) + + @property + def pixel_area(self): + return self.pixel_size_row * self.pixel_size_col + + @property + def saturation_level(self): + return self._saturation_level + + @saturation_level.setter + def saturation_level(self, x): + if x is not None: + assert np.isreal(x) + self._saturation_level = x + + @property + def panel_buffer(self): + return self._panel_buffer + + @panel_buffer.setter + def panel_buffer(self, x): + """if not None, a buffer in mm (x, y)""" + if x is not None: + 
assert len(x) == 2 or x.ndim == 2 + self._panel_buffer = x + + @property + def tth_distortion(self): + return self._tth_distortion + + @tth_distortion.setter + def tth_distortion(self, x): + """if not None, a 2-d array whose shape matches the panel""" + if x is not None: + assert x.ndim == 2 and x.shape == self.shape + self._tth_distortion = x + + @property + def roi(self): + return self._roi + + @roi.setter + def roi(self, vertex_array): + """ + !!! vertex array must be (r0, c0) + """ + if vertex_array is not None: + assert ( + len(vertex_array) == 2 + ), "roi is set via (start_row, start_col)" + self._roi = ( + (vertex_array[0], vertex_array[0] + self.rows), + (vertex_array[1], vertex_array[1] + self.cols), + ) + + @property + def row_dim(self): + return self.rows * self.pixel_size_row + + @property + def col_dim(self): + return self.cols * self.pixel_size_col + + @property + def row_pixel_vec(self): + return self.pixel_size_row * ( + 0.5 * (self.rows - 1) - np.arange(self.rows) + ) + + @property + def row_edge_vec(self): + return _row_edge_vec(self.rows, self.pixel_size_row) + + @property + def col_pixel_vec(self): + return self.pixel_size_col * ( + np.arange(self.cols) - 0.5 * (self.cols - 1) + ) + + @property + def col_edge_vec(self): + return _col_edge_vec(self.cols, self.pixel_size_col) + + @property + def corner_ul(self): + return np.r_[-0.5 * self.col_dim, 0.5 * self.row_dim] + + @property + def corner_ll(self): + return np.r_[-0.5 * self.col_dim, -0.5 * self.row_dim] + + @property + def corner_lr(self): + return np.r_[0.5 * self.col_dim, -0.5 * self.row_dim] + + @property + def corner_ur(self): + return np.r_[0.5 * self.col_dim, 0.5 * self.row_dim] + + @property + def shape(self): + return (self.rows, self.cols) + + @property + def tvec(self): + return self._tvec + + @tvec.setter + def tvec(self, x): + x = np.array(x).flatten() + assert len(x) == 3, 'input must have length = 3' + self._tvec = x + + @property + def tilt(self): + return self._tilt + + @tilt.setter + def tilt(self, x): + assert len(x) == 3, 'input must have length = 3' + self._tilt = np.array(x).squeeze() + + @property + def bvec(self): + return self._bvec + + @bvec.setter + def bvec(self, x): + x = np.array(x).flatten() + assert ( + len(x) == 3 and sum(x * x) > 1 - ct.sqrt_epsf + ), 'input must have length = 3 and have unit magnitude' + self._bvec = x + + @property + def xrs_dist(self): + return self._xrs_dist + + @xrs_dist.setter + def xrs_dist(self, x): + assert x is None or np.isscalar( + x + ), f"'source_distance' must be None or scalar; you input '{x}'" + self._xrs_dist = x + + @property + def evec(self): + return self._evec + + @evec.setter + def evec(self, x): + x = np.array(x).flatten() + assert ( + len(x) == 3 and sum(x * x) > 1 - ct.sqrt_epsf + ), 'input must have length = 3 and have unit magnitude' + self._evec = x + + @property + def distortion(self): + return self._distortion + + @distortion.setter + def distortion(self, x): + if x is not None: + registry = distortion_registry.distortion_registry + check_arg = np.zeros(len(registry), dtype=bool) + for i, dcls in enumerate(registry.values()): + check_arg[i] = isinstance(x, dcls) + assert np.any(check_arg), 'input distortion is not in registry!' + self._distortion = x + + @property + def rmat(self): + return make_rmat_of_expmap(self.tilt) + + @property + def normal(self): + return self.rmat[:, 2] +
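+    # !!! a minimal sketch of the tilt convention used by `rmat` and
+    # !!! `normal` above (illustrative only; assumes the xfcapi imports
+    # !!! at the top of this module):
+    #
+    # >>> import numpy as np
+    # >>> from hexrd.transforms.xfcapi import make_rmat_of_expmap
+    # >>> rmat = make_rmat_of_expmap(np.zeros(3))  # zero tilt
+    # >>> np.allclose(rmat, np.eye(3))
+    # True
+    # >>> rmat[:, 2]  # the panel normal is the third column
+    # array([0., 0., 1.])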
+ # ...memoize??? + @property + def pixel_coords(self): + pix_i, pix_j = np.meshgrid( + self.row_pixel_vec, self.col_pixel_vec, indexing='ij' + ) + return pix_i, pix_j + + # ========================================================================= + # METHODS + # ========================================================================= + + def pixel_Q(self, energy: np.floating, + origin: np.ndarray = ct.zeros_3) -> np.ndarray: + '''get the equivalent momentum transfer + for the angles. + + Parameters + ---------- + energy: float + incident photon energy in keV + origin: np.ndarray + origin of diffraction volume + + Returns + ------- + np.ndarray + pixel wise Q in A^-1 + + ''' + lam = ct.keVToAngstrom(energy) + tth, _ = self.pixel_angles(origin=origin) + return 4.*np.pi*np.sin(tth*0.5)/lam + + def pixel_compton_energy_loss( + self, + energy: np.floating, + origin: np.ndarray = ct.zeros_3, + ) -> np.ndarray: + '''inelastic compton scattering leads + to energy loss of the incident photons. + compute the final energy of the photons + for each pixel. + + Parameters + ---------- + energy: float + incident photon energy in keV + origin: np.ndarray + origin of diffraction volume + + Returns + ------- + np.ndarray + pixel wise energy of inelastically + scattered photons in keV + ''' + energy = np.asarray(energy) + tth, _ = self.pixel_angles() + ang_fact = (1 - np.cos(tth)) + beta = energy/ct.cRestmasskeV + return energy/(1 + beta*ang_fact) + + def pixel_compton_attenuation_length( + self, + energy: np.floating, + density: np.floating, + formula: str, + origin: np.ndarray = ct.zeros_3, + ) -> np.ndarray: + '''each pixel intercepts inelastically + scattered photons of different energy. + the attenuation length and the transmission + for these photons are different. this function + calculates the attenuation length for each pixel + on the detector. + + Parameters + ---------- + energy: float + incident photon energy in keV + density: float + density of material in g/cc + formula: str + formula of the material scattering + origin: np.ndarray + origin of diffraction volume + + Returns + ------- + np.ndarray + pixel wise attenuation length of compton + scattered photons + ''' + pixel_energy = self.pixel_compton_energy_loss(energy) + + pixel_attenuation_length = calculate_linear_absorption_length( + density, + formula, + pixel_energy.flatten(), + ) + return pixel_attenuation_length.reshape(self.shape)
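+    # !!! a minimal numeric sketch of the energy-loss formula used in
+    # !!! pixel_compton_energy_loss above, E' = E/(1 + beta*(1 - cos(tth)))
+    # !!! with beta = E/(m_e c^2); the 511.0 keV rest mass below is an
+    # !!! illustrative stand-in for ct.cRestmasskeV:
+    #
+    # >>> import numpy as np
+    # >>> e_in = 65.351  # keV, beam_energy_DFLT
+    # >>> tth = np.radians(30.0)
+    # >>> e_out = e_in / (1 + (e_in / 511.0) * (1 - np.cos(tth)))
+    # >>> round(float(e_out), 2)
+    # 64.25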
+ + def compute_compton_scattering_intensity( + self, + energy: np.floating, + rMat_s: np.array, + physics_package: AbstractPhysicsPackage, + origin: np.array = ct.zeros_3, + ) -> tuple[np.ndarray, np.ndarray, np.ndarray]: + + ''' compute the theoretical compton scattering + signal on the detector. this value is corrected + for the transmission of compton scattered photons + and normalized before being subtracted from the + raw intensity + + Parameters + ----------- + energy: float + energy of incident photon + rMat_s: np.ndarray + rotation matrix of sample orientation + physics_package: AbstractPhysicsPackage + physics package information + Returns + ------- + compton_intensity: np.ndarray + transmission corrected compton scattering + intensity + t_s: np.ndarray + sample transmission + t_w: np.ndarray + window transmission + ''' + + q = self.pixel_Q(energy) + inc_s = calculate_incoherent_scattering( + physics_package.sample_material, + q.flatten()).reshape(self.shape) + + inc_w = calculate_incoherent_scattering( + physics_package.window_material, + q.flatten()).reshape(self.shape) + + t_s = self.calc_compton_physics_package_transmission( + energy, rMat_s, physics_package) + + t_w = self.calc_compton_window_transmission( + energy, rMat_s, physics_package) + + return inc_s * t_s + inc_w * t_w, t_s, t_w + + def polarization_factor(self, f_hor, f_vert, unpolarized=False): + """ + Calculate the polarization factor for every pixel. + + Parameters + ---------- + f_hor : float + the fraction of horizontal polarization. for XFELs + this is close to 1. + f_vert : float + the fraction of vertical polarization, which is ~0 for XFELs. + + Raises + ------ + RuntimeError + DESCRIPTION. + + Returns + ------- + TYPE + DESCRIPTION. + + """ + s = f_hor + f_vert + if np.abs(s - 1) > ct.sqrt_epsf: + msg = ( + "sum of fraction of " + "horizontal and vertical polarizations " + "must be equal to 1." + ) + raise RuntimeError(msg) + + if f_hor < 0 or f_vert < 0: + msg = ( + "fraction of polarization in horizontal " + "or vertical directions can't be negative." + ) + raise RuntimeError(msg) + + tth, eta = self.pixel_angles() + kwargs = { + 'tth': tth, + 'eta': eta, + 'f_hor': f_hor, + 'f_vert': f_vert, + 'unpolarized': unpolarized, + } + + return _polarization_factor(**kwargs) + + def lorentz_factor(self): + """ + calculate the lorentz factor for every pixel + + Parameters + ---------- + None + + Raises + ------ + None + + Returns + ------- + numpy.ndarray + returns an array the same size as the detector panel + with each element containing the lorentz factor of the + corresponding pixel + """ + tth, eta = self.pixel_angles() + return _lorentz_factor(tth)
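+    # !!! sketch of the input contract checked by polarization_factor
+    # !!! above (a hypothetical 95/5 horizontal/vertical split):
+    #
+    # >>> f_hor, f_vert = 0.95, 0.05
+    # >>> abs((f_hor + f_vert) - 1.0) < 1e-8  # fractions must sum to 1
+    # True
+    #
+    # panel.polarization_factor(f_hor, f_vert) and panel.lorentz_factor()
+    # would then each return an array of shape panel.shape.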
+ + """ + assert style.lower() in ['yaml', 'hdf5'], ( + "style must be either 'yaml', or 'hdf5'; you gave '%s'" % style + ) + + config_dict = {} + + # ===================================================================== + # DETECTOR PARAMETERS + # ===================================================================== + # transform and pixels + # + # assign local vars; listify if necessary + tilt = self.tilt + translation = self.tvec + roi = ( + None + if self.roi is None + else np.array([self.roi[0][0], self.roi[1][0]]).flatten() + ) + if style.lower() == 'yaml': + tilt = tilt.tolist() + translation = translation.tolist() + tvec = tvec.tolist() + roi = None if roi is None else roi.tolist() + + det_dict = dict( + detector_type=self.detector_type, + transform=dict( + tilt=tilt, + translation=translation, + ), + pixels=dict( + rows=int(self.rows), + columns=int(self.cols), + size=[float(self.pixel_size_row), float(self.pixel_size_col)], + ), + ) + + if roi is not None: + # Only add roi if it is not None + det_dict['pixels']['roi'] = roi + + if self.group is not None: + # Only add group if it is not None + det_dict['group'] = self.group + + # distortion + if self.distortion is not None: + dparams = self.distortion.params + if style.lower() == 'yaml': + dparams = dparams.tolist() + dist_d = dict( + function_name=self.distortion.maptype, parameters=dparams + ) + det_dict['distortion'] = dist_d + + # saturation level + if sat_level is None: + sat_level = self.saturation_level + det_dict['saturation_level'] = float(sat_level) + + # panel buffer + if panel_buffer is None: + # could be none, a 2-element list, or a 2-d array (rows, cols) + panel_buffer = copy.deepcopy(self.panel_buffer) + # !!! now we have to do some style-dependent munging of panel_buffer + if isinstance(panel_buffer, np.ndarray): + if panel_buffer.ndim == 1: + assert len(panel_buffer) == 2, "length of 1-d buffer must be 2" + # if here is a 2-element array + if style.lower() == 'yaml': + panel_buffer = panel_buffer.tolist() + elif panel_buffer.ndim == 2: + if style.lower() == 'yaml': + # !!! can't practically write array-like buffers to YAML + # so forced to clobber + print("clobbering panel buffer array in yaml-ready output") + panel_buffer = [0.0, 0.0] + else: + raise RuntimeError( + "panel buffer ndim must be 1 or 2; you specified %d" + % panel_buffer.ndmin + ) + elif panel_buffer is None: + # still None on self + # !!! this gets handled by unwrap_dict_to_h5 now + + # if style.lower() == 'hdf5': + # # !!! can't write None to hdf5; substitute with zeros + # panel_buffer = np.r_[0., 0.] + pass + det_dict['buffer'] = panel_buffer + + det_dict.update(self.extra_config_kwargs) + + # ===================================================================== + # SAMPLE STAGE PARAMETERS + # ===================================================================== + stage_dict = dict(chi=chi, translation=tvec) + + # ===================================================================== + # BEAM PARAMETERS + # ===================================================================== + # !!! 
+ + def cartToPixel(self, xy_det, pixels=False, apply_distortion=False): + """ + Converts cartesian coordinates to pixel coordinates + + Parameters + ---------- + xy_det : array_like + The (n, 2) vstacked array of (x, y) pairs in the reference + cartesian frame (possibly subject to distortion). + pixels : bool, optional + If True, return discrete pixel indices; otherwise fractional pixel + coordinates are returned. The default is False. + apply_distortion : bool, optional + If True, apply self.distortion to the input (if applicable). + The default is False. + + Returns + ------- + ij_det : array_like + The (n, 2) array of vstacked (i, j) coordinates in the pixel + reference frame where i is the (slow) row dimension and j is the + (fast) column dimension. + + """ + xy_det = np.atleast_2d(xy_det) + if apply_distortion and self.distortion is not None: + xy_det = self.distortion.apply(xy_det) + + npts = len(xy_det) + + tmp_ji = xy_det - np.tile(self.corner_ul, (npts, 1)) + i_pix = -tmp_ji[:, 1] / self.pixel_size_row - 0.5 + j_pix = tmp_ji[:, 0] / self.pixel_size_col - 0.5 + + ij_det = np.vstack([i_pix, j_pix]).T + if pixels: + # Hide any runtime warnings in this conversion. Their output values + # will certainly be off the detector, which is fine. + with np.errstate(invalid='ignore'): + ij_det = np.array(np.round(ij_det), dtype=int) + + return ij_det + + def pixelToCart(self, ij_det): + """ + Convert a vstacked array or list of [i,j] pixel indices + (or UL corner-based points) to (x,y) in the + cartesian frame {Xd, Yd, Zd} + """ + ij_det = np.atleast_2d(ij_det) + + x = (ij_det[:, 1] + 0.5) * self.pixel_size_col + self.corner_ll[0] + y = ( + self.rows - ij_det[:, 0] - 0.5 + ) * self.pixel_size_row + self.corner_ll[1] + return np.vstack([x, y]).T
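+    # !!! cartToPixel and pixelToCart invert each other on pixel centers;
+    # !!! a sketch assuming a concrete panel with the default 2048 x 2048,
+    # !!! 0.2 mm geometry (`panel` is hypothetical here):
+    #
+    # >>> import numpy as np
+    # >>> ij = np.array([[0, 0]])  # upper-left pixel center
+    # >>> xy = panel.pixelToCart(ij)  # -> array([[-204.7, 204.7]])
+    # >>> np.allclose(panel.cartToPixel(xy), ij)
+    # True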
+ + def angularPixelSize(self, xy, rMat_s=None, tVec_s=None, tVec_c=None): + """ + Notes + ----- + !!! assumes xy are in raw (distorted) frame, if applicable + """ + # munge kwargs + if rMat_s is None: + rMat_s = ct.identity_3x3 + if tVec_s is None: + tVec_s = ct.zeros_3x1 + if tVec_c is None: + tVec_c = ct.zeros_3x1 + + # FIXME: perhaps not necessary, but safe... + xy = np.atleast_2d(xy) + + ''' + # --------------------------------------------------------------------- + # TODO: needs testing and memoized gradient arrays! + # --------------------------------------------------------------------- + # need origin arg + origin = np.dot(rMat_s, tVec_c).flatten() + tVec_s.flatten() + + # get pixel indices + i_crds = cellIndices(self.row_edge_vec, xy[:, 1]) + j_crds = cellIndices(self.col_edge_vec, xy[:, 0]) + + ptth_grad = self.pixel_tth_gradient(origin=origin)[i_crds, j_crds] + peta_grad = self.pixel_eta_gradient(origin=origin)[i_crds, j_crds] + + return np.vstack([ptth_grad, peta_grad]).T + ''' + # call xrdutil function + ang_ps = xrdutil.angularPixelSize( + xy, + (self.pixel_size_row, self.pixel_size_col), + self.rmat, + rMat_s, + self.tvec, + tVec_s, + tVec_c, + distortion=self.distortion, + beamVec=self.bvec, + etaVec=self.evec, + ) + return ang_ps + + def clip_to_panel(self, xy, buffer_edges=True): + """ + if self.roi is not None, uses it by default + + TODO: check if need shape kwarg + TODO: optimize ROI search better than list comprehension below + TODO: panel_buffer can be a 2-d boolean mask, but needs testing + + """ + xy = np.atleast_2d(xy) + + ''' + # !!! THIS LOGIC IS OBSOLETE + if self.roi is not None: + ij_crds = self.cartToPixel(xy, pixels=True) + ii, jj = polygon(self.roi[:, 0], self.roi[:, 1], + shape=(self.rows, self.cols)) + on_panel_rows = [i in ii for i in ij_crds[:, 0]] + on_panel_cols = [j in jj for j in ij_crds[:, 1]] + on_panel = np.logical_and(on_panel_rows, on_panel_cols) + else: + ''' + xlim = 0.5 * self.col_dim + ylim = 0.5 * self.row_dim + if buffer_edges and self.panel_buffer is not None: + if self.panel_buffer.ndim == 2: + pix = self.cartToPixel(xy, pixels=True) + + roff = np.logical_or(pix[:, 0] < 0, pix[:, 0] >= self.rows) + coff = np.logical_or(pix[:, 1] < 0, pix[:, 1] >= self.cols) + + idx = np.logical_or(roff, coff) + + on_panel = np.full(pix.shape[0], False) + valid_pix = pix[~idx, :] + on_panel[~idx] = self.panel_buffer[ + valid_pix[:, 0], valid_pix[:, 1] + ] + else: + xlim -= self.panel_buffer[0] + ylim -= self.panel_buffer[1] + on_panel_x = np.logical_and( + xy[:, 0] >= -xlim, xy[:, 0] <= xlim + ) + on_panel_y = np.logical_and( + xy[:, 1] >= -ylim, xy[:, 1] <= ylim + ) + on_panel = np.logical_and(on_panel_x, on_panel_y) + elif not buffer_edges or self.panel_buffer is None: + on_panel_x = np.logical_and(xy[:, 0] >= -xlim, xy[:, 0] <= xlim) + on_panel_y = np.logical_and(xy[:, 1] >= -ylim, xy[:, 1] <= ylim) + on_panel = np.logical_and(on_panel_x, on_panel_y) + return xy[on_panel, :], on_panel + + def interpolate_nearest(self, xy, img, pad_with_nans=True): + """ + TODO: revisit normalization in here? + + """ + is_2d = img.ndim == 2 + right_shape = img.shape[0] == self.rows and img.shape[1] == self.cols + assert ( + is_2d and right_shape + ), "input image must be 2-d with shape (%d, %d)" % ( + self.rows, + self.cols, + ) + + # initialize output with nans + if pad_with_nans: + int_xy = np.nan * np.ones(len(xy)) + else: + int_xy = np.zeros(len(xy)) + + # clip away points too close to or off the edges of the detector + xy_clip, on_panel = self.clip_to_panel(xy, buffer_edges=True) + + # get pixel indices of clipped points + i_src = cellIndices(self.row_pixel_vec, xy_clip[:, 1]) + j_src = cellIndices(self.col_pixel_vec, xy_clip[:, 0]) + + # assign nearest-pixel values to the on-panel points + int_vals = img[i_src, j_src] + int_xy[on_panel] = int_vals + return int_xy
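+    # !!! a small sketch of the clipping contract used by the
+    # !!! interpolators (`panel` is any concrete detector instance;
+    # !!! coordinates are mm in the panel plane):
+    #
+    # >>> import numpy as np
+    # >>> xy = np.array([[0.0, 0.0], [1.0e4, 0.0]])  # 2nd point off-panel
+    # >>> xy_clip, on_panel = panel.clip_to_panel(xy)
+    # >>> on_panel
+    # array([ True, False])
+    # >>> len(xy_clip)
+    # 1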
+ + def interpolate_bilinear(self, xy, img, pad_with_nans=True, + clip_to_panel=True, + on_panel: Optional[np.ndarray] = None): + """ + Interpolate an image array at the specified cartesian points. + + Parameters + ---------- + xy : array_like, (n, 2) + Array of cartesian coordinates in the image plane at which + to evaluate intensity. + img : array_like + 2-dimensional image array. + pad_with_nans : bool, optional + Toggle for assigning NaN to points that fall off the detector. + The default is True. + on_panel : np.ndarray, optional + If you want to skip clip_to_panel() for performance reasons, + just provide a boolean array indicating which of the input + points are on the panel. + + Returns + ------- + int_xy : array_like, (n,) + The array of interpolated intensities at each of the n input + coordinates. + + Notes + ----- + TODO: revisit normalization in here? + """ + + is_2d = img.ndim == 2 + right_shape = img.shape[0] == self.rows and img.shape[1] == self.cols + assert ( + is_2d and right_shape + ), "input image must be 2-d with shape (%d, %d)" % ( + self.rows, + self.cols, + ) + + # initialize output with nans + if pad_with_nans: + int_xy = np.nan * np.ones(len(xy)) + else: + int_xy = np.zeros(len(xy)) + + if on_panel is None: + # clip away points too close to or off the edges of the detector + xy_clip, on_panel = self.clip_to_panel(xy, buffer_edges=True) + else: + xy_clip = xy[on_panel] + + # grab fractional pixel indices of clipped points + ij_frac = self.cartToPixel(xy_clip) + + # get floors/ceils from array of pixel _centers_ + # and fix indices running off the pixel centers + # !!! notice we already clipped points to the panel! + i_floor = cellIndices(self.row_pixel_vec, xy_clip[:, 1]) + i_floor_img = _fix_indices(i_floor, 0, self.rows - 1) + + j_floor = cellIndices(self.col_pixel_vec, xy_clip[:, 0]) + j_floor_img = _fix_indices(j_floor, 0, self.cols - 1) + + # ceilings from floors + i_ceil = i_floor + 1 + i_ceil_img = _fix_indices(i_ceil, 0, self.rows - 1) + + j_ceil = j_floor + 1 + j_ceil_img = _fix_indices(j_ceil, 0, self.cols - 1) + + # first interpolate at top/bottom rows + row_floor_int = (j_ceil - ij_frac[:, 1]) * img[ + i_floor_img, j_floor_img + ] + (ij_frac[:, 1] - j_floor) * img[i_floor_img, j_ceil_img] + row_ceil_int = (j_ceil - ij_frac[:, 1]) * img[ + i_ceil_img, j_floor_img + ] + (ij_frac[:, 1] - j_floor) * img[i_ceil_img, j_ceil_img] + + # next interpolate across cols + int_vals = (i_ceil - ij_frac[:, 0]) * row_floor_int + ( + ij_frac[:, 0] - i_floor + ) * row_ceil_int + int_xy[on_panel] = int_vals + return int_xy
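+    # !!! the row/column weighting above reduces to the textbook bilinear
+    # !!! formula; a self-contained check on a tiny image (no panel needed):
+    #
+    # >>> img = [[0.0, 1.0], [2.0, 3.0]]
+    # >>> i, j = 0.5, 0.5  # fractional pixel coordinates
+    # >>> ((1 - i) * (1 - j) * img[0][0] + (1 - i) * j * img[0][1]
+    # ...  + i * (1 - j) * img[1][0] + i * j * img[1][1])
+    # 1.5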
+ + def make_powder_rings( + self, + pd, + merge_hkls=False, + delta_tth=None, + delta_eta=10.0, + eta_period=None, + eta_list=None, + rmat_s=ct.identity_3x3, + tvec_s=ct.zeros_3, + tvec_c=ct.zeros_3, + full_output=False, + tth_distortion=None, + ): + """ + Generate points on Debye-Scherrer rings over the detector. + + !!! it is assuming that rmat_s is built from (chi, ome) as is the case + for HEDM! + + Parameters + ---------- + pd : TYPE + DESCRIPTION. + merge_hkls : TYPE, optional + DESCRIPTION. The default is False. + delta_tth : TYPE, optional + DESCRIPTION. The default is None. + delta_eta : TYPE, optional + DESCRIPTION. The default is 10.. + eta_period : TYPE, optional + DESCRIPTION. The default is None. + eta_list : TYPE, optional + DESCRIPTION. The default is None. + rmat_s : TYPE, optional + DESCRIPTION. The default is ct.identity_3x3. + tvec_s : TYPE, optional + DESCRIPTION. The default is ct.zeros_3. + tvec_c : TYPE, optional + DESCRIPTION. The default is ct.zeros_3. + full_output : TYPE, optional + DESCRIPTION. The default is False. + tth_distortion : special class, optional + Special distortion class. The default is None. + + Raises + ------ + RuntimeError + DESCRIPTION. + + Returns + ------- + TYPE + DESCRIPTION. + + """ + if tth_distortion is not None: + tnorms = mutil.rowNorm(np.vstack([tvec_s, tvec_c])) + assert np.all( + tnorms < ct.sqrt_epsf + ), "If using distortion function, translations must be zero" + + # in case you want to give it tth angles directly + if isinstance(pd, PlaneData): + pd = PlaneData(None, pd) + if delta_tth is not None: + pd.tThWidth = np.radians(delta_tth) + else: + delta_tth = np.degrees(pd.tThWidth) + + # !!! conversions, meh... + del_eta = np.radians(delta_eta) + + # do merging if asked + if merge_hkls: + _, tth_ranges = pd.getMergedRanges(cullDupl=True) + tth = np.average(tth_ranges, axis=1) + else: + tth_ranges = pd.getTThRanges() + tth = pd.getTTh() + tth_pm = tth_ranges - np.tile(tth, (2, 1)).T + sector_vertices = np.vstack( + [ + [ + i[0], + -del_eta, + i[0], + del_eta, + i[1], + del_eta, + i[1], + -del_eta, + 0.0, + 0.0, + ] + for i in tth_pm + ] + ) + else: + # Okay, we have an array-like tth specification + tth = np.array(pd).flatten() + if delta_tth is None: + raise RuntimeError( + "If supplying a 2theta list as first arg, " + + "must supply a delta_tth" + ) + tth_pm = 0.5 * delta_tth * np.r_[-1.0, 1.0] + tth_ranges = np.radians([i + tth_pm for i in tth]) # !!! units + sector_vertices = np.tile( + 0.5 + * np.radians( + [ + -delta_tth, + -delta_eta, + -delta_tth, + delta_eta, + delta_tth, + delta_eta, + delta_tth, + -delta_eta, + 0.0, + 0.0, + ] + ), + (len(tth), 1), + ) + # !! conversions, meh... + tth = np.radians(tth) + del_eta = np.radians(delta_eta) + + # for generating rings, make eta vector in correct period + if eta_period is None: + eta_period = (-np.pi, np.pi) + + if eta_list is None: + neta = int(360.0 / float(delta_eta)) + # this is the vector of ETA EDGES + eta_edges = mapAngle( + np.radians(delta_eta * np.linspace(0.0, neta, num=neta + 1)) + + eta_period[0], + eta_period, + ) + + # get eta bin centers from edges + """ + # !!! this way is probably overkill, since we have delta eta + eta_centers = np.average( + np.vstack([eta[:-1], eta[1:]]), + axis=0) + """ + # !!! should be safe as eta_edges are monotonic + eta_centers = eta_edges[:-1] + 0.5 * del_eta + else: + eta_centers = np.radians(eta_list).flatten() + neta = len(eta_centers) + eta_edges = ( + np.tile(eta_centers, (2, 1)) + + np.tile(0.5 * del_eta * np.r_[-1, 1], (neta, 1)).T + ).T.flatten() + + # get chi and ome from rmat_s + # !!! API ambiguity + # !!! this assumes rmat_s was made from the composition + # !!! rmat_s = R(Xl, chi) * R(Yl, ome) + ome = np.arctan2(rmat_s[0, 2], rmat_s[0, 0]) + + # make list of angle tuples + angs = [ + np.vstack([i * np.ones(neta), eta_centers, ome * np.ones(neta)]) + for i in tth + ] + + # need xy coords and pixel sizes + valid_ang = [] + valid_xy = [] + map_indices = [] + npp = 5 # [ll, ul, ur, lr, center] + for i_ring in range(len(angs)): + # expand angles to patch vertices + these_angs = angs[i_ring].T + + # push to vertices to see who falls off + # FIXME: clipping is not checking if masked regions are on the + # patch interior + patch_vertices = ( + np.tile(these_angs[:, :2], (1, npp)) + + np.tile(sector_vertices[i_ring], (neta, 1)) + ).reshape(npp * neta, 2) + + # find vertices that all fall on the panel + # !!!
note API ambiguity regarding rmat_s above + all_xy = self.angles_to_cart( + patch_vertices, + rmat_s=rmat_s, + tvec_s=tvec_s, + rmat_c=None, + tvec_c=tvec_c, + apply_distortion=True, + ) + + _, on_panel = self.clip_to_panel(all_xy) + + # all vertices must be on... + + patch_is_on = np.all(on_panel.reshape(neta, npp), axis=1) + patch_xys = all_xy.reshape(neta, 5, 2)[patch_is_on] + + # !!! Have to apply after clipping, distortion can get wonky near + # the edge of the panel, and it is assumed to be <~1 deg + # !!! The tth_ranges are NOT correct! + if tth_distortion is not None: + patch_valid_angs = tth_distortion.apply( + self.angles_to_cart(these_angs[patch_is_on, :2]), + return_nominal=True, + ) + patch_valid_xys = self.angles_to_cart( + patch_valid_angs, apply_distortion=True + ) + else: + patch_valid_angs = these_angs[patch_is_on, :2] + patch_valid_xys = patch_xys[:, -1, :].squeeze() + + # form output arrays + valid_ang.append(patch_valid_angs) + valid_xy.append(patch_valid_xys) + map_indices.append(patch_is_on) + # ??? is this option necessary? + if full_output: + return valid_ang, valid_xy, tth_ranges, map_indices, eta_edges + else: + return valid_ang, valid_xy, tth_ranges + + def map_to_plane(self, pts, rmat, tvec): + """ + Map detector points to a specified plane. + + Parameters + ---------- + pts : TYPE + DESCRIPTION. + rmat : TYPE + DESCRIPTION. + tvec : TYPE + DESCRIPTION. + + Returns + ------- + TYPE + DESCRIPTION. + + Notes + ----- + by convention: + + n * (u*pts_l - tvec) = 0 + + [pts]_l = rmat*[pts]_m + tvec + + """ + # arg munging + pts = np.atleast_2d(pts) + npts = len(pts) + + # map plane normal & translation vector, LAB FRAME + nvec_map_lab = rmat[:, 2].reshape(3, 1) + tvec_map_lab = np.atleast_2d(tvec).reshape(3, 1) + tvec_d_lab = np.atleast_2d(self.tvec).reshape(3, 1) + + # put pts as 3-d in panel CS and transform to 3-d lab coords + pts_det = np.hstack([pts, np.zeros((npts, 1))]) + pts_lab = np.dot(self.rmat, pts_det.T) + tvec_d_lab + + # scaling along pts vectors to hit map plane + u = np.dot(nvec_map_lab.T, tvec_map_lab) / np.dot( + nvec_map_lab.T, pts_lab + ) + + # pts on map plane, in LAB FRAME + pts_map_lab = np.tile(u, (3, 1)) * pts_lab + + return np.dot(rmat.T, pts_map_lab - tvec_map_lab)[:2, :].T + + def simulate_rotation_series( + self, + plane_data, + grain_param_list, + eta_ranges=[ + (-np.pi, np.pi), + ], + ome_ranges=[ + (-np.pi, np.pi), + ], + ome_period=(-np.pi, np.pi), + chi=0.0, + tVec_s=ct.zeros_3, + wavelength=None, + ): + """ + Simulate a monochromatic rotation series for a list of grains. + + Parameters + ---------- + plane_data : TYPE + DESCRIPTION. + grain_param_list : TYPE + DESCRIPTION. + eta_ranges : TYPE, optional + DESCRIPTION. The default is [(-np.pi, np.pi), ]. + ome_ranges : TYPE, optional + DESCRIPTION. The default is [(-np.pi, np.pi), ]. + ome_period : TYPE, optional + DESCRIPTION. The default is (-np.pi, np.pi). + chi : TYPE, optional + DESCRIPTION. The default is 0.. + tVec_s : TYPE, optional + DESCRIPTION. The default is ct.zeros_3. + wavelength : TYPE, optional + DESCRIPTION. The default is None. + + Returns + ------- + valid_ids : TYPE + DESCRIPTION. + valid_hkls : TYPE + DESCRIPTION. + valid_angs : TYPE + DESCRIPTION. + valid_xys : TYPE + DESCRIPTION. + ang_pixel_size : TYPE + DESCRIPTION.
+ + """ + # grab B-matrix from plane data + bMat = plane_data.latVecOps['B'] + + # reconcile wavelength + # * added sanity check on exclusions here; possible to + # * make some reflections invalid (NaN) + if wavelength is None: + wavelength = plane_data.wavelength + else: + if plane_data.wavelength != wavelength: + plane_data.wavelength = ct.keVToAngstrom(wavelength) + assert not np.any( + np.isnan(plane_data.getTTh()) + ), "plane data exclusions incompatible with wavelength" + + # vstacked G-vector id, h, k, l + full_hkls = xrdutil._fetch_hkls_from_planedata(plane_data) + + """ LOOP OVER GRAINS """ + valid_ids = [] + valid_hkls = [] + valid_angs = [] + valid_xys = [] + ang_pixel_size = [] + for gparm in grain_param_list: + + # make useful parameters + rMat_c = make_rmat_of_expmap(gparm[:3]) + tVec_c = gparm[3:6] + vInv_s = gparm[6:] + + # All possible bragg conditions as vstacked [tth, eta, ome] + # for each omega solution + angList = np.vstack( + oscill_angles_of_hkls( + full_hkls[:, 1:], + chi, + rMat_c, + bMat, + wavelength, + v_inv=vInv_s, + beam_vec=self.bvec, + ) + ) + + # filter by eta and omega ranges + # ??? get eta range from detector? + allAngs, allHKLs = xrdutil._filter_hkls_eta_ome( + full_hkls, angList, eta_ranges, ome_ranges + ) + allAngs[:, 2] = mapAngle(allAngs[:, 2], ome_period) + + # find points that fall on the panel + det_xy, rMat_s, on_plane = xrdutil._project_on_detector_plane( + allAngs, + self.rmat, + rMat_c, + chi, + self.tvec, + tVec_c, + tVec_s, + self.distortion, + self.bvec, + ) + xys_p, on_panel = self.clip_to_panel(det_xy) + valid_xys.append(xys_p) + + # filter angs and hkls that are on the detector plane + # !!! check this -- seems unnecessary but the results of + # _project_on_detector_plane() can have len < the input? + # the output of _project_on_detector_plane has been modified to + # hand back the index array to remedy this JVB 2020-05-27 + if np.any(~on_plane): + allAngs = np.atleast_2d(allAngs[on_plane, :]) + allHKLs = np.atleast_2d(allHKLs[on_plane, :]) + + # grab hkls and gvec ids for this panel + valid_hkls.append(allHKLs[on_panel, 1:]) + valid_ids.append(allHKLs[on_panel, 0]) + + # reflection angles (voxel centers) and pixel size in (tth, eta) + valid_angs.append(allAngs[on_panel, :]) + ang_pixel_size.append(self.angularPixelSize(xys_p)) + return valid_ids, valid_hkls, valid_angs, valid_xys, ang_pixel_size + + def simulate_laue_pattern( + self, + crystal_data, + minEnergy=5.0, + maxEnergy=35.0, + rmat_s=None, + tvec_s=None, + grain_params=None, + beam_vec=None, + ): + """ """ + if isinstance(crystal_data, PlaneData): + + plane_data = crystal_data + + # grab the expanded list of hkls from plane_data + hkls = np.hstack(plane_data.getSymHKLs()) + + # and the unit plane normals (G-vectors) in CRYSTAL FRAME + gvec_c = np.dot(plane_data.latVecOps['B'], hkls) + + # Filter out g-vectors going in the wrong direction. `gvec_to_xy()` used + # to do this, but not anymore. + to_keep = np.dot(gvec_c.T, self.bvec) <= 0 + + hkls = hkls[:, to_keep] + gvec_c = gvec_c[:, to_keep] + elif len(crystal_data) == 2: + # !!! 
should clean this up + hkls = np.array(crystal_data[0]) + bmat = crystal_data[1] + gvec_c = np.dot(bmat, hkls) + else: + raise RuntimeError( + f'argument list not understood: {crystal_data=}' + ) + nhkls_tot = hkls.shape[1] + + # parse energy ranges + # TODO: allow for spectrum parsing + multipleEnergyRanges = False + if hasattr(maxEnergy, '__len__'): + assert len(maxEnergy) == len( + minEnergy + ), 'energy cutoff ranges must have the same length' + multipleEnergyRanges = True + lmin = [] + lmax = [] + for i in range(len(maxEnergy)): + lmin.append(ct.keVToAngstrom(maxEnergy[i])) + lmax.append(ct.keVToAngstrom(minEnergy[i])) + else: + lmin = ct.keVToAngstrom(maxEnergy) + lmax = ct.keVToAngstrom(minEnergy) + + # parse grain parameters kwarg + if grain_params is None: + grain_params = np.atleast_2d( + np.hstack([np.zeros(6), ct.identity_6x1]) + ) + n_grains = len(grain_params) + + # sample rotation + if rmat_s is None: + rmat_s = ct.identity_3x3 + + # dummy translation vector... make input + if tvec_s is None: + tvec_s = ct.zeros_3 + + # beam vector + if beam_vec is None: + beam_vec = ct.beam_vec + + # ========================================================================= + # LOOP OVER GRAINS + # ========================================================================= + + # pre-allocate output arrays + xy_det = np.nan * np.ones((n_grains, nhkls_tot, 2)) + hkls_in = np.nan * np.ones((n_grains, 3, nhkls_tot)) + angles = np.nan * np.ones((n_grains, nhkls_tot, 2)) + dspacing = np.nan * np.ones((n_grains, nhkls_tot)) + energy = np.nan * np.ones((n_grains, nhkls_tot)) + for iG, gp in enumerate(grain_params): + rmat_c = make_rmat_of_expmap(gp[:3]) + tvec_c = gp[3:6].reshape(3, 1) + vInv_s = mutil.vecMVToSymm(gp[6:].reshape(6, 1)) + + # stretch them: V^(-1) * R * Gc + gvec_s_str = np.dot(vInv_s, np.dot(rmat_c, gvec_c)) + ghat_c_str = mutil.unitVector(np.dot(rmat_c.T, gvec_s_str)) + + # project + dpts = gvec_to_xy( + ghat_c_str.T, + self.rmat, + rmat_s, + rmat_c, + self.tvec, + tvec_s, + tvec_c, + beam_vec=beam_vec, + ) + + # check intersections with detector plane + canIntersect = ~np.isnan(dpts[:, 0]) + npts_in = sum(canIntersect) + + if np.any(canIntersect): + dpts = dpts[canIntersect, :].reshape(npts_in, 2) + dhkl = hkls[:, canIntersect].reshape(3, npts_in) + + rmat_b = make_beam_rmat(beam_vec, ct.eta_vec) + # back to angles + tth_eta, gvec_l = xy_to_gvec( + dpts, + self.rmat, + rmat_s, + self.tvec, + tvec_s, + tvec_c, + rmat_b=rmat_b, + ) + tth_eta = np.vstack(tth_eta).T + + # warp measured points + if self.distortion is not None: + dpts = self.distortion.apply_inverse(dpts) + + # plane spacings and energies + dsp = 1.0 / mutil.rowNorm(gvec_s_str[:, canIntersect].T) + wlen = 2 * dsp * np.sin(0.5 * tth_eta[:, 0]) + + # clip to detector panel + _, on_panel = self.clip_to_panel(dpts, buffer_edges=True) + + if multipleEnergyRanges: + validEnergy = np.zeros(len(wlen), dtype=bool) + for i in range(len(lmin)): + in_energy_range = np.logical_and( + wlen >= lmin[i], wlen <= lmax[i] + ) + validEnergy = validEnergy | in_energy_range + else: + validEnergy = np.logical_and(wlen >= lmin, wlen <= lmax) + + # index for valid reflections + keepers = np.where(np.logical_and(on_panel, validEnergy))[0] + + # assign output arrays + xy_det[iG][keepers, :] = dpts[keepers, :] + hkls_in[iG][:, keepers] = dhkl[:, keepers] + angles[iG][keepers, :] = tth_eta[keepers, :] + dspacing[iG, keepers] = dsp[keepers] + energy[iG, keepers] = ct.keVToAngstrom(wlen[keepers]) + return xy_det, hkls_in, angles, dspacing, energy + + 
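+    # !!! the energy cutoffs above are converted with ct.keVToAngstrom,
+    # !!! which is numerically its own inverse (lambda[A] ~= 12.398/E[keV]);
+    # !!! a sketch of the default 5-35 keV window:
+    #
+    # >>> lmin = 12.39842 / 35.0  # ~0.354 A, from maxEnergy
+    # >>> lmax = 12.39842 / 5.0   # ~2.480 A, from minEnergy
+    # >>> lmin < lmax
+    # True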
@staticmethod + def update_memoization_sizes(all_panels): + funcs = [ + _polarization_factor, + _lorentz_factor, + ] + + min_size = len(all_panels) + return Detector.increase_memoization_sizes(funcs, min_size) + + @staticmethod + def increase_memoization_sizes(funcs, min_size): + for f in funcs: + cache_info = f.cache_info() + if cache_info['maxsize'] < min_size: + f.set_cache_maxsize(min_size) + + def calc_physics_package_transmission(self, energy: np.floating, + rMat_s: np.array, + physics_package: AbstractPhysicsPackage) -> np.float64: + """get the transmission through the physics package; + HED and HEDM samples need to be considered separately + """ + bvec = self.bvec + sample_normal = np.dot(rMat_s, [0., 0., np.sign(bvec[2])]) + seca = 1./np.dot(bvec, sample_normal) + + tth, eta = self.pixel_angles() + angs = np.vstack((tth.flatten(), eta.flatten(), + np.zeros(tth.flatten().shape))).T + + dvecs = angles_to_dvec(angs, beam_vec=bvec) + + cosb = np.dot(dvecs, sample_normal) + '''angles for which secb <= 0 or close are diffracted beams + almost parallel to the sample surface or backscattered, we + can mask out these values by setting secb to nan + ''' + mask = np.logical_or( + cosb < 0, + np.isclose( + cosb, + 0., + atol=5E-2, + ) + ) + cosb[mask] = np.nan + secb = 1./cosb.reshape(self.shape) + + T_sample = self.calc_transmission_sample( + seca, secb, energy, physics_package) + T_window = self.calc_transmission_window( + secb, energy, physics_package) + + transmission_physics_package = T_sample * T_window + return transmission_physics_package + + def calc_compton_physics_package_transmission( + self, + energy: np.floating, + rMat_s: np.array, + physics_package: AbstractPhysicsPackage, + ) -> np.ndarray: + '''calculate the attenuation of inelastically + scattered photons. since these photons lose energy, + the attenuation length is angle dependent, ergo a separate + routine from elastically scattered absorption. + ''' + bvec = self.bvec + sample_normal = np.dot(rMat_s, [0., 0., np.sign(bvec[2])]) + seca = 1./np.dot(bvec, sample_normal) + + tth, eta = self.pixel_angles() + angs = np.vstack((tth.flatten(), eta.flatten(), + np.zeros(tth.flatten().shape))).T + + dvecs = angles_to_dvec(angs, beam_vec=bvec) + + cosb = np.dot(dvecs, sample_normal) + '''angles for which secb <= 0 or close are diffracted beams + almost parallel to the sample surface or backscattered, we + can mask out these values by setting secb to nan + ''' + mask = np.logical_or( + cosb < 0, + np.isclose( + cosb, + 0., + atol=5E-2, + ) + ) + cosb[mask] = np.nan + secb = 1./cosb.reshape(self.shape) + + T_sample = self.calc_compton_transmission( + seca, secb, energy, + physics_package, 'sample') + T_window = self.calc_compton_transmission_window( + secb, energy, physics_package) + + return T_sample * T_window + + def calc_compton_window_transmission( + self, + energy: np.floating, + rMat_s: np.ndarray, + physics_package: AbstractPhysicsPackage, + ) -> np.ndarray: + '''calculate the attenuation of inelastically + scattered photons just from the window. + since these photons lose energy, the attenuation length + is angle dependent, ergo a separate routine from + elastically scattered absorption.
+ ''' + bvec = self.bvec + sample_normal = np.dot(rMat_s, [0., 0., np.sign(bvec[2])]) + seca = 1./np.dot(bvec, sample_normal) + + tth, eta = self.pixel_angles() + angs = np.vstack((tth.flatten(), eta.flatten(), + np.zeros(tth.flatten().shape))).T + + dvecs = angles_to_dvec(angs, beam_vec=bvec) + + cosb = np.dot(dvecs, sample_normal) + '''angles for which secb <= 0 or close are diffracted beams + almost parallel to the sample surface or backscattered, we + can mask out these values by setting secb to nan + ''' + mask = np.logical_or( + cosb < 0, + np.isclose( + cosb, + 0., + atol=5E-2, + ) + ) + cosb[mask] = np.nan + secb = 1./cosb.reshape(self.shape) + + T_window = self.calc_compton_transmission( + seca, secb, energy, + physics_package, 'window') + T_sample = self.calc_compton_transmission_sample( + seca, energy, physics_package) + + return T_sample * T_window + + def calc_transmission_sample(self, seca: np.array, + secb: np.array, energy: np.floating, + physics_package: AbstractPhysicsPackage) -> np.array: + thickness_s = physics_package.sample_thickness # in microns + if np.isclose(thickness_s, 0): + return np.ones(self.shape) + + # in microns^-1 + mu_s = 1./physics_package.sample_absorption_length(energy) + x = (mu_s*thickness_s) + pre = 1./x/(secb - seca) + num = np.exp(-x*seca) - np.exp(-x*secb) + return pre * num + + def calc_transmission_window(self, secb: np.array, energy: np.floating, + physics_package: AbstractPhysicsPackage) -> np.array: + material_w = physics_package.window_material + thickness_w = physics_package.window_thickness # in microns + if material_w is None or np.isclose(thickness_w, 0): + return np.ones(self.shape) + + # in microns^-1 + mu_w = 1./physics_package.window_absorption_length(energy) + return np.exp(-thickness_w*mu_w*secb) + + def calc_compton_transmission( + self, + seca: np.ndarray, + secb: np.ndarray, + energy: np.floating, + physics_package: AbstractPhysicsPackage, + pp_layer: str, + ) -> np.ndarray: + + if pp_layer == 'sample': + formula = physics_package.sample_material + density = physics_package.sample_density + thickness = physics_package.sample_thickness + mu = 1./physics_package.sample_absorption_length(energy) + mu_prime = 1. 
/ self.pixel_compton_attenuation_length( + energy, density, formula, + ) + elif pp_layer == 'window': + formula = physics_package.window_material + if formula is None: + return np.ones(self.shape) + + density = physics_package.window_density + thickness = physics_package.window_thickness + mu = 1./physics_package.window_absorption_length(energy) + mu_prime = 1./self.pixel_compton_attenuation_length( + energy, density, formula) + + if thickness <= 0: + return np.ones(self.shape) + + x1 = mu*thickness*seca + x2 = mu_prime*thickness*secb + num = (np.exp(-x1) - np.exp(-x2)) + return -num/(x1 - x2) + + def calc_compton_transmission_sample( + self, + seca: np.ndarray, + energy: np.floating, + physics_package: AbstractPhysicsPackage, + ) -> np.ndarray: + thickness_s = physics_package.sample_thickness # in microns + + mu_s = 1./physics_package.sample_absorption_length( + energy) + return np.exp(-mu_s*thickness_s*seca) + + def calc_compton_transmission_window( + self, + secb: np.ndarray, + energy: np.floating, + physics_package: AbstractPhysicsPackage, + ) -> np.ndarray: + formula = physics_package.window_material + if formula is None: + return np.ones(self.shape) + + density = physics_package.window_density # in g/cc + thickness_w = physics_package.window_thickness # in microns + + mu_w_prime = 1./self.pixel_compton_attenuation_length( + energy, density, formula) + return np.exp(-mu_w_prime*thickness_w*secb) + + def calc_effective_pinhole_area(self, physics_package: AbstractPhysicsPackage) -> np.array: + """get the effective pinhole area correction + """ + if (np.isclose(physics_package.pinhole_diameter, 0) + or np.isclose(physics_package.pinhole_thickness, 0)): + return np.ones(self.shape) + + hod = (physics_package.pinhole_thickness / + physics_package.pinhole_diameter) + bvec = self.bvec + + tth, eta = self.pixel_angles() + angs = np.vstack((tth.flatten(), eta.flatten(), + np.zeros(tth.flatten().shape))).T + dvecs = angles_to_dvec(angs, beam_vec=bvec) + + cth = -dvecs[:, 2].reshape(self.shape) + tanth = np.tan(np.arccos(cth)) + f = hod*tanth + f[np.abs(f) > 1.] = np.nan + asinf = np.arcsin(f) + return 2 / np.pi * cth * (np.pi / 2 - asinf - f * np.cos(asinf))
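+    # !!! a one-ray sketch of the clipped-pinhole factor computed above
+    # !!! (hod = pinhole thickness / diameter; the values are hypothetical):
+    #
+    # >>> import numpy as np
+    # >>> hod, tth = 1.0, np.radians(30.0)
+    # >>> cth = np.cos(tth)
+    # >>> f = hod * np.tan(tth)
+    # >>> asinf = np.arcsin(f)
+    # >>> area = 2 / np.pi * cth * (np.pi / 2 - asinf - f * np.cos(asinf))
+    # >>> bool(0.0 <= area <= 1.0)  # a fractional open area
+    # True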
+ + def calc_transmission_generic(self, + secb: np.array, + thickness: np.floating, + absorption_length: np.floating) -> np.array: + if np.isclose(thickness, 0): + return np.ones(self.shape) + + mu = 1./absorption_length # in microns^-1 + return np.exp(-thickness*mu*secb) + + def calc_transmission_phosphor(self, + secb: np.array, + thickness: np.floating, + readout_length: np.floating, + absorption_length: np.floating, + energy: np.floating, + pre_U0: np.floating) -> np.array: + if np.isclose(thickness, 0): + return np.ones(self.shape) + + f1 = absorption_length*thickness + f2 = absorption_length*readout_length + arg = (secb + 1/f2) + return pre_U0 * energy*((1.0 - np.exp(-f1*arg))/arg) + +# ============================================================================= +# UTILITY METHODS +# ============================================================================= + + +def _fix_indices(idx, lo, hi): + nidx = np.array(idx) + off_lo = nidx < lo + off_hi = nidx > hi + nidx[off_lo] = lo + nidx[off_hi] = hi + return nidx + + +def _row_edge_vec(rows, pixel_size_row): + return pixel_size_row * (0.5 * rows - np.arange(rows + 1)) + + +def _col_edge_vec(cols, pixel_size_col): + return pixel_size_col * (np.arange(cols + 1) - 0.5 * cols) + + +# FIXME find a better place for this, and maybe include loop over pixels +@numba.njit(nogil=True, cache=True) +def _solid_angle_of_triangle(vtx_list): + norms = np.sqrt(np.sum(vtx_list * vtx_list, axis=1)) + norms_prod = norms[0] * norms[1] * norms[2] + scalar_triple_product = np.dot( + vtx_list[0], np.cross(vtx_list[2], vtx_list[1]) + ) + denominator = ( + norms_prod + + norms[0] * np.dot(vtx_list[1], vtx_list[2]) + + norms[1] * np.dot(vtx_list[2], vtx_list[0]) + + norms[2] * np.dot(vtx_list[0], vtx_list[1]) + ) + + return 2.0 * np.arctan2(scalar_triple_product, denominator) diff --git a/hexrd/hed/instrument/hedm_instrument.py b/hexrd/hed/instrument/hedm_instrument.py new file mode 100644 index 000000000..1d768b47c --- /dev/null +++ b/hexrd/hed/instrument/hedm_instrument.py @@ -0,0 +1,2747 @@ +# -*- coding: utf-8 -*- +# ============================================================================= +# Copyright (c) 2012, Lawrence Livermore National Security, LLC. +# Produced at the Lawrence Livermore National Laboratory. +# Written by Joel Bernier and others. +# LLNL-CODE-529294. +# All rights reserved. +# +# This file is part of HEXRD. For details on downloading the source, +# see the file COPYING. +# +# Please also see the file LICENSE. +# +# This program is free software; you can redistribute it and/or modify it under +# the terms of the GNU Lesser General Public License (as published by the Free +# Software Foundation) version 2.1 dated February 1999. +# +# This program is distributed in the hope that it will be useful, but +# WITHOUT ANY WARRANTY; without even the IMPLIED WARRANTY OF MERCHANTABILITY +# or FITNESS FOR A PARTICULAR PURPOSE. See the terms and conditions of the +# GNU General Public License for more details. +# +# You should have received a copy of the GNU Lesser General Public +# License along with this program (see file LICENSE); if not, write to +# the Free Software Foundation, Inc., 59 Temple Place, Suite 330, +# Boston, MA 02111-1307 USA or visit <http://www.gnu.org/licenses/>.
+# ============================================================================= +""" +Created on Fri Dec 9 13:05:27 2016 + +@author: bernier2 +""" +from contextlib import contextmanager +import copy +import logging +import os +from concurrent.futures import ProcessPoolExecutor, ThreadPoolExecutor +from functools import partial +from typing import Optional + +from tqdm import tqdm + +import yaml + +import h5py + +import numpy as np + +from io import IOBase + +from scipy import ndimage +from scipy.linalg import logm +from skimage.measure import regionprops + +from hexrd import constants +from hexrd.imageseries import ImageSeries +from hexrd.imageseries.process import ProcessedImageSeries +from hexrd.imageseries.omega import OmegaImageSeries +from hexrd.fitting.utils import fit_ring +from hexrd.gridutil import make_tolerance_grid +from hexrd import matrixutil as mutil +from hexrd.transforms.xfcapi import ( + angles_to_gvec, + gvec_to_xy, + make_sample_rmat, + make_rmat_of_expmap, + unit_vector, +) +from hexrd import xrdutil +from hexrd.material.crystallography import PlaneData +from hexrd import constants as ct +from hexrd.rotations import mapAngle +from hexrd import distortion as distortion_pkg +from hexrd.utils.concurrent import distribute_tasks +from hexrd.utils.hdf5 import unwrap_dict_to_h5, unwrap_h5_to_dict +from hexrd.utils.yaml import NumpyToNativeDumper +from hexrd.valunits import valWUnit +from hexrd.wppf import LeBail + +from .cylindrical_detector import CylindricalDetector +from .detector import ( + beam_energy_DFLT, + Detector, + max_workers_DFLT, +) +from .planar_detector import PlanarDetector + +from skimage.draw import polygon +from skimage.util import random_noise +from hexrd.wppf import wppfsupport + +try: + from fast_histogram import histogram1d + fast_histogram = True +except ImportError: + from numpy import histogram as histogram1d + fast_histogram = False + +logger = logging.getLogger() +logger.setLevel('INFO') + +# ============================================================================= +# PARAMETERS +# ============================================================================= + +instrument_name_DFLT = 'instrument' + +beam_vec_DFLT = ct.beam_vec +source_distance_DFLT = np.inf + +eta_vec_DFLT = ct.eta_vec + +panel_id_DFLT = 'generic' +nrows_DFLT = 2048 +ncols_DFLT = 2048 +pixel_size_DFLT = (0.2, 0.2) + +tilt_params_DFLT = np.zeros(3) +t_vec_d_DFLT = np.r_[0., 0., -1000.] + +chi_DFLT = 0. +t_vec_s_DFLT = np.zeros(3) + +multi_ims_key = ct.shared_ims_key +ims_classes = (ImageSeries, ProcessedImageSeries, OmegaImageSeries) + +buffer_key = 'buffer' +distortion_key = 'distortion' + +# ============================================================================= +# UTILITY METHODS +# ============================================================================= + + +def generate_chunks(nrows, ncols, base_nrows, base_ncols, + row_gap=0, col_gap=0): + """ + Generate chunking data for regularly tiled composite detectors. + + Parameters + ---------- + nrows : int + DESCRIPTION. + ncols : int + DESCRIPTION. + base_nrows : int + DESCRIPTION. + base_ncols : int + DESCRIPTION. + row_gap : int, optional + DESCRIPTION. The default is 0. + col_gap : int, optional + DESCRIPTION. The default is 0. + + Returns + ------- + rects : array_like + The (nrows*ncols, ) list of ROI specs (see Notes). + labels : array_like + The (nrows*ncols, ) list of ROI (i, j) matrix indexing labels 'i_j'. 
+ + Notes + ----- + ProcessedImageSeries needs a (2, 2) array for the 'rect' kwarg: + [[row_start, row_stop], + [col_start, col_stop]] + """ + row_starts = np.array([i*(base_nrows + row_gap) for i in range(nrows)]) + col_starts = np.array([i*(base_ncols + col_gap) for i in range(ncols)]) + rr = np.vstack([row_starts, row_starts + base_nrows]) + cc = np.vstack([col_starts, col_starts + base_ncols]) + rects = [] + labels = [] + for i in range(nrows): + for j in range(ncols): + this_rect = np.array( + [[rr[0, i], rr[1, i]], + [cc[0, j], cc[1, j]]] + ) + rects.append(this_rect) + labels.append('%d_%d' % (i, j)) + return rects, labels + + +def chunk_instrument(instr, rects, labels, use_roi=False): + """ + Generate a chunked config for regularly tiled composite detectors. + + Parameters + ---------- + instr : HEDMInstrument + The instrument to chunk. + rects : array_like + The list of ROI specs produced by generate_chunks. + labels : array_like + The list of '(i, j)' ROI labels produced by generate_chunks. + use_roi : bool, optional + If True, record each chunk's ROI offset in the new config. + The default is False. + + Returns + ------- + new_icfg_dict : dict + The chunked instrument configuration dictionary. + + """ + icfg_dict = instr.write_config() + new_icfg_dict = dict(beam=icfg_dict['beam'], + oscillation_stage=icfg_dict['oscillation_stage'], + detectors={}) + for panel_id, panel in instr.detectors.items(): + pcfg_dict = panel.config_dict(instr.chi, instr.tvec)['detector'] + + for pnum, pdata in enumerate(zip(rects, labels)): + rect, label = pdata + panel_name = f'{panel_id}_{label}' + + row_col_dim = np.diff(rect) # (2, 1) + shape = tuple(row_col_dim.flatten()) + center = (rect[:, 0].reshape(2, 1) + 0.5*row_col_dim) + + sp_tvec = np.concatenate( + [panel.pixelToCart(center.T).flatten(), np.zeros(1)] + ) + + tvec = np.dot(panel.rmat, sp_tvec) + panel.tvec + + # new config dict + tmp_cfg = copy.deepcopy(pcfg_dict) + + # fix sizes + tmp_cfg['pixels']['rows'] = shape[0] + tmp_cfg['pixels']['columns'] = shape[1] + if use_roi: + tmp_cfg['pixels']['roi'] = (rect[0][0], rect[1][0]) + + # update tvec + tmp_cfg['transform']['translation'] = tvec.tolist() + + new_icfg_dict['detectors'][panel_name] = copy.deepcopy(tmp_cfg) + + if panel.panel_buffer is not None: + if panel.panel_buffer.ndim == 2: # have a mask array! + submask = panel.panel_buffer[ + rect[0, 0]:rect[0, 1], rect[1, 0]:rect[1, 1] + ] + new_icfg_dict['detectors'][panel_name]['buffer'] = submask + return new_icfg_dict + + +def _parse_imgser_dict(imgser_dict, det_key, roi=None): + """ + Associates a dict of imageseries to the target panel(s). + + Parameters + ---------- + imgser_dict : dict + The input dict of imageseries. Either `det_key` is in imgser_dict, or + the shared key is. Entries can be an ImageSeries object or a 2- or 3-d + ndarray of images. + det_key : str + The target detector key. + roi : tuple or None, optional + The roi of the target images. Format is + ((row_start, row_stop), (col_start, col_stop)) + The stops are used in the normal sense of a slice. The default is None. + + Raises + ------ + RuntimeError + If neither `det_key` nor the shared key is in the input imgser_dict; + also raised if the shared key is specified but the roi is None. + + Returns + ------- + ims : hexrd.imageseries + The desired imageseries object. + + """ + # grab imageseries for this detector + try: + ims = imgser_dict[det_key] + except KeyError: + matched_det_keys = [det_key in k for k in imgser_dict] + if multi_ims_key in imgser_dict: + images_in = imgser_dict[multi_ims_key] + elif np.any(matched_det_keys): + if sum(matched_det_keys) != 1: + raise RuntimeError( + f"multiple entries found for '{det_key}'" + ) + # use boolean array to index the proper key + # !!!
these should be in the same order + img_keys = np.asarray(list(imgser_dict.keys())) + matched_det_key = img_keys[matched_det_keys][0] # !!! only one + images_in = imgser_dict[matched_det_key] + else: + raise RuntimeError( + f"neither '{det_key}' nor '{multi_ims_key}' found" + + ' in imageseries input' + ) + + # have images now + if roi is None: + raise RuntimeError( + "roi must be specified to use shared imageseries" + ) + + if isinstance(images_in, ims_classes): + # input is an imageseries of some kind + ims = ProcessedImageSeries(images_in, [('rectangle', roi), ]) + if isinstance(images_in, OmegaImageSeries): + # if it was an OmegaImageSeries, must re-cast + ims = OmegaImageSeries(ims) + elif isinstance(images_in, np.ndarray): + # 2- or 3-d array of images + ndim = images_in.ndim + if ndim == 2: + ims = images_in[roi[0][0]:roi[0][1], roi[1][0]:roi[1][1]] + elif ndim == 3: + nrows = roi[0][1] - roi[0][0] + ncols = roi[1][1] - roi[1][0] + n_images = len(images_in) + ims = np.empty((n_images, nrows, ncols), + dtype=images_in.dtype) + for i, image in enumerate(images_in): + ims[i, :, :] = \ + image[roi[0][0]:roi[0][1], roi[1][0]:roi[1][1]] + else: + raise RuntimeError( + f"image input dim must be 2 or 3; you gave {ndim}" + ) + return ims + + +def calc_beam_vec(azim, pola): + """ + Calculate unit beam propagation vector from + spherical coordinate spec in DEGREES. + + ...MAY CHANGE; THIS IS ALSO LOCATED IN XRDUTIL! + """ + tht = np.radians(azim) + phi = np.radians(pola) + bv = np.r_[ + np.sin(phi)*np.cos(tht), + np.cos(phi), + np.sin(phi)*np.sin(tht)] + return -bv + + +def calc_angles_from_beam_vec(bvec): + """ + Return the azimuth and polar angle from a beam + vector + """ + bvec = np.atleast_1d(bvec).flatten() + nvec = unit_vector(-bvec) + azim = float( + np.degrees(np.arctan2(nvec[2], nvec[0])) + ) + pola = float(np.degrees(np.arccos(nvec[1]))) + return azim, pola + + +def migrate_instrument_config(instrument_config): + """utility function to generate old instrument config dictionary""" + cfg_list = [] + for detector_id in instrument_config['detectors']: + cfg_list.append( + dict( + detector=instrument_config['detectors'][detector_id], + oscillation_stage=instrument_config['oscillation_stage'], + ) + ) + return cfg_list + + +def angle_in_range(angle, ranges, ccw=True, units='degrees'): + """ + Return the index of the first wedge the angle is found in + + WARNING: always clockwise; assumes wedges are not overlapping + """ + tau = 360. + if units.lower() == 'radians': + tau = 2*np.pi + w = np.nan + for i, wedge in enumerate(ranges): + amin = wedge[0] + amax = wedge[1] + check = amin + np.mod(angle - amin, tau) + if check < amax: + w = i + break + return w + + +# ???: move to gridutil? +def centers_of_edge_vec(edges): + assert np.asarray(edges).ndim == 1, "edges must be 1-d" + return np.average(np.vstack([edges[:-1], edges[1:]]), axis=0) + + +def max_tth(instr): + """ + Return the maximum Bragg angle (in radians) subtended by the instrument. + + Parameters + ---------- + instr : hexrd.instrument.HEDMInstrument instance + the instrument class to evaluate. + + Returns + ------- + tth_max : float + The maximum observable Bragg angle by the instrument in radians. + """ + tth_max = 0. + for det in instr.detectors.values(): + ptth, peta = det.pixel_angles() + tth_max = max(np.max(ptth), tth_max) + return tth_max + + +def pixel_resolution(instr): + """ + Return the minimum, median, and maximum angular + resolution of the instrument.
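As an aside, `calc_beam_vec` and `calc_angles_from_beam_vec` above are mutual inverses, and the nominal beam (azimuth 90 deg, polar 90 deg) maps to the default beam direction [0, 0, -1]; a minimal round-trip check (editor's sketch, assuming the two definitions above are in scope):

import numpy as np

azim, pola = 90.0, 90.0                  # nominal beam, pointing along -Z
bvec = calc_beam_vec(azim, pola)
assert np.allclose(bvec, [0., 0., -1.])
assert np.allclose(calc_angles_from_beam_vec(bvec), (azim, pola))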
+ + Parameters + ---------- + instr : HEDMInstrument instance + An instrument. + + Returns + ------- + tth_stats : float + min/median/max tth resolution in radians. + eta_stats : TYPE + min/median/max eta resolution in radians. + + """ + max_tth = np.inf + max_eta = np.inf + min_tth = -np.inf + min_eta = -np.inf + ang_ps_full = [] + for panel in instr.detectors.values(): + angps = panel.angularPixelSize( + np.stack( + panel.pixel_coords, + axis=0 + ).reshape(2, np.cumprod(panel.shape)[-1]).T + ) + ang_ps_full.append(angps) + max_tth = min(max_tth, np.min(angps[:, 0])) + max_eta = min(max_eta, np.min(angps[:, 1])) + min_tth = max(min_tth, np.max(angps[:, 0])) + min_eta = max(min_eta, np.max(angps[:, 1])) + med_tth, med_eta = np.median(np.vstack(ang_ps_full), axis=0).flatten() + return (min_tth, med_tth, max_tth), (min_eta, med_eta, max_eta) + + +def max_resolution(instr): + """ + Return the maximum angular resolution of the instrument. + + Parameters + ---------- + instr : HEDMInstrument instance + An instrument. + + Returns + ------- + max_tth : float + Maximum tth resolution in radians. + max_eta : TYPE + maximum eta resolution in radians. + + """ + max_tth = np.inf + max_eta = np.inf + for panel in instr.detectors.values(): + angps = panel.angularPixelSize( + np.stack( + panel.pixel_coords, + axis=0 + ).reshape(2, np.cumprod(panel.shape)[-1]).T + ) + max_tth = min(max_tth, np.min(angps[:, 0])) + max_eta = min(max_eta, np.min(angps[:, 1])) + return max_tth, max_eta + + +def _gaussian_dist(x, cen, fwhm): + sigm = fwhm/(2*np.sqrt(2*np.log(2))) + return np.exp(-0.5*(x - cen)**2/sigm**2) + + +def _sigma_to_fwhm(sigm): + return sigm*ct.sigma_to_fwhm + + +def _fwhm_to_sigma(fwhm): + return fwhm/ct.sigma_to_fwhm + + +# ============================================================================= +# CLASSES +# ============================================================================= + + +class HEDMInstrument(object): + """ + Abstraction of XRD instrument. + + * Distortion needs to be moved to a class with registry; tuple unworkable + * where should reference eta be defined? 
currently set to default config + """ + + def __init__(self, instrument_config=None, + image_series=None, eta_vector=None, + instrument_name=None, tilt_calibration_mapping=None, + max_workers=max_workers_DFLT, + physics_package=None, + active_beam_name: Optional[str] = None): + self._id = instrument_name_DFLT + + self._active_beam_name = active_beam_name + self._beam_dict = {} + + if eta_vector is None: + self._eta_vector = eta_vec_DFLT + else: + self._eta_vector = eta_vector + + self.max_workers = max_workers + + self.physics_package = physics_package + + if instrument_config is None: + # Default instrument + if instrument_name is not None: + self._id = instrument_name + self._num_panels = 1 + self._create_default_beam() + + # FIXME: must add cylindrical + self._detectors = { + panel_id_DFLT: PlanarDetector( + rows=nrows_DFLT, cols=ncols_DFLT, + pixel_size=pixel_size_DFLT, + tvec=t_vec_d_DFLT, + tilt=tilt_params_DFLT, + bvec=self.beam_vector, + xrs_dist=self.source_distance, + evec=self._eta_vector, + distortion=None, + roi=None, group=None, + max_workers=self.max_workers), + } + + self._tvec = t_vec_s_DFLT + self._chi = chi_DFLT + else: + if isinstance(instrument_config, h5py.File): + tmp = {} + unwrap_h5_to_dict(instrument_config, tmp) + instrument_config = tmp['instrument'] + elif not isinstance(instrument_config, dict): + raise RuntimeError( + "instrument_config must be either an HDF5 file object" + + " or a dictionary. You gave a %s" + % type(instrument_config) + ) + if instrument_name is None: + if 'id' in instrument_config: + self._id = instrument_config['id'] + else: + self._id = instrument_name + + self._num_panels = len(instrument_config['detectors']) + + if instrument_config.get('physics_package', None) is not None: + self.physics_package = instrument_config['physics_package'] + + xrs_config = instrument_config['beam'] + is_single_beam = ( + 'energy' in xrs_config and + 'vector' in xrs_config + ) + if is_single_beam: + # Assume single beam.
Load the same way as multibeam + self._create_default_beam() + xrs_config = {self.active_beam_name: xrs_config} + + # Multi beam load + for beam_name, beam in xrs_config.items(): + self._beam_dict[beam_name] = { + 'energy': beam['energy'], + 'vector': calc_beam_vec( + beam['vector']['azimuth'], + beam['vector']['polar_angle'], + ), + 'distance': beam.get('source_distance', np.inf), + } + + # Set the active beam name if not set already + if self._active_beam_name is None: + self._active_beam_name = next(iter(self._beam_dict)) + + # now build detector dict + detectors_config = instrument_config['detectors'] + det_dict = dict.fromkeys(detectors_config) + for det_id, det_info in detectors_config.items(): + det_group = det_info.get('group') # optional detector group + pixel_info = det_info['pixels'] + affine_info = det_info['transform'] + detector_type = det_info.get('detector_type', 'planar') + filter = det_info.get('filter', None) + coating = det_info.get('coating', None) + phosphor = det_info.get('phosphor', None) + try: + saturation_level = det_info['saturation_level'] + except KeyError: + saturation_level = 2**16 + shape = (pixel_info['rows'], pixel_info['columns']) + + panel_buffer = None + if buffer_key in det_info: + det_buffer = det_info[buffer_key] + if det_buffer is not None: + if isinstance(det_buffer, np.ndarray): + if det_buffer.ndim == 2: + if det_buffer.shape != shape: + msg = ( + f'Buffer shape for {det_id} ' + f'({det_buffer.shape}) does not match ' + f'detector shape ({shape})' + ) + raise BufferShapeMismatchError(msg) + else: + assert len(det_buffer) == 2 + panel_buffer = det_buffer + elif isinstance(det_buffer, list): + panel_buffer = np.asarray(det_buffer) + elif np.isscalar(det_buffer): + panel_buffer = det_buffer*np.ones(2) + else: + raise RuntimeError( + "panel buffer spec invalid for %s" % det_id + ) + + # optional roi + roi = pixel_info.get('roi') + + # handle distortion + distortion = None + if distortion_key in det_info: + distortion_cfg = det_info[distortion_key] + if distortion_cfg is not None: + try: + func_name = distortion_cfg['function_name'] + dparams = distortion_cfg['parameters'] + distortion = distortion_pkg.get_mapping( + func_name, dparams + ) + except KeyError: + raise RuntimeError( + "problem with distortion specification" + ) + if detector_type.lower() not in DETECTOR_TYPES: + msg = f'Unknown detector type: {detector_type}' + raise NotImplementedError(msg) + + DetectorClass = DETECTOR_TYPES[detector_type.lower()] + kwargs = dict( + name=det_id, + rows=pixel_info['rows'], + cols=pixel_info['columns'], + pixel_size=pixel_info['size'], + panel_buffer=panel_buffer, + saturation_level=saturation_level, + tvec=affine_info['translation'], + tilt=affine_info['tilt'], + bvec=self.beam_vector, + xrs_dist=self.source_distance, + evec=self._eta_vector, + distortion=distortion, + roi=roi, + group=det_group, + max_workers=self.max_workers, + detector_filter=filter, + detector_coating=coating, + phosphor=phosphor, + ) + + if DetectorClass is CylindricalDetector: + # Add cylindrical detector kwargs + kwargs['radius'] = det_info.get('radius', 49.51) + + det_dict[det_id] = DetectorClass(**kwargs) + + self._detectors = det_dict + + self._tvec = np.r_[ + instrument_config['oscillation_stage']['translation'] + ] + self._chi = instrument_config['oscillation_stage']['chi'] + + # grab angles from beam vec + # !!! these are in DEGREES! 
+ azim, pola = calc_angles_from_beam_vec(self.beam_vector) + + self.update_memoization_sizes() + + @property + def mean_detector_center(self) -> np.ndarray: + """Return the mean center for all detectors""" + centers = np.array([panel.tvec for panel in self.detectors.values()]) + return centers.sum(axis=0) / len(centers) + + def mean_group_center(self, group: str) -> np.ndarray: + """Return the mean center for detectors belonging to a group""" + centers = np.array([ + x.tvec for x in self.detectors_in_group(group).values() + ]) + return centers.sum(axis=0) / len(centers) + + @property + def detector_groups(self) -> list[str]: + groups = [] + for panel in self.detectors.values(): + group = panel.group + if group is not None and group not in groups: + groups.append(group) + + return groups + + def detectors_in_group(self, group: str) -> dict[str, Detector]: + return {k: v for k, v in self.detectors.items() if v.group == group} + + # properties for physical size of rectangular detector + @property + def id(self): + return self._id + + @property + def num_panels(self): + return self._num_panels + + @property + def detectors(self): + return self._detectors + + @property + def detector_parameters(self): + pdict = {} + for key, panel in self.detectors.items(): + pdict[key] = panel.config_dict( + self.chi, self.tvec, + beam_energy=self.beam_energy, + beam_vector=self.beam_vector, + style='hdf5' + ) + return pdict + + @property + def tvec(self): + return self._tvec + + @tvec.setter + def tvec(self, x): + x = np.array(x).flatten() + assert len(x) == 3, 'input must have length = 3' + self._tvec = x + + @property + def chi(self): + return self._chi + + @chi.setter + def chi(self, x): + self._chi = float(x) + + @property + def beam_energy(self) -> float: + return self.active_beam['energy'] + + @beam_energy.setter + def beam_energy(self, x: float): + self.active_beam['energy'] = float(x) + self.beam_dict_modified() + + @property + def beam_wavelength(self): + return ct.keVToAngstrom(self.beam_energy) + + @property + def has_multi_beam(self) -> bool: + return len(self.beam_dict) > 1 + + @property + def beam_dict(self) -> dict: + return self._beam_dict + + def _create_default_beam(self): + name = 'XRS1' + self._beam_dict[name] = { + 'energy': beam_energy_DFLT, + 'vector': beam_vec_DFLT.copy(), + 'distance': np.inf, + } + + if self._active_beam_name is None: + self._active_beam_name = name + + @property + def beam_names(self) -> list[str]: + return list(self.beam_dict) + + def xrs_beam_energy(self, beam_name: Optional[str]) -> float: + if beam_name is None: + beam_name = self.active_beam_name + + return self.beam_dict[beam_name]['energy'] + + @property + def active_beam_name(self) -> str: + return self._active_beam_name + + @active_beam_name.setter + def active_beam_name(self, name: str): + if name not in self.beam_dict: + raise RuntimeError( + f'"{name}" is not present in "{self.beam_names}"' + ) + + self._active_beam_name = name + + # Update anything beam related where we need to + self._update_panel_beams() + + def beam_dict_modified(self): + # A function to call to indicate that the beam dict was modified. + # Update anything beam related where we need to + self._update_panel_beams() + + @property + def active_beam(self) -> dict: + return self.beam_dict[self.active_beam_name]
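A sketch of how the multi-beam accessors above fit together; `instr` and the beam names here are hypothetical (a configured HEDMInstrument with two beams):

print(instr.beam_names)               # e.g. ['XRS1', 'XRS2']
instr.active_beam_name = 'XRS2'       # setter re-syncs bvec/xrs_dist on panels
instr.beam_energy = 61.332            # edits the active ('XRS2') beam entry
print(instr.xrs_beam_energy('XRS1'))  # other beam entries are untouched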
+ def _update_panel_beams(self): + # FIXME: maybe we shouldn't store these on the panels? + # Might be hard to fix, though... + for panel in self.detectors.values(): + panel.bvec = self.beam_vector + panel.xrs_dist = self.source_distance + + @property + def beam_vector(self) -> np.ndarray: + return self.active_beam['vector'] + + @beam_vector.setter + def beam_vector(self, x: np.ndarray): + x = np.array(x).flatten() + if len(x) == 3: + assert sum(x*x) > 1-ct.sqrt_epsf, \ + 'input must have length = 3 and have unit magnitude' + bvec = x + elif len(x) == 2: + bvec = calc_beam_vec(*x) + else: + raise RuntimeError("input must be a unit vector or angle pair") + + # Modify the beam vector for the active beam dict + self.active_beam['vector'] = bvec + self.beam_dict_modified() + + @property + def source_distance(self): + return self.active_beam['distance'] + + @source_distance.setter + def source_distance(self, x): + assert np.isscalar(x), \ + f"'source_distance' must be a scalar; you input '{x}'" + self.active_beam['distance'] = x + self.beam_dict_modified() + + @property + def eta_vector(self): + return self._eta_vector + + @eta_vector.setter + def eta_vector(self, x): + x = np.array(x).flatten() + assert len(x) == 3 and sum(x*x) > 1-ct.sqrt_epsf, \ + 'input must have length = 3 and have unit magnitude' + self._eta_vector = x + # ...maybe change dictionary item behavior for 3.x compatibility? + for detector_id in self.detectors: + panel = self.detectors[detector_id] + panel.evec = self._eta_vector + + # ========================================================================= + # METHODS + # ========================================================================= + + def write_config(self, file=None, style='yaml', calibration_dict={}): + """Write out the instrument config as YAML or HDF5.""" + # initialize output dictionary + assert style.lower() in ['yaml', 'hdf5'], \ + "style must be either 'yaml', or 'hdf5'; you gave '%s'" % style + + par_dict = {} + + par_dict['id'] = self.id + + # Multi beam writer + beam_dict = {} + for beam_name, beam in self.beam_dict.items(): + azim, polar = calc_angles_from_beam_vec(beam['vector']) + beam_dict[beam_name] = { + 'energy': beam['energy'], + 'vector': { + 'azimuth': azim, + 'polar_angle': polar, + }, + } + if beam['distance'] != np.inf: + beam_dict[beam_name]['source_distance'] = beam['distance'] + + if len(beam_dict) == 1: + # Just write it out as a single beam (classic way) + beam_dict = next(iter(beam_dict.values())) + + par_dict['beam'] = beam_dict + + if calibration_dict: + par_dict['calibration_crystal'] = calibration_dict + + ostage = dict( + chi=self.chi, + translation=self.tvec.tolist() + ) + par_dict['oscillation_stage'] = ostage
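For orientation, the dict being assembled here has roughly this shape once the loop below adds the detectors (editor's sketch; values are illustrative):

par_dict = {
    'id': 'instrument',
    'beam': {'energy': 65.351,
             'vector': {'azimuth': 90.0, 'polar_angle': 90.0}},
    'oscillation_stage': {'chi': 0.0, 'translation': [0.0, 0.0, 0.0]},
    'detectors': {},   # filled below: one config_dict()['detector'] per panel
}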
+ det_dict = dict.fromkeys(self.detectors) + for det_name, detector in self.detectors.items(): + # grab panel config + # !!! don't need beam or tvec + # !!! have vetted style + pdict = detector.config_dict(chi=self.chi, tvec=self.tvec, + beam_energy=self.beam_energy, + beam_vector=self.beam_vector, + style=style) + det_dict[det_name] = pdict['detector'] + par_dict['detectors'] = det_dict + + # handle output file if requested + if file is not None: + if style.lower() == 'yaml': + with open(file, 'w') as f: + yaml.dump(par_dict, stream=f, Dumper=NumpyToNativeDumper) + else: + def _write_group(file): + instr_grp = file.create_group('instrument') + unwrap_dict_to_h5(instr_grp, par_dict, asattr=False) + + # hdf5 + if isinstance(file, str): + with h5py.File(file, 'w') as f: + _write_group(f) + elif isinstance(file, h5py.File): + _write_group(file) + else: + raise TypeError("Unexpected file type.") + + return par_dict + + def extract_polar_maps(self, plane_data, imgser_dict, + active_hkls=None, threshold=None, + tth_tol=None, eta_tol=0.25): + """ + Extract eta-omega maps from an imageseries. + + Quick and dirty way to histogram angular patch data for making + pole figures suitable for fiber generation + + TODO: streamline projection code + TODO: normalization + !!!: images must be non-negative! + !!!: plane_data is NOT a copy! + """ + if tth_tol is not None: + plane_data.tThWidth = np.radians(tth_tol) + else: + tth_tol = np.degrees(plane_data.tThWidth) + + # make rings clipped to panel + # !!! eta_idx has the same length as plane_data.exclusions + # each entry holds the integer indices into the bins + # !!! eta_edges is the list of eta bin EDGES; same for all + # detectors, so calculate it once + # !!! grab first panel + panel = next(iter(self.detectors.values())) + pow_angs, pow_xys, tth_ranges, eta_idx, eta_edges = \ + panel.make_powder_rings( + plane_data, merge_hkls=False, + delta_eta=eta_tol, full_output=True + ) + + if active_hkls is not None: + assert hasattr(active_hkls, '__len__'), \ + "active_hkls must be an iterable with __len__" + + # need to re-cast for element-wise operations + active_hkls = np.array(active_hkls) + + # these are all active reflection unique hklIDs + active_hklIDs = plane_data.getHKLID( + plane_data.hkls, master=True + ) + + # find indices + idx = np.zeros_like(active_hkls, dtype=int) + for i, input_hklID in enumerate(active_hkls): + try: + idx[i] = np.where(active_hklIDs == input_hklID)[0] + except ValueError: + raise RuntimeError(f"hklID '{input_hklID}' is invalid") + tth_ranges = tth_ranges[idx] + + delta_eta = eta_edges[1] - eta_edges[0] + ncols_eta = len(eta_edges) - 1
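The per-detector loop that follows fills one map stack per panel; the resulting return contract looks like this (editor's sketch, with `instr`, `plane_data`, and `imgser_dict` assumed to exist):

maps, eta_edges = instr.extract_polar_maps(plane_data, imgser_dict,
                                           eta_tol=0.25)
for det_key, ring_maps in maps.items():
    # one (n_rings, n_omega_frames, len(eta_edges) - 1) array per panel
    print(det_key, ring_maps.shape)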
+ ring_maps_panel = dict.fromkeys(self.detectors) + for i_d, det_key in enumerate(self.detectors): + print("working on detector '%s'..." % det_key) + + # grab panel + panel = self.detectors[det_key] + # native_area = panel.pixel_area # pixel ref area + + # pixel angular coords for the detector panel + ptth, peta = panel.pixel_angles() + + # grab imageseries for this detector + ims = _parse_imgser_dict(imgser_dict, det_key, roi=panel.roi) + + # grab omegas from imageseries and squawk if missing + try: + omegas = ims.metadata['omega'] + except KeyError: + raise RuntimeError( + f"imageseries for '{det_key}' has no omega info" + ) + + # initialize maps and assign by row (omega/frame) + nrows_ome = len(omegas) + + # init map with NaNs + shape = (len(tth_ranges), nrows_ome, ncols_eta) + ring_maps = np.full(shape, np.nan) + + # Generate ring parameters once, and re-use them for each image + ring_params = [] + for tthr in tth_ranges: + kwargs = { + 'tthr': tthr, + 'ptth': ptth, + 'peta': peta, + 'eta_edges': eta_edges, + 'delta_eta': delta_eta, + } + ring_params.append(_generate_ring_params(**kwargs)) + + # Divide up the images among processes + tasks = distribute_tasks(len(ims), self.max_workers) + func = partial(_run_histograms, ims=ims, tth_ranges=tth_ranges, + ring_maps=ring_maps, ring_params=ring_params, + threshold=threshold) + + max_workers = self.max_workers + if max_workers == 1 or len(tasks) == 1: + # Just execute it serially. + for task in tasks: + func(task) + else: + with ThreadPoolExecutor(max_workers=max_workers) as executor: + # Evaluate the results via `list()`, so that if an + # exception is raised in a thread, it will be re-raised + # and visible to the user. + list(executor.map(func, tasks)) + + ring_maps_panel[det_key] = ring_maps + + return ring_maps_panel, eta_edges + + def extract_line_positions(self, plane_data, imgser_dict, + tth_tol=None, eta_tol=1., npdiv=2, + eta_centers=None, + collapse_eta=True, collapse_tth=False, + do_interpolation=True, do_fitting=False, + tth_distortion=None, fitting_kwargs=None): + """ + Perform annular interpolation on diffraction images. + + Provides data for extracting the line positions from powder diffraction + images, pole figure patches from imageseries, or Bragg peaks from + Laue diffraction images. + + Parameters + ---------- + plane_data : hexrd.crystallography.PlaneData object or array_like + Object determining the 2theta positions for the integration + sectors. If PlaneData, this will be all non-excluded reflections, + subject to merging within PlaneData.tThWidth. If array_like, + interpreted as a list of 2theta angles IN DEGREES. + imgser_dict : dict + Dictionary of powder diffraction images, one for each detector. + tth_tol : scalar, optional + The radial (i.e. 2theta) width of the integration sectors + IN DEGREES. This arg is required if plane_data is array_like. + The default is None. + eta_tol : scalar, optional + The azimuthal (i.e. eta) width of the integration sectors + IN DEGREES. The default is 1. + npdiv : int, optional + The number of oversampling pixel subdivisions (see notes). + The default is 2. + eta_centers : array_like, optional + The desired azimuthal sector centers. The default is None. If + None, then bins are distributed sequentially from (-180, 180). + collapse_eta : bool, optional + Flag for summing sectors in eta. The default is True. + collapse_tth : bool, optional + Flag for summing sectors in 2theta. The default is False. + do_interpolation : bool, optional + If True, perform bilinear interpolation. The default is True. + do_fitting : bool, optional + If True, then perform spectrum fitting, and append the results + to the returned data.
collapse_eta must also be True for this + to have any effect. The default is False. + tth_distortion : special class, optional + for special case of pinhole camera distortions. See + hexrd.xrdutil.phutil.SampleLayerDistortion (only type supported) + fitting_kwargs : dict, optional + kwargs passed to hexrd.fitting.utils.fit_ring if do_fitting is True + + Raises + ------ + RuntimeError + If the imageseries input cannot be matched to a detector. + + Returns + ------- + panel_data : dict + Dictionary over the detectors with the following structure: + [list over (merged) 2theta ranges] + [list over valid eta sectors] + [angle data <input dependent>, + bin intensities <input dependent>, + fitting results <input dependent>] + + Notes + ----- + TODO: May change the array_like input units to degrees. + TODO: rename function. + + """ + + if fitting_kwargs is None: + fitting_kwargs = {} + + # ===================================================================== + # LOOP OVER DETECTORS + # ===================================================================== + logger.info("Interpolating ring data") + pbar_dets = partial(tqdm, total=self.num_panels, desc="Detector", + position=self.num_panels) + + # Split up the workers among the detectors + max_workers_per_detector = max(1, self.max_workers // self.num_panels) + + kwargs = { + 'plane_data': plane_data, + 'tth_tol': tth_tol, + 'eta_tol': eta_tol, + 'eta_centers': eta_centers, + 'npdiv': npdiv, + 'collapse_tth': collapse_tth, + 'collapse_eta': collapse_eta, + 'do_interpolation': do_interpolation, + 'do_fitting': do_fitting, + 'fitting_kwargs': fitting_kwargs, + 'tth_distortion': tth_distortion, + 'max_workers': max_workers_per_detector, + } + func = partial(_extract_detector_line_positions, **kwargs) + + def make_instr_cfg(panel): + return panel.config_dict( + chi=self.chi, tvec=self.tvec, + beam_energy=self.beam_energy, + beam_vector=self.beam_vector, + style='hdf5' + ) + + images = [] + for detector_id, panel in self.detectors.items(): + images.append(_parse_imgser_dict(imgser_dict, detector_id, + roi=panel.roi)) + + panels = [self.detectors[k] for k in self.detectors] + instr_cfgs = [make_instr_cfg(x) for x in panels] + pbp_array = np.arange(self.num_panels) + iter_args = zip(panels, instr_cfgs, images, pbp_array) + with ProcessPoolExecutor(mp_context=constants.mp_context, + max_workers=self.num_panels) as executor: + results = list(pbar_dets(executor.map(func, iter_args))) + + panel_data = {} + for det, res in zip(self.detectors, results): + panel_data[det] = res + + return panel_data + + def simulate_powder_pattern(self, + mat_list, + params=None, + bkgmethod=None, + origin=None, + noise=None): + """ + Generate powder diffraction images from specified materials. + + Parameters + ---------- + mat_list : array_like (n, ) + List of Material classes. + params : dict, optional + Dictionary of LeBail parameters (see Notes). The default is None. + bkgmethod : dict, optional + Background function specification. The default is None. + origin : array_like (3,), optional + Vector describing the origin of the diffraction volume. + The default is None, which is equivalent to [0, 0, 0]. + noise : str, optional + Flag describing type of noise to be applied. The default is None. + + Returns + ------- + img_dict : dict + Dictionary of diffraction images over the detectors. + + Notes + ----- + TODO: add more controls for noise function. + TODO: modify hooks to LeBail parameters. + TODO: add optional volume fraction weights for phases in mat_list + """
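A minimal usage sketch of this method, assuming `instr` is an HEDMInstrument and `ceo2` is a hexrd Material instance (names illustrative):

img_dict = instr.simulate_powder_pattern([ceo2], noise='poisson')
for det_key, img in img_dict.items():
    print(det_key, img.shape, img.max())   # one panel-shaped image per key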
+ """ + >> @AUTHOR: Saransh Singh, Lawrence Livermore National Lab, + saransh1@llnl.gov + >> @DATE: 01/22/2021 SS 1.0 original + >> @DETAILS: adding hook to WPPF class. This changes the input list + significantly + """ + if origin is None: + origin = self.tvec + origin = np.asarray(origin).squeeze() + assert len(origin) == 3, \ + "origin must be a 3-element sequence" + + if bkgmethod is None: + bkgmethod = {'chebyshev': 3} + + ''' + if params is None, fill in some sane default values + only the first value is used. the rest of the values are + the upper, lower bounds and vary flag for refinement which + are not used but required for interfacing with WPPF + + zero_error : zero shift error + U, V, W : Cagliotti parameters + P, X, Y : Lorentzian parameters + eta1, eta2, eta3 : Mixing parameters + ''' + if params is None: + # params = {'zero_error': [0.0, -1., 1., True], + # 'U': [2e-1, -1., 1., True], + # 'V': [2e-2, -1., 1., True], + # 'W': [2e-2, -1., 1., True], + # 'X': [2e-1, -1., 1., True], + # 'Y': [2e-1, -1., 1., True] + # } + params = wppfsupport._generate_default_parameters_LeBail( + mat_list, + 1, + bkgmethod, + ) + ''' + use the material list to obtain the dictionary of initial intensities + we need to make sure that the intensities are properly scaled by the + Lorentz polarization factor. since the calculation is done in the + LeBail class, all that means is the initial intensity needs that factor + in there + ''' + img_dict = dict.fromkeys(self.detectors) + + # find min and max tth over all panels + tth_mi = np.inf + tth_ma = 0. + ptth_dict = dict.fromkeys(self.detectors) + for det_key, panel in self.detectors.items(): + ptth, peta = panel.pixel_angles(origin=origin) + tth_mi = min(tth_mi, ptth.min()) + tth_ma = max(tth_ma, ptth.max()) + ptth_dict[det_key] = ptth + + ''' + now make a list of two theta and dummy ones for the experimental + spectrum; this is never really used so any values should be okay. We + could also pass the integrated detector image if we would like to + simulate some realistic background. But that's for another day. + ''' + # convert angles to degrees because that's what the WPPF expects + tth_mi = np.degrees(tth_mi) + tth_ma = np.degrees(tth_ma) + + # get tth angular resolution for instrument + ang_res = max_resolution(self) + + # !!! calc nsteps by oversampling + nsteps = int(np.ceil(2*(tth_ma - tth_mi)/np.degrees(ang_res[0]))) + + # evaluation vector for LeBail + tth = np.linspace(tth_mi, tth_ma, nsteps) + + expt = np.vstack([tth, np.ones_like(tth)]).T + + wavelength = [ + valWUnit('lp', 'length', self.beam_wavelength, 'angstrom'), + 1.
+ ] + + ''' + now go through the material list and get the intensity dictionary + ''' + intensity = {} + for mat in mat_list: + + multiplicity = mat.planeData.getMultiplicity() + + tth = mat.planeData.getTTh() + + LP = (1 + np.cos(tth)**2) / \ + np.cos(0.5*tth)/np.sin(0.5*tth)**2 + + intensity[mat.name] = {} + intensity[mat.name]['synchrotron'] = \ + mat.planeData.structFact * LP * multiplicity + + kwargs = { + 'expt_spectrum': expt, + 'params': params, + 'phases': mat_list, + 'wavelength': { + 'synchrotron': wavelength + }, + 'bkgmethod': bkgmethod, + 'intensity_init': intensity, + 'peakshape': 'pvtch' + } + + self.WPPFclass = LeBail(**kwargs) + + self.simulated_spectrum = self.WPPFclass.spectrum_sim + self.background = self.WPPFclass.background + + ''' + now that we have the simulated intensities, its time to get the + two theta for the detector pixels and interpolate what the intensity + for each pixel should be + ''' + + img_dict = dict.fromkeys(self.detectors) + for det_key, panel in self.detectors.items(): + ptth = ptth_dict[det_key] + + img = np.interp(np.degrees(ptth), + self.simulated_spectrum.x, + self.simulated_spectrum.y + self.background.y) + + if noise is None: + img_dict[det_key] = img + + else: + # Rescale to be between 0 and 1 so random_noise() will work + prev_max = img.max() + img /= prev_max + + if noise.lower() == 'poisson': + im_noise = random_noise(img, + mode='poisson', + clip=True) + mi = im_noise.min() + ma = im_noise.max() + if ma > mi: + im_noise = (im_noise - mi)/(ma - mi) + + elif noise.lower() == 'gaussian': + im_noise = random_noise(img, mode='gaussian', clip=True) + + elif noise.lower() == 'salt': + im_noise = random_noise(img, mode='salt') + + elif noise.lower() == 'pepper': + im_noise = random_noise(img, mode='pepper') + + elif noise.lower() == 's&p': + im_noise = random_noise(img, mode='s&p') + + elif noise.lower() == 'speckle': + im_noise = random_noise(img, mode='speckle', clip=True) + + # Now scale back up + img_dict[det_key] = im_noise * prev_max + + return img_dict + + def simulate_laue_pattern(self, crystal_data, + minEnergy=5., maxEnergy=35., + rmat_s=None, grain_params=None): + """ + Simulate Laue diffraction over the instrument. + + Parameters + ---------- + crystal_data : TYPE + DESCRIPTION. + minEnergy : TYPE, optional + DESCRIPTION. The default is 5.. + maxEnergy : TYPE, optional + DESCRIPTION. The default is 35.. + rmat_s : TYPE, optional + DESCRIPTION. The default is None. + grain_params : TYPE, optional + DESCRIPTION. The default is None. + + Returns + ------- + results : TYPE + DESCRIPTION. + + xy_det, hkls_in, angles, dspacing, energy + + TODO: revisit output; dict, or concatenated list? + """ + results = dict.fromkeys(self.detectors) + for det_key, panel in self.detectors.items(): + results[det_key] = panel.simulate_laue_pattern( + crystal_data, + minEnergy=minEnergy, maxEnergy=maxEnergy, + rmat_s=rmat_s, tvec_s=self.tvec, + grain_params=grain_params, + beam_vec=self.beam_vector) + return results + + def simulate_rotation_series(self, plane_data, grain_param_list, + eta_ranges=[(-np.pi, np.pi), ], + ome_ranges=[(-np.pi, np.pi), ], + ome_period=(-np.pi, np.pi), + wavelength=None): + """ + Simulate a monochromatic rotation series over the instrument. + + Parameters + ---------- + plane_data : TYPE + DESCRIPTION. + grain_param_list : TYPE + DESCRIPTION. + eta_ranges : TYPE, optional + DESCRIPTION. The default is [(-np.pi, np.pi), ]. + ome_ranges : TYPE, optional + DESCRIPTION. The default is [(-np.pi, np.pi), ]. 
+ ome_period : tuple, optional + The omega period. The default is (-np.pi, np.pi). + wavelength : float, optional + The probe wavelength. The default is None. + + Returns + ------- + results : dict + The simulation results, keyed by detector name. + + TODO: revisit output; dict, or concatenated list? + """ + results = dict.fromkeys(self.detectors) + for det_key, panel in self.detectors.items(): + results[det_key] = panel.simulate_rotation_series( + plane_data, grain_param_list, + eta_ranges=eta_ranges, + ome_ranges=ome_ranges, + ome_period=ome_period, + chi=self.chi, tVec_s=self.tvec, + wavelength=wavelength) + return results + + def pull_spots(self, plane_data, grain_params, + imgser_dict, + tth_tol=0.25, eta_tol=1., ome_tol=1., + npdiv=2, threshold=10, + eta_ranges=[(-np.pi, np.pi), ], + ome_period=None, + dirname='results', filename=None, output_format='text', + return_spot_list=False, + quiet=True, check_only=False, + interp='nearest'): + """ + Extract reflection info from a rotation series. + + Input must be encoded as an OmegaImageseries object. + + Parameters + ---------- + plane_data : PlaneData + The PlaneData instance defining the reflections to pull. + grain_params : array_like + The (12, ) vector of grain parameters. + imgser_dict : dict + Dictionary of OmegaImageSeries, one per detector. + tth_tol : scalar, optional + Radial (2theta) tolerance in DEGREES. The default is 0.25. + eta_tol : scalar, optional + Azimuthal (eta) tolerance in DEGREES. The default is 1.. + ome_tol : scalar, optional + Rotation (omega) tolerance in DEGREES. The default is 1.. + npdiv : int, optional + Number of pixel subdivisions for sampling. The default is 2. + threshold : scalar, optional + Intensity threshold for spot detection. The default is 10. + eta_ranges : list, optional + Eta ranges to consider. The default is [(-np.pi, np.pi), ]. + ome_period : tuple, optional + The omega period. The default is None, in which case it is + derived from the imageseries. + dirname : str, optional + Output directory. The default is 'results'. + filename : str, optional + Output file name; if None, nothing is written. + The default is None. + output_format : str, optional + Either 'text' or 'hdf5'. The default is 'text'. + return_spot_list : bool, optional + If True, return the full patch data. The default is False. + quiet : bool, optional + If True, suppress warning messages. The default is True. + check_only : bool, optional + If True, only check for signal above threshold. + The default is False. + interp : str, optional + Interpolation scheme, 'nearest' or 'bilinear'. + The default is 'nearest'. + + Returns + ------- + compl : list + Completeness flags, one per simulated reflection. + output : dict + The extracted patch data, keyed by detector name. + + """ + # grain parameters + rMat_c = make_rmat_of_expmap(grain_params[:3]) + tVec_c = grain_params[3:6] + + # grab omega ranges from first imageseries + # + # WARNING: all imageseries AND all wedges within are assumed to have + # the same omega values; put in a check that they are all the same???
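As a brief aside before the code below: when `ome_period` is None it defaults to one full turn anchored at the first frame's start omega; in two lines (editor's sketch):

import numpy as np
ostart = -180.0                                    # e.g. first frame omega, deg
ome_period = np.radians(ostart + np.r_[0., 360.])  # -> (-pi, pi)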
+ oims0 = next(iter(imgser_dict.values())) + ome_ranges = [np.radians([i['ostart'], i['ostop']]) + for i in oims0.omegawedges.wedges] + if ome_period is None: + ims = next(iter(imgser_dict.values())) + ostart = ims.omega[0, 0] + ome_period = np.radians(ostart + np.r_[0., 360.]) + + # delta omega in DEGREES grabbed from first imageseries in the dict + delta_ome = oims0.omega[0, 1] - oims0.omega[0, 0] + + # make omega grid for frame expansion around reference frame + # in DEGREES + ndiv_ome, ome_del = make_tolerance_grid( + delta_ome, ome_tol, 1, adjust_window=True, + ) + + # generate structuring element for connected component labeling + if ndiv_ome == 1: + label_struct = ndimage.generate_binary_structure(2, 2) + else: + label_struct = ndimage.generate_binary_structure(3, 3) + + # simulate rotation series + sim_results = self.simulate_rotation_series( + plane_data, [grain_params, ], + eta_ranges=eta_ranges, + ome_ranges=ome_ranges, + ome_period=ome_period) + + # patch vertex generator (global for instrument) + tol_vec = 0.5*np.radians( + [-tth_tol, -eta_tol, + -tth_tol, eta_tol, + tth_tol, eta_tol, + tth_tol, -eta_tol]) + + # prepare output if requested + if filename is not None and output_format.lower() == 'hdf5': + this_filename = os.path.join(dirname, filename) + writer = GrainDataWriter_h5( + os.path.join(dirname, filename), + self.write_config(), grain_params) + + # ===================================================================== + # LOOP OVER PANELS + # ===================================================================== + iRefl = 0 + next_invalid_peak_id = -100 + compl = [] + output = dict.fromkeys(self.detectors) + for detector_id, panel in self.detectors.items(): + # initialize text-based output writer + if filename is not None and output_format.lower() == 'text': + output_dir = os.path.join( + dirname, detector_id + ) + os.makedirs(output_dir, exist_ok=True) + this_filename = os.path.join( + output_dir, filename + ) + writer = PatchDataWriter(this_filename) + + # grab panel + instr_cfg = panel.config_dict( + self.chi, self.tvec, + beam_energy=self.beam_energy, + beam_vector=self.beam_vector, + style='hdf5' + ) + native_area = panel.pixel_area # pixel ref area + + # pull out the OmegaImageSeries for this panel from input dict + ome_imgser = _parse_imgser_dict(imgser_dict, + detector_id, + roi=panel.roi) + + # extract simulation results + sim_results_p = sim_results[detector_id] + hkl_ids = sim_results_p[0][0] + hkls_p = sim_results_p[1][0] + ang_centers = sim_results_p[2][0] + xy_centers = sim_results_p[3][0] + ang_pixel_size = sim_results_p[4][0] + + # now verify that full patch falls on detector... + # ???: strictly necessary? + # + # patch vertex array from sim + nangs = len(ang_centers) + patch_vertices = ( + np.tile(ang_centers[:, :2], (1, 4)) + + np.tile(tol_vec, (nangs, 1)) + ).reshape(4*nangs, 2) + ome_dupl = np.tile( + ang_centers[:, 2], (4, 1) + ).T.reshape(len(patch_vertices), 1) + + # find vertices that all fall on the panel + det_xy, rmats_s, on_plane = xrdutil._project_on_detector_plane( + np.hstack([patch_vertices, ome_dupl]), + panel.rmat, rMat_c, self.chi, + panel.tvec, tVec_c, self.tvec, + panel.distortion) + _, on_panel = panel.clip_to_panel(det_xy, buffer_edges=True) + + # all vertices must be on... + patch_is_on = np.all(on_panel.reshape(nangs, 4), axis=1) + patch_xys = det_xy.reshape(nangs, 4, 2)[patch_is_on] + + # re-filter... 
+ hkl_ids = hkl_ids[patch_is_on] + hkls_p = hkls_p[patch_is_on, :] + ang_centers = ang_centers[patch_is_on, :] + xy_centers = xy_centers[patch_is_on, :] + ang_pixel_size = ang_pixel_size[patch_is_on, :] + + # TODO: add polygon testing right here! + # done + if check_only: + patch_output = [] + for i_pt, angs in enumerate(ang_centers): + # the evaluation omegas; + # expand about the central value using tol vector + ome_eval = np.degrees(angs[2]) + ome_del + + # ...vectorize the omega_to_frame function to avoid loop? + frame_indices = [ + ome_imgser.omega_to_frame(ome)[0] for ome in ome_eval + ] + if -1 in frame_indices: + if not quiet: + msg = """ + window for (%d %d %d) falls outside omega range + """ % tuple(hkls_p[i_pt, :]) + print(msg) + continue + else: + these_vertices = patch_xys[i_pt] + ijs = panel.cartToPixel(these_vertices) + ii, jj = polygon(ijs[:, 0], ijs[:, 1]) + contains_signal = False + for i_frame in frame_indices: + contains_signal = contains_signal or np.any( + ome_imgser[i_frame][ii, jj] > threshold + ) + compl.append(contains_signal) + patch_output.append((ii, jj, frame_indices)) + else: + # make the tth,eta patches for interpolation + patches = xrdutil.make_reflection_patches( + instr_cfg, + ang_centers[:, :2], ang_pixel_size, + omega=ang_centers[:, 2], + tth_tol=tth_tol, eta_tol=eta_tol, + rmat_c=rMat_c, tvec_c=tVec_c, + npdiv=npdiv, quiet=True) + + # GRAND LOOP over reflections for this panel + patch_output = [] + for i_pt, patch in enumerate(patches): + + # strip relevant objects out of current patch + vtx_angs, vtx_xy, conn, areas, xy_eval, ijs = patch + + prows, pcols = areas.shape + nrm_fac = areas/float(native_area) + nrm_fac = nrm_fac / np.min(nrm_fac) + + # grab hkl info + hkl = hkls_p[i_pt, :] + hkl_id = hkl_ids[i_pt] + + # edge arrays + tth_edges = vtx_angs[0][0, :] + delta_tth = tth_edges[1] - tth_edges[0] + eta_edges = vtx_angs[1][:, 0] + delta_eta = eta_edges[1] - eta_edges[0] + + # need to reshape eval pts for interpolation + xy_eval = np.vstack([xy_eval[0].flatten(), + xy_eval[1].flatten()]).T + + # the evaluation omegas; + # expand about the central value using tol vector + ome_eval = np.degrees(ang_centers[i_pt, 2]) + ome_del + + # ???: vectorize the omega_to_frame function to avoid loop? + frame_indices = [ + ome_imgser.omega_to_frame(ome)[0] for ome in ome_eval + ] + + if -1 in frame_indices: + if not quiet: + msg = """ + window for (%d%d%d) falls outside omega range + """ % tuple(hkl) + print(msg) + continue + else: + # initialize spot data parameters + # !!! 
maybe change these to nan to not fuck up writer + peak_id = next_invalid_peak_id + sum_int = np.nan + max_int = np.nan + meas_angs = np.nan*np.ones(3) + meas_xy = np.nan*np.ones(2) + + # quick check for intensity + contains_signal = False + patch_data_raw = [] + for i_frame in frame_indices: + tmp = ome_imgser[i_frame][ijs[0], ijs[1]] + contains_signal = contains_signal or np.any( + tmp > threshold + ) + patch_data_raw.append(tmp) + patch_data_raw = np.stack(patch_data_raw, axis=0) + compl.append(contains_signal) + + if contains_signal: + # initialize patch data array for intensities + if interp.lower() == 'bilinear': + patch_data = np.zeros( + (len(frame_indices), prows, pcols)) + for i, i_frame in enumerate(frame_indices): + patch_data[i] = \ + panel.interpolate_bilinear( + xy_eval, + ome_imgser[i_frame], + pad_with_nans=False + ).reshape(prows, pcols) # * nrm_fac + elif interp.lower() == 'nearest': + patch_data = patch_data_raw # * nrm_fac + else: + msg = "interpolation option " + \ + "'%s' not understood" + raise RuntimeError(msg % interp) + + # now have interpolated patch data... + labels, num_peaks = ndimage.label( + patch_data > threshold, structure=label_struct + ) + slabels = np.arange(1, num_peaks + 1) + + if num_peaks > 0: + peak_id = iRefl + props = regionprops(labels, patch_data) + coms = np.vstack( + [x.weighted_centroid for x in props]) + if num_peaks > 1: + center = np.r_[patch_data.shape]*0.5 + center_t = np.tile(center, (num_peaks, 1)) + com_diff = coms - center_t + closest_peak_idx = np.argmin( + np.sum(com_diff**2, axis=1) + ) + else: + closest_peak_idx = 0 + coms = coms[closest_peak_idx] + # meas_omes = \ + # ome_edges[0] + (0.5 + coms[0])*delta_ome + meas_omes = \ + ome_eval[0] + coms[0]*delta_ome + meas_angs = np.hstack( + [tth_edges[0] + (0.5 + coms[2])*delta_tth, + eta_edges[0] + (0.5 + coms[1])*delta_eta, + mapAngle( + np.radians(meas_omes), ome_period + ) + ] + ) + + # intensities + # - summed is 'integrated' over interpolated + # data + # - max is max of raw input data + sum_int = np.sum( + patch_data[ + labels == slabels[closest_peak_idx] + ] + ) + max_int = np.max( + patch_data_raw[ + labels == slabels[closest_peak_idx] + ] + ) + # ???: Should this only use labeled pixels? + # Those are segmented from interpolated data, + # not raw; likely ok in most cases. + + # need MEASURED xy coords + # FIXME: overload angles_to_cart? + gvec_c = angles_to_gvec( + meas_angs, + chi=self.chi, + rmat_c=rMat_c, + beam_vec=self.beam_vector) + rMat_s = make_sample_rmat( + self.chi, meas_angs[2] + ) + meas_xy = gvec_to_xy( + gvec_c, + panel.rmat, rMat_s, rMat_c, + panel.tvec, self.tvec, tVec_c, + beam_vec=self.beam_vector) + if panel.distortion is not None: + meas_xy = panel.distortion.apply_inverse( + np.atleast_2d(meas_xy) + ).flatten() + # FIXME: why is this suddenly necessary??? + meas_xy = meas_xy.squeeze() + else: + patch_data = patch_data_raw + + if peak_id < 0: + # The peak is invalid. + # Decrement the next invalid peak ID. 
+ next_invalid_peak_id -= 1 + + # write output + if filename is not None: + if output_format.lower() == 'text': + writer.dump_patch( + peak_id, hkl_id, hkl, sum_int, max_int, + ang_centers[i_pt], meas_angs, + xy_centers[i_pt], meas_xy) + elif output_format.lower() == 'hdf5': + xyc_arr = xy_eval.reshape( + prows, pcols, 2 + ).transpose(2, 0, 1) + writer.dump_patch( + detector_id, iRefl, peak_id, hkl_id, hkl, + tth_edges, eta_edges, np.radians(ome_eval), + xyc_arr, ijs, frame_indices, patch_data, + ang_centers[i_pt], xy_centers[i_pt], + meas_angs, meas_xy) + + if return_spot_list: + # Full output + xyc_arr = xy_eval.reshape( + prows, pcols, 2 + ).transpose(2, 0, 1) + _patch_output = [ + detector_id, iRefl, peak_id, hkl_id, hkl, + tth_edges, eta_edges, np.radians(ome_eval), + xyc_arr, ijs, frame_indices, patch_data, + ang_centers[i_pt], xy_centers[i_pt], + meas_angs, meas_xy + ] + else: + # Trimmed output + _patch_output = [ + peak_id, hkl_id, hkl, sum_int, max_int, + ang_centers[i_pt], meas_angs, meas_xy + ] + patch_output.append(_patch_output) + iRefl += 1 + output[detector_id] = patch_output + if filename is not None and output_format.lower() == 'text': + writer.close() + if filename is not None and output_format.lower() == 'hdf5': + writer.close() + return compl, output + + def update_memoization_sizes(self): + # Resize all known memoization functions to have a cache at least + # the size of the number of detectors. + all_panels = list(self.detectors.values()) + PlanarDetector.update_memoization_sizes(all_panels) + CylindricalDetector.update_memoization_sizes(all_panels) + + def calc_transmission(self, rMat_s: np.ndarray = None) -> dict[str, np.ndarray]: + """calculate the transmission from the + filter and polymer coating. the inverse of this + number is the intensity correction that needs + to be applied. 
actual computation is done inside + the detector class + """ + if rMat_s is None: + rMat_s = ct.identity_3x3 + + energy = self.beam_energy + transmissions = {} + for det_name, det in self.detectors.items(): + transmission_filter, transmission_phosphor = ( + det.calc_filter_coating_transmission(energy)) + + transmission = transmission_filter * transmission_phosphor + + if self.physics_package is not None: + transmission_physics_package = ( + det.calc_physics_package_transmission( + energy, rMat_s, self.physics_package)) + effective_pinhole_area = det.calc_effective_pinhole_area( + self.physics_package) + + transmission = ( + transmission * + transmission_physics_package * + effective_pinhole_area + ) + + transmissions[det_name] = transmission + return transmissions + +# ============================================================================= +# UTILITIES +# ============================================================================= + + +class PatchDataWriter(object): + """Class for dumping Bragg reflection data.""" + + def __init__(self, filename): + self._delim = ' ' + header_items = ( + '# ID', 'PID', + 'H', 'K', 'L', + 'sum(int)', 'max(int)', + 'pred tth', 'pred eta', 'pred ome', + 'meas tth', 'meas eta', 'meas ome', + 'pred X', 'pred Y', + 'meas X', 'meas Y' + ) + self._header = self._delim.join([ + self._delim.join(np.tile('{:<6}', 5)).format(*header_items[:5]), + self._delim.join(np.tile('{:<12}', 2)).format(*header_items[5:7]), + self._delim.join(np.tile('{:<23}', 10)).format(*header_items[7:17]) + ]) + if isinstance(filename, IOBase): + self.fid = filename + else: + self.fid = open(filename, 'w') + print(self._header, file=self.fid) + + def __del__(self): + self.close() + + def close(self): + self.fid.close() + + def dump_patch(self, peak_id, hkl_id, + hkl, spot_int, max_int, + pangs, mangs, pxy, mxy): + """ + !!! maybe need to check that last four inputs are arrays + """ + if mangs is None: + spot_int = np.nan + max_int = np.nan + mangs = np.nan*np.ones(3) + mxy = np.nan*np.ones(2) + + res = [int(peak_id), int(hkl_id)] \ + + np.array(hkl, dtype=int).tolist() \ + + [spot_int, max_int] \ + + pangs.tolist() \ + + mangs.tolist() \ + + pxy.tolist() \ + + mxy.tolist() + + output_str = self._delim.join( + [self._delim.join(np.tile('{:<6d}', 5)).format(*res[:5]), + self._delim.join(np.tile('{:<12e}', 2)).format(*res[5:7]), + self._delim.join(np.tile('{:<23.16e}', 10)).format(*res[7:])] + ) + print(output_str, file=self.fid) + return output_str + + +class GrainDataWriter(object): + """Class for dumping grain data.""" + + def __init__(self, filename=None, array=None): + """Writes to either file or np array + + Array must be initialized with number of rows to be written. 
+ """ + if filename is None and array is None: + raise RuntimeError( + 'GrainDataWriter must be specified with filename or array') + + self.array = None + self.fid = None + + # array supersedes filename + if array is not None: + assert array.shape[1] == 21, \ + f'grain data table must have 21 columns not {array.shape[21]}' + self.array = array + self._array_row = 0 + return + + self._delim = ' ' + header_items = ( + '# grain ID', 'completeness', 'chi^2', + 'exp_map_c[0]', 'exp_map_c[1]', 'exp_map_c[2]', + 't_vec_c[0]', 't_vec_c[1]', 't_vec_c[2]', + 'inv(V_s)[0,0]', 'inv(V_s)[1,1]', 'inv(V_s)[2,2]', + 'inv(V_s)[1,2]*sqrt(2)', + 'inv(V_s)[0,2]*sqrt(2)', + 'inv(V_s)[0,1]*sqrt(2)', + 'ln(V_s)[0,0]', 'ln(V_s)[1,1]', 'ln(V_s)[2,2]', + 'ln(V_s)[1,2]', 'ln(V_s)[0,2]', 'ln(V_s)[0,1]' + ) + self._header = self._delim.join( + [self._delim.join( + np.tile('{:<12}', 3) + ).format(*header_items[:3]), + self._delim.join( + np.tile('{:<23}', len(header_items) - 3) + ).format(*header_items[3:])] + ) + if isinstance(filename, IOBase): + self.fid = filename + else: + self.fid = open(filename, 'w') + print(self._header, file=self.fid) + + def __del__(self): + self.close() + + def close(self): + if self.fid is not None: + self.fid.close() + + def dump_grain(self, grain_id, completeness, chisq, + grain_params): + assert len(grain_params) == 12, \ + "len(grain_params) must be 12, not %d" % len(grain_params) + + # extract strain + emat = logm(np.linalg.inv(mutil.vecMVToSymm(grain_params[6:]))) + evec = mutil.symmToVecMV(emat, scale=False) + + res = [int(grain_id), completeness, chisq] \ + + grain_params.tolist() \ + + evec.tolist() + + if self.array is not None: + row = self._array_row + assert row < self.array.shape[0], \ + f'invalid row {row} in array table' + self.array[row] = res + self._array_row += 1 + return res + + # (else) format and write to file + output_str = self._delim.join( + [self._delim.join( + ['{:<12d}', '{:<12f}', '{:<12e}'] + ).format(*res[:3]), + self._delim.join( + np.tile('{:<23.16e}', len(res) - 3) + ).format(*res[3:])] + ) + print(output_str, file=self.fid) + return output_str + + +class GrainDataWriter_h5(object): + """Class for dumping grain results to an HDF5 archive. 
+ + TODO: add material spec + """ + + def __init__(self, filename, instr_cfg, grain_params, use_attr=False): + if isinstance(filename, h5py.File): + self.fid = filename + else: + self.fid = h5py.File(filename + ".hdf5", "w") + icfg = dict(instr_cfg) + + # add instrument groups and attributes + self.instr_grp = self.fid.create_group('instrument') + unwrap_dict_to_h5(self.instr_grp, icfg, asattr=use_attr) + + # add grain group + self.grain_grp = self.fid.create_group('grain') + rmat_c = make_rmat_of_expmap(grain_params[:3]) + tvec_c = np.array(grain_params[3:6]).flatten() + vinv_s = np.array(grain_params[6:]).flatten() + vmat_s = np.linalg.inv(mutil.vecMVToSymm(vinv_s)) + + if use_attr: # attribute version + self.grain_grp.attrs.create('rmat_c', rmat_c) + self.grain_grp.attrs.create('tvec_c', tvec_c) + self.grain_grp.attrs.create('inv(V)_s', vinv_s) + self.grain_grp.attrs.create('vmat_s', vmat_s) + else: # dataset version + self.grain_grp.create_dataset('rmat_c', data=rmat_c) + self.grain_grp.create_dataset('tvec_c', data=tvec_c) + self.grain_grp.create_dataset('inv(V)_s', data=vinv_s) + self.grain_grp.create_dataset('vmat_s', data=vmat_s) + + data_key = 'reflection_data' + self.data_grp = self.fid.create_group(data_key) + + for det_key in self.instr_grp['detectors'].keys(): + self.data_grp.create_group(det_key) + + # FIXME: throws exception when called after close method + # def __del__(self): + # self.close() + + def close(self): + self.fid.close() + + def dump_patch(self, panel_id, + i_refl, peak_id, hkl_id, hkl, + tth_edges, eta_edges, ome_centers, + xy_centers, ijs, frame_indices, + spot_data, pangs, pxy, mangs, mxy, gzip=1): + """ + to be called inside loop over patches + + default GZIP level for data arrays is 1 + """ + fi = np.array(frame_indices, dtype=int) + + panel_grp = self.data_grp[panel_id] + spot_grp = panel_grp.create_group("spot_%05d" % i_refl) + spot_grp.attrs.create('peak_id', int(peak_id)) + spot_grp.attrs.create('hkl_id', int(hkl_id)) + spot_grp.attrs.create('hkl', np.array(hkl, dtype=int)) + spot_grp.attrs.create('predicted_angles', pangs) + spot_grp.attrs.create('predicted_xy', pxy) + if mangs is None: + mangs = np.nan*np.ones(3) + spot_grp.attrs.create('measured_angles', mangs) + if mxy is None: + mxy = np.nan*np.ones(3) + spot_grp.attrs.create('measured_xy', mxy) + + # get centers crds from edge arrays + # FIXME: export full coordinate arrays, or just center vectors??? + # + # ome_crd, eta_crd, tth_crd = np.meshgrid( + # ome_centers, + # centers_of_edge_vec(eta_edges), + # centers_of_edge_vec(tth_edges), + # indexing='ij') + # + # ome_dim, eta_dim, tth_dim = spot_data.shape + + # !!! 
for now just exporting center vectors for spot_data
+        tth_crd = centers_of_edge_vec(tth_edges)
+        eta_crd = centers_of_edge_vec(eta_edges)
+
+        shuffle_data = True  # reduces size by 20%
+        spot_grp.create_dataset('tth_crd', data=tth_crd,
+                                compression="gzip", compression_opts=gzip,
+                                shuffle=shuffle_data)
+        spot_grp.create_dataset('eta_crd', data=eta_crd,
+                                compression="gzip", compression_opts=gzip,
+                                shuffle=shuffle_data)
+        spot_grp.create_dataset('ome_crd', data=ome_centers,
+                                compression="gzip", compression_opts=gzip,
+                                shuffle=shuffle_data)
+        spot_grp.create_dataset('xy_centers', data=xy_centers,
+                                compression="gzip", compression_opts=gzip,
+                                shuffle=shuffle_data)
+        spot_grp.create_dataset('ij_centers', data=ijs,
+                                compression="gzip", compression_opts=gzip,
+                                shuffle=shuffle_data)
+        spot_grp.create_dataset('frame_indices', data=fi,
+                                compression="gzip", compression_opts=gzip,
+                                shuffle=shuffle_data)
+        spot_grp.create_dataset('intensities', data=spot_data,
+                                compression="gzip", compression_opts=gzip,
+                                shuffle=shuffle_data)
+        return
+
+
+class GenerateEtaOmeMaps(object):
+    """
+    eta-ome map class derived from new image_series and YAML config
+
+    ...for now...
+
+    must provide:
+
+    self.dataStore
+    self.planeData
+    self.iHKLList
+    self.etaEdges  # IN RADIANS
+    self.omeEdges  # IN RADIANS
+    self.etas      # IN RADIANS
+    self.omegas    # IN RADIANS
+
+    """
+
+    def __init__(self, image_series_dict, instrument, plane_data,
+                 active_hkls=None, eta_step=0.25, threshold=None,
+                 ome_period=(0, 360)):
+        """
+        image_series must be OmegaImageSeries class
+        instrument_params must be a dict (loaded from yaml spec)
+        active_hkls must be a list (required for now)
+
+        FIXME: get rid of omega period; should get it from imageseries
+        """
+
+        self._planeData = plane_data
+
+        # ???: change name of iHKLList?
+        # ???: can we change the behavior of iHKLList?
+        if active_hkls is None:
+            self._iHKLList = plane_data.getHKLID(
+                plane_data.hkls, master=True
+            )
+            n_rings = len(self._iHKLList)
+        else:
+            assert hasattr(active_hkls, '__len__'), \
+                "active_hkls must be an iterable with __len__"
+            self._iHKLList = active_hkls
+            n_rings = len(active_hkls)
+
+        # grab a det key and corresponding imageseries (first will do)
+        # !!! assuming that the imageseries for all panels
+        #     have the same length and omegas
+        det_key, this_det_ims = next(iter(image_series_dict.items()))
+
+        # handle omegas
+        # !!! for multi wedge, enforcing monotonicity
+        # !!! wedges also cannot overlap or span more than 360
+        omegas_array = this_det_ims.metadata['omega']  # !!! DEGREES
+        delta_ome = omegas_array[0][-1] - omegas_array[0][0]
+        frame_mask = None
+        ome_period = omegas_array[0, 0] + np.r_[0., 360.]  # !!! be careful
+        if this_det_ims.omegawedges.nwedges > 1:
+            delta_omes = [(i['ostop'] - i['ostart'])/i['nsteps']
+                          for i in this_det_ims.omegawedges.wedges]
+            check_wedges = mutil.uniqueVectors(np.atleast_2d(delta_omes),
+                                               tol=1e-6).squeeze()
+            assert check_wedges.size == 1, \
+                "all wedges must have the same delta omega to 1e-6"
+            # grab representative delta ome
+            # !!! assuming positive delta consistent with OmegaImageSeries
+            delta_ome = delta_omes[0]
+
+            # grab full-range start/stop
+            # !!! be sure to map to the same period to enable arithmetic
+            # ??? safer to do this way rather than just pulling from
+            #     the omegas attribute?
+            owedges = this_det_ims.omegawedges.wedges
+            ostart = owedges[0]['ostart']  # !!!
DEGREES + ostop = float( + mapAngle(owedges[-1]['ostop'], ome_period, units='degrees') + ) + # compute total nsteps + # FIXME: need check for roundoff badness + nsteps = int((ostop - ostart)/delta_ome) + ome_edges_full = np.linspace( + ostart, ostop, num=nsteps+1, endpoint=True + ) + omegas_array = np.vstack( + [ome_edges_full[:-1], ome_edges_full[1:]] + ).T + ome_centers = np.average(omegas_array, axis=1) + + # use OmegaImageSeries method to determine which bins have data + # !!! this array has -1 outside a wedge + # !!! again assuming the valid frame order increases monotonically + frame_mask = np.array( + [this_det_ims.omega_to_frame(ome)[0] != -1 + for ome in ome_centers] + ) + + # ???: need to pass a threshold? + eta_mapping, etas = instrument.extract_polar_maps( + plane_data, image_series_dict, + active_hkls=active_hkls, threshold=threshold, + tth_tol=None, eta_tol=eta_step) + + # for convenience grab map shape from first + map_shape = next(iter(eta_mapping.values())).shape[1:] + + # pack all detectors with masking + # FIXME: add omega masking + data_store = [] + for i_ring in range(n_rings): + # first handle etas + full_map = np.zeros(map_shape, dtype=float) + nan_mask_full = np.zeros( + (len(eta_mapping), map_shape[0], map_shape[1]) + ) + i_p = 0 + for det_key, eta_map in eta_mapping.items(): + nan_mask = ~np.isnan(eta_map[i_ring]) + nan_mask_full[i_p] = nan_mask + full_map[nan_mask] += eta_map[i_ring][nan_mask] + i_p += 1 + re_nan_these = np.sum(nan_mask_full, axis=0) == 0 + full_map[re_nan_these] = np.nan + + # now omegas + if frame_mask is not None: + # !!! must expand row dimension to include + # skipped omegas + tmp = np.ones((len(frame_mask), map_shape[1]))*np.nan + tmp[frame_mask, :] = full_map + full_map = tmp + data_store.append(full_map) + self._dataStore = data_store + + # set required attributes + self._omegas = mapAngle( + np.radians(np.average(omegas_array, axis=1)), + np.radians(ome_period) + ) + self._omeEdges = mapAngle( + np.radians(np.r_[omegas_array[:, 0], omegas_array[-1, 1]]), + np.radians(ome_period) + ) + + # !!! must avoid the case where omeEdges[0] = omeEdges[-1] for the + # indexer to work properly + if abs(self._omeEdges[0] - self._omeEdges[-1]) <= ct.sqrt_epsf: + # !!! 
SIGNED delta ome
+            del_ome = np.radians(omegas_array[0, 1] - omegas_array[0, 0])
+            self._omeEdges[-1] = self._omeEdges[-2] + del_ome
+
+        # handle etas
+        # WARNING: unlike the omegas in imageseries metadata,
+        # these are in RADIANS and are the bin edges
+        self._etaEdges = etas
+        self._etas = self._etaEdges[:-1] + 0.5*np.radians(eta_step)
+
+    @property
+    def dataStore(self):
+        return self._dataStore
+
+    @property
+    def planeData(self):
+        return self._planeData
+
+    @property
+    def iHKLList(self):
+        return np.atleast_1d(self._iHKLList).flatten()
+
+    @property
+    def etaEdges(self):
+        return self._etaEdges
+
+    @property
+    def omeEdges(self):
+        return self._omeEdges
+
+    @property
+    def etas(self):
+        return self._etas
+
+    @property
+    def omegas(self):
+        return self._omegas
+
+    def save(self, filename):
+        xrdutil.EtaOmeMaps.save_eta_ome_maps(self, filename)
+
+
+def _generate_ring_params(tthr, ptth, peta, eta_edges, delta_eta):
+    # mark pixels in the spec'd tth range
+    pixels_in_tthr = np.logical_and(
+        ptth >= tthr[0], ptth <= tthr[1]
+    )
+
+    # catch case where ring isn't on detector
+    if not np.any(pixels_in_tthr):
+        return None
+
+    pixel_ids = np.where(pixels_in_tthr)
+
+    # grab relevant eta coords using histogram
+    pixel_etas = peta[pixel_ids]
+    reta_hist = histogram(pixel_etas, eta_edges)
+    bins_on_detector = np.where(reta_hist)[0]
+
+    return pixel_etas, eta_edges, pixel_ids, bins_on_detector
+
+
+def run_fast_histogram(x, bins, weights=None):
+    return histogram1d(x, len(bins) - 1, (bins[0], bins[-1]),
+                       weights=weights)
+
+
+def run_numpy_histogram(x, bins, weights=None):
+    return np.histogram(x, bins=bins, weights=weights)[0]
+
+
+histogram = run_fast_histogram if fast_histogram else run_numpy_histogram
+
+
+def _run_histograms(rows, ims, tth_ranges, ring_maps, ring_params, threshold):
+    for i_row in range(*rows):
+        image = ims[i_row]
+
+        # handle threshold if specified
+        if threshold is not None:
+            # !!! NaNs get preserved
+            image = np.array(image)
+            image[image < threshold] = 0.
+
+        for i_r, tthr in enumerate(tth_ranges):
+            this_map = ring_maps[i_r]
+            params = ring_params[i_r]
+            if not params:
+                # We are supposed to skip this ring...
+                continue
+
+            # Unpack the params
+            pixel_etas, eta_edges, pixel_ids, bins_on_detector = params
+            result = histogram(pixel_etas, eta_edges, weights=image[pixel_ids])
+
+            # Note that this preserves nan values for bins not on the detector.
+            this_map[i_row, bins_on_detector] = result[bins_on_detector]
+
+
+def _extract_detector_line_positions(iter_args, plane_data, tth_tol,
+                                     eta_tol, eta_centers, npdiv,
+                                     collapse_tth, collapse_eta,
+                                     do_interpolation, do_fitting,
+                                     fitting_kwargs, tth_distortion,
+                                     max_workers):
+    panel, instr_cfg, images, pbp = iter_args
+
+    if images.ndim == 2:
+        images = np.tile(images, (1, 1, 1))
+    elif images.ndim != 3:
+        raise RuntimeError("images must be 2- or 3-d")
+
+    # make rings
+    # !!! adding tth_distortion pass-through; comes in as dict over panels
+    tth_distr_cls = None
+    if tth_distortion is not None:
+        tth_distr_cls = tth_distortion[panel.name]
+
+    pow_angs, pow_xys, tth_ranges = panel.make_powder_rings(
+        plane_data, merge_hkls=True,
+        delta_tth=tth_tol, delta_eta=eta_tol,
+        eta_list=eta_centers, tth_distortion=tth_distr_cls)
+
+    tth_tols = np.degrees(np.hstack([i[1] - i[0] for i in tth_ranges]))
+
+    # !!!
this is only needed if doing fitting + if isinstance(plane_data, PlaneData): + tth_idx, tth_ranges = plane_data.getMergedRanges(cullDupl=True) + tth_ref = plane_data.getTTh() + tth0 = [np.degrees(tth_ref[i]) for i in tth_idx] + else: + tth0 = plane_data + + # ================================================================= + # LOOP OVER RING SETS + # ================================================================= + pbar_rings = partial(tqdm, total=len(pow_angs), desc="Ringset", + position=pbp) + + kwargs = { + 'instr_cfg': instr_cfg, + 'panel': panel, + 'eta_tol': eta_tol, + 'npdiv': npdiv, + 'collapse_tth': collapse_tth, + 'collapse_eta': collapse_eta, + 'images': images, + 'do_interpolation': do_interpolation, + 'do_fitting': do_fitting, + 'fitting_kwargs': fitting_kwargs, + 'tth_distortion': tth_distr_cls, + } + func = partial(_extract_ring_line_positions, **kwargs) + iter_arg = zip(pow_angs, pow_xys, tth_tols, tth0) + with ProcessPoolExecutor(mp_context=constants.mp_context, + max_workers=max_workers) as executor: + return list(pbar_rings(executor.map(func, iter_arg))) + + +def _extract_ring_line_positions(iter_args, instr_cfg, panel, eta_tol, npdiv, + collapse_tth, collapse_eta, images, + do_interpolation, do_fitting, fitting_kwargs, + tth_distortion): + """ + Extracts data for a single Debye-Scherrer ring . + + Parameters + ---------- + iter_args : tuple + (angs [radians], + xys [mm], + tth_tol [deg], + this_tth0 [deg]) + instr_cfg : TYPE + DESCRIPTION. + panel : TYPE + DESCRIPTION. + eta_tol : TYPE + DESCRIPTION. + npdiv : TYPE + DESCRIPTION. + collapse_tth : TYPE + DESCRIPTION. + collapse_eta : TYPE + DESCRIPTION. + images : TYPE + DESCRIPTION. + do_interpolation : TYPE + DESCRIPTION. + do_fitting : TYPE + DESCRIPTION. + fitting_kwargs : TYPE + DESCRIPTION. + tth_distortion : TYPE + DESCRIPTION. + + Yields + ------ + patch_data : TYPE + DESCRIPTION. + + """ + # points are already checked to fall on detector + angs, xys, tth_tol, this_tth0 = iter_args + + # SS 01/31/25 noticed some nans in xys even after clipping + # going to do another round of masking to get rid of those + nan_mask = ~np.logical_or(np.isnan(xys), np.isnan(angs)) + nan_mask = np.logical_or.reduce(nan_mask, 1) + if angs.ndim > 1 and xys.ndim > 1: + angs = angs[nan_mask,:] + xys = xys[nan_mask, :] + + n_images = len(images) + native_area = panel.pixel_area + + # make the tth,eta patches for interpolation + patches = xrdutil.make_reflection_patches( + instr_cfg, angs, panel.angularPixelSize(xys), + tth_tol=tth_tol, eta_tol=eta_tol, npdiv=npdiv, quiet=True) + + # loop over patches + # FIXME: fix initialization + if collapse_tth: + patch_data = np.zeros((len(angs), n_images)) + else: + patch_data = [] + for i_p, patch in enumerate(patches): + # strip relevant objects out of current patch + vtx_angs, vtx_xys, conn, areas, xys_eval, ijs = patch + + # need to reshape eval pts for interpolation + xy_eval = np.vstack([ + xys_eval[0].flatten(), + xys_eval[1].flatten()]).T + + _, on_panel = panel.clip_to_panel(xy_eval) + + if np.any(~on_panel): + continue + + if collapse_tth: + ang_data = (vtx_angs[0][0, [0, -1]], + vtx_angs[1][[0, -1], 0]) + elif collapse_eta: + # !!! 
yield the tth bin centers + tth_centers = np.average( + np.vstack( + [vtx_angs[0][0, :-1], vtx_angs[0][0, 1:]] + ), + axis=0 + ) + ang_data = (tth_centers, + angs[i_p][-1]) + if do_fitting: + fit_data = [] + else: + ang_data = vtx_angs + + prows, pcols = areas.shape + area_fac = areas/float(native_area) + + # interpolate + if not collapse_tth: + ims_data = [] + for j_p in np.arange(len(images)): + # catch interpolation type + image = images[j_p] + if do_interpolation: + p_img = panel.interpolate_bilinear( + xy_eval, + image, + ).reshape(prows, pcols)*area_fac + else: + p_img = image[ijs[0], ijs[1]]*area_fac + + # catch flat spectrum data, which will cause + # fitting to fail. + # ???: best here, or make fitting handle it? + mxval = np.max(p_img) + mnval = np.min(p_img) + if mxval == 0 or (1. - mnval/mxval) < 0.01: + continue + + # catch collapsing options + if collapse_tth: + patch_data[i_p, j_p] = np.average(p_img) + # ims_data.append(np.sum(p_img)) + else: + if collapse_eta: + lineout = np.average(p_img, axis=0) + ims_data.append(lineout) + if do_fitting: + if tth_distortion is not None: + # must correct tth0 + tmp = tth_distortion.apply( + panel.angles_to_cart( + np.vstack( + [np.radians(this_tth0), + np.tile(ang_data[-1], len(this_tth0))] + ).T + ), + return_nominal=True) + pk_centers = np.degrees(tmp[:, 0]) + else: + pk_centers = this_tth0 + kwargs = { + 'tth_centers': np.degrees(tth_centers), + 'lineout': lineout, + 'tth_pred': pk_centers, + **fitting_kwargs, + } + result = fit_ring(**kwargs) + fit_data.append(result) + else: + ims_data.append(p_img) + if not collapse_tth: + output = [ang_data, ims_data] + if do_fitting: + output.append(fit_data) + patch_data.append(output) + + return patch_data + + +DETECTOR_TYPES = { + 'planar': PlanarDetector, + 'cylindrical': CylindricalDetector, +} + + +class BufferShapeMismatchError(RuntimeError): + # This is raised when the buffer shape does not match the detector shape + pass + + +@contextmanager +def switch_xray_source(instr: HEDMInstrument, xray_source: Optional[str]): + if xray_source is None: + # If the x-ray source is None, leave it as the current active one + yield + return + + prev_beam_name = instr.active_beam_name + instr.active_beam_name = xray_source + try: + yield + finally: + instr.active_beam_name = prev_beam_name diff --git a/hexrd/xrdutil/phutil.py b/hexrd/hed/xrdutil/phutil.py similarity index 100% rename from hexrd/xrdutil/phutil.py rename to hexrd/hed/xrdutil/phutil.py diff --git a/hexrd/xrdutil/utils.py b/hexrd/hed/xrdutil/utils.py similarity index 100% rename from hexrd/xrdutil/utils.py rename to hexrd/hed/xrdutil/utils.py diff --git a/hexrd/cli/__init__.py b/hexrd/hedm/cli/__init__.py similarity index 100% rename from hexrd/cli/__init__.py rename to hexrd/hedm/cli/__init__.py diff --git a/hexrd/cli/documentation.py b/hexrd/hedm/cli/documentation.py similarity index 100% rename from hexrd/cli/documentation.py rename to hexrd/hedm/cli/documentation.py diff --git a/hexrd/cli/find_orientations.py b/hexrd/hedm/cli/find_orientations.py similarity index 100% rename from hexrd/cli/find_orientations.py rename to hexrd/hedm/cli/find_orientations.py diff --git a/hexrd/cli/fit_grains.py b/hexrd/hedm/cli/fit_grains.py similarity index 100% rename from hexrd/cli/fit_grains.py rename to hexrd/hedm/cli/fit_grains.py diff --git a/hexrd/cli/help.py b/hexrd/hedm/cli/help.py similarity index 100% rename from hexrd/cli/help.py rename to hexrd/hedm/cli/help.py diff --git a/hexrd/cli/main.py b/hexrd/hedm/cli/main.py similarity index 100% 
rename from hexrd/cli/main.py
rename to hexrd/hedm/cli/main.py
diff --git a/hexrd/cli/pickle23.py b/hexrd/hedm/cli/pickle23.py
similarity index 100%
rename from hexrd/cli/pickle23.py
rename to hexrd/hedm/cli/pickle23.py
diff --git a/hexrd/cli/preprocess.py b/hexrd/hedm/cli/preprocess.py
similarity index 100%
rename from hexrd/cli/preprocess.py
rename to hexrd/hedm/cli/preprocess.py
diff --git a/hexrd/cli/test.py b/hexrd/hedm/cli/test.py
similarity index 100%
rename from hexrd/cli/test.py
rename to hexrd/hedm/cli/test.py
diff --git a/hexrd/hedm/config/__init__.py b/hexrd/hedm/config/__init__.py
new file mode 100644
index 000000000..075f33989
--- /dev/null
+++ b/hexrd/hedm/config/__init__.py
@@ -0,0 +1,50 @@
+import os
+
+import yaml
+
+from . import root
+from . import utils
+
+"""
+Note that this module shadows the open() builtin with its own open()
+function, which still needs the builtin internally. So we save a
+reference to the builtin as open_file() before redefining open().
+"""
+open_file = open
+
+
+def open(file_name=None):
+    """
+    Reads configuration settings from a yaml file.
+
+    Returns a list of configuration objects, one for each document section in
+    the file.
+    """
+    if file_name is None:
+        return [root.RootConfig({})]
+
+    if not os.path.isfile(file_name):
+        raise ValueError(f'Config file not found: "{file_name}"')
+
+    with open_file(file_name) as f:
+        res = []
+        for cfg in yaml.load_all(f, Loader=yaml.SafeLoader):
+            try:
+                # take the previous config section and update with values
+                # from the current one
+                res.append(utils.merge_dicts(res[-1], cfg))
+            except IndexError:
+                # this is the first config section
+                res.append(cfg)
+
+    return [root.RootConfig(i) for i in res]
+
+
+def save(config_list, file_name):
+    res = [cfg._cfg for cfg in config_list]
+
+    with open_file(file_name, 'w') as f:
+        if len(res) > 1:
+            yaml.safe_dump_all(res, f)
+        else:
+            yaml.safe_dump(res[0], f)
diff --git a/hexrd/hedm/config/dumper.py b/hexrd/hedm/config/dumper.py
new file mode 100644
index 000000000..92d3596da
--- /dev/null
+++ b/hexrd/hedm/config/dumper.py
@@ -0,0 +1,62 @@
+import yaml
+import numpy as np
+from pathlib import Path
+
+
+def _dict_path_by_id(d, value, path=()):
+    if id(d) == value:
+        return path
+    elif isinstance(d, dict):
+        for k, v in d.items():
+            p = _dict_path_by_id(v, value, path + (k, ))
+            if p is not None:
+                return p
+    elif isinstance(d, list):
+        for i, v in enumerate(d):
+            p = _dict_path_by_id(v, value, path + (str(i),))
+            if p is not None:
+                return p
+
+    return None
+
+
+class NumPyIncludeDumper(yaml.Dumper):
+    """
+    A yaml.Dumper implementation that will dump numpy.ndarrays. The arrays
+    are saved using numpy.save(...) at paths generated from each value's
+    key path in the YAML document, relative to the location of the YAML
+    document. For example
+
+    "foo":
+      "bar": ndarray
+
+    The ndarray would be saved in foo/bar.npy.
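+
+    A minimal write sketch (hypothetical file name; `arr` is any ndarray;
+    requires a real on-disk stream, since the dumper reads stream.name):
+
+        with open('cfg.yml', 'w') as f:
+            yaml.dump({'foo': {'bar': arr}}, f, Dumper=NumPyIncludeDumper)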
+ + """ + def __init__(self, stream, **kwargs): + super().__init__(stream, **kwargs) + + self._basedir = Path(stream.name).parent + self._dct = None + + def ndarray_representer(self, data): + path = _dict_path_by_id(self._dct, id(data)) + path = Path(*path) + if path is None: + raise ValueError("Unable to determine array path.") + + array_path = self._basedir / path.with_suffix('.npy') + array_path.parent.mkdir(parents=True, exist_ok=True) + + np.save(array_path, data) + relative_array_path = array_path.relative_to(self._basedir) + + return self.represent_scalar('!include', str(relative_array_path)) + + # We need intercept the dict so we can lookup the paths to ndarray's + def represent(self, data): + self._dct = data + return super().represent(data) + + +NumPyIncludeDumper.add_representer(np.ndarray, + NumPyIncludeDumper.ndarray_representer) diff --git a/hexrd/config/findorientations.py b/hexrd/hedm/config/findorientations.py similarity index 100% rename from hexrd/config/findorientations.py rename to hexrd/hedm/config/findorientations.py diff --git a/hexrd/config/fitgrains.py b/hexrd/hedm/config/fitgrains.py similarity index 100% rename from hexrd/config/fitgrains.py rename to hexrd/hedm/config/fitgrains.py diff --git a/hexrd/hedm/config/instrument.py b/hexrd/hedm/config/instrument.py new file mode 100644 index 000000000..8f0a87daa --- /dev/null +++ b/hexrd/hedm/config/instrument.py @@ -0,0 +1,63 @@ +import h5py +import yaml + +from .config import Config +from .loader import NumPyIncludeLoader + +from hexrd import instrument + + +class Instrument(Config): + """Handle HEDM instrument config.""" + + def __init__(self, cfg, instr_file=None): + super().__init__(cfg) + self._configuration = instr_file + self._max_workers = self._cfg.multiprocessing + + # Note: instrument is instantiated with a yaml dictionary; use self + # to instantiate classes based on this one + @property + def configuration(self): + """Return the YAML config filename.""" + return self._configuration + + @property + def hedm(self): + """Return the HEDMInstrument class.""" + if not hasattr(self, '_hedm'): + if self.configuration is None: + raise RuntimeError("No instrument file was given") + + try: + icfg = h5py.File(self.configuration, 'r') + except(OSError): + with open(self.configuration, 'r') as f: + icfg = yaml.load(f, Loader=NumPyIncludeLoader) + + kwargs = { + 'instrument_config': icfg, + 'max_workers': self._max_workers, + } + self._hedm = instrument.HEDMInstrument(**kwargs) + return self._hedm + + @hedm.setter + def hedm(self, icfg_fname): + """Set the HEDMInstrument class.""" + try: + icfg = h5py.File(icfg_fname, 'r') + except(OSError): + with open(icfg_fname, 'r') as f: + icfg = yaml.load(f, Loader=NumPyIncludeLoader) + + kwargs = { + 'instrument_config': icfg, + 'max_workers': self._max_workers, + } + self._hedm = instrument.HEDMInstrument(**kwargs) + + @property + def detector_dict(self): + """Return dictionary of detectors.""" + return self.hedm.detectors diff --git a/hexrd/hedm/config/loader.py b/hexrd/hedm/config/loader.py new file mode 100644 index 000000000..4d378d859 --- /dev/null +++ b/hexrd/hedm/config/loader.py @@ -0,0 +1,25 @@ +import yaml +from pathlib import Path +import numpy as np + + +class NumPyIncludeLoader(yaml.SafeLoader): + """ + A yaml.Loader implemenation that allows !include . This + allows the loading of npy files into the YAML document. 
+ """ + + def __init__(self, stream): + self._basedir = Path(stream.name).parent + + super(NumPyIncludeLoader, self).__init__(stream) + + def include(self, node): + file_path = self._basedir / self.construct_scalar(node) + + a = np.load(file_path) + + return a + + +NumPyIncludeLoader.add_constructor('!include', NumPyIncludeLoader.include) diff --git a/hexrd/hedm/config/root.py b/hexrd/hedm/config/root.py new file mode 100644 index 000000000..0fca50ed7 --- /dev/null +++ b/hexrd/hedm/config/root.py @@ -0,0 +1,203 @@ +import os +from pathlib import Path +import logging +import multiprocessing as mp + +from hexrd.constants import shared_ims_key +from hexrd import imageseries + +from .config import Config +from .instrument import Instrument +from .findorientations import FindOrientationsConfig +from .fitgrains import FitGrainsConfig +from .material import MaterialConfig + +logger = logging.getLogger('hexrd.config') + + +class RootConfig(Config): + + @property + def working_dir(self): + """Working directory, either specified in file or current directory + + If the directory is not specified in the config file, then it will + default to the current working directory. If it is specified, the + directory must exist, or it will throw an IOError. + """ + wdir = Path(self.get('working_dir', default=Path.cwd())) + if not wdir.exists(): + raise IOError(f'"working_dir": {str(wdir)} does not exist') + return wdir + + @working_dir.setter + def working_dir(self, val): + val = Path(val) + if not val.is_dir(): + raise IOError('"working_dir": "%s" does not exist' % str(val)) + self.set('working_dir', val) + + @property + def analysis_name(self): + """Name of the analysis + + This will be used to set up the output directory. The name can + contain slash ("/") characters, which will generate a subdirectory + structure in the `analysis_dir`. + """ + return str(self.get('analysis_name', default='analysis')) + + @analysis_name.setter + def analysis_name(self, val): + self.set('analysis_name', val) + + @property + def analysis_dir(self): + """Analysis directory, where output files go + + The name is derived from `working_dir` and `analysis_name`. This + property returns a Path object. The directory and any intermediate + directories can be created with the `mkdir()` method, e.g. + + >>> analysis_dir.mkdir(parents=True, exist_ok=True) + """ + adir = Path(self.working_dir) / self.analysis_name + return adir + + @property + def analysis_id(self): + return '_'.join( + [self.analysis_name.strip().replace(' ', '-'), + self.material.active.strip().replace(' ', '-')] + ) + + @property + def new_file_placement(self): + """Use new file placements for find-orientations and fit-grains + + The new file placement rules put several files in the `analysis_dir` + instead of the `working_dir`. 
+ """ + return self.get('new_file_placement', default=False) + + @property + def find_orientations(self): + return FindOrientationsConfig(self) + + @property + def fit_grains(self): + if not hasattr(self, "_fitgrain_config"): + self._fitgrain_config = FitGrainsConfig(self) + return self._fitgrain_config + + @property + def instrument(self): + if not hasattr(self, '_instr_config'): + instr_file = self.get('instrument', None) + if instr_file is not None: + instr_file = self.check_filename(instr_file, self.working_dir) + self._instr_config = Instrument(self, instr_file) + return self._instr_config + + @instrument.setter + def instrument(self, instr_config): + self._instr_config = instr_config + + @property + def material(self): + if not hasattr(self, '_material_config'): + self._material_config = MaterialConfig(self) + + if self.instrument.configuration is not None: + # !!! must make matl beam energy consistent with the instrument + beam_energy = self.instrument.hedm.beam_energy + self._material_config.beam_energy = beam_energy + + return self._material_config + + @material.setter + def material(self, material_config): + self._material_config = material_config + + @property + def multiprocessing(self): + # determine number of processes to run in parallel + multiproc = self.get('multiprocessing', default=-1) + ncpus = mp.cpu_count() + if multiproc == 'all': + res = ncpus + elif multiproc == 'half': + temp = ncpus // 2 + res = temp if temp else 1 + elif isinstance(multiproc, int): + if multiproc >= 0: + if multiproc > ncpus: + logger.warning( + 'Resuested %s processes, %d available', + multiproc, ncpus + ) + res = ncpus + else: + res = multiproc if multiproc else 1 + else: + temp = ncpus + multiproc + if temp < 1: + logger.warning( + 'Cannot use less than 1 process, requested %d of %d', + temp, ncpus + ) + res = 1 + else: + res = temp + else: + temp = ncpus - 1 + logger.warning( + "Invalid value %s for multiprocessing", + multiproc + ) + res = temp + return res + + @multiprocessing.setter + def multiprocessing(self, val): + isint = isinstance(val, int) + if val in ('half', 'all', -1): + self.set('multiprocessing', val) + elif (isint and val >= 0 and val <= mp.cpu_count()): + self.set('multiprocessing', int(val)) + else: + raise RuntimeError( + '"multiprocessing": must be 1:%d, got %s' + % (mp.cpu_count(), val) + ) + + @property + def image_series(self): + """Return the imageseries dictionary.""" + if not hasattr(self, '_image_dict'): + self._image_dict = dict() + fmt = self.get('image_series:format') + imsdata = self.get('image_series:data') + for ispec in imsdata: + fname = self.check_filename(ispec['file'], self.working_dir) + args = ispec['args'] + ims = imageseries.open(fname, fmt, **args) + oms = imageseries.omega.OmegaImageSeries(ims) + try: + panel = ispec['panel'] + if isinstance(panel, (tuple, list)): + panel = '_'.join(panel) + elif panel is None: + panel = shared_ims_key + except(KeyError): + try: + panel = oms.metadata['panel'] + except(KeyError): + panel = shared_ims_key + self._image_dict[panel] = oms + + return self._image_dict + + @image_series.setter + def image_series(self, ims_dict): + self._image_dict = ims_dict diff --git a/hexrd/hedm/config/utils.py b/hexrd/hedm/config/utils.py new file mode 100644 index 000000000..e31322f1b --- /dev/null +++ b/hexrd/hedm/config/utils.py @@ -0,0 +1,73 @@ +from collections import namedtuple +import copy +import warnings + + +ExclusionParameters = namedtuple( + 'ExclusionParameters', ["dmin", "dmax", "tthmin", "tthmax", + "sfacmin", 
"sfacmax", "pintmin", "pintmax"] +) + + +class Null(): + pass + + +null = Null() + + +def merge_dicts(a, b): + """Return a merged dict, updating values from `a` with values from `b`.""" + # need to pass a deep copy of a at the top level only: + return _merge_dicts(copy.deepcopy(a), b) + + +def _merge_dicts(a, b): + for k, v in b.items(): + if isinstance(v, dict): + if a.get(k) is None: + # happens in cases where all but section head is commented + a[k] = {} + _merge_dicts(a[k], v) + else: + if v is None and a.get(k) is not None: + # entire section commented out. Inherit, don't overwrite + pass + else: + a[k] = v + return a + + +def get_exclusion_parameters(cfg, prefix): + """Return flag use saved parameters and exclusion parameters""" + # + yaml_key = lambda s: ":".join((prefix, s)) + # + # Check for value from old spec for "sfacmin"; use that if it is given, + # but if the new spec is also there, it will override. Likewise for + # "tth_max", as used in fit_grains. + # -- Should add a deprecated warning if min_sfac_ratio is used + # + sfmin_dflt = cfg.get(yaml_key("min_sfac_ratio"), None) + if sfmin_dflt is not None: + warnings.warn( + '"min_sfac_ratio" is deprecated, use "sfacmin" instead', + DeprecationWarning + ) + # Default for reset_exclusions is True so that old config files will + # produce the same behavior. + reset_exclusions= cfg.get(yaml_key("reset_exclusions"), True) + + return( + reset_exclusions, + ExclusionParameters( + dmin = cfg.get(yaml_key("dmin"), None), + dmax = cfg.get(yaml_key("dmax"), None), + tthmin = cfg.get(yaml_key("tthmin"), None), + tthmax = cfg.get(yaml_key("tthmax"), None), + sfacmin = cfg.get(yaml_key("sfacmin"), sfmin_dflt), + sfacmax = cfg.get(yaml_key("sfacmax"), None), + pintmin = cfg.get(yaml_key("pintmin"), None), + pintmax = cfg.get(yaml_key("pintmax"), None), + ) + ) diff --git a/hexrd/findorientations.py b/hexrd/hedm/findorientations.py old mode 100755 new mode 100644 similarity index 100% rename from hexrd/findorientations.py rename to hexrd/hedm/findorientations.py diff --git a/hexrd/fitgrains.py b/hexrd/hedm/fitgrains.py similarity index 100% rename from hexrd/fitgrains.py rename to hexrd/hedm/fitgrains.py diff --git a/hexrd/fitting/grains.py b/hexrd/hedm/fitting/grains.py similarity index 100% rename from hexrd/fitting/grains.py rename to hexrd/hedm/fitting/grains.py diff --git a/hexrd/grainmap/__init__.py b/hexrd/hedm/grainmap/__init__.py similarity index 100% rename from hexrd/grainmap/__init__.py rename to hexrd/hedm/grainmap/__init__.py diff --git a/hexrd/grainmap/nfutil.py b/hexrd/hedm/grainmap/nfutil.py similarity index 100% rename from hexrd/grainmap/nfutil.py rename to hexrd/hedm/grainmap/nfutil.py diff --git a/hexrd/grainmap/tomoutil.py b/hexrd/hedm/grainmap/tomoutil.py similarity index 100% rename from hexrd/grainmap/tomoutil.py rename to hexrd/hedm/grainmap/tomoutil.py diff --git a/hexrd/grainmap/vtkutil.py b/hexrd/hedm/grainmap/vtkutil.py similarity index 100% rename from hexrd/grainmap/vtkutil.py rename to hexrd/hedm/grainmap/vtkutil.py diff --git a/hexrd/indexer.py b/hexrd/hedm/indexer.py similarity index 100% rename from hexrd/indexer.py rename to hexrd/hedm/indexer.py diff --git a/hexrd/hedm/instrument/detector.py b/hexrd/hedm/instrument/detector.py new file mode 100644 index 000000000..db4f95d1a --- /dev/null +++ b/hexrd/hedm/instrument/detector.py @@ -0,0 +1,2086 @@ +from abc import abstractmethod +import copy +import os +from typing import Optional + +from hexrd.instrument.constants import ( + COATING_DEFAULT, 
+    FILTER_DEFAULTS, PHOSPHOR_DEFAULT
+)
+from hexrd.instrument.physics_package import AbstractPhysicsPackage
+import numpy as np
+import numba
+
+from hexrd import constants as ct
+from hexrd import distortion as distortion_pkg
+from hexrd import matrixutil as mutil
+from hexrd import xrdutil
+from hexrd.rotations import mapAngle
+
+from hexrd.material import crystallography
+from hexrd.material.crystallography import PlaneData
+
+from hexrd.transforms.xfcapi import (
+    xy_to_gvec,
+    gvec_to_xy,
+    make_beam_rmat,
+    make_rmat_of_expmap,
+    oscill_angles_of_hkls,
+    angles_to_dvec,
+)
+
+from hexrd.utils.decorators import memoize
+from hexrd.gridutil import cellIndices
+from hexrd.instrument import detector_coatings
+from hexrd.material.utils import (
+    calculate_linear_absorption_length,
+    calculate_incoherent_scattering)
+
+distortion_registry = distortion_pkg.Registry()
+
+max_workers_DFLT = max(1, os.cpu_count() - 1)
+
+beam_energy_DFLT = 65.351
+
+# Memoize these, so each detector can avoid re-computing if nothing
+# has changed.
+_lorentz_factor = memoize(crystallography.lorentz_factor)
+_polarization_factor = memoize(crystallography.polarization_factor)
+
+
+class Detector:
+    """
+    Base class for 2D detectors with functions and properties
+    common to planar and cylindrical detectors. This class
+    will be inherited by both those classes.
+    """
+
+    __pixelPitchUnit = 'mm'
+
+    # Abstract methods that must be redefined in derived classes
+    @property
+    @abstractmethod
+    def detector_type(self):
+        raise NotImplementedError
+
+    @abstractmethod
+    def cart_to_angles(
+        self,
+        xy_data,
+        rmat_s=None,
+        tvec_s=None,
+        tvec_c=None,
+        apply_distortion=False,
+    ):
+        """
+        Transform cartesian coordinates to angular.
+
+        Parameters
+        ----------
+        xy_data : TYPE
+            The (n, 2) array of n (x, y) coordinates to be transformed in
+            either the raw or ideal cartesian plane (see `apply_distortion`
+            kwarg below).
+        rmat_s : array_like, optional
+            The (3, 3) COB matrix for the sample frame. The default is None.
+        tvec_s : array_like, optional
+            The (3, ) translation vector for the sample frame.
+            The default is None.
+        tvec_c : array_like, optional
+            The (3, ) translation vector for the crystal frame.
+            The default is None.
+        apply_distortion : bool, optional
+            If True, apply distortion to the input cartesian coordinates.
+            The default is False.
+
+        Returns
+        -------
+        tth_eta : TYPE
+            DESCRIPTION.
+        g_vec : TYPE
+            DESCRIPTION.
+
+        """
+        raise NotImplementedError
+
+    @abstractmethod
+    def angles_to_cart(
+        self,
+        tth_eta,
+        rmat_s=None,
+        tvec_s=None,
+        rmat_c=None,
+        tvec_c=None,
+        apply_distortion=False,
+    ):
+        """
+        Transform angular coordinates to cartesian.
+
+        Parameters
+        ----------
+        tth_eta : array_like
+            The (n, 2) array of n (tth, eta) coordinates to be transformed.
+        rmat_s : array_like, optional
+            The (3, 3) COB matrix for the sample frame. The default is None.
+        tvec_s : array_like, optional
+            The (3, ) translation vector for the sample frame.
+            The default is None.
+        rmat_c : array_like, optional
+            (3, 3) COB matrix for the crystal frame.
+            The default is None.
+        tvec_c : array_like, optional
+            The (3, ) translation vector for the crystal frame.
+            The default is None.
+        apply_distortion : bool, optional
+            If True, apply distortion to take cartesian coordinates to the
+            "warped" configuration. The default is False.
+
+        Returns
+        -------
+        xy_det : array_like
+            The (n, 2) array of the n input coordinates mapped to the
+            cartesian detector frame.
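+
+        Examples
+        --------
+        A sketch (``panel`` denotes a concrete subclass instance here;
+        angles in radians):
+
+        >>> xy_det = panel.angles_to_cart(np.array([[0.05, 0.0]]))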
+ + """ + raise NotImplementedError + + @abstractmethod + def cart_to_dvecs(self, xy_data): + """Convert cartesian coordinates to dvectors""" + raise NotImplementedError + + @abstractmethod + def pixel_angles(self, origin=ct.zeros_3): + raise NotImplementedError + + @abstractmethod + def pixel_tth_gradient(self, origin=ct.zeros_3): + raise NotImplementedError + + @abstractmethod + def pixel_eta_gradient(self, origin=ct.zeros_3): + raise NotImplementedError + + @abstractmethod + def calc_filter_coating_transmission(self, energy): + pass + + @property + @abstractmethod + def beam_position(self): + """ + returns the coordinates of the beam in the cartesian detector + frame {Xd, Yd, Zd}. NaNs if no intersection. + """ + raise NotImplementedError + + @property + def extra_config_kwargs(self): + return {} + + # End of abstract methods + + def __init__( + self, + rows=2048, + cols=2048, + pixel_size=(0.2, 0.2), + tvec=np.r_[0.0, 0.0, -1000.0], + tilt=ct.zeros_3, + name='default', + bvec=ct.beam_vec, + xrs_dist=None, + evec=ct.eta_vec, + saturation_level=None, + panel_buffer=None, + tth_distortion=None, + roi=None, + group=None, + distortion=None, + max_workers=max_workers_DFLT, + detector_filter: Optional[detector_coatings.Filter] = None, + detector_coating: Optional[detector_coatings.Coating] = None, + phosphor: Optional[detector_coatings.Phosphor] = None, + ): + """ + Instantiate a PlanarDetector object. + + Parameters + ---------- + rows : TYPE, optional + DESCRIPTION. The default is 2048. + cols : TYPE, optional + DESCRIPTION. The default is 2048. + pixel_size : TYPE, optional + DESCRIPTION. The default is (0.2, 0.2). + tvec : TYPE, optional + DESCRIPTION. The default is np.r_[0., 0., -1000.]. + tilt : TYPE, optional + DESCRIPTION. The default is ct.zeros_3. + name : TYPE, optional + DESCRIPTION. The default is 'default'. + bvec : TYPE, optional + DESCRIPTION. The default is ct.beam_vec. + evec : TYPE, optional + DESCRIPTION. The default is ct.eta_vec. + saturation_level : TYPE, optional + DESCRIPTION. The default is None. + panel_buffer : TYPE, optional + If a scalar or len(2) array_like, the interpretation is a border + in mm. If an array with shape (nrows, ncols), interpretation is a + boolean with True marking valid pixels. The default is None. + roi : TYPE, optional + DESCRIPTION. The default is None. + group : TYPE, optional + DESCRIPTION. The default is None. + distortion : TYPE, optional + DESCRIPTION. The default is None. + detector_filter : detector_coatings.Filter, optional + filter specifications including material type, + density and thickness. Used for absorption correction + calculations. + detector_coating : detector_coatings.Coating, optional + coating specifications including material type, + density and thickness. Used for absorption correction + calculations. + phosphor : detector_coatings.Phosphor, optional + phosphor specifications including material type, + density and thickness. Used for absorption correction + calculations. + + Returns + ------- + None. 
+ + """ + self._name = name + + self._rows = rows + self._cols = cols + + self._pixel_size_row = pixel_size[0] + self._pixel_size_col = pixel_size[1] + + self._saturation_level = saturation_level + + self._panel_buffer = panel_buffer + + self._tth_distortion = tth_distortion + + if roi is None: + self._roi = roi + else: + assert len(roi) == 2, "roi is set via (start_row, start_col)" + self._roi = ( + (roi[0], roi[0] + self._rows), + (roi[1], roi[1] + self._cols), + ) + + self._tvec = np.array(tvec).flatten() + self._tilt = np.array(tilt).flatten() + + self._bvec = np.array(bvec).flatten() + self._xrs_dist = xrs_dist + + self._evec = np.array(evec).flatten() + + self._distortion = distortion + + self.max_workers = max_workers + + self.group = group + + if detector_filter is None: + detector_filter = detector_coatings.Filter( + **FILTER_DEFAULTS.TARDIS) + self.filter = detector_filter + + if detector_coating is None: + detector_coating = detector_coatings.Coating(**COATING_DEFAULT) + self.coating = detector_coating + + if phosphor is None: + phosphor = detector_coatings.Phosphor(**PHOSPHOR_DEFAULT) + self.phosphor = phosphor + + # detector ID + @property + def name(self): + return self._name + + @name.setter + def name(self, s): + assert isinstance(s, str), "requires string input" + self._name = s + + @property + def lmfit_name(self): + # lmfit requires underscores instead of dashes + return self.name.replace('-', '_') + + # properties for physical size of rectangular detector + @property + def rows(self): + return self._rows + + @rows.setter + def rows(self, x): + assert isinstance(x, int) + self._rows = x + + @property + def cols(self): + return self._cols + + @cols.setter + def cols(self, x): + assert isinstance(x, int) + self._cols = x + + @property + def pixel_size_row(self): + return self._pixel_size_row + + @pixel_size_row.setter + def pixel_size_row(self, x): + self._pixel_size_row = float(x) + + @property + def pixel_size_col(self): + return self._pixel_size_col + + @pixel_size_col.setter + def pixel_size_col(self, x): + self._pixel_size_col = float(x) + + @property + def pixel_area(self): + return self.pixel_size_row * self.pixel_size_col + + @property + def saturation_level(self): + return self._saturation_level + + @saturation_level.setter + def saturation_level(self, x): + if x is not None: + assert np.isreal(x) + self._saturation_level = x + + @property + def panel_buffer(self): + return self._panel_buffer + + @panel_buffer.setter + def panel_buffer(self, x): + """if not None, a buffer in mm (x, y)""" + if x is not None: + assert len(x) == 2 or x.ndim == 2 + self._panel_buffer = x + + @property + def tth_distortion(self): + return self._tth_distortion + + @tth_distortion.setter + def tth_distortion(self, x): + """if not None, a buffer in mm (x, y)""" + if x is not None: + assert x.ndim == 2 and x.shape == self.shape + self._tth_distortion = x + + @property + def roi(self): + return self._roi + + @roi.setter + def roi(self, vertex_array): + """ + !!! 
vertex array must be (r0, c0) + """ + if vertex_array is not None: + assert ( + len(vertex_array) == 2 + ), "roi is set via (start_row, start_col)" + self._roi = ( + (vertex_array[0], vertex_array[0] + self.rows), + (vertex_array[1], vertex_array[1] + self.cols), + ) + + @property + def row_dim(self): + return self.rows * self.pixel_size_row + + @property + def col_dim(self): + return self.cols * self.pixel_size_col + + @property + def row_pixel_vec(self): + return self.pixel_size_row * ( + 0.5 * (self.rows - 1) - np.arange(self.rows) + ) + + @property + def row_edge_vec(self): + return _row_edge_vec(self.rows, self.pixel_size_row) + + @property + def col_pixel_vec(self): + return self.pixel_size_col * ( + np.arange(self.cols) - 0.5 * (self.cols - 1) + ) + + @property + def col_edge_vec(self): + return _col_edge_vec(self.cols, self.pixel_size_col) + + @property + def corner_ul(self): + return np.r_[-0.5 * self.col_dim, 0.5 * self.row_dim] + + @property + def corner_ll(self): + return np.r_[-0.5 * self.col_dim, -0.5 * self.row_dim] + + @property + def corner_lr(self): + return np.r_[0.5 * self.col_dim, -0.5 * self.row_dim] + + @property + def corner_ur(self): + return np.r_[0.5 * self.col_dim, 0.5 * self.row_dim] + + @property + def shape(self): + return (self.rows, self.cols) + + @property + def tvec(self): + return self._tvec + + @tvec.setter + def tvec(self, x): + x = np.array(x).flatten() + assert len(x) == 3, 'input must have length = 3' + self._tvec = x + + @property + def tilt(self): + return self._tilt + + @tilt.setter + def tilt(self, x): + assert len(x) == 3, 'input must have length = 3' + self._tilt = np.array(x).squeeze() + + @property + def bvec(self): + return self._bvec + + @bvec.setter + def bvec(self, x): + x = np.array(x).flatten() + assert ( + len(x) == 3 and sum(x * x) > 1 - ct.sqrt_epsf + ), 'input must have length = 3 and have unit magnitude' + self._bvec = x + + @property + def xrs_dist(self): + return self._xrs_dist + + @xrs_dist.setter + def xrs_dist(self, x): + assert x is None or np.isscalar( + x + ), f"'source_distance' must be None or scalar; you input '{x}'" + self._xrs_dist = x + + @property + def evec(self): + return self._evec + + @evec.setter + def evec(self, x): + x = np.array(x).flatten() + assert ( + len(x) == 3 and sum(x * x) > 1 - ct.sqrt_epsf + ), 'input must have length = 3 and have unit magnitude' + self._evec = x + + @property + def distortion(self): + return self._distortion + + @distortion.setter + def distortion(self, x): + if x is not None: + registry = distortion_registry.distortion_registry + check_arg = np.zeros(len(registry), dtype=bool) + for i, dcls in enumerate(registry.values()): + check_arg[i] = isinstance(x, dcls) + assert np.any(check_arg), 'input distortion is not in registry!' + self._distortion = x + + @property + def rmat(self): + return make_rmat_of_expmap(self.tilt) + + @property + def normal(self): + return self.rmat[:, 2] + + # ...memoize??? + @property + def pixel_coords(self): + pix_i, pix_j = np.meshgrid( + self.row_pixel_vec, self.col_pixel_vec, indexing='ij' + ) + return pix_i, pix_j + + # ========================================================================= + # METHODS + # ========================================================================= + + def pixel_Q(self, energy: np.floating, + origin: np.ndarray = ct.zeros_3) -> np.ndarray: + '''get the equivalent momentum transfer + for the angles. 
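+        Explicitly, this computes Q = 4*pi*sin(tth/2)/lambda (per the
+        return expression below), with tth the pixel's scattering angle
+        and lambda the wavelength corresponding to `energy`.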
+
+        Parameters
+        ----------
+        energy: float
+            incident photon energy in keV
+        origin: np.ndarray
+            origin of diffraction volume
+
+        Returns
+        -------
+        np.ndarray
+            pixel wise Q in A^-1
+
+        '''
+        lam = ct.keVToAngstrom(energy)
+        tth, _ = self.pixel_angles(origin=origin)
+        return 4.*np.pi*np.sin(tth*0.5)/lam
+
+    def pixel_compton_energy_loss(
+        self,
+        energy: np.floating,
+        origin: np.ndarray = ct.zeros_3,
+    ) -> np.ndarray:
+        '''inelastic compton scattering leads
+        to energy loss of the incident photons.
+        compute the final energy of the photons
+        for each pixel.
+
+        Parameters
+        ----------
+        energy: float
+            incident photon energy in keV
+        origin: np.ndarray
+            origin of diffraction volume
+
+        Returns
+        -------
+        np.ndarray
+            pixel wise energy of inelastically
+            scattered photons in keV
+        '''
+        energy = np.asarray(energy)
+        tth, _ = self.pixel_angles()
+        ang_fact = (1 - np.cos(tth))
+        beta = energy/ct.cRestmasskeV
+        return energy/(1 + beta*ang_fact)
+
+    def pixel_compton_attenuation_length(
+        self,
+        energy: np.floating,
+        density: np.floating,
+        formula: str,
+        origin: np.ndarray = ct.zeros_3,
+    ) -> np.ndarray:
+        '''each pixel intercepts inelastically
+        scattered photons of different energy.
+        the attenuation length and the transmission
+        for these photons are different. this function
+        calculates the attenuation length for each pixel
+        on the detector.
+
+        Parameters
+        ----------
+        energy: float
+            incident photon energy in keV
+        density: float
+            density of material in g/cc
+        formula: str
+            formula of the material scattering
+        origin: np.ndarray
+            origin of diffraction volume
+
+        Returns
+        -------
+        np.ndarray
+            pixel wise attenuation length of compton
+            scattered photons
+        '''
+        pixel_energy = self.pixel_compton_energy_loss(energy)
+
+        pixel_attenuation_length = calculate_linear_absorption_length(
+            density,
+            formula,
+            pixel_energy.flatten(),
+        )
+        return pixel_attenuation_length.reshape(self.shape)
+
+    def compute_compton_scattering_intensity(
+        self,
+        energy: np.floating,
+        rMat_s: np.array,
+        physics_package: AbstractPhysicsPackage,
+        origin: np.array = ct.zeros_3,
+    ) -> tuple[np.ndarray, np.ndarray, np.ndarray]:
+
+        '''compute the theoretical compton scattering
+        signal on the detector. this value is corrected
+        for the transmission of compton scattered photons
+        and normalized before being subtracted from the
+        raw intensity
+
+        Parameters
+        ----------
+        energy: float
+            energy of incident photon
+        rMat_s: np.ndarray
+            rotation matrix of sample orientation
+        physics_package: AbstractPhysicsPackage
+            physics package information
+
+        Returns
+        -------
+        compton_intensity: np.ndarray
+            transmission corrected compton scattering
+            intensity
+        t_s: np.ndarray
+            transmission of the physics package (sample) for the
+            compton scattered photons
+        t_w: np.ndarray
+            transmission of the window for the compton scattered
+            photons
+        '''
+
+        q = self.pixel_Q(energy)
+        inc_s = calculate_incoherent_scattering(
+            physics_package.sample_material,
+            q.flatten()).reshape(self.shape)
+
+        inc_w = calculate_incoherent_scattering(
+            physics_package.window_material,
+            q.flatten()).reshape(self.shape)
+
+        t_s = self.calc_compton_physics_package_transmission(
+            energy, rMat_s, physics_package)
+
+        t_w = self.calc_compton_window_transmission(
+            energy, rMat_s, physics_package)
+
+        return inc_s * t_s + inc_w * t_w, t_s, t_w
+
+    def polarization_factor(self, f_hor, f_vert, unpolarized=False):
+        """
+        Calculate the polarization factor for every pixel.
+
+        Parameters
+        ----------
+        f_hor : float
+            the fraction of horizontal polarization. for XFELs
+            this is close to 1.
+        f_vert : float
+            the fraction of vertical polarization, which is ~0 for XFELs.
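+        unpolarized : bool, optional
+            if True, `f_hor` and `f_vert` are ignored and the factor
+            for an unpolarized beam is computed instead (this kwarg is
+            simply passed through to crystallography.polarization_factor).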
+
+        Raises
+        ------
+        RuntimeError
+            DESCRIPTION.
+
+        Returns
+        -------
+        TYPE
+            DESCRIPTION.
+
+        """
+        s = f_hor + f_vert
+        if np.abs(s - 1) > ct.sqrt_epsf:
+            msg = (
+                "sum of fraction of "
+                "horizontal and vertical polarizations "
+                "must be equal to 1."
+            )
+            raise RuntimeError(msg)
+
+        if f_hor < 0 or f_vert < 0:
+            msg = (
+                "fraction of polarization in horizontal "
+                "or vertical directions can't be negative."
+            )
+            raise RuntimeError(msg)
+
+        tth, eta = self.pixel_angles()
+        kwargs = {
+            'tth': tth,
+            'eta': eta,
+            'f_hor': f_hor,
+            'f_vert': f_vert,
+            'unpolarized': unpolarized,
+        }
+
+        return _polarization_factor(**kwargs)
+
+    def lorentz_factor(self):
+        """
+        calculate the lorentz factor for every pixel
+
+        Parameters
+        ----------
+        None
+
+        Raises
+        ------
+        None
+
+        Returns
+        -------
+        numpy.ndarray
+            returns an array the same size as the detector panel
+            with each element containing the lorentz factor of the
+            corresponding pixel
+        """
+        tth, eta = self.pixel_angles()
+        return _lorentz_factor(tth)
+
+    def config_dict(
+        self,
+        chi=0,
+        tvec=ct.zeros_3,
+        beam_energy=beam_energy_DFLT,
+        beam_vector=ct.beam_vec,
+        sat_level=None,
+        panel_buffer=None,
+        style='yaml',
+    ):
+        """
+        Return a dictionary of detector parameters.
+
+        Optional instrument-level parameters may be included. This is a
+        convenience function to work with the APIs of several functions
+        in xrdutil.
+
+        Parameters
+        ----------
+        chi : float, optional
+            DESCRIPTION. The default is 0.
+        tvec : array_like (3,), optional
+            DESCRIPTION. The default is ct.zeros_3.
+        beam_energy : float, optional
+            DESCRIPTION. The default is beam_energy_DFLT.
+        beam_vector : array_like (3,), optional
+            DESCRIPTION. The default is ct.beam_vec.
+        sat_level : scalar, optional
+            DESCRIPTION. The default is None.
+        panel_buffer : scalar, array_like (2,), optional
+            DESCRIPTION. The default is None.
+
+        Returns
+        -------
+        config_dict : dict
+            DESCRIPTION.
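+
+        Examples
+        --------
+        A sketch (illustrative values only):
+
+        >>> cfg = panel.config_dict(chi=0.0, tvec=ct.zeros_3,
+        ...                         beam_energy=65.351, style='yaml')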
+ + """ + assert style.lower() in ['yaml', 'hdf5'], ( + "style must be either 'yaml', or 'hdf5'; you gave '%s'" % style + ) + + config_dict = {} + + # ===================================================================== + # DETECTOR PARAMETERS + # ===================================================================== + # transform and pixels + # + # assign local vars; listify if necessary + tilt = self.tilt + translation = self.tvec + roi = ( + None + if self.roi is None + else np.array([self.roi[0][0], self.roi[1][0]]).flatten() + ) + if style.lower() == 'yaml': + tilt = tilt.tolist() + translation = translation.tolist() + tvec = tvec.tolist() + roi = None if roi is None else roi.tolist() + + det_dict = dict( + detector_type=self.detector_type, + transform=dict( + tilt=tilt, + translation=translation, + ), + pixels=dict( + rows=int(self.rows), + columns=int(self.cols), + size=[float(self.pixel_size_row), float(self.pixel_size_col)], + ), + ) + + if roi is not None: + # Only add roi if it is not None + det_dict['pixels']['roi'] = roi + + if self.group is not None: + # Only add group if it is not None + det_dict['group'] = self.group + + # distortion + if self.distortion is not None: + dparams = self.distortion.params + if style.lower() == 'yaml': + dparams = dparams.tolist() + dist_d = dict( + function_name=self.distortion.maptype, parameters=dparams + ) + det_dict['distortion'] = dist_d + + # saturation level + if sat_level is None: + sat_level = self.saturation_level + det_dict['saturation_level'] = float(sat_level) + + # panel buffer + if panel_buffer is None: + # could be none, a 2-element list, or a 2-d array (rows, cols) + panel_buffer = copy.deepcopy(self.panel_buffer) + # !!! now we have to do some style-dependent munging of panel_buffer + if isinstance(panel_buffer, np.ndarray): + if panel_buffer.ndim == 1: + assert len(panel_buffer) == 2, "length of 1-d buffer must be 2" + # if here is a 2-element array + if style.lower() == 'yaml': + panel_buffer = panel_buffer.tolist() + elif panel_buffer.ndim == 2: + if style.lower() == 'yaml': + # !!! can't practically write array-like buffers to YAML + # so forced to clobber + print("clobbering panel buffer array in yaml-ready output") + panel_buffer = [0.0, 0.0] + else: + raise RuntimeError( + "panel buffer ndim must be 1 or 2; you specified %d" + % panel_buffer.ndmin + ) + elif panel_buffer is None: + # still None on self + # !!! this gets handled by unwrap_dict_to_h5 now + + # if style.lower() == 'hdf5': + # # !!! can't write None to hdf5; substitute with zeros + # panel_buffer = np.r_[0., 0.] + pass + det_dict['buffer'] = panel_buffer + + det_dict.update(self.extra_config_kwargs) + + # ===================================================================== + # SAMPLE STAGE PARAMETERS + # ===================================================================== + stage_dict = dict(chi=chi, translation=tvec) + + # ===================================================================== + # BEAM PARAMETERS + # ===================================================================== + # !!! 
make_reflection_patches is still using the vector
+        # azim, pola = calc_angles_from_beam_vec(beam_vector)
+        # beam_dict = dict(
+        #     energy=beam_energy,
+        #     vector=dict(
+        #         azimuth=azim,
+        #         polar_angle=pola
+        #     )
+        # )
+        beam_dict = dict(energy=beam_energy, vector=beam_vector)
+
+        config_dict['detector'] = det_dict
+        config_dict['oscillation_stage'] = stage_dict
+        config_dict['beam'] = beam_dict
+
+        return config_dict
+
+    def cartToPixel(self, xy_det, pixels=False, apply_distortion=False):
+        """
+        Converts cartesian coordinates to pixel coordinates
+
+        Parameters
+        ----------
+        xy_det : array_like
+            The (n, 2) vstacked array of (x, y) pairs in the reference
+            cartesian frame (possibly subject to distortion).
+        pixels : bool, optional
+            If True, return discrete pixel indices; otherwise fractional
+            pixel coordinates are returned. The default is False.
+        apply_distortion : bool, optional
+            If True, apply self.distortion to the input (if applicable).
+            The default is False.
+
+        Returns
+        -------
+        ij_det : array_like
+            The (n, 2) array of vstacked (i, j) coordinates in the pixel
+            reference frame where i is the (slow) row dimension and j is
+            the (fast) column dimension.
+
+        """
+        xy_det = np.atleast_2d(xy_det)
+        if apply_distortion and self.distortion is not None:
+            xy_det = self.distortion.apply(xy_det)
+
+        npts = len(xy_det)
+
+        tmp_ji = xy_det - np.tile(self.corner_ul, (npts, 1))
+        i_pix = -tmp_ji[:, 1] / self.pixel_size_row - 0.5
+        j_pix = tmp_ji[:, 0] / self.pixel_size_col - 0.5
+
+        ij_det = np.vstack([i_pix, j_pix]).T
+        if pixels:
+            # Hide any runtime warnings in this conversion. Their output
+            # values will certainly be off the detector, which is fine.
+            with np.errstate(invalid='ignore'):
+                ij_det = np.array(np.round(ij_det), dtype=int)
+
+        return ij_det
+
+    def pixelToCart(self, ij_det):
+        """
+        Convert a vstacked array or list of [i, j] pixel indices
+        (or UL corner-based points) to (x, y) coordinates in the
+        cartesian frame {Xd, Yd, Zd}
+        """
+        ij_det = np.atleast_2d(ij_det)
+
+        x = (ij_det[:, 1] + 0.5) * self.pixel_size_col + self.corner_ll[0]
+        y = (
+            self.rows - ij_det[:, 0] - 0.5
+        ) * self.pixel_size_row + self.corner_ll[1]
+        return np.vstack([x, y]).T
+
+    def angularPixelSize(self, xy, rMat_s=None, tVec_s=None, tVec_c=None):
+        """
+        Notes
+        -----
+        !!! assumes xy are in raw (distorted) frame, if applicable
+        """
+        # munge kwargs
+        if rMat_s is None:
+            rMat_s = ct.identity_3x3
+        if tVec_s is None:
+            tVec_s = ct.zeros_3x1
+        if tVec_c is None:
+            tVec_c = ct.zeros_3x1
+
+        # FIXME: perhaps not necessary, but safe...
+        xy = np.atleast_2d(xy)
+
+        '''
+        # ---------------------------------------------------------------------
+        # TODO: needs testing and memoized gradient arrays!
+ # --------------------------------------------------------------------- + # need origin arg + origin = np.dot(rMat_s, tVec_c).flatten() + tVec_s.flatten() + + # get pixel indices + i_crds = cellIndices(self.row_edge_vec, xy[:, 1]) + j_crds = cellIndices(self.col_edge_vec, xy[:, 0]) + + ptth_grad = self.pixel_tth_gradient(origin=origin)[i_crds, j_crds] + peta_grad = self.pixel_eta_gradient(origin=origin)[i_crds, j_crds] + + return np.vstack([ptth_grad, peta_grad]).T + ''' + # call xrdutil function + ang_ps = xrdutil.angularPixelSize( + xy, + (self.pixel_size_row, self.pixel_size_col), + self.rmat, + rMat_s, + self.tvec, + tVec_s, + tVec_c, + distortion=self.distortion, + beamVec=self.bvec, + etaVec=self.evec, + ) + return ang_ps + + def clip_to_panel(self, xy, buffer_edges=True): + """ + if self.roi is not None, uses it by default + + TODO: check if need shape kwarg + TODO: optimize ROI search better than list comprehension below + TODO: panel_buffer can be a 2-d boolean mask, but needs testing + + """ + xy = np.atleast_2d(xy) + + ''' + # !!! THIS LOGIC IS OBSOLETE + if self.roi is not None: + ij_crds = self.cartToPixel(xy, pixels=True) + ii, jj = polygon(self.roi[:, 0], self.roi[:, 1], + shape=(self.rows, self.cols)) + on_panel_rows = [i in ii for i in ij_crds[:, 0]] + on_panel_cols = [j in jj for j in ij_crds[:, 1]] + on_panel = np.logical_and(on_panel_rows, on_panel_cols) + else: + ''' + xlim = 0.5 * self.col_dim + ylim = 0.5 * self.row_dim + if buffer_edges and self.panel_buffer is not None: + if self.panel_buffer.ndim == 2: + pix = self.cartToPixel(xy, pixels=True) + + roff = np.logical_or(pix[:, 0] < 0, pix[:, 0] >= self.rows) + coff = np.logical_or(pix[:, 1] < 0, pix[:, 1] >= self.cols) + + idx = np.logical_or(roff, coff) + + on_panel = np.full(pix.shape[0], False) + valid_pix = pix[~idx, :] + on_panel[~idx] = self.panel_buffer[ + valid_pix[:, 0], valid_pix[:, 1] + ] + else: + xlim -= self.panel_buffer[0] + ylim -= self.panel_buffer[1] + on_panel_x = np.logical_and( + xy[:, 0] >= -xlim, xy[:, 0] <= xlim + ) + on_panel_y = np.logical_and( + xy[:, 1] >= -ylim, xy[:, 1] <= ylim + ) + on_panel = np.logical_and(on_panel_x, on_panel_y) + elif not buffer_edges or self.panel_buffer is None: + on_panel_x = np.logical_and(xy[:, 0] >= -xlim, xy[:, 0] <= xlim) + on_panel_y = np.logical_and(xy[:, 1] >= -ylim, xy[:, 1] <= ylim) + on_panel = np.logical_and(on_panel_x, on_panel_y) + return xy[on_panel, :], on_panel + + def interpolate_nearest(self, xy, img, pad_with_nans=True): + """ + TODO: revisit normalization in here? + + """ + is_2d = img.ndim == 2 + right_shape = img.shape[0] == self.rows and img.shape[1] == self.cols + assert ( + is_2d and right_shape + ), "input image must be 2-d with shape (%d, %d)" % ( + self.rows, + self.cols, + ) + + # initialize output with nans + if pad_with_nans: + int_xy = np.nan * np.ones(len(xy)) + else: + int_xy = np.zeros(len(xy)) + + # clip away points too close to or off the edges of the detector + xy_clip, on_panel = self.clip_to_panel(xy, buffer_edges=True) + + # get pixel indices of clipped points + i_src = cellIndices(self.row_pixel_vec, xy_clip[:, 1]) + j_src = cellIndices(self.col_pixel_vec, xy_clip[:, 0]) + + # next interpolate across cols + int_vals = img[i_src, j_src] + int_xy[on_panel] = int_vals + return int_xy + + def interpolate_bilinear(self, xy, img, pad_with_nans=True, + clip_to_panel=True, + on_panel: Optional[np.ndarray] = None): + """ + Interpolate an image array at the specified cartesian points. 
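+        Each interpolated value is the area-weighted (bilinear)
+        combination of the four surrounding pixel centers, per the
+        floor/ceil index arithmetic below.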
+ + Parameters + ---------- + xy : array_like, (n, 2) + Array of cartesian coordinates in the image plane at which + to evaluate intensity. + img : array_like + 2-dimensional image array. + pad_with_nans : bool, optional + Toggle for assigning NaN to points that fall off the detector. + The default is True. + on_panel : np.ndarray, optional + If you want to skip clip_to_panel() for performance reasons, + just provide an array of which pixels are on the panel. + + Returns + ------- + int_xy : array_like, (n,) + The array of interpolated intensities at each of the n input + coordinates. + + Notes + ----- + TODO: revisit normalization in here? + """ + + is_2d = img.ndim == 2 + right_shape = img.shape[0] == self.rows and img.shape[1] == self.cols + assert ( + is_2d and right_shape + ), "input image must be 2-d with shape (%d, %d)" % ( + self.rows, + self.cols, + ) + + # initialize output with nans + if pad_with_nans: + int_xy = np.nan * np.ones(len(xy)) + else: + int_xy = np.zeros(len(xy)) + + if on_panel is None: + # clip away points too close to or off the edges of the detector + xy_clip, on_panel = self.clip_to_panel(xy, buffer_edges=True) + else: + xy_clip = xy[on_panel] + + # grab fractional pixel indices of clipped points + ij_frac = self.cartToPixel(xy_clip) + + # get floors/ceils from array of pixel _centers_ + # and fix indices running off the pixel centers + # !!! notice we already clipped points to the panel! + i_floor = cellIndices(self.row_pixel_vec, xy_clip[:, 1]) + i_floor_img = _fix_indices(i_floor, 0, self.rows - 1) + + j_floor = cellIndices(self.col_pixel_vec, xy_clip[:, 0]) + j_floor_img = _fix_indices(j_floor, 0, self.cols - 1) + + # ceilings from floors + i_ceil = i_floor + 1 + i_ceil_img = _fix_indices(i_ceil, 0, self.rows - 1) + + j_ceil = j_floor + 1 + j_ceil_img = _fix_indices(j_ceil, 0, self.cols - 1) + + # first interpolate at top/bottom rows + row_floor_int = (j_ceil - ij_frac[:, 1]) * img[ + i_floor_img, j_floor_img + ] + (ij_frac[:, 1] - j_floor) * img[i_floor_img, j_ceil_img] + row_ceil_int = (j_ceil - ij_frac[:, 1]) * img[ + i_ceil_img, j_floor_img + ] + (ij_frac[:, 1] - j_floor) * img[i_ceil_img, j_ceil_img] + + # next interpolate across cols + int_vals = (i_ceil - ij_frac[:, 0]) * row_floor_int + ( + ij_frac[:, 0] - i_floor + ) * row_ceil_int + int_xy[on_panel] = int_vals + return int_xy + + def make_powder_rings( + self, + pd, + merge_hkls=False, + delta_tth=None, + delta_eta=10.0, + eta_period=None, + eta_list=None, + rmat_s=ct.identity_3x3, + tvec_s=ct.zeros_3, + tvec_c=ct.zeros_3, + full_output=False, + tth_distortion=None, + ): + """ + Generate points on Debye_Scherrer rings over the detector. + + !!! it is assuming that rmat_s is built from (chi, ome) as it the case + for HEDM! + + Parameters + ---------- + pd : TYPE + DESCRIPTION. + merge_hkls : TYPE, optional + DESCRIPTION. The default is False. + delta_tth : TYPE, optional + DESCRIPTION. The default is None. + delta_eta : TYPE, optional + DESCRIPTION. The default is 10.. + eta_period : TYPE, optional + DESCRIPTION. The default is None. + eta_list : TYPE, optional + DESCRIPTION. The default is None. + rmat_s : TYPE, optional + DESCRIPTION. The default is ct.identity_3x3. + tvec_s : TYPE, optional + DESCRIPTION. The default is ct.zeros_3. + tvec_c : TYPE, optional + DESCRIPTION. The default is ct.zeros_3. + full_output : TYPE, optional + DESCRIPTION. The default is False. + tth_distortion : special class, optional + Special distortion class. The default is None. 
+
+        Raises
+        ------
+        RuntimeError
+            DESCRIPTION.
+
+        Returns
+        -------
+        TYPE
+            DESCRIPTION.
+
+        """
+        if tth_distortion is not None:
+            tnorms = mutil.rowNorm(np.vstack([tvec_s, tvec_c]))
+            assert (
+                np.all(tnorms < ct.sqrt_epsf)
+            ), "If using distortion function, translations must be zero"
+
+        # in case you want to give it tth angles directly
+        if isinstance(pd, PlaneData):
+            pd = PlaneData(None, pd)
+            if delta_tth is not None:
+                pd.tThWidth = np.radians(delta_tth)
+            else:
+                delta_tth = np.degrees(pd.tThWidth)
+
+            # !!! conversions, meh...
+            del_eta = np.radians(delta_eta)
+
+            # do merging if asked
+            if merge_hkls:
+                _, tth_ranges = pd.getMergedRanges(cullDupl=True)
+                tth = np.average(tth_ranges, axis=1)
+            else:
+                tth_ranges = pd.getTThRanges()
+                tth = pd.getTTh()
+            tth_pm = tth_ranges - np.tile(tth, (2, 1)).T
+            sector_vertices = np.vstack(
+                [
+                    [
+                        i[0],
+                        -del_eta,
+                        i[0],
+                        del_eta,
+                        i[1],
+                        del_eta,
+                        i[1],
+                        -del_eta,
+                        0.0,
+                        0.0,
+                    ]
+                    for i in tth_pm
+                ]
+            )
+        else:
+            # Okay, we have an array-like tth specification
+            tth = np.array(pd).flatten()
+            if delta_tth is None:
+                raise RuntimeError(
+                    "If supplying a 2theta list as first arg, "
+                    + "must supply a delta_tth"
+                )
+            tth_pm = 0.5 * delta_tth * np.r_[-1.0, 1.0]
+            tth_ranges = np.radians([i + tth_pm for i in tth])  # !!! units
+            sector_vertices = np.tile(
+                0.5
+                * np.radians(
+                    [
+                        -delta_tth,
+                        -delta_eta,
+                        -delta_tth,
+                        delta_eta,
+                        delta_tth,
+                        delta_eta,
+                        delta_tth,
+                        -delta_eta,
+                        0.0,
+                        0.0,
+                    ]
+                ),
+                (len(tth), 1),
+            )
+            # !! conversions, meh...
+            tth = np.radians(tth)
+            del_eta = np.radians(delta_eta)
+
+        # for generating rings, make eta vector in correct period
+        if eta_period is None:
+            eta_period = (-np.pi, np.pi)
+
+        if eta_list is None:
+            neta = int(360.0 / float(delta_eta))
+            # this is the vector of ETA EDGES
+            eta_edges = mapAngle(
+                np.radians(delta_eta * np.linspace(0.0, neta, num=neta + 1))
+                + eta_period[0],
+                eta_period,
+            )
+
+            # get eta bin centers from edges
+            """
+            # !!! this way is probably overkill, since we have delta eta
+            eta_centers = np.average(
+                np.vstack([eta[:-1], eta[1:]]),
+                axis=0)
+            """
+            # !!! should be safe as eta_edges are monotonic
+            eta_centers = eta_edges[:-1] + 0.5 * del_eta
+        else:
+            eta_centers = np.radians(eta_list).flatten()
+            neta = len(eta_centers)
+            eta_edges = (
+                np.tile(eta_centers, (2, 1))
+                + np.tile(0.5 * del_eta * np.r_[-1, 1], (neta, 1)).T
+            ).T.flatten()
+
+        # get chi and ome from rmat_s
+        # !!! API ambiguity
+        # !!! this assumes rmat_s was made from the composition
+        # !!!   rmat_s = R(Xl, chi) * R(Yl, ome)
+        ome = np.arctan2(rmat_s[0, 2], rmat_s[0, 0])
+
+        # make list of angle tuples
+        angs = [
+            np.vstack([i * np.ones(neta), eta_centers, ome * np.ones(neta)])
+            for i in tth
+        ]
+
+        # need xy coords and pixel sizes
+        valid_ang = []
+        valid_xy = []
+        map_indices = []
+        npp = 5  # [ll, ul, ur, lr, center]
+        for i_ring in range(len(angs)):
+            # expand angles to patch vertices
+            these_angs = angs[i_ring].T
+
+            # push to vertices to see who falls off
+            # FIXME: clipping is not checking if masked regions are on the
+            #        patch interior
+            patch_vertices = (
+                np.tile(these_angs[:, :2], (1, npp))
+                + np.tile(sector_vertices[i_ring], (neta, 1))
+            ).reshape(npp * neta, 2)
+
+            # find vertices that all fall on the panel
+            # !!!
not API ambiguity regarding rmat_s above + all_xy = self.angles_to_cart( + patch_vertices, + rmat_s=rmat_s, + tvec_s=tvec_s, + rmat_c=None, + tvec_c=tvec_c, + apply_distortion=True, + ) + + _, on_panel = self.clip_to_panel(all_xy) + + # all vertices must be on... + + patch_is_on = np.all(on_panel.reshape(neta, npp), axis=1) + patch_xys = all_xy.reshape(neta, 5, 2)[patch_is_on] + + # !!! Have to apply after clipping, distortion can get wonky near + # the edeg of the panel, and it is assumed to be <~1 deg + # !!! The tth_ranges are NOT correct! + if tth_distortion is not None: + patch_valid_angs = tth_distortion.apply( + self.angles_to_cart(these_angs[patch_is_on, :2]), + return_nominal=True, + ) + patch_valid_xys = self.angles_to_cart( + patch_valid_angs, apply_distortion=True + ) + else: + patch_valid_angs = these_angs[patch_is_on, :2] + patch_valid_xys = patch_xys[:, -1, :].squeeze() + + # form output arrays + valid_ang.append(patch_valid_angs) + valid_xy.append(patch_valid_xys) + map_indices.append(patch_is_on) + # ??? is this option necessary? + if full_output: + return valid_ang, valid_xy, tth_ranges, map_indices, eta_edges + else: + return valid_ang, valid_xy, tth_ranges + + def map_to_plane(self, pts, rmat, tvec): + """ + Map detctor points to specified plane. + + Parameters + ---------- + pts : TYPE + DESCRIPTION. + rmat : TYPE + DESCRIPTION. + tvec : TYPE + DESCRIPTION. + + Returns + ------- + TYPE + DESCRIPTION. + + Notes + ----- + by convention: + + n * (u*pts_l - tvec) = 0 + + [pts]_l = rmat*[pts]_m + tvec + + """ + # arg munging + pts = np.atleast_2d(pts) + npts = len(pts) + + # map plane normal & translation vector, LAB FRAME + nvec_map_lab = rmat[:, 2].reshape(3, 1) + tvec_map_lab = np.atleast_2d(tvec).reshape(3, 1) + tvec_d_lab = np.atleast_2d(self.tvec).reshape(3, 1) + + # put pts as 3-d in panel CS and transform to 3-d lab coords + pts_det = np.hstack([pts, np.zeros((npts, 1))]) + pts_lab = np.dot(self.rmat, pts_det.T) + tvec_d_lab + + # scaling along pts vectors to hit map plane + u = np.dot(nvec_map_lab.T, tvec_map_lab) / np.dot( + nvec_map_lab.T, pts_lab + ) + + # pts on map plane, in LAB FRAME + pts_map_lab = np.tile(u, (3, 1)) * pts_lab + + return np.dot(rmat.T, pts_map_lab - tvec_map_lab)[:2, :].T + + def simulate_rotation_series( + self, + plane_data, + grain_param_list, + eta_ranges=[ + (-np.pi, np.pi), + ], + ome_ranges=[ + (-np.pi, np.pi), + ], + ome_period=(-np.pi, np.pi), + chi=0.0, + tVec_s=ct.zeros_3, + wavelength=None, + ): + """ + Simulate a monochromatic rotation series for a list of grains. + + Parameters + ---------- + plane_data : TYPE + DESCRIPTION. + grain_param_list : TYPE + DESCRIPTION. + eta_ranges : TYPE, optional + DESCRIPTION. The default is [(-np.pi, np.pi), ]. + ome_ranges : TYPE, optional + DESCRIPTION. The default is [(-np.pi, np.pi), ]. + ome_period : TYPE, optional + DESCRIPTION. The default is (-np.pi, np.pi). + chi : TYPE, optional + DESCRIPTION. The default is 0.. + tVec_s : TYPE, optional + DESCRIPTION. The default is ct.zeros_3. + wavelength : TYPE, optional + DESCRIPTION. The default is None. + + Returns + ------- + valid_ids : TYPE + DESCRIPTION. + valid_hkls : TYPE + DESCRIPTION. + valid_angs : TYPE + DESCRIPTION. + valid_xys : TYPE + DESCRIPTION. + ang_pixel_size : TYPE + DESCRIPTION. 
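+
+        Notes
+        -----
+        All angular quantities are in radians; the returned xy positions
+        are in the panel cartesian frame.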
+ + """ + # grab B-matrix from plane data + bMat = plane_data.latVecOps['B'] + + # reconcile wavelength + # * added sanity check on exclusions here; possible to + # * make some reflections invalid (NaN) + if wavelength is None: + wavelength = plane_data.wavelength + else: + if plane_data.wavelength != wavelength: + plane_data.wavelength = ct.keVToAngstrom(wavelength) + assert not np.any( + np.isnan(plane_data.getTTh()) + ), "plane data exclusions incompatible with wavelength" + + # vstacked G-vector id, h, k, l + full_hkls = xrdutil._fetch_hkls_from_planedata(plane_data) + + """ LOOP OVER GRAINS """ + valid_ids = [] + valid_hkls = [] + valid_angs = [] + valid_xys = [] + ang_pixel_size = [] + for gparm in grain_param_list: + + # make useful parameters + rMat_c = make_rmat_of_expmap(gparm[:3]) + tVec_c = gparm[3:6] + vInv_s = gparm[6:] + + # All possible bragg conditions as vstacked [tth, eta, ome] + # for each omega solution + angList = np.vstack( + oscill_angles_of_hkls( + full_hkls[:, 1:], + chi, + rMat_c, + bMat, + wavelength, + v_inv=vInv_s, + beam_vec=self.bvec, + ) + ) + + # filter by eta and omega ranges + # ??? get eta range from detector? + allAngs, allHKLs = xrdutil._filter_hkls_eta_ome( + full_hkls, angList, eta_ranges, ome_ranges + ) + allAngs[:, 2] = mapAngle(allAngs[:, 2], ome_period) + + # find points that fall on the panel + det_xy, rMat_s, on_plane = xrdutil._project_on_detector_plane( + allAngs, + self.rmat, + rMat_c, + chi, + self.tvec, + tVec_c, + tVec_s, + self.distortion, + self.bvec, + ) + xys_p, on_panel = self.clip_to_panel(det_xy) + valid_xys.append(xys_p) + + # filter angs and hkls that are on the detector plane + # !!! check this -- seems unnecessary but the results of + # _project_on_detector_plane() can have len < the input? + # the output of _project_on_detector_plane has been modified to + # hand back the index array to remedy this JVB 2020-05-27 + if np.any(~on_plane): + allAngs = np.atleast_2d(allAngs[on_plane, :]) + allHKLs = np.atleast_2d(allHKLs[on_plane, :]) + + # grab hkls and gvec ids for this panel + valid_hkls.append(allHKLs[on_panel, 1:]) + valid_ids.append(allHKLs[on_panel, 0]) + + # reflection angles (voxel centers) and pixel size in (tth, eta) + valid_angs.append(allAngs[on_panel, :]) + ang_pixel_size.append(self.angularPixelSize(xys_p)) + return valid_ids, valid_hkls, valid_angs, valid_xys, ang_pixel_size + + def simulate_laue_pattern( + self, + crystal_data, + minEnergy=5.0, + maxEnergy=35.0, + rmat_s=None, + tvec_s=None, + grain_params=None, + beam_vec=None, + ): + """ """ + if isinstance(crystal_data, PlaneData): + + plane_data = crystal_data + + # grab the expanded list of hkls from plane_data + hkls = np.hstack(plane_data.getSymHKLs()) + + # and the unit plane normals (G-vectors) in CRYSTAL FRAME + gvec_c = np.dot(plane_data.latVecOps['B'], hkls) + + # Filter out g-vectors going in the wrong direction. `gvec_to_xy()` used + # to do this, but not anymore. + to_keep = np.dot(gvec_c.T, self.bvec) <= 0 + + hkls = hkls[:, to_keep] + gvec_c = gvec_c[:, to_keep] + elif len(crystal_data) == 2: + # !!! 
should clean this up + hkls = np.array(crystal_data[0]) + bmat = crystal_data[1] + gvec_c = np.dot(bmat, hkls) + else: + raise RuntimeError( + f'argument list not understood: {crystal_data=}' + ) + nhkls_tot = hkls.shape[1] + + # parse energy ranges + # TODO: allow for spectrum parsing + multipleEnergyRanges = False + if hasattr(maxEnergy, '__len__'): + assert len(maxEnergy) == len( + minEnergy + ), 'energy cutoff ranges must have the same length' + multipleEnergyRanges = True + lmin = [] + lmax = [] + for i in range(len(maxEnergy)): + lmin.append(ct.keVToAngstrom(maxEnergy[i])) + lmax.append(ct.keVToAngstrom(minEnergy[i])) + else: + lmin = ct.keVToAngstrom(maxEnergy) + lmax = ct.keVToAngstrom(minEnergy) + + # parse grain parameters kwarg + if grain_params is None: + grain_params = np.atleast_2d( + np.hstack([np.zeros(6), ct.identity_6x1]) + ) + n_grains = len(grain_params) + + # sample rotation + if rmat_s is None: + rmat_s = ct.identity_3x3 + + # dummy translation vector... make input + if tvec_s is None: + tvec_s = ct.zeros_3 + + # beam vector + if beam_vec is None: + beam_vec = ct.beam_vec + + # ========================================================================= + # LOOP OVER GRAINS + # ========================================================================= + + # pre-allocate output arrays + xy_det = np.nan * np.ones((n_grains, nhkls_tot, 2)) + hkls_in = np.nan * np.ones((n_grains, 3, nhkls_tot)) + angles = np.nan * np.ones((n_grains, nhkls_tot, 2)) + dspacing = np.nan * np.ones((n_grains, nhkls_tot)) + energy = np.nan * np.ones((n_grains, nhkls_tot)) + for iG, gp in enumerate(grain_params): + rmat_c = make_rmat_of_expmap(gp[:3]) + tvec_c = gp[3:6].reshape(3, 1) + vInv_s = mutil.vecMVToSymm(gp[6:].reshape(6, 1)) + + # stretch them: V^(-1) * R * Gc + gvec_s_str = np.dot(vInv_s, np.dot(rmat_c, gvec_c)) + ghat_c_str = mutil.unitVector(np.dot(rmat_c.T, gvec_s_str)) + + # project + dpts = gvec_to_xy( + ghat_c_str.T, + self.rmat, + rmat_s, + rmat_c, + self.tvec, + tvec_s, + tvec_c, + beam_vec=beam_vec, + ) + + # check intersections with detector plane + canIntersect = ~np.isnan(dpts[:, 0]) + npts_in = sum(canIntersect) + + if np.any(canIntersect): + dpts = dpts[canIntersect, :].reshape(npts_in, 2) + dhkl = hkls[:, canIntersect].reshape(3, npts_in) + + rmat_b = make_beam_rmat(beam_vec, ct.eta_vec) + # back to angles + tth_eta, gvec_l = xy_to_gvec( + dpts, + self.rmat, + rmat_s, + self.tvec, + tvec_s, + tvec_c, + rmat_b=rmat_b, + ) + tth_eta = np.vstack(tth_eta).T + + # warp measured points + if self.distortion is not None: + dpts = self.distortion.apply_inverse(dpts) + + # plane spacings and energies + dsp = 1.0 / mutil.rowNorm(gvec_s_str[:, canIntersect].T) + wlen = 2 * dsp * np.sin(0.5 * tth_eta[:, 0]) + + # clip to detector panel + _, on_panel = self.clip_to_panel(dpts, buffer_edges=True) + + if multipleEnergyRanges: + validEnergy = np.zeros(len(wlen), dtype=bool) + for i in range(len(lmin)): + in_energy_range = np.logical_and( + wlen >= lmin[i], wlen <= lmax[i] + ) + validEnergy = validEnergy | in_energy_range + else: + validEnergy = np.logical_and(wlen >= lmin, wlen <= lmax) + + # index for valid reflections + keepers = np.where(np.logical_and(on_panel, validEnergy))[0] + + # assign output arrays + xy_det[iG][keepers, :] = dpts[keepers, :] + hkls_in[iG][:, keepers] = dhkl[:, keepers] + angles[iG][keepers, :] = tth_eta[keepers, :] + dspacing[iG, keepers] = dsp[keepers] + energy[iG, keepers] = ct.keVToAngstrom(wlen[keepers]) + return xy_det, hkls_in, angles, dspacing, energy + + 
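+    # Illustrative usage sketch (the names `panel` and `pd` are assumed
+    # here, not taken from this file): given a Detector `panel` and a
+    # PlaneData instance `pd`,
+    #
+    #     xy, hkls, angs, dsp, nrg = panel.simulate_laue_pattern(
+    #         pd, minEnergy=5.0, maxEnergy=25.0)
+    #
+    # rows of `xy` that remain NaN are reflections that either missed the
+    # panel or fell outside the energy cutoffs.
+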
@staticmethod + def update_memoization_sizes(all_panels): + funcs = [ + _polarization_factor, + _lorentz_factor, + ] + + min_size = len(all_panels) + return Detector.increase_memoization_sizes(funcs, min_size) + + @staticmethod + def increase_memoization_sizes(funcs, min_size): + for f in funcs: + cache_info = f.cache_info() + if cache_info['maxsize'] < min_size: + f.set_cache_maxsize(min_size) + + def calc_physics_package_transmission(self, energy: np.floating, + rMat_s: np.array, + physics_package: AbstractPhysicsPackage) -> np.float64: + """get the transmission from the physics package + need to consider HED and HEDM samples separately + """ + bvec = self.bvec + sample_normal = np.dot(rMat_s, [0., 0., np.sign(bvec[2])]) + seca = 1./np.dot(bvec, sample_normal) + + tth, eta = self.pixel_angles() + angs = np.vstack((tth.flatten(), eta.flatten(), + np.zeros(tth.flatten().shape))).T + + dvecs = angles_to_dvec(angs, beam_vec=bvec) + + cosb = np.dot(dvecs, sample_normal) + '''angles for which secb <= 0 or close are diffracted beams + almost parallel to the sample surface or backscattered, we + can mask out these values by setting secb to nan + ''' + mask = np.logical_or( + cosb < 0, + np.isclose( + cosb, + 0., + atol=5E-2, + ) + ) + cosb[mask] = np.nan + secb = 1./cosb.reshape(self.shape) + + T_sample = self.calc_transmission_sample( + seca, secb, energy, physics_package) + T_window = self.calc_transmission_window( + secb, energy, physics_package) + + transmission_physics_package = T_sample * T_window + return transmission_physics_package + + def calc_compton_physics_package_transmission( + self, + energy: np.floating, + rMat_s: np.array, + physics_package: AbstractPhysicsPackage, + ) -> np.ndarray: + '''calculate the attenuation of inelastically + scattered photons. since these photons lose energy, + the attenuation length is angle dependent ergo a separate + routine than elastically scattered absorption. + ''' + bvec = self.bvec + sample_normal = np.dot(rMat_s, [0., 0., np.sign(bvec[2])]) + seca = 1./np.dot(bvec, sample_normal) + + tth, eta = self.pixel_angles() + angs = np.vstack((tth.flatten(), eta.flatten(), + np.zeros(tth.flatten().shape))).T + + dvecs = angles_to_dvec(angs, beam_vec=bvec) + + cosb = np.dot(dvecs, sample_normal) + '''angles for which secb <= 0 or close are diffracted beams + almost parallel to the sample surface or backscattered, we + can mask out these values by setting secb to nan + ''' + mask = np.logical_or( + cosb < 0, + np.isclose( + cosb, + 0., + atol=5E-2, + ) + ) + cosb[mask] = np.nan + secb = 1./cosb.reshape(self.shape) + + T_sample = self.calc_compton_transmission( + seca, secb, energy, + physics_package, 'sample') + T_window = self.calc_compton_transmission_window( + secb, energy, physics_package) + + return T_sample * T_window + + def calc_compton_window_transmission( + self, + energy: np.floating, + rMat_s: np.ndarray, + physics_package: AbstractPhysicsPackage, + ) -> np.ndarray: + '''calculate the attenuation of inelastically + scattered photons just fropm the window. + since these photons lose energy, the attenuation length + is angle dependent ergo a separate routine than + elastically scattered absorption. 
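+
+        the sample factor here attenuates only along the incident path
+        (seca); the window factor combines attenuation of the incident
+        beam with the compton-shifted attenuation length along the
+        diffracted path (secb).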
+ ''' + bvec = self.bvec + sample_normal = np.dot(rMat_s, [0., 0., np.sign(bvec[2])]) + seca = 1./np.dot(bvec, sample_normal) + + tth, eta = self.pixel_angles() + angs = np.vstack((tth.flatten(), eta.flatten(), + np.zeros(tth.flatten().shape))).T + + dvecs = angles_to_dvec(angs, beam_vec=bvec) + + cosb = np.dot(dvecs, sample_normal) + '''angles for which secb <= 0 or close are diffracted beams + almost parallel to the sample surface or backscattered, we + can mask out these values by setting secb to nan + ''' + mask = np.logical_or( + cosb < 0, + np.isclose( + cosb, + 0., + atol=5E-2, + ) + ) + cosb[mask] = np.nan + secb = 1./cosb.reshape(self.shape) + + T_window = self.calc_compton_transmission( + seca, secb, energy, + physics_package, 'window') + T_sample = self.calc_compton_transmission_sample( + seca, energy, physics_package) + + return T_sample * T_window + + def calc_transmission_sample(self, seca: np.array, + secb: np.array, energy: np.floating, + physics_package: AbstractPhysicsPackage) -> np.array: + thickness_s = physics_package.sample_thickness # in microns + if np.isclose(thickness_s, 0): + return np.ones(self.shape) + + # in microns^-1 + mu_s = 1./physics_package.sample_absorption_length(energy) + x = (mu_s*thickness_s) + pre = 1./x/(secb - seca) + num = np.exp(-x*seca) - np.exp(-x*secb) + return pre * num + + def calc_transmission_window(self, secb: np.array, energy: np.floating, + physics_package: AbstractPhysicsPackage) -> np.array: + material_w = physics_package.window_material + thickness_w = physics_package.window_thickness # in microns + if material_w is None or np.isclose(thickness_w, 0): + return np.ones(self.shape) + + # in microns^-1 + mu_w = 1./physics_package.window_absorption_length(energy) + return np.exp(-thickness_w*mu_w*secb) + + def calc_compton_transmission( + self, + seca: np.ndarray, + secb: np.ndarray, + energy: np.floating, + physics_package: AbstractPhysicsPackage, + pp_layer: str, + ) -> np.ndarray: + + if pp_layer == 'sample': + formula = physics_package.sample_material + density = physics_package.sample_density + thickness = physics_package.sample_thickness + mu = 1./physics_package.sample_absorption_length(energy) + mu_prime = 1. 
/ self.pixel_compton_attenuation_length(
+                energy, density, formula,
+            )
+        elif pp_layer == 'window':
+            formula = physics_package.window_material
+            if formula is None:
+                return np.ones(self.shape)
+
+            density = physics_package.window_density
+            thickness = physics_package.window_thickness
+            mu = 1./physics_package.window_absorption_length(energy)
+            mu_prime = 1./self.pixel_compton_attenuation_length(
+                energy, density, formula)
+
+        if thickness <= 0:
+            return np.ones(self.shape)
+
+        x1 = mu*thickness*seca
+        x2 = mu_prime*thickness*secb
+        num = (np.exp(-x1) - np.exp(-x2))
+        return -num/(x1 - x2)
+
+    def calc_compton_transmission_sample(
+        self,
+        seca: np.ndarray,
+        energy: np.floating,
+        physics_package: AbstractPhysicsPackage,
+    ) -> np.ndarray:
+        thickness_s = physics_package.sample_thickness  # in microns
+
+        mu_s = 1./physics_package.sample_absorption_length(
+            energy)
+        return np.exp(-mu_s*thickness_s*seca)
+
+    def calc_compton_transmission_window(
+        self,
+        secb: np.ndarray,
+        energy: np.floating,
+        physics_package: AbstractPhysicsPackage,
+    ) -> np.ndarray:
+        formula = physics_package.window_material
+        if formula is None:
+            return np.ones(self.shape)
+
+        density = physics_package.window_density  # in g/cc
+        thickness_w = physics_package.window_thickness  # in microns
+
+        mu_w_prime = 1./self.pixel_compton_attenuation_length(
+            energy, density, formula)
+        return np.exp(-mu_w_prime*thickness_w*secb)
+
+    def calc_effective_pinhole_area(self, physics_package: AbstractPhysicsPackage) -> np.array:
+        """get the effective pinhole area correction
+        """
+        if (np.isclose(physics_package.pinhole_diameter, 0)
+                or np.isclose(physics_package.pinhole_thickness, 0)):
+            return np.ones(self.shape)
+
+        hod = (physics_package.pinhole_thickness /
+               physics_package.pinhole_diameter)
+        bvec = self.bvec
+
+        tth, eta = self.pixel_angles()
+        angs = np.vstack((tth.flatten(), eta.flatten(),
+                          np.zeros(tth.flatten().shape))).T
+        dvecs = angles_to_dvec(angs, beam_vec=bvec)
+
+        cth = -dvecs[:, 2].reshape(self.shape)
+        tanth = np.tan(np.arccos(cth))
+        f = hod*tanth
+        f[np.abs(f) > 1.] = np.nan
+        asinf = np.arcsin(f)
+        return 2 / np.pi * cth * (np.pi / 2 - asinf - f * np.cos(asinf))
+
+    def calc_transmission_generic(self,
+                                  secb: np.array,
+                                  thickness: np.floating,
+                                  absorption_length: np.floating) -> np.array:
+        if np.isclose(thickness, 0):
+            return np.ones(self.shape)
+
+        mu = 1./absorption_length  # in microns^-1
+        return np.exp(-thickness*mu*secb)
+
+    def calc_transmission_phosphor(self,
+                                   secb: np.array,
+                                   thickness: np.floating,
+                                   readout_length: np.floating,
+                                   absorption_length: np.floating,
+                                   energy: np.floating,
+                                   pre_U0: np.floating) -> np.array:
+        if np.isclose(thickness, 0):
+            return np.ones(self.shape)
+
+        f1 = absorption_length*thickness
+        f2 = absorption_length*readout_length
+        arg = (secb + 1/f2)
+        return pre_U0 * energy*((1.0 - np.exp(-f1*arg))/arg)
+
+# =============================================================================
+# UTILITY METHODS
+# =============================================================================
+
+
+def _fix_indices(idx, lo, hi):
+    nidx = np.array(idx)
+    off_lo = nidx < lo
+    off_hi = nidx > hi
+    nidx[off_lo] = lo
+    nidx[off_hi] = hi
+    return nidx
+
+
+def _row_edge_vec(rows, pixel_size_row):
+    return pixel_size_row * (0.5 * rows - np.arange(rows + 1))
+
+
+def _col_edge_vec(cols, pixel_size_col):
+    return pixel_size_col * (np.arange(cols + 1) - 0.5 * cols)
+
+
+# FIXME find a better place for this, and maybe include loop over pixels
+@numba.njit(nogil=True, cache=True)
+def _solid_angle_of_triangle(vtx_list):
+    norms = np.sqrt(np.sum(vtx_list * vtx_list, axis=1))
+    norms_prod = norms[0] * norms[1] * norms[2]
+    scalar_triple_product = np.dot(
+        vtx_list[0], np.cross(vtx_list[2], vtx_list[1])
+    )
+    denominator = (
+        norms_prod
+        + norms[0] * np.dot(vtx_list[1], vtx_list[2])
+        + norms[1] * np.dot(vtx_list[2], vtx_list[0])
+        + norms[2] * np.dot(vtx_list[0], vtx_list[1])
+    )
+
+    return 2.0 * np.arctan2(scalar_triple_product, denominator)
diff --git a/hexrd/hedm/instrument/hedm_instrument.py b/hexrd/hedm/instrument/hedm_instrument.py
new file mode 100644
index 000000000..1d768b47c
--- /dev/null
+++ b/hexrd/hedm/instrument/hedm_instrument.py
@@ -0,0 +1,2747 @@
+# -*- coding: utf-8 -*-
+# =============================================================================
+# Copyright (c) 2012, Lawrence Livermore National Security, LLC.
+# Produced at the Lawrence Livermore National Laboratory.
+# Written by Joel Bernier and others.
+# LLNL-CODE-529294.
+# All rights reserved.
+#
+# This file is part of HEXRD. For details on downloading the source,
+# see the file COPYING.
+#
+# Please also see the file LICENSE.
+#
+# This program is free software; you can redistribute it and/or modify it under
+# the terms of the GNU Lesser General Public License (as published by the Free
+# Software Foundation) version 2.1 dated February 1999.
+#
+# This program is distributed in the hope that it will be useful, but
+# WITHOUT ANY WARRANTY; without even the IMPLIED WARRANTY OF MERCHANTABILITY
+# or FITNESS FOR A PARTICULAR PURPOSE. See the terms and conditions of the
+# GNU General Public License for more details.
+#
+# You should have received a copy of the GNU Lesser General Public
+# License along with this program (see file LICENSE); if not, write to
+# the Free Software Foundation, Inc., 59 Temple Place, Suite 330,
+# Boston, MA 02111-1307 USA or visit <http://www.gnu.org/licenses/>.
+# ============================================================================= +""" +Created on Fri Dec 9 13:05:27 2016 + +@author: bernier2 +""" +from contextlib import contextmanager +import copy +import logging +import os +from concurrent.futures import ProcessPoolExecutor, ThreadPoolExecutor +from functools import partial +from typing import Optional + +from tqdm import tqdm + +import yaml + +import h5py + +import numpy as np + +from io import IOBase + +from scipy import ndimage +from scipy.linalg import logm +from skimage.measure import regionprops + +from hexrd import constants +from hexrd.imageseries import ImageSeries +from hexrd.imageseries.process import ProcessedImageSeries +from hexrd.imageseries.omega import OmegaImageSeries +from hexrd.fitting.utils import fit_ring +from hexrd.gridutil import make_tolerance_grid +from hexrd import matrixutil as mutil +from hexrd.transforms.xfcapi import ( + angles_to_gvec, + gvec_to_xy, + make_sample_rmat, + make_rmat_of_expmap, + unit_vector, +) +from hexrd import xrdutil +from hexrd.material.crystallography import PlaneData +from hexrd import constants as ct +from hexrd.rotations import mapAngle +from hexrd import distortion as distortion_pkg +from hexrd.utils.concurrent import distribute_tasks +from hexrd.utils.hdf5 import unwrap_dict_to_h5, unwrap_h5_to_dict +from hexrd.utils.yaml import NumpyToNativeDumper +from hexrd.valunits import valWUnit +from hexrd.wppf import LeBail + +from .cylindrical_detector import CylindricalDetector +from .detector import ( + beam_energy_DFLT, + Detector, + max_workers_DFLT, +) +from .planar_detector import PlanarDetector + +from skimage.draw import polygon +from skimage.util import random_noise +from hexrd.wppf import wppfsupport + +try: + from fast_histogram import histogram1d + fast_histogram = True +except ImportError: + from numpy import histogram as histogram1d + fast_histogram = False + +logger = logging.getLogger() +logger.setLevel('INFO') + +# ============================================================================= +# PARAMETERS +# ============================================================================= + +instrument_name_DFLT = 'instrument' + +beam_vec_DFLT = ct.beam_vec +source_distance_DFLT = np.inf + +eta_vec_DFLT = ct.eta_vec + +panel_id_DFLT = 'generic' +nrows_DFLT = 2048 +ncols_DFLT = 2048 +pixel_size_DFLT = (0.2, 0.2) + +tilt_params_DFLT = np.zeros(3) +t_vec_d_DFLT = np.r_[0., 0., -1000.] + +chi_DFLT = 0. +t_vec_s_DFLT = np.zeros(3) + +multi_ims_key = ct.shared_ims_key +ims_classes = (ImageSeries, ProcessedImageSeries, OmegaImageSeries) + +buffer_key = 'buffer' +distortion_key = 'distortion' + +# ============================================================================= +# UTILITY METHODS +# ============================================================================= + + +def generate_chunks(nrows, ncols, base_nrows, base_ncols, + row_gap=0, col_gap=0): + """ + Generate chunking data for regularly tiled composite detectors. + + Parameters + ---------- + nrows : int + DESCRIPTION. + ncols : int + DESCRIPTION. + base_nrows : int + DESCRIPTION. + base_ncols : int + DESCRIPTION. + row_gap : int, optional + DESCRIPTION. The default is 0. + col_gap : int, optional + DESCRIPTION. The default is 0. + + Returns + ------- + rects : array_like + The (nrows*ncols, ) list of ROI specs (see Notes). + labels : array_like + The (nrows*ncols, ) list of ROI (i, j) matrix indexing labels 'i_j'. 
+ + Notes + ----- + ProcessedImageSeries needs a (2, 2) array for the 'rect' kwarg: + [[row_start, row_stop], + [col_start, col_stop]] + """ + row_starts = np.array([i*(base_nrows + row_gap) for i in range(nrows)]) + col_starts = np.array([i*(base_ncols + col_gap) for i in range(ncols)]) + rr = np.vstack([row_starts, row_starts + base_nrows]) + cc = np.vstack([col_starts, col_starts + base_ncols]) + rects = [] + labels = [] + for i in range(nrows): + for j in range(ncols): + this_rect = np.array( + [[rr[0, i], rr[1, i]], + [cc[0, j], cc[1, j]]] + ) + rects.append(this_rect) + labels.append('%d_%d' % (i, j)) + return rects, labels + + +def chunk_instrument(instr, rects, labels, use_roi=False): + """ + Generate chunked config fro regularly tiled composite detectors. + + Parameters + ---------- + instr : TYPE + DESCRIPTION. + rects : TYPE + DESCRIPTION. + labels : TYPE + DESCRIPTION. + + Returns + ------- + new_icfg_dict : TYPE + DESCRIPTION. + + """ + icfg_dict = instr.write_config() + new_icfg_dict = dict(beam=icfg_dict['beam'], + oscillation_stage=icfg_dict['oscillation_stage'], + detectors={}) + for panel_id, panel in instr.detectors.items(): + pcfg_dict = panel.config_dict(instr.chi, instr.tvec)['detector'] + + for pnum, pdata in enumerate(zip(rects, labels)): + rect, label = pdata + panel_name = f'{panel_id}_{label}' + + row_col_dim = np.diff(rect) # (2, 1) + shape = tuple(row_col_dim.flatten()) + center = (rect[:, 0].reshape(2, 1) + 0.5*row_col_dim) + + sp_tvec = np.concatenate( + [panel.pixelToCart(center.T).flatten(), np.zeros(1)] + ) + + tvec = np.dot(panel.rmat, sp_tvec) + panel.tvec + + # new config dict + tmp_cfg = copy.deepcopy(pcfg_dict) + + # fix sizes + tmp_cfg['pixels']['rows'] = shape[0] + tmp_cfg['pixels']['columns'] = shape[1] + if use_roi: + tmp_cfg['pixels']['roi'] = (rect[0][0], rect[1][0]) + + # update tvec + tmp_cfg['transform']['translation'] = tvec.tolist() + + new_icfg_dict['detectors'][panel_name] = copy.deepcopy(tmp_cfg) + + if panel.panel_buffer is not None: + if panel.panel_buffer.ndim == 2: # have a mask array! + submask = panel.panel_buffer[ + rect[0, 0]:rect[0, 1], rect[1, 0]:rect[1, 1] + ] + new_icfg_dict['detectors'][panel_name]['buffer'] = submask + return new_icfg_dict + + +def _parse_imgser_dict(imgser_dict, det_key, roi=None): + """ + Associates a dict of imageseries to the target panel(s). + + Parameters + ---------- + imgser_dict : dict + The input dict of imageseries. Either `det_key` is in imgser_dict, or + the shared key is. Entries can be an ImageSeries object or a 2- or 3-d + ndarray of images. + det_key : str + The target detector key. + roi : tuple or None, optional + The roi of the target images. Format is + ((row_start, row_stop), (col_start, col_stop)) + The stops are used in the normal sense of a slice. The default is None. + + Raises + ------ + RuntimeError + If niether `det_key` nor the shared key is in the input imgser_dict; + Also, if the shared key is specified but the roi is None. + + Returns + ------- + ims : hexrd.imageseries + The desired imageseries object. + + """ + # grab imageseries for this detector + try: + ims = imgser_dict[det_key] + except KeyError: + matched_det_keys = [det_key in k for k in imgser_dict] + if multi_ims_key in imgser_dict: + images_in = imgser_dict[multi_ims_key] + elif np.any(matched_det_keys): + if sum(matched_det_keys) != 1: + raise RuntimeError( + f"multiple entries found for '{det_key}'" + ) + # use boolean array to index the proper key + # !!! 
these should be in the same order
+            img_keys = np.asarray(list(imgser_dict.keys()))
+            matched_det_key = img_keys[matched_det_keys][0]  # !!! only one
+            images_in = imgser_dict[matched_det_key]
+        else:
+            raise RuntimeError(
+                f"neither '{det_key}' nor '{multi_ims_key}' found "
+                + 'in imageseries input'
+            )
+
+        # have images now
+        if roi is None:
+            raise RuntimeError(
+                "roi must be specified to use shared imageseries"
+            )
+
+        if isinstance(images_in, ims_classes):
+            # input is an imageseries of some kind
+            ims = ProcessedImageSeries(images_in, [('rectangle', roi), ])
+            if isinstance(images_in, OmegaImageSeries):
+                # if it was an OmegaImageSeries, must re-cast
+                ims = OmegaImageSeries(ims)
+        elif isinstance(images_in, np.ndarray):
+            # 2- or 3-d array of images
+            ndim = images_in.ndim
+            if ndim == 2:
+                ims = images_in[roi[0][0]:roi[0][1], roi[1][0]:roi[1][1]]
+            elif ndim == 3:
+                nrows = roi[0][1] - roi[0][0]
+                ncols = roi[1][1] - roi[1][0]
+                n_images = len(images_in)
+                ims = np.empty((n_images, nrows, ncols),
+                               dtype=images_in.dtype)
+                for i, image in enumerate(images_in):
+                    ims[i, :, :] = \
+                        image[roi[0][0]:roi[0][1], roi[1][0]:roi[1][1]]
+            else:
+                raise RuntimeError(
+                    f"image input dim must be 2 or 3; you gave {ndim}"
+                )
+    return ims
+
+
+def calc_beam_vec(azim, pola):
+    """
+    Calculate unit beam propagation vector from
+    spherical coordinate spec in DEGREES.
+
+    ...MAY CHANGE; THIS IS ALSO LOCATED IN XRDUTIL!
+    """
+    tht = np.radians(azim)
+    phi = np.radians(pola)
+    bv = np.r_[
+        np.sin(phi)*np.cos(tht),
+        np.cos(phi),
+        np.sin(phi)*np.sin(tht)]
+    return -bv
+
+
+def calc_angles_from_beam_vec(bvec):
+    """
+    Return the azimuth and polar angle from a beam
+    vector
+    """
+    bvec = np.atleast_1d(bvec).flatten()
+    nvec = unit_vector(-bvec)
+    azim = float(
+        np.degrees(np.arctan2(nvec[2], nvec[0]))
+    )
+    pola = float(np.degrees(np.arccos(nvec[1])))
+    return azim, pola
+
+
+def migrate_instrument_config(instrument_config):
+    """utility function to generate old instrument config dictionary"""
+    cfg_list = []
+    for detector_id in instrument_config['detectors']:
+        cfg_list.append(
+            dict(
+                detector=instrument_config['detectors'][detector_id],
+                oscillation_stage=instrument_config['oscillation_stage'],
+            )
+        )
+    return cfg_list
+
+
+def angle_in_range(angle, ranges, ccw=True, units='degrees'):
+    """
+    Return the index of the first wedge the angle is found in
+
+    WARNING: always clockwise; assumes wedges are not overlapping
+    """
+    tau = 360.
+    if units.lower() == 'radians':
+        tau = 2*np.pi
+    w = np.nan
+    for i, wedge in enumerate(ranges):
+        amin = wedge[0]
+        amax = wedge[1]
+        check = amin + np.mod(angle - amin, tau)
+        if check < amax:
+            w = i
+            break
+    return w
+
+
+# ???: move to gridutil?
+def centers_of_edge_vec(edges):
+    assert np.asarray(edges).ndim == 1, "edges must be 1-d"
+    return np.average(np.vstack([edges[:-1], edges[1:]]), axis=0)
+
+
+def max_tth(instr):
+    """
+    Return the maximum Bragg angle (in radians) subtended by the instrument.
+
+    Parameters
+    ----------
+    instr : hexrd.instrument.HEDMInstrument instance
+        the instrument class to evaluate.
+
+    Returns
+    -------
+    tth_max : float
+        The maximum observable Bragg angle by the instrument in radians.
+    """
+    tth_max = 0.
+    for det in instr.detectors.values():
+        ptth, peta = det.pixel_angles()
+        tth_max = max(np.max(ptth), tth_max)
+    return tth_max
+
+
+def pixel_resolution(instr):
+    """
+    Return the minimum, median, and maximum angular
+    resolution of the instrument.
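+
+    Resolution here means the angular extent (tth, eta) subtended by
+    individual pixels, aggregated over all panels.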
+ + Parameters + ---------- + instr : HEDMInstrument instance + An instrument. + + Returns + ------- + tth_stats : float + min/median/max tth resolution in radians. + eta_stats : TYPE + min/median/max eta resolution in radians. + + """ + max_tth = np.inf + max_eta = np.inf + min_tth = -np.inf + min_eta = -np.inf + ang_ps_full = [] + for panel in instr.detectors.values(): + angps = panel.angularPixelSize( + np.stack( + panel.pixel_coords, + axis=0 + ).reshape(2, np.cumprod(panel.shape)[-1]).T + ) + ang_ps_full.append(angps) + max_tth = min(max_tth, np.min(angps[:, 0])) + max_eta = min(max_eta, np.min(angps[:, 1])) + min_tth = max(min_tth, np.max(angps[:, 0])) + min_eta = max(min_eta, np.max(angps[:, 1])) + med_tth, med_eta = np.median(np.vstack(ang_ps_full), axis=0).flatten() + return (min_tth, med_tth, max_tth), (min_eta, med_eta, max_eta) + + +def max_resolution(instr): + """ + Return the maximum angular resolution of the instrument. + + Parameters + ---------- + instr : HEDMInstrument instance + An instrument. + + Returns + ------- + max_tth : float + Maximum tth resolution in radians. + max_eta : TYPE + maximum eta resolution in radians. + + """ + max_tth = np.inf + max_eta = np.inf + for panel in instr.detectors.values(): + angps = panel.angularPixelSize( + np.stack( + panel.pixel_coords, + axis=0 + ).reshape(2, np.cumprod(panel.shape)[-1]).T + ) + max_tth = min(max_tth, np.min(angps[:, 0])) + max_eta = min(max_eta, np.min(angps[:, 1])) + return max_tth, max_eta + + +def _gaussian_dist(x, cen, fwhm): + sigm = fwhm/(2*np.sqrt(2*np.log(2))) + return np.exp(-0.5*(x - cen)**2/sigm**2) + + +def _sigma_to_fwhm(sigm): + return sigm*ct.sigma_to_fwhm + + +def _fwhm_to_sigma(fwhm): + return fwhm/ct.sigma_to_fwhm + + +# ============================================================================= +# CLASSES +# ============================================================================= + + +class HEDMInstrument(object): + """ + Abstraction of XRD instrument. + + * Distortion needs to be moved to a class with registry; tuple unworkable + * where should reference eta be defined? 
currently set to default config
+    """
+
+    def __init__(self, instrument_config=None,
+                 image_series=None, eta_vector=None,
+                 instrument_name=None, tilt_calibration_mapping=None,
+                 max_workers=max_workers_DFLT,
+                 physics_package=None,
+                 active_beam_name: Optional[str] = None):
+        self._id = instrument_name_DFLT
+
+        self._active_beam_name = active_beam_name
+        self._beam_dict = {}
+
+        if eta_vector is None:
+            self._eta_vector = eta_vec_DFLT
+        else:
+            self._eta_vector = eta_vector
+
+        self.max_workers = max_workers
+
+        self.physics_package = physics_package
+
+        if instrument_config is None:
+            # Default instrument
+            if instrument_name is not None:
+                self._id = instrument_name
+            self._num_panels = 1
+            self._create_default_beam()
+
+            # FIXME: must add cylindrical
+            self._detectors = {
+                panel_id_DFLT: PlanarDetector(
+                    rows=nrows_DFLT, cols=ncols_DFLT,
+                    pixel_size=pixel_size_DFLT,
+                    tvec=t_vec_d_DFLT,
+                    tilt=tilt_params_DFLT,
+                    bvec=self.beam_vector,
+                    xrs_dist=self.source_distance,
+                    evec=self._eta_vector,
+                    distortion=None,
+                    roi=None, group=None,
+                    max_workers=self.max_workers),
+            }
+
+            self._tvec = t_vec_s_DFLT
+            self._chi = chi_DFLT
+        else:
+            if isinstance(instrument_config, h5py.File):
+                tmp = {}
+                unwrap_h5_to_dict(instrument_config, tmp)
+                instrument_config = tmp['instrument']
+            elif not isinstance(instrument_config, dict):
+                raise RuntimeError(
+                    "instrument_config must be either an HDF5 file object "
+                    + "or a dictionary. You gave a %s"
+                    % type(instrument_config)
+                )
+            if instrument_name is None:
+                if 'id' in instrument_config:
+                    self._id = instrument_config['id']
+            else:
+                self._id = instrument_name
+
+            self._num_panels = len(instrument_config['detectors'])
+
+            if instrument_config.get('physics_package', None) is not None:
+                self.physics_package = instrument_config['physics_package']
+
+            xrs_config = instrument_config['beam']
+            is_single_beam = (
+                'energy' in xrs_config and
+                'vector' in xrs_config
+            )
+            if is_single_beam:
+                # Assume single beam.
Load the same way as multibeam + self._create_default_beam() + xrs_config = {self.active_beam_name: xrs_config} + + # Multi beam load + for beam_name, beam in xrs_config.items(): + self._beam_dict[beam_name] = { + 'energy': beam['energy'], + 'vector': calc_beam_vec( + beam['vector']['azimuth'], + beam['vector']['polar_angle'], + ), + 'distance': beam.get('source_distance', np.inf), + } + + # Set the active beam name if not set already + if self._active_beam_name is None: + self._active_beam_name = next(iter(self._beam_dict)) + + # now build detector dict + detectors_config = instrument_config['detectors'] + det_dict = dict.fromkeys(detectors_config) + for det_id, det_info in detectors_config.items(): + det_group = det_info.get('group') # optional detector group + pixel_info = det_info['pixels'] + affine_info = det_info['transform'] + detector_type = det_info.get('detector_type', 'planar') + filter = det_info.get('filter', None) + coating = det_info.get('coating', None) + phosphor = det_info.get('phosphor', None) + try: + saturation_level = det_info['saturation_level'] + except KeyError: + saturation_level = 2**16 + shape = (pixel_info['rows'], pixel_info['columns']) + + panel_buffer = None + if buffer_key in det_info: + det_buffer = det_info[buffer_key] + if det_buffer is not None: + if isinstance(det_buffer, np.ndarray): + if det_buffer.ndim == 2: + if det_buffer.shape != shape: + msg = ( + f'Buffer shape for {det_id} ' + f'({det_buffer.shape}) does not match ' + f'detector shape ({shape})' + ) + raise BufferShapeMismatchError(msg) + else: + assert len(det_buffer) == 2 + panel_buffer = det_buffer + elif isinstance(det_buffer, list): + panel_buffer = np.asarray(det_buffer) + elif np.isscalar(det_buffer): + panel_buffer = det_buffer*np.ones(2) + else: + raise RuntimeError( + "panel buffer spec invalid for %s" % det_id + ) + + # optional roi + roi = pixel_info.get('roi') + + # handle distortion + distortion = None + if distortion_key in det_info: + distortion_cfg = det_info[distortion_key] + if distortion_cfg is not None: + try: + func_name = distortion_cfg['function_name'] + dparams = distortion_cfg['parameters'] + distortion = distortion_pkg.get_mapping( + func_name, dparams + ) + except KeyError: + raise RuntimeError( + "problem with distortion specification" + ) + if detector_type.lower() not in DETECTOR_TYPES: + msg = f'Unknown detector type: {detector_type}' + raise NotImplementedError(msg) + + DetectorClass = DETECTOR_TYPES[detector_type.lower()] + kwargs = dict( + name=det_id, + rows=pixel_info['rows'], + cols=pixel_info['columns'], + pixel_size=pixel_info['size'], + panel_buffer=panel_buffer, + saturation_level=saturation_level, + tvec=affine_info['translation'], + tilt=affine_info['tilt'], + bvec=self.beam_vector, + xrs_dist=self.source_distance, + evec=self._eta_vector, + distortion=distortion, + roi=roi, + group=det_group, + max_workers=self.max_workers, + detector_filter=filter, + detector_coating=coating, + phosphor=phosphor, + ) + + if DetectorClass is CylindricalDetector: + # Add cylindrical detector kwargs + kwargs['radius'] = det_info.get('radius', 49.51) + + det_dict[det_id] = DetectorClass(**kwargs) + + self._detectors = det_dict + + self._tvec = np.r_[ + instrument_config['oscillation_stage']['translation'] + ] + self._chi = instrument_config['oscillation_stage']['chi'] + + # grab angles from beam vec + # !!! these are in DEGREES! 
azim, pola = calc_angles_from_beam_vec(self.beam_vector)
+
+        self.update_memoization_sizes()
+
+    @property
+    def mean_detector_center(self) -> np.ndarray:
+        """Return the mean center for all detectors"""
+        centers = np.array([panel.tvec for panel in self.detectors.values()])
+        return centers.sum(axis=0) / len(centers)
+
+    def mean_group_center(self, group: str) -> np.ndarray:
+        """Return the mean center for detectors belonging to a group"""
+        centers = np.array([
+            x.tvec for x in self.detectors_in_group(group).values()
+        ])
+        return centers.sum(axis=0) / len(centers)
+
+    @property
+    def detector_groups(self) -> list[str]:
+        groups = []
+        for panel in self.detectors.values():
+            group = panel.group
+            if group is not None and group not in groups:
+                groups.append(group)
+
+        return groups
+
+    def detectors_in_group(self, group: str) -> dict[str, Detector]:
+        return {k: v for k, v in self.detectors.items() if v.group == group}
+
+    # properties for physical size of rectangular detector
+    @property
+    def id(self):
+        return self._id
+
+    @property
+    def num_panels(self):
+        return self._num_panels
+
+    @property
+    def detectors(self):
+        return self._detectors
+
+    @property
+    def detector_parameters(self):
+        pdict = {}
+        for key, panel in self.detectors.items():
+            pdict[key] = panel.config_dict(
+                self.chi, self.tvec,
+                beam_energy=self.beam_energy,
+                beam_vector=self.beam_vector,
+                style='hdf5'
+            )
+        return pdict
+
+    @property
+    def tvec(self):
+        return self._tvec
+
+    @tvec.setter
+    def tvec(self, x):
+        x = np.array(x).flatten()
+        assert len(x) == 3, 'input must have length = 3'
+        self._tvec = x
+
+    @property
+    def chi(self):
+        return self._chi
+
+    @chi.setter
+    def chi(self, x):
+        self._chi = float(x)
+
+    @property
+    def beam_energy(self) -> float:
+        return self.active_beam['energy']
+
+    @beam_energy.setter
+    def beam_energy(self, x: float):
+        self.active_beam['energy'] = float(x)
+        self.beam_dict_modified()
+
+    @property
+    def beam_wavelength(self):
+        return ct.keVToAngstrom(self.beam_energy)
+
+    @property
+    def has_multi_beam(self) -> bool:
+        return len(self.beam_dict) > 1
+
+    @property
+    def beam_dict(self) -> dict:
+        return self._beam_dict
+
+    def _create_default_beam(self):
+        name = 'XRS1'
+        self._beam_dict[name] = {
+            'energy': beam_energy_DFLT,
+            'vector': beam_vec_DFLT.copy(),
+            'distance': np.inf,
+        }
+
+        if self._active_beam_name is None:
+            self._active_beam_name = name
+
+    @property
+    def beam_names(self) -> list[str]:
+        return list(self.beam_dict)
+
+    def xrs_beam_energy(self, beam_name: Optional[str]) -> float:
+        if beam_name is None:
+            beam_name = self.active_beam_name
+
+        return self.beam_dict[beam_name]['energy']
+
+    @property
+    def active_beam_name(self) -> str:
+        return self._active_beam_name
+
+    @active_beam_name.setter
+    def active_beam_name(self, name: str):
+        if name not in self.beam_dict:
+            raise RuntimeError(
+                f'"{name}" is not present in "{self.beam_names}"'
+            )
+
+        self._active_beam_name = name
+
+        # Update anything beam related where we need to
+        self._update_panel_beams()
+
+    def beam_dict_modified(self):
+        # A function to call to indicate that the beam dict was modified.
+        # Update anything beam related where we need to
+        self._update_panel_beams()
+
+    @property
+    def active_beam(self) -> dict:
+        return self.beam_dict[self.active_beam_name]
+
+    def _update_panel_beams(self):
+        # FIXME: maybe we shouldn't store these on the panels?
+        # Might be hard to fix, though...
+ for panel in self.detectors.values(): + panel.bvec = self.beam_vector + panel.xrs_dist = self.source_distance + + @property + def beam_vector(self) -> np.ndarray: + return self.active_beam['vector'] + + @beam_vector.setter + def beam_vector(self, x: np.ndarray): + x = np.array(x).flatten() + if len(x) == 3: + assert sum(x*x) > 1-ct.sqrt_epsf, \ + 'input must have length = 3 and have unit magnitude' + bvec = x + elif len(x) == 2: + bvec = calc_beam_vec(*x) + else: + raise RuntimeError("input must be a unit vector or angle pair") + + # Modify the beam vector for the active beam dict + self.active_beam['vector'] = bvec + self.beam_dict_modified() + + @property + def source_distance(self): + return self.active_beam['distance'] + + @source_distance.setter + def source_distance(self, x): + assert np.isscalar(x), \ + f"'source_distance' must be a scalar; you input '{x}'" + self.active_beam['distance'] = x + self.beam_dict_modified() + + @property + def eta_vector(self): + return self._eta_vector + + @eta_vector.setter + def eta_vector(self, x): + x = np.array(x).flatten() + assert len(x) == 3 and sum(x*x) > 1-ct.sqrt_epsf, \ + 'input must have length = 3 and have unit magnitude' + self._eta_vector = x + # ...maybe change dictionary item behavior for 3.x compatibility? + for detector_id in self.detectors: + panel = self.detectors[detector_id] + panel.evec = self._eta_vector + + # ========================================================================= + # METHODS + # ========================================================================= + + def write_config(self, file=None, style='yaml', calibration_dict={}): + """ WRITE OUT YAML FILE """ + # initialize output dictionary + assert style.lower() in ['yaml', 'hdf5'], \ + "style must be either 'yaml', or 'hdf5'; you gave '%s'" % style + + par_dict = {} + + par_dict['id'] = self.id + + # Multi beam writer + beam_dict = {} + for beam_name, beam in self.beam_dict.items(): + azim, polar = calc_angles_from_beam_vec(beam['vector']) + beam_dict[beam_name] = { + 'energy': beam['energy'], + 'vector': { + 'azimuth': azim, + 'polar_angle': polar, + }, + } + if beam['distance'] != np.inf: + beam_dict[beam_name]['source_distance'] = beam['distance'] + + if len(beam_dict) == 1: + # Just write it out a single beam (classical way) + beam_dict = next(iter(beam_dict.values())) + + par_dict['beam'] = beam_dict + + if calibration_dict: + par_dict['calibration_crystal'] = calibration_dict + + ostage = dict( + chi=self.chi, + translation=self.tvec.tolist() + ) + par_dict['oscillation_stage'] = ostage + + det_dict = dict.fromkeys(self.detectors) + for det_name, detector in self.detectors.items(): + # grab panel config + # !!! don't need beam or tvec + # !!! 
have vetted style + pdict = detector.config_dict(chi=self.chi, tvec=self.tvec, + beam_energy=self.beam_energy, + beam_vector=self.beam_vector, + style=style) + det_dict[det_name] = pdict['detector'] + par_dict['detectors'] = det_dict + + # handle output file if requested + if file is not None: + if style.lower() == 'yaml': + with open(file, 'w') as f: + yaml.dump(par_dict, stream=f, Dumper=NumpyToNativeDumper) + else: + def _write_group(file): + instr_grp = file.create_group('instrument') + unwrap_dict_to_h5(instr_grp, par_dict, asattr=False) + + # hdf5 + if isinstance(file, str): + with h5py.File(file, 'w') as f: + _write_group(f) + elif isinstance(file, h5py.File): + _write_group(file) + else: + raise TypeError("Unexpected file type.") + + return par_dict + + def extract_polar_maps(self, plane_data, imgser_dict, + active_hkls=None, threshold=None, + tth_tol=None, eta_tol=0.25): + """ + Extract eta-omega maps from an imageseries. + + Quick and dirty way to histogram angular patch data for make + pole figures suitable for fiber generation + + TODO: streamline projection code + TODO: normalization + !!!: images must be non-negative! + !!!: plane_data is NOT a copy! + """ + if tth_tol is not None: + plane_data.tThWidth = np.radians(tth_tol) + else: + tth_tol = np.degrees(plane_data.tThWidth) + + # make rings clipped to panel + # !!! eta_idx has the same length as plane_data.exclusions + # each entry are the integer indices into the bins + # !!! eta_edges is the list of eta bin EDGES; same for all + # detectors, so calculate it once + # !!! grab first panel + panel = next(iter(self.detectors.values())) + pow_angs, pow_xys, tth_ranges, eta_idx, eta_edges = \ + panel.make_powder_rings( + plane_data, merge_hkls=False, + delta_eta=eta_tol, full_output=True + ) + + if active_hkls is not None: + assert hasattr(active_hkls, '__len__'), \ + "active_hkls must be an iterable with __len__" + + # need to re-cast for element-wise operations + active_hkls = np.array(active_hkls) + + # these are all active reflection unique hklIDs + active_hklIDs = plane_data.getHKLID( + plane_data.hkls, master=True + ) + + # find indices + idx = np.zeros_like(active_hkls, dtype=int) + for i, input_hklID in enumerate(active_hkls): + try: + idx[i] = np.where(active_hklIDs == input_hklID)[0] + except ValueError: + raise RuntimeError(f"hklID '{input_hklID}' is invalid") + tth_ranges = tth_ranges[idx] + + delta_eta = eta_edges[1] - eta_edges[0] + ncols_eta = len(eta_edges) - 1 + + ring_maps_panel = dict.fromkeys(self.detectors) + for i_d, det_key in enumerate(self.detectors): + print("working on detector '%s'..." 
% det_key) + + # grab panel + panel = self.detectors[det_key] + # native_area = panel.pixel_area # pixel ref area + + # pixel angular coords for the detector panel + ptth, peta = panel.pixel_angles() + + # grab imageseries for this detector + ims = _parse_imgser_dict(imgser_dict, det_key, roi=panel.roi) + + # grab omegas from imageseries and squawk if missing + try: + omegas = ims.metadata['omega'] + except KeyError: + raise RuntimeError( + f"imageseries for '{det_key}' has no omega info" + ) + + # initialize maps and assing by row (omega/frame) + nrows_ome = len(omegas) + + # init map with NaNs + shape = (len(tth_ranges), nrows_ome, ncols_eta) + ring_maps = np.full(shape, np.nan) + + # Generate ring parameters once, and re-use them for each image + ring_params = [] + for tthr in tth_ranges: + kwargs = { + 'tthr': tthr, + 'ptth': ptth, + 'peta': peta, + 'eta_edges': eta_edges, + 'delta_eta': delta_eta, + } + ring_params.append(_generate_ring_params(**kwargs)) + + # Divide up the images among processes + tasks = distribute_tasks(len(ims), self.max_workers) + func = partial(_run_histograms, ims=ims, tth_ranges=tth_ranges, + ring_maps=ring_maps, ring_params=ring_params, + threshold=threshold) + + max_workers = self.max_workers + if max_workers == 1 or len(tasks) == 1: + # Just execute it serially. + for task in tasks: + func(task) + else: + with ThreadPoolExecutor(max_workers=max_workers) as executor: + # Evaluate the results via `list()`, so that if an + # exception is raised in a thread, it will be re-raised + # and visible to the user. + list(executor.map(func, tasks)) + + ring_maps_panel[det_key] = ring_maps + + return ring_maps_panel, eta_edges + + def extract_line_positions(self, plane_data, imgser_dict, + tth_tol=None, eta_tol=1., npdiv=2, + eta_centers=None, + collapse_eta=True, collapse_tth=False, + do_interpolation=True, do_fitting=False, + tth_distortion=None, fitting_kwargs=None): + """ + Perform annular interpolation on diffraction images. + + Provides data for extracting the line positions from powder diffraction + images, pole figure patches from imageseries, or Bragg peaks from + Laue diffraction images. + + Parameters + ---------- + plane_data : hexrd.crystallography.PlaneData object or array_like + Object determining the 2theta positions for the integration + sectors. If PlaneData, this will be all non-excluded reflections, + subject to merging within PlaneData.tThWidth. If array_like, + interpreted as a list of 2theta angles IN DEGREES. + imgser_dict : dict + Dictionary of powder diffraction images, one for each detector. + tth_tol : scalar, optional + The radial (i.e. 2theta) width of the integration sectors + IN DEGREES. This arg is required if plane_data is array_like. + The default is None. + eta_tol : scalar, optional + The azimuthal (i.e. eta) width of the integration sectors + IN DEGREES. The default is 1. + npdiv : int, optional + The number of oversampling pixel subdivision (see notes). + The default is 2. + eta_centers : array_like, optional + The desired azimuthal sector centers. The default is None. If + None, then bins are distrubted sequentially from (-180, 180). + collapse_eta : bool, optional + Flag for summing sectors in eta. The default is True. + collapse_tth : bool, optional + Flag for summing sectors in 2theta. The default is False. + do_interpolation : bool, optional + If True, perform bilinear interpolation. The default is True. + do_fitting : bool, optional + If True, then perform spectrum fitting, and append the results + to the returned data. 
+        tth_distortion : special class, optional
+            For the special case of pinhole camera distortions. See
+            hexrd.xrdutil.phutil.SampleLayerDistortion (only type
+            supported).
+        fitting_kwargs : dict, optional
+            kwargs passed to hexrd.fitting.utils.fit_ring if do_fitting
+            is True.
+
+        Raises
+        ------
+        RuntimeError
+            DESCRIPTION.
+
+        Returns
+        -------
+        panel_data : dict
+            Dictionary over the detectors with the following structure:
+                [list over (merged) 2theta ranges]
+                    [list over valid eta sectors]
+                        [angle data,
+                         bin intensities,
+                         fitting results]
+
+        Notes
+        -----
+        TODO: May change the array_like input units to degrees.
+        TODO: rename function.
+
+        """
+        if fitting_kwargs is None:
+            fitting_kwargs = {}
+
+        # =====================================================================
+        # LOOP OVER DETECTORS
+        # =====================================================================
+        logger.info("Interpolating ring data")
+        pbar_dets = partial(tqdm, total=self.num_panels, desc="Detector",
+                            position=self.num_panels)
+
+        # Split up the workers among the detectors
+        max_workers_per_detector = max(1, self.max_workers // self.num_panels)
+
+        kwargs = {
+            'plane_data': plane_data,
+            'tth_tol': tth_tol,
+            'eta_tol': eta_tol,
+            'eta_centers': eta_centers,
+            'npdiv': npdiv,
+            'collapse_tth': collapse_tth,
+            'collapse_eta': collapse_eta,
+            'do_interpolation': do_interpolation,
+            'do_fitting': do_fitting,
+            'fitting_kwargs': fitting_kwargs,
+            'tth_distortion': tth_distortion,
+            'max_workers': max_workers_per_detector,
+        }
+        func = partial(_extract_detector_line_positions, **kwargs)
+
+        def make_instr_cfg(panel):
+            return panel.config_dict(
+                chi=self.chi, tvec=self.tvec,
+                beam_energy=self.beam_energy,
+                beam_vector=self.beam_vector,
+                style='hdf5'
+            )
+
+        images = []
+        for detector_id, panel in self.detectors.items():
+            images.append(_parse_imgser_dict(imgser_dict, detector_id,
+                                             roi=panel.roi))
+
+        panels = [self.detectors[k] for k in self.detectors]
+        instr_cfgs = [make_instr_cfg(x) for x in panels]
+        pbp_array = np.arange(self.num_panels)
+        iter_args = zip(panels, instr_cfgs, images, pbp_array)
+        with ProcessPoolExecutor(mp_context=constants.mp_context,
+                                 max_workers=self.num_panels) as executor:
+            results = list(pbar_dets(executor.map(func, iter_args)))
+
+        panel_data = {}
+        for det, res in zip(self.detectors, results):
+            panel_data[det] = res
+
+        return panel_data
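+
+    # A minimal usage sketch for extract_line_positions (names and values
+    # hypothetical; `instr` is an HEDMInstrument and `imgser_dict` maps
+    # detector keys to images):
+    #
+    #     line_data = instr.extract_line_positions(
+    #         plane_data, imgser_dict, eta_tol=5., collapse_eta=True)
+    #     for det_key, ring_sets in line_data.items():
+    #         ang_data, intensities = ring_sets[0][0][:2]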
+
+    def simulate_powder_pattern(self,
+                                mat_list,
+                                params=None,
+                                bkgmethod=None,
+                                origin=None,
+                                noise=None):
+        """
+        Generate powder diffraction images from specified materials.
+
+        Parameters
+        ----------
+        mat_list : array_like (n, )
+            List of Material classes.
+        params : dict, optional
+            Dictionary of LeBail parameters (see Notes). The default is
+            None.
+        bkgmethod : dict, optional
+            Background function specification. The default is None.
+        origin : array_like (3,), optional
+            Vector describing the origin of the diffraction volume.
+            The default is None, which is equivalent to [0, 0, 0].
+        noise : str, optional
+            Flag describing the type of noise to be applied. The default
+            is None.
+
+        Returns
+        -------
+        img_dict : dict
+            Dictionary of diffraction images over the detectors.
+
+        Notes
+        -----
+        TODO: add more controls for noise function.
+        TODO: modify hooks to LeBail parameters.
+        TODO: add optional volume fraction weights for phases in mat_list.
+        """
+        """
+        >> @AUTHOR: Saransh Singh, Lawrence Livermore National Lab,
+                    saransh1@llnl.gov
+        >> @DATE: 01/22/2021 SS 1.0 original
+        >> @DETAILS: adding hook to WPPF class. this changes the input list
+                     significantly
+        """
+        if origin is None:
+            origin = self.tvec
+        origin = np.asarray(origin).squeeze()
+        assert len(origin) == 3, \
+            "origin must be a 3-element sequence"
+
+        if bkgmethod is None:
+            bkgmethod = {'chebyshev': 3}
+
+        '''
+        if params is None, fill in some sane default values.
+        only the first value is used; the rest of the values are the upper
+        and lower bounds and the vary flag for refinement, which are not
+        used here but are required for interfacing with WPPF.
+
+        zero_error : zero shift error
+        U, V, W : Cagliotti parameters
+        P, X, Y : Lorentzian parameters
+        eta1, eta2, eta3 : Mixing parameters
+        '''
+        if params is None:
+            # params = {'zero_error': [0.0, -1., 1., True],
+            #           'U': [2e-1, -1., 1., True],
+            #           'V': [2e-2, -1., 1., True],
+            #           'W': [2e-2, -1., 1., True],
+            #           'X': [2e-1, -1., 1., True],
+            #           'Y': [2e-1, -1., 1., True]
+            #           }
+            params = wppfsupport._generate_default_parameters_LeBail(
+                mat_list,
+                1,
+                bkgmethod,
+            )
+        '''
+        use the material list to obtain the dictionary of initial
+        intensities. we need to make sure that the intensities are properly
+        scaled by the Lorentz polarization factor. since the calculation is
+        done in the LeBail class, all that means is the initial intensity
+        needs that factor in there.
+        '''
+        img_dict = dict.fromkeys(self.detectors)
+
+        # find min and max tth over all panels
+        tth_mi = np.inf
+        tth_ma = 0.
+        ptth_dict = dict.fromkeys(self.detectors)
+        for det_key, panel in self.detectors.items():
+            ptth, peta = panel.pixel_angles(origin=origin)
+            tth_mi = min(tth_mi, ptth.min())
+            tth_ma = max(tth_ma, ptth.max())
+            ptth_dict[det_key] = ptth
+
+        '''
+        now make a list of two theta values and dummy ones for the
+        experimental spectrum. this is never really used, so any values
+        should be okay. we could also pass the integrated detector image
+        if we would like to simulate some realistic background, but that's
+        for another day.
+        '''
+        # convert angles to degrees because that's what the WPPF expects
+        tth_mi = np.degrees(tth_mi)
+        tth_ma = np.degrees(tth_ma)
+
+        # get tth angular resolution for instrument
+        ang_res = max_resolution(self)
+
+        # !!! calc nsteps by oversampling
+        nsteps = int(np.ceil(2*(tth_ma - tth_mi)/np.degrees(ang_res[0])))
+
+        # evaluation vector for LeBail
+        tth = np.linspace(tth_mi, tth_ma, nsteps)
+
+        expt = np.vstack([tth, np.ones_like(tth)]).T
+
+        wavelength = [
+            valWUnit('lp', 'length', self.beam_wavelength, 'angstrom'),
+            1.
+ ] + + ''' + now go through the material list and get the intensity dictionary + ''' + intensity = {} + for mat in mat_list: + + multiplicity = mat.planeData.getMultiplicity() + + tth = mat.planeData.getTTh() + + LP = (1 + np.cos(tth)**2) / \ + np.cos(0.5*tth)/np.sin(0.5*tth)**2 + + intensity[mat.name] = {} + intensity[mat.name]['synchrotron'] = \ + mat.planeData.structFact * LP * multiplicity + + kwargs = { + 'expt_spectrum': expt, + 'params': params, + 'phases': mat_list, + 'wavelength': { + 'synchrotron': wavelength + }, + 'bkgmethod': bkgmethod, + 'intensity_init': intensity, + 'peakshape': 'pvtch' + } + + self.WPPFclass = LeBail(**kwargs) + + self.simulated_spectrum = self.WPPFclass.spectrum_sim + self.background = self.WPPFclass.background + + ''' + now that we have the simulated intensities, its time to get the + two theta for the detector pixels and interpolate what the intensity + for each pixel should be + ''' + + img_dict = dict.fromkeys(self.detectors) + for det_key, panel in self.detectors.items(): + ptth = ptth_dict[det_key] + + img = np.interp(np.degrees(ptth), + self.simulated_spectrum.x, + self.simulated_spectrum.y + self.background.y) + + if noise is None: + img_dict[det_key] = img + + else: + # Rescale to be between 0 and 1 so random_noise() will work + prev_max = img.max() + img /= prev_max + + if noise.lower() == 'poisson': + im_noise = random_noise(img, + mode='poisson', + clip=True) + mi = im_noise.min() + ma = im_noise.max() + if ma > mi: + im_noise = (im_noise - mi)/(ma - mi) + + elif noise.lower() == 'gaussian': + im_noise = random_noise(img, mode='gaussian', clip=True) + + elif noise.lower() == 'salt': + im_noise = random_noise(img, mode='salt') + + elif noise.lower() == 'pepper': + im_noise = random_noise(img, mode='pepper') + + elif noise.lower() == 's&p': + im_noise = random_noise(img, mode='s&p') + + elif noise.lower() == 'speckle': + im_noise = random_noise(img, mode='speckle', clip=True) + + # Now scale back up + img_dict[det_key] = im_noise * prev_max + + return img_dict + + def simulate_laue_pattern(self, crystal_data, + minEnergy=5., maxEnergy=35., + rmat_s=None, grain_params=None): + """ + Simulate Laue diffraction over the instrument. + + Parameters + ---------- + crystal_data : TYPE + DESCRIPTION. + minEnergy : TYPE, optional + DESCRIPTION. The default is 5.. + maxEnergy : TYPE, optional + DESCRIPTION. The default is 35.. + rmat_s : TYPE, optional + DESCRIPTION. The default is None. + grain_params : TYPE, optional + DESCRIPTION. The default is None. + + Returns + ------- + results : TYPE + DESCRIPTION. + + xy_det, hkls_in, angles, dspacing, energy + + TODO: revisit output; dict, or concatenated list? + """ + results = dict.fromkeys(self.detectors) + for det_key, panel in self.detectors.items(): + results[det_key] = panel.simulate_laue_pattern( + crystal_data, + minEnergy=minEnergy, maxEnergy=maxEnergy, + rmat_s=rmat_s, tvec_s=self.tvec, + grain_params=grain_params, + beam_vec=self.beam_vector) + return results + + def simulate_rotation_series(self, plane_data, grain_param_list, + eta_ranges=[(-np.pi, np.pi), ], + ome_ranges=[(-np.pi, np.pi), ], + ome_period=(-np.pi, np.pi), + wavelength=None): + """ + Simulate a monochromatic rotation series over the instrument. + + Parameters + ---------- + plane_data : TYPE + DESCRIPTION. + grain_param_list : TYPE + DESCRIPTION. + eta_ranges : TYPE, optional + DESCRIPTION. The default is [(-np.pi, np.pi), ]. + ome_ranges : TYPE, optional + DESCRIPTION. The default is [(-np.pi, np.pi), ]. 
+        ome_period : TYPE, optional
+            DESCRIPTION. The default is (-np.pi, np.pi).
+        wavelength : TYPE, optional
+            DESCRIPTION. The default is None.
+
+        Returns
+        -------
+        results : TYPE
+            DESCRIPTION.
+
+        TODO: revisit output; dict, or concatenated list?
+        """
+        results = dict.fromkeys(self.detectors)
+        for det_key, panel in self.detectors.items():
+            results[det_key] = panel.simulate_rotation_series(
+                plane_data, grain_param_list,
+                eta_ranges=eta_ranges,
+                ome_ranges=ome_ranges,
+                ome_period=ome_period,
+                chi=self.chi, tVec_s=self.tvec,
+                wavelength=wavelength)
+        return results
+
+    def pull_spots(self, plane_data, grain_params,
+                   imgser_dict,
+                   tth_tol=0.25, eta_tol=1., ome_tol=1.,
+                   npdiv=2, threshold=10,
+                   eta_ranges=[(-np.pi, np.pi), ],
+                   ome_period=None,
+                   dirname='results', filename=None, output_format='text',
+                   return_spot_list=False,
+                   quiet=True, check_only=False,
+                   interp='nearest'):
+        """
+        Extract reflection info from a rotation series.
+
+        Input must be encoded as an OmegaImageseries object.
+
+        Parameters
+        ----------
+        plane_data : TYPE
+            DESCRIPTION.
+        grain_params : TYPE
+            DESCRIPTION.
+        imgser_dict : TYPE
+            DESCRIPTION.
+        tth_tol : TYPE, optional
+            DESCRIPTION. The default is 0.25.
+        eta_tol : TYPE, optional
+            DESCRIPTION. The default is 1..
+        ome_tol : TYPE, optional
+            DESCRIPTION. The default is 1..
+        npdiv : TYPE, optional
+            DESCRIPTION. The default is 2.
+        threshold : TYPE, optional
+            DESCRIPTION. The default is 10.
+        eta_ranges : TYPE, optional
+            DESCRIPTION. The default is [(-np.pi, np.pi), ].
+        ome_period : TYPE, optional
+            DESCRIPTION. The default is None, in which case the period is
+            taken from the omegas of the first imageseries.
+        dirname : TYPE, optional
+            DESCRIPTION. The default is 'results'.
+        filename : TYPE, optional
+            DESCRIPTION. The default is None.
+        output_format : TYPE, optional
+            DESCRIPTION. The default is 'text'.
+        return_spot_list : TYPE, optional
+            DESCRIPTION. The default is False.
+        quiet : TYPE, optional
+            DESCRIPTION. The default is True.
+        check_only : TYPE, optional
+            DESCRIPTION. The default is False.
+        interp : TYPE, optional
+            DESCRIPTION. The default is 'nearest'.
+
+        Returns
+        -------
+        compl : TYPE
+            DESCRIPTION.
+        output : TYPE
+            DESCRIPTION.
+
+        """
+        # grain parameters
+        rMat_c = make_rmat_of_expmap(grain_params[:3])
+        tVec_c = grain_params[3:6]
+
+        # grab omega ranges from first imageseries
+        #
+        # WARNING: all imageseries AND all wedges within are assumed to have
+        # the same omega values; put in a check that they are all the same???
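+        # (such a check might look like the following sketch, which assumes
+        #  every entry exposes the same `omegawedges` interface:)
+        #
+        #     ref_wedges = next(iter(imgser_dict.values())).omegawedges.wedges
+        #     assert all(ims.omegawedges.wedges == ref_wedges
+        #                for ims in imgser_dict.values())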
+ oims0 = next(iter(imgser_dict.values())) + ome_ranges = [np.radians([i['ostart'], i['ostop']]) + for i in oims0.omegawedges.wedges] + if ome_period is None: + ims = next(iter(imgser_dict.values())) + ostart = ims.omega[0, 0] + ome_period = np.radians(ostart + np.r_[0., 360.]) + + # delta omega in DEGREES grabbed from first imageseries in the dict + delta_ome = oims0.omega[0, 1] - oims0.omega[0, 0] + + # make omega grid for frame expansion around reference frame + # in DEGREES + ndiv_ome, ome_del = make_tolerance_grid( + delta_ome, ome_tol, 1, adjust_window=True, + ) + + # generate structuring element for connected component labeling + if ndiv_ome == 1: + label_struct = ndimage.generate_binary_structure(2, 2) + else: + label_struct = ndimage.generate_binary_structure(3, 3) + + # simulate rotation series + sim_results = self.simulate_rotation_series( + plane_data, [grain_params, ], + eta_ranges=eta_ranges, + ome_ranges=ome_ranges, + ome_period=ome_period) + + # patch vertex generator (global for instrument) + tol_vec = 0.5*np.radians( + [-tth_tol, -eta_tol, + -tth_tol, eta_tol, + tth_tol, eta_tol, + tth_tol, -eta_tol]) + + # prepare output if requested + if filename is not None and output_format.lower() == 'hdf5': + this_filename = os.path.join(dirname, filename) + writer = GrainDataWriter_h5( + os.path.join(dirname, filename), + self.write_config(), grain_params) + + # ===================================================================== + # LOOP OVER PANELS + # ===================================================================== + iRefl = 0 + next_invalid_peak_id = -100 + compl = [] + output = dict.fromkeys(self.detectors) + for detector_id, panel in self.detectors.items(): + # initialize text-based output writer + if filename is not None and output_format.lower() == 'text': + output_dir = os.path.join( + dirname, detector_id + ) + os.makedirs(output_dir, exist_ok=True) + this_filename = os.path.join( + output_dir, filename + ) + writer = PatchDataWriter(this_filename) + + # grab panel + instr_cfg = panel.config_dict( + self.chi, self.tvec, + beam_energy=self.beam_energy, + beam_vector=self.beam_vector, + style='hdf5' + ) + native_area = panel.pixel_area # pixel ref area + + # pull out the OmegaImageSeries for this panel from input dict + ome_imgser = _parse_imgser_dict(imgser_dict, + detector_id, + roi=panel.roi) + + # extract simulation results + sim_results_p = sim_results[detector_id] + hkl_ids = sim_results_p[0][0] + hkls_p = sim_results_p[1][0] + ang_centers = sim_results_p[2][0] + xy_centers = sim_results_p[3][0] + ang_pixel_size = sim_results_p[4][0] + + # now verify that full patch falls on detector... + # ???: strictly necessary? + # + # patch vertex array from sim + nangs = len(ang_centers) + patch_vertices = ( + np.tile(ang_centers[:, :2], (1, 4)) + + np.tile(tol_vec, (nangs, 1)) + ).reshape(4*nangs, 2) + ome_dupl = np.tile( + ang_centers[:, 2], (4, 1) + ).T.reshape(len(patch_vertices), 1) + + # find vertices that all fall on the panel + det_xy, rmats_s, on_plane = xrdutil._project_on_detector_plane( + np.hstack([patch_vertices, ome_dupl]), + panel.rmat, rMat_c, self.chi, + panel.tvec, tVec_c, self.tvec, + panel.distortion) + _, on_panel = panel.clip_to_panel(det_xy, buffer_edges=True) + + # all vertices must be on... + patch_is_on = np.all(on_panel.reshape(nangs, 4), axis=1) + patch_xys = det_xy.reshape(nangs, 4, 2)[patch_is_on] + + # re-filter... 
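+            # (i.e., keep only reflections whose full tolerance patch landed
+            #  on this panel, so all per-reflection arrays stay aligned)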
+ hkl_ids = hkl_ids[patch_is_on] + hkls_p = hkls_p[patch_is_on, :] + ang_centers = ang_centers[patch_is_on, :] + xy_centers = xy_centers[patch_is_on, :] + ang_pixel_size = ang_pixel_size[patch_is_on, :] + + # TODO: add polygon testing right here! + # done + if check_only: + patch_output = [] + for i_pt, angs in enumerate(ang_centers): + # the evaluation omegas; + # expand about the central value using tol vector + ome_eval = np.degrees(angs[2]) + ome_del + + # ...vectorize the omega_to_frame function to avoid loop? + frame_indices = [ + ome_imgser.omega_to_frame(ome)[0] for ome in ome_eval + ] + if -1 in frame_indices: + if not quiet: + msg = """ + window for (%d %d %d) falls outside omega range + """ % tuple(hkls_p[i_pt, :]) + print(msg) + continue + else: + these_vertices = patch_xys[i_pt] + ijs = panel.cartToPixel(these_vertices) + ii, jj = polygon(ijs[:, 0], ijs[:, 1]) + contains_signal = False + for i_frame in frame_indices: + contains_signal = contains_signal or np.any( + ome_imgser[i_frame][ii, jj] > threshold + ) + compl.append(contains_signal) + patch_output.append((ii, jj, frame_indices)) + else: + # make the tth,eta patches for interpolation + patches = xrdutil.make_reflection_patches( + instr_cfg, + ang_centers[:, :2], ang_pixel_size, + omega=ang_centers[:, 2], + tth_tol=tth_tol, eta_tol=eta_tol, + rmat_c=rMat_c, tvec_c=tVec_c, + npdiv=npdiv, quiet=True) + + # GRAND LOOP over reflections for this panel + patch_output = [] + for i_pt, patch in enumerate(patches): + + # strip relevant objects out of current patch + vtx_angs, vtx_xy, conn, areas, xy_eval, ijs = patch + + prows, pcols = areas.shape + nrm_fac = areas/float(native_area) + nrm_fac = nrm_fac / np.min(nrm_fac) + + # grab hkl info + hkl = hkls_p[i_pt, :] + hkl_id = hkl_ids[i_pt] + + # edge arrays + tth_edges = vtx_angs[0][0, :] + delta_tth = tth_edges[1] - tth_edges[0] + eta_edges = vtx_angs[1][:, 0] + delta_eta = eta_edges[1] - eta_edges[0] + + # need to reshape eval pts for interpolation + xy_eval = np.vstack([xy_eval[0].flatten(), + xy_eval[1].flatten()]).T + + # the evaluation omegas; + # expand about the central value using tol vector + ome_eval = np.degrees(ang_centers[i_pt, 2]) + ome_del + + # ???: vectorize the omega_to_frame function to avoid loop? + frame_indices = [ + ome_imgser.omega_to_frame(ome)[0] for ome in ome_eval + ] + + if -1 in frame_indices: + if not quiet: + msg = """ + window for (%d%d%d) falls outside omega range + """ % tuple(hkl) + print(msg) + continue + else: + # initialize spot data parameters + # !!! 
maybe change these to NaN so as not to break the writer
+                    peak_id = next_invalid_peak_id
+                    sum_int = np.nan
+                    max_int = np.nan
+                    meas_angs = np.nan*np.ones(3)
+                    meas_xy = np.nan*np.ones(2)
+
+                    # quick check for intensity
+                    contains_signal = False
+                    patch_data_raw = []
+                    for i_frame in frame_indices:
+                        tmp = ome_imgser[i_frame][ijs[0], ijs[1]]
+                        contains_signal = contains_signal or np.any(
+                            tmp > threshold
+                        )
+                        patch_data_raw.append(tmp)
+                    patch_data_raw = np.stack(patch_data_raw, axis=0)
+                    compl.append(contains_signal)
+
+                    if contains_signal:
+                        # initialize patch data array for intensities
+                        if interp.lower() == 'bilinear':
+                            patch_data = np.zeros(
+                                (len(frame_indices), prows, pcols))
+                            for i, i_frame in enumerate(frame_indices):
+                                patch_data[i] = \
+                                    panel.interpolate_bilinear(
+                                        xy_eval,
+                                        ome_imgser[i_frame],
+                                        pad_with_nans=False
+                                    ).reshape(prows, pcols)  # * nrm_fac
+                        elif interp.lower() == 'nearest':
+                            patch_data = patch_data_raw  # * nrm_fac
+                        else:
+                            msg = "interpolation option " + \
+                                "'%s' not understood"
+                            raise RuntimeError(msg % interp)
+
+                        # now have interpolated patch data...
+                        labels, num_peaks = ndimage.label(
+                            patch_data > threshold, structure=label_struct
+                        )
+                        slabels = np.arange(1, num_peaks + 1)
+
+                        if num_peaks > 0:
+                            peak_id = iRefl
+                            props = regionprops(labels, patch_data)
+                            coms = np.vstack(
+                                [x.weighted_centroid for x in props])
+                            if num_peaks > 1:
+                                center = np.r_[patch_data.shape]*0.5
+                                center_t = np.tile(center, (num_peaks, 1))
+                                com_diff = coms - center_t
+                                closest_peak_idx = np.argmin(
+                                    np.sum(com_diff**2, axis=1)
+                                )
+                            else:
+                                closest_peak_idx = 0
+                            coms = coms[closest_peak_idx]
+                            # meas_omes = \
+                            #     ome_edges[0] + (0.5 + coms[0])*delta_ome
+                            meas_omes = \
+                                ome_eval[0] + coms[0]*delta_ome
+                            meas_angs = np.hstack(
+                                [tth_edges[0] + (0.5 + coms[2])*delta_tth,
+                                 eta_edges[0] + (0.5 + coms[1])*delta_eta,
+                                 mapAngle(
+                                     np.radians(meas_omes), ome_period
+                                 )
+                                 ]
+                            )
+
+                            # intensities
+                            #   - summed is 'integrated' over interpolated
+                            #     data
+                            #   - max is max of raw input data
+                            sum_int = np.sum(
+                                patch_data[
+                                    labels == slabels[closest_peak_idx]
+                                ]
+                            )
+                            max_int = np.max(
+                                patch_data_raw[
+                                    labels == slabels[closest_peak_idx]
+                                ]
+                            )
+                            # ???: Should this only use labeled pixels?
+                            #      Those are segmented from interpolated
+                            #      data, not raw; likely ok in most cases.
+
+                            # need MEASURED xy coords
+                            # FIXME: overload angles_to_cart?
+                            gvec_c = angles_to_gvec(
+                                meas_angs,
+                                chi=self.chi,
+                                rmat_c=rMat_c,
+                                beam_vec=self.beam_vector)
+                            rMat_s = make_sample_rmat(
+                                self.chi, meas_angs[2]
+                            )
+                            meas_xy = gvec_to_xy(
+                                gvec_c,
+                                panel.rmat, rMat_s, rMat_c,
+                                panel.tvec, self.tvec, tVec_c,
+                                beam_vec=self.beam_vector)
+                            if panel.distortion is not None:
+                                meas_xy = panel.distortion.apply_inverse(
+                                    np.atleast_2d(meas_xy)
+                                ).flatten()
+                            # FIXME: why is this suddenly necessary???
+                            meas_xy = meas_xy.squeeze()
+                    else:
+                        patch_data = patch_data_raw
+
+                    if peak_id < 0:
+                        # The peak is invalid.
+                        # Decrement the next invalid peak ID.
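+                        # (this way invalid spots get distinct IDs
+                        #  -100, -101, ... rather than sharing one
+                        #  sentinel value)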
+ next_invalid_peak_id -= 1 + + # write output + if filename is not None: + if output_format.lower() == 'text': + writer.dump_patch( + peak_id, hkl_id, hkl, sum_int, max_int, + ang_centers[i_pt], meas_angs, + xy_centers[i_pt], meas_xy) + elif output_format.lower() == 'hdf5': + xyc_arr = xy_eval.reshape( + prows, pcols, 2 + ).transpose(2, 0, 1) + writer.dump_patch( + detector_id, iRefl, peak_id, hkl_id, hkl, + tth_edges, eta_edges, np.radians(ome_eval), + xyc_arr, ijs, frame_indices, patch_data, + ang_centers[i_pt], xy_centers[i_pt], + meas_angs, meas_xy) + + if return_spot_list: + # Full output + xyc_arr = xy_eval.reshape( + prows, pcols, 2 + ).transpose(2, 0, 1) + _patch_output = [ + detector_id, iRefl, peak_id, hkl_id, hkl, + tth_edges, eta_edges, np.radians(ome_eval), + xyc_arr, ijs, frame_indices, patch_data, + ang_centers[i_pt], xy_centers[i_pt], + meas_angs, meas_xy + ] + else: + # Trimmed output + _patch_output = [ + peak_id, hkl_id, hkl, sum_int, max_int, + ang_centers[i_pt], meas_angs, meas_xy + ] + patch_output.append(_patch_output) + iRefl += 1 + output[detector_id] = patch_output + if filename is not None and output_format.lower() == 'text': + writer.close() + if filename is not None and output_format.lower() == 'hdf5': + writer.close() + return compl, output + + def update_memoization_sizes(self): + # Resize all known memoization functions to have a cache at least + # the size of the number of detectors. + all_panels = list(self.detectors.values()) + PlanarDetector.update_memoization_sizes(all_panels) + CylindricalDetector.update_memoization_sizes(all_panels) + + def calc_transmission(self, rMat_s: np.ndarray = None) -> dict[str, np.ndarray]: + """calculate the transmission from the + filter and polymer coating. the inverse of this + number is the intensity correction that needs + to be applied. 
actual computation is done inside + the detector class + """ + if rMat_s is None: + rMat_s = ct.identity_3x3 + + energy = self.beam_energy + transmissions = {} + for det_name, det in self.detectors.items(): + transmission_filter, transmission_phosphor = ( + det.calc_filter_coating_transmission(energy)) + + transmission = transmission_filter * transmission_phosphor + + if self.physics_package is not None: + transmission_physics_package = ( + det.calc_physics_package_transmission( + energy, rMat_s, self.physics_package)) + effective_pinhole_area = det.calc_effective_pinhole_area( + self.physics_package) + + transmission = ( + transmission * + transmission_physics_package * + effective_pinhole_area + ) + + transmissions[det_name] = transmission + return transmissions + +# ============================================================================= +# UTILITIES +# ============================================================================= + + +class PatchDataWriter(object): + """Class for dumping Bragg reflection data.""" + + def __init__(self, filename): + self._delim = ' ' + header_items = ( + '# ID', 'PID', + 'H', 'K', 'L', + 'sum(int)', 'max(int)', + 'pred tth', 'pred eta', 'pred ome', + 'meas tth', 'meas eta', 'meas ome', + 'pred X', 'pred Y', + 'meas X', 'meas Y' + ) + self._header = self._delim.join([ + self._delim.join(np.tile('{:<6}', 5)).format(*header_items[:5]), + self._delim.join(np.tile('{:<12}', 2)).format(*header_items[5:7]), + self._delim.join(np.tile('{:<23}', 10)).format(*header_items[7:17]) + ]) + if isinstance(filename, IOBase): + self.fid = filename + else: + self.fid = open(filename, 'w') + print(self._header, file=self.fid) + + def __del__(self): + self.close() + + def close(self): + self.fid.close() + + def dump_patch(self, peak_id, hkl_id, + hkl, spot_int, max_int, + pangs, mangs, pxy, mxy): + """ + !!! maybe need to check that last four inputs are arrays + """ + if mangs is None: + spot_int = np.nan + max_int = np.nan + mangs = np.nan*np.ones(3) + mxy = np.nan*np.ones(2) + + res = [int(peak_id), int(hkl_id)] \ + + np.array(hkl, dtype=int).tolist() \ + + [spot_int, max_int] \ + + pangs.tolist() \ + + mangs.tolist() \ + + pxy.tolist() \ + + mxy.tolist() + + output_str = self._delim.join( + [self._delim.join(np.tile('{:<6d}', 5)).format(*res[:5]), + self._delim.join(np.tile('{:<12e}', 2)).format(*res[5:7]), + self._delim.join(np.tile('{:<23.16e}', 10)).format(*res[7:])] + ) + print(output_str, file=self.fid) + return output_str + + +class GrainDataWriter(object): + """Class for dumping grain data.""" + + def __init__(self, filename=None, array=None): + """Writes to either file or np array + + Array must be initialized with number of rows to be written. 
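+
+        A minimal sketch of the array-backed mode (names hypothetical):
+
+            table = np.empty((n_grains, 21))
+            gw = GrainDataWriter(array=table)
+            gw.dump_grain(0, 1.0, 0.1, grain_params)  # fills table row 0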
+ """ + if filename is None and array is None: + raise RuntimeError( + 'GrainDataWriter must be specified with filename or array') + + self.array = None + self.fid = None + + # array supersedes filename + if array is not None: + assert array.shape[1] == 21, \ + f'grain data table must have 21 columns not {array.shape[21]}' + self.array = array + self._array_row = 0 + return + + self._delim = ' ' + header_items = ( + '# grain ID', 'completeness', 'chi^2', + 'exp_map_c[0]', 'exp_map_c[1]', 'exp_map_c[2]', + 't_vec_c[0]', 't_vec_c[1]', 't_vec_c[2]', + 'inv(V_s)[0,0]', 'inv(V_s)[1,1]', 'inv(V_s)[2,2]', + 'inv(V_s)[1,2]*sqrt(2)', + 'inv(V_s)[0,2]*sqrt(2)', + 'inv(V_s)[0,1]*sqrt(2)', + 'ln(V_s)[0,0]', 'ln(V_s)[1,1]', 'ln(V_s)[2,2]', + 'ln(V_s)[1,2]', 'ln(V_s)[0,2]', 'ln(V_s)[0,1]' + ) + self._header = self._delim.join( + [self._delim.join( + np.tile('{:<12}', 3) + ).format(*header_items[:3]), + self._delim.join( + np.tile('{:<23}', len(header_items) - 3) + ).format(*header_items[3:])] + ) + if isinstance(filename, IOBase): + self.fid = filename + else: + self.fid = open(filename, 'w') + print(self._header, file=self.fid) + + def __del__(self): + self.close() + + def close(self): + if self.fid is not None: + self.fid.close() + + def dump_grain(self, grain_id, completeness, chisq, + grain_params): + assert len(grain_params) == 12, \ + "len(grain_params) must be 12, not %d" % len(grain_params) + + # extract strain + emat = logm(np.linalg.inv(mutil.vecMVToSymm(grain_params[6:]))) + evec = mutil.symmToVecMV(emat, scale=False) + + res = [int(grain_id), completeness, chisq] \ + + grain_params.tolist() \ + + evec.tolist() + + if self.array is not None: + row = self._array_row + assert row < self.array.shape[0], \ + f'invalid row {row} in array table' + self.array[row] = res + self._array_row += 1 + return res + + # (else) format and write to file + output_str = self._delim.join( + [self._delim.join( + ['{:<12d}', '{:<12f}', '{:<12e}'] + ).format(*res[:3]), + self._delim.join( + np.tile('{:<23.16e}', len(res) - 3) + ).format(*res[3:])] + ) + print(output_str, file=self.fid) + return output_str + + +class GrainDataWriter_h5(object): + """Class for dumping grain results to an HDF5 archive. 
+ + TODO: add material spec + """ + + def __init__(self, filename, instr_cfg, grain_params, use_attr=False): + if isinstance(filename, h5py.File): + self.fid = filename + else: + self.fid = h5py.File(filename + ".hdf5", "w") + icfg = dict(instr_cfg) + + # add instrument groups and attributes + self.instr_grp = self.fid.create_group('instrument') + unwrap_dict_to_h5(self.instr_grp, icfg, asattr=use_attr) + + # add grain group + self.grain_grp = self.fid.create_group('grain') + rmat_c = make_rmat_of_expmap(grain_params[:3]) + tvec_c = np.array(grain_params[3:6]).flatten() + vinv_s = np.array(grain_params[6:]).flatten() + vmat_s = np.linalg.inv(mutil.vecMVToSymm(vinv_s)) + + if use_attr: # attribute version + self.grain_grp.attrs.create('rmat_c', rmat_c) + self.grain_grp.attrs.create('tvec_c', tvec_c) + self.grain_grp.attrs.create('inv(V)_s', vinv_s) + self.grain_grp.attrs.create('vmat_s', vmat_s) + else: # dataset version + self.grain_grp.create_dataset('rmat_c', data=rmat_c) + self.grain_grp.create_dataset('tvec_c', data=tvec_c) + self.grain_grp.create_dataset('inv(V)_s', data=vinv_s) + self.grain_grp.create_dataset('vmat_s', data=vmat_s) + + data_key = 'reflection_data' + self.data_grp = self.fid.create_group(data_key) + + for det_key in self.instr_grp['detectors'].keys(): + self.data_grp.create_group(det_key) + + # FIXME: throws exception when called after close method + # def __del__(self): + # self.close() + + def close(self): + self.fid.close() + + def dump_patch(self, panel_id, + i_refl, peak_id, hkl_id, hkl, + tth_edges, eta_edges, ome_centers, + xy_centers, ijs, frame_indices, + spot_data, pangs, pxy, mangs, mxy, gzip=1): + """ + to be called inside loop over patches + + default GZIP level for data arrays is 1 + """ + fi = np.array(frame_indices, dtype=int) + + panel_grp = self.data_grp[panel_id] + spot_grp = panel_grp.create_group("spot_%05d" % i_refl) + spot_grp.attrs.create('peak_id', int(peak_id)) + spot_grp.attrs.create('hkl_id', int(hkl_id)) + spot_grp.attrs.create('hkl', np.array(hkl, dtype=int)) + spot_grp.attrs.create('predicted_angles', pangs) + spot_grp.attrs.create('predicted_xy', pxy) + if mangs is None: + mangs = np.nan*np.ones(3) + spot_grp.attrs.create('measured_angles', mangs) + if mxy is None: + mxy = np.nan*np.ones(3) + spot_grp.attrs.create('measured_xy', mxy) + + # get centers crds from edge arrays + # FIXME: export full coordinate arrays, or just center vectors??? + # + # ome_crd, eta_crd, tth_crd = np.meshgrid( + # ome_centers, + # centers_of_edge_vec(eta_edges), + # centers_of_edge_vec(tth_edges), + # indexing='ij') + # + # ome_dim, eta_dim, tth_dim = spot_data.shape + + # !!! 
for now just exporting center vectors for spot_data
+        tth_crd = centers_of_edge_vec(tth_edges)
+        eta_crd = centers_of_edge_vec(eta_edges)
+
+        shuffle_data = True  # reduces size by 20%
+        spot_grp.create_dataset('tth_crd', data=tth_crd,
+                                compression="gzip", compression_opts=gzip,
+                                shuffle=shuffle_data)
+        spot_grp.create_dataset('eta_crd', data=eta_crd,
+                                compression="gzip", compression_opts=gzip,
+                                shuffle=shuffle_data)
+        spot_grp.create_dataset('ome_crd', data=ome_centers,
+                                compression="gzip", compression_opts=gzip,
+                                shuffle=shuffle_data)
+        spot_grp.create_dataset('xy_centers', data=xy_centers,
+                                compression="gzip", compression_opts=gzip,
+                                shuffle=shuffle_data)
+        spot_grp.create_dataset('ij_centers', data=ijs,
+                                compression="gzip", compression_opts=gzip,
+                                shuffle=shuffle_data)
+        spot_grp.create_dataset('frame_indices', data=fi,
+                                compression="gzip", compression_opts=gzip,
+                                shuffle=shuffle_data)
+        spot_grp.create_dataset('intensities', data=spot_data,
+                                compression="gzip", compression_opts=gzip,
+                                shuffle=shuffle_data)
+        return
+
+
+class GenerateEtaOmeMaps(object):
+    """
+    eta-ome map class derived from new image_series and YAML config
+
+    ...for now...
+
+    must provide:
+
+      self.dataStore
+      self.planeData
+      self.iHKLList
+      self.etaEdges  # IN RADIANS
+      self.omeEdges  # IN RADIANS
+      self.etas      # IN RADIANS
+      self.omegas    # IN RADIANS
+
+    """
+
+    def __init__(self, image_series_dict, instrument, plane_data,
+                 active_hkls=None, eta_step=0.25, threshold=None,
+                 ome_period=(0, 360)):
+        """
+        image_series must be OmegaImageSeries class
+        instrument_params must be a dict (loaded from yaml spec)
+        active_hkls must be a list (required for now)
+
+        FIXME: get rid of omega period; should get it from imageseries
+        """
+
+        self._planeData = plane_data
+
+        # ???: change name of iHKLList?
+        # ???: can we change the behavior of iHKLList?
+        if active_hkls is None:
+            self._iHKLList = plane_data.getHKLID(
+                plane_data.hkls, master=True
+            )
+            n_rings = len(self._iHKLList)
+        else:
+            assert hasattr(active_hkls, '__len__'), \
+                "active_hkls must be an iterable with __len__"
+            self._iHKLList = active_hkls
+            n_rings = len(active_hkls)
+
+        # grab a det key and corresponding imageseries (first will do)
+        # !!! assuming that the imageseries for all panels
+        #     have the same length and omegas
+        det_key, this_det_ims = next(iter(image_series_dict.items()))
+
+        # handle omegas
+        # !!! for multi wedge, enforcing monotonicity
+        # !!! wedges also cannot overlap or span more than 360
+        omegas_array = this_det_ims.metadata['omega']  # !!! DEGREES
+        delta_ome = omegas_array[0][-1] - omegas_array[0][0]
+        frame_mask = None
+        ome_period = omegas_array[0, 0] + np.r_[0., 360.]  # !!! be careful
+        if this_det_ims.omegawedges.nwedges > 1:
+            delta_omes = [(i['ostop'] - i['ostart'])/i['nsteps']
+                          for i in this_det_ims.omegawedges.wedges]
+            check_wedges = mutil.uniqueVectors(np.atleast_2d(delta_omes),
+                                               tol=1e-6).squeeze()
+            assert check_wedges.size == 1, \
+                "all wedges must have the same delta omega to 1e-6"
+            # grab representative delta ome
+            # !!! assuming positive delta consistent with OmegaImageSeries
+            delta_ome = delta_omes[0]
+
+            # grab full-range start/stop
+            # !!! be sure to map to the same period to enable arithmetic
+            # ??? safer to do this way rather than just pulling from
+            #     the omegas attribute?
+            owedges = this_det_ims.omegawedges.wedges
+            ostart = owedges[0]['ostart']  # !!!
DEGREES + ostop = float( + mapAngle(owedges[-1]['ostop'], ome_period, units='degrees') + ) + # compute total nsteps + # FIXME: need check for roundoff badness + nsteps = int((ostop - ostart)/delta_ome) + ome_edges_full = np.linspace( + ostart, ostop, num=nsteps+1, endpoint=True + ) + omegas_array = np.vstack( + [ome_edges_full[:-1], ome_edges_full[1:]] + ).T + ome_centers = np.average(omegas_array, axis=1) + + # use OmegaImageSeries method to determine which bins have data + # !!! this array has -1 outside a wedge + # !!! again assuming the valid frame order increases monotonically + frame_mask = np.array( + [this_det_ims.omega_to_frame(ome)[0] != -1 + for ome in ome_centers] + ) + + # ???: need to pass a threshold? + eta_mapping, etas = instrument.extract_polar_maps( + plane_data, image_series_dict, + active_hkls=active_hkls, threshold=threshold, + tth_tol=None, eta_tol=eta_step) + + # for convenience grab map shape from first + map_shape = next(iter(eta_mapping.values())).shape[1:] + + # pack all detectors with masking + # FIXME: add omega masking + data_store = [] + for i_ring in range(n_rings): + # first handle etas + full_map = np.zeros(map_shape, dtype=float) + nan_mask_full = np.zeros( + (len(eta_mapping), map_shape[0], map_shape[1]) + ) + i_p = 0 + for det_key, eta_map in eta_mapping.items(): + nan_mask = ~np.isnan(eta_map[i_ring]) + nan_mask_full[i_p] = nan_mask + full_map[nan_mask] += eta_map[i_ring][nan_mask] + i_p += 1 + re_nan_these = np.sum(nan_mask_full, axis=0) == 0 + full_map[re_nan_these] = np.nan + + # now omegas + if frame_mask is not None: + # !!! must expand row dimension to include + # skipped omegas + tmp = np.ones((len(frame_mask), map_shape[1]))*np.nan + tmp[frame_mask, :] = full_map + full_map = tmp + data_store.append(full_map) + self._dataStore = data_store + + # set required attributes + self._omegas = mapAngle( + np.radians(np.average(omegas_array, axis=1)), + np.radians(ome_period) + ) + self._omeEdges = mapAngle( + np.radians(np.r_[omegas_array[:, 0], omegas_array[-1, 1]]), + np.radians(ome_period) + ) + + # !!! must avoid the case where omeEdges[0] = omeEdges[-1] for the + # indexer to work properly + if abs(self._omeEdges[0] - self._omeEdges[-1]) <= ct.sqrt_epsf: + # !!! 
SIGNED delta ome
+            del_ome = np.radians(omegas_array[0, 1] - omegas_array[0, 0])
+            self._omeEdges[-1] = self._omeEdges[-2] + del_ome
+
+        # handle etas
+        # WARNING: unlike the omegas in imageseries metadata,
+        # these are in RADIANS and represent bin centers
+        self._etaEdges = etas
+        self._etas = self._etaEdges[:-1] + 0.5*np.radians(eta_step)
+
+    @property
+    def dataStore(self):
+        return self._dataStore
+
+    @property
+    def planeData(self):
+        return self._planeData
+
+    @property
+    def iHKLList(self):
+        return np.atleast_1d(self._iHKLList).flatten()
+
+    @property
+    def etaEdges(self):
+        return self._etaEdges
+
+    @property
+    def omeEdges(self):
+        return self._omeEdges
+
+    @property
+    def etas(self):
+        return self._etas
+
+    @property
+    def omegas(self):
+        return self._omegas
+
+    def save(self, filename):
+        xrdutil.EtaOmeMaps.save_eta_ome_maps(self, filename)
+
+
+def _generate_ring_params(tthr, ptth, peta, eta_edges, delta_eta):
+    # mark pixels in the spec'd tth range
+    pixels_in_tthr = np.logical_and(
+        ptth >= tthr[0], ptth <= tthr[1]
+    )
+
+    # catch case where ring isn't on detector
+    if not np.any(pixels_in_tthr):
+        return None
+
+    pixel_ids = np.where(pixels_in_tthr)
+
+    # grab relevant eta coords using histogram
+    pixel_etas = peta[pixel_ids]
+    reta_hist = histogram(pixel_etas, eta_edges)
+    bins_on_detector = np.where(reta_hist)[0]
+
+    return pixel_etas, eta_edges, pixel_ids, bins_on_detector
+
+
+def run_fast_histogram(x, bins, weights=None):
+    return histogram1d(x, len(bins) - 1, (bins[0], bins[-1]),
+                       weights=weights)
+
+
+def run_numpy_histogram(x, bins, weights=None):
+    return np.histogram(x, bins=bins, weights=weights)[0]
+
+
+histogram = run_fast_histogram if fast_histogram else run_numpy_histogram
+
+
+def _run_histograms(rows, ims, tth_ranges, ring_maps, ring_params, threshold):
+    for i_row in range(*rows):
+        image = ims[i_row]
+
+        # handle threshold if specified
+        if threshold is not None:
+            # !!! NaNs get preserved
+            image = np.array(image)
+            image[image < threshold] = 0.
+
+        for i_r, tthr in enumerate(tth_ranges):
+            this_map = ring_maps[i_r]
+            params = ring_params[i_r]
+            if not params:
+                # We are supposed to skip this ring...
+                continue
+
+            # Unpack the params
+            pixel_etas, eta_edges, pixel_ids, bins_on_detector = params
+            result = histogram(pixel_etas, eta_edges, weights=image[pixel_ids])
+
+            # Note that this preserves nan values for bins not on the detector.
+            this_map[i_row, bins_on_detector] = result[bins_on_detector]
+
+
+def _extract_detector_line_positions(iter_args, plane_data, tth_tol,
+                                     eta_tol, eta_centers, npdiv,
+                                     collapse_tth, collapse_eta,
+                                     do_interpolation, do_fitting,
+                                     fitting_kwargs, tth_distortion,
+                                     max_workers):
+    panel, instr_cfg, images, pbp = iter_args
+
+    if images.ndim == 2:
+        images = np.tile(images, (1, 1, 1))
+    elif images.ndim != 3:
+        raise RuntimeError("images must be 2- or 3-d")
+
+    # make rings
+    # !!! adding tth_distortion pass-through; comes in as dict over panels
+    tth_distr_cls = None
+    if tth_distortion is not None:
+        tth_distr_cls = tth_distortion[panel.name]
+
+    pow_angs, pow_xys, tth_ranges = panel.make_powder_rings(
+        plane_data, merge_hkls=True,
+        delta_tth=tth_tol, delta_eta=eta_tol,
+        eta_list=eta_centers, tth_distortion=tth_distr_cls)
+
+    tth_tols = np.degrees(np.hstack([i[1] - i[0] for i in tth_ranges]))
+
+    # !!!
this is only needed if doing fitting + if isinstance(plane_data, PlaneData): + tth_idx, tth_ranges = plane_data.getMergedRanges(cullDupl=True) + tth_ref = plane_data.getTTh() + tth0 = [np.degrees(tth_ref[i]) for i in tth_idx] + else: + tth0 = plane_data + + # ================================================================= + # LOOP OVER RING SETS + # ================================================================= + pbar_rings = partial(tqdm, total=len(pow_angs), desc="Ringset", + position=pbp) + + kwargs = { + 'instr_cfg': instr_cfg, + 'panel': panel, + 'eta_tol': eta_tol, + 'npdiv': npdiv, + 'collapse_tth': collapse_tth, + 'collapse_eta': collapse_eta, + 'images': images, + 'do_interpolation': do_interpolation, + 'do_fitting': do_fitting, + 'fitting_kwargs': fitting_kwargs, + 'tth_distortion': tth_distr_cls, + } + func = partial(_extract_ring_line_positions, **kwargs) + iter_arg = zip(pow_angs, pow_xys, tth_tols, tth0) + with ProcessPoolExecutor(mp_context=constants.mp_context, + max_workers=max_workers) as executor: + return list(pbar_rings(executor.map(func, iter_arg))) + + +def _extract_ring_line_positions(iter_args, instr_cfg, panel, eta_tol, npdiv, + collapse_tth, collapse_eta, images, + do_interpolation, do_fitting, fitting_kwargs, + tth_distortion): + """ + Extracts data for a single Debye-Scherrer ring . + + Parameters + ---------- + iter_args : tuple + (angs [radians], + xys [mm], + tth_tol [deg], + this_tth0 [deg]) + instr_cfg : TYPE + DESCRIPTION. + panel : TYPE + DESCRIPTION. + eta_tol : TYPE + DESCRIPTION. + npdiv : TYPE + DESCRIPTION. + collapse_tth : TYPE + DESCRIPTION. + collapse_eta : TYPE + DESCRIPTION. + images : TYPE + DESCRIPTION. + do_interpolation : TYPE + DESCRIPTION. + do_fitting : TYPE + DESCRIPTION. + fitting_kwargs : TYPE + DESCRIPTION. + tth_distortion : TYPE + DESCRIPTION. + + Yields + ------ + patch_data : TYPE + DESCRIPTION. + + """ + # points are already checked to fall on detector + angs, xys, tth_tol, this_tth0 = iter_args + + # SS 01/31/25 noticed some nans in xys even after clipping + # going to do another round of masking to get rid of those + nan_mask = ~np.logical_or(np.isnan(xys), np.isnan(angs)) + nan_mask = np.logical_or.reduce(nan_mask, 1) + if angs.ndim > 1 and xys.ndim > 1: + angs = angs[nan_mask,:] + xys = xys[nan_mask, :] + + n_images = len(images) + native_area = panel.pixel_area + + # make the tth,eta patches for interpolation + patches = xrdutil.make_reflection_patches( + instr_cfg, angs, panel.angularPixelSize(xys), + tth_tol=tth_tol, eta_tol=eta_tol, npdiv=npdiv, quiet=True) + + # loop over patches + # FIXME: fix initialization + if collapse_tth: + patch_data = np.zeros((len(angs), n_images)) + else: + patch_data = [] + for i_p, patch in enumerate(patches): + # strip relevant objects out of current patch + vtx_angs, vtx_xys, conn, areas, xys_eval, ijs = patch + + # need to reshape eval pts for interpolation + xy_eval = np.vstack([ + xys_eval[0].flatten(), + xys_eval[1].flatten()]).T + + _, on_panel = panel.clip_to_panel(xy_eval) + + if np.any(~on_panel): + continue + + if collapse_tth: + ang_data = (vtx_angs[0][0, [0, -1]], + vtx_angs[1][[0, -1], 0]) + elif collapse_eta: + # !!! 
yield the tth bin centers + tth_centers = np.average( + np.vstack( + [vtx_angs[0][0, :-1], vtx_angs[0][0, 1:]] + ), + axis=0 + ) + ang_data = (tth_centers, + angs[i_p][-1]) + if do_fitting: + fit_data = [] + else: + ang_data = vtx_angs + + prows, pcols = areas.shape + area_fac = areas/float(native_area) + + # interpolate + if not collapse_tth: + ims_data = [] + for j_p in np.arange(len(images)): + # catch interpolation type + image = images[j_p] + if do_interpolation: + p_img = panel.interpolate_bilinear( + xy_eval, + image, + ).reshape(prows, pcols)*area_fac + else: + p_img = image[ijs[0], ijs[1]]*area_fac + + # catch flat spectrum data, which will cause + # fitting to fail. + # ???: best here, or make fitting handle it? + mxval = np.max(p_img) + mnval = np.min(p_img) + if mxval == 0 or (1. - mnval/mxval) < 0.01: + continue + + # catch collapsing options + if collapse_tth: + patch_data[i_p, j_p] = np.average(p_img) + # ims_data.append(np.sum(p_img)) + else: + if collapse_eta: + lineout = np.average(p_img, axis=0) + ims_data.append(lineout) + if do_fitting: + if tth_distortion is not None: + # must correct tth0 + tmp = tth_distortion.apply( + panel.angles_to_cart( + np.vstack( + [np.radians(this_tth0), + np.tile(ang_data[-1], len(this_tth0))] + ).T + ), + return_nominal=True) + pk_centers = np.degrees(tmp[:, 0]) + else: + pk_centers = this_tth0 + kwargs = { + 'tth_centers': np.degrees(tth_centers), + 'lineout': lineout, + 'tth_pred': pk_centers, + **fitting_kwargs, + } + result = fit_ring(**kwargs) + fit_data.append(result) + else: + ims_data.append(p_img) + if not collapse_tth: + output = [ang_data, ims_data] + if do_fitting: + output.append(fit_data) + patch_data.append(output) + + return patch_data + + +DETECTOR_TYPES = { + 'planar': PlanarDetector, + 'cylindrical': CylindricalDetector, +} + + +class BufferShapeMismatchError(RuntimeError): + # This is raised when the buffer shape does not match the detector shape + pass + + +@contextmanager +def switch_xray_source(instr: HEDMInstrument, xray_source: Optional[str]): + if xray_source is None: + # If the x-ray source is None, leave it as the current active one + yield + return + + prev_beam_name = instr.active_beam_name + instr.active_beam_name = xray_source + try: + yield + finally: + instr.active_beam_name = prev_beam_name diff --git a/hexrd/hedm/instrument/physics_package.py b/hexrd/hedm/instrument/physics_package.py new file mode 100644 index 000000000..d5837d99a --- /dev/null +++ b/hexrd/hedm/instrument/physics_package.py @@ -0,0 +1,295 @@ +from abc import abstractmethod +import numpy as np +from hexrd.material.utils import calculate_linear_absorption_length + + +class AbstractPhysicsPackage: + """abstract class for the physics package. + there will be two separate physics package class + types -- one for HED samples and the other for + HEDM samples. 
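+
+    A subclass is instantiated with keyword arguments; an illustrative
+    (hypothetical) HED example:
+
+        pp = HEDPhysicsPackage(sample_material='CH', sample_density=1.05,
+                               sample_thickness=50.)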
+ + Parameters + ---------- + sample_material : str or hexrd.material.Material + either the formula or a hexrd material instance + sample_density : float + density of sample material in g/cc + sample_thickness : float + sample thickness in microns + sample_geometry : FIXME + FIXME + pinhole_material : str or hexrd.material.Material, optional + either the formula or a hexrd material instance + pinhole_density : float + density of pinhole material in g/cc + pinhole_thickness : float + pinhole thickness in microns + pinhole_diameter : float + pinhole diameter in microns + window_material : str or hexrd.material.Material + either the formula or a hexrd material instance + window_density : float + density of window material in g/cc + window_thickness : float + window thickness in microns + + + Notes + ----- + [1] Rygg et al., X-ray diffraction at the National + Ignition Facility, Rev. Sci. Instrum. 91, 043902 (2020) + [2] M. Stoeckl, A. A. Solodov + Readout models for BaFBr0.85I0.15:Eu image plates + Rev. Sci. Instrum. 89, 063101 (2018 + """ + # Abstract methods that must be redefined in derived classes + @property + @abstractmethod + def type(self): + pass + + def __init__(self, + sample_material=None, + sample_density=None, + sample_thickness=None, + pinhole_material=None, + pinhole_density=None, + pinhole_thickness=None, + pinhole_diameter=None, + **kwargs + ): + self._sample_material = sample_material + self._sample_density = sample_density + self._sample_thickness = sample_thickness + self._pinhole_material = pinhole_material + self._pinhole_density = pinhole_density + self._pinhole_thickness = pinhole_thickness + self._pinhole_diameter = pinhole_diameter + + @property + def attributes_to_serialize(self): + return [ + 'sample_material', + 'sample_density', + 'sample_thickness', + 'pinhole_material', + 'pinhole_density', + 'pinhole_thickness', + 'pinhole_diameter', + ] + + @property + def sample_material(self): + return self._sample_material + + @sample_material.setter + def sample_material(self, material): + self._sample_material = material + + @property + def sample_density(self): + if self._sample_density is None: + return 0.0 + return self._sample_density + + @sample_density.setter + def sample_density(self, density): + self._sample_density = density + + @property + def sample_thickness(self): + if self._sample_thickness is None: + return 0.0 + return self._sample_thickness + + @sample_thickness.setter + def sample_thickness(self, value): + self._sample_thickness = value + + @property + def pinhole_material(self): + return self._pinhole_material + + @pinhole_material.setter + def pinhole_material(self, material): + self._pinhole_material = material + + @property + def pinhole_density(self): + if self._pinhole_density is None: + return 0.0 + return self._pinhole_density + + @pinhole_density.setter + def pinhole_density(self, density): + self._pinhole_density = density + + @property + def pinhole_thickness(self): + if self._pinhole_thickness is None: + return 0.0 + return self._pinhole_thickness + + @pinhole_thickness.setter + def pinhole_thickness(self, value): + self._pinhole_thickness = value + + @property + def pinhole_radius(self): + if self.pinhole_diameter is None: + return 0.0 + return 0.5 * self.pinhole_diameter + + @pinhole_radius.setter + def pinhole_radius(self, value): + self._pinhole_diameter = 2.0 * value + + @property + def pinhole_diameter(self): + if self._pinhole_diameter is None: + return 0.0 + return self._pinhole_diameter + + @pinhole_diameter.setter + def 
pinhole_diameter(self, value): + self._pinhole_diameter = value + + def absorption_length(self, energy, flag): + if isinstance(energy, float): + energy_inp = np.array([energy]) + elif isinstance(energy, list): + energy_inp = np.array(energy) + elif isinstance(energy, np.ndarray): + energy_inp = energy + + if flag.lower() == 'sample': + args = (self.sample_density, + self.sample_material, + energy_inp, + ) + elif flag.lower() == 'window': + args = (self.window_density, + self.window_material, + energy_inp, + ) + elif flag.lower() == 'pinhole': + args = (self.pinhole_density, + self.pinhole_material, + energy_inp, + ) + abs_length = calculate_linear_absorption_length(*args) + if abs_length.shape[0] == 1: + return abs_length[0] + else: + return abs_length + + def sample_absorption_length(self, energy): + return self.absorption_length(energy, 'sample') + + def pinhole_absorption_length(self, energy): + return self.absorption_length(energy, 'pinhole') + + def serialize(self): + return {a: getattr(self, a) for a in self.attributes_to_serialize} + + def deserialize(self, **kwargs): + for key, value in kwargs.items(): + setattr(self, key, value) + + +class HEDPhysicsPackage(AbstractPhysicsPackage): + + def __init__(self, **pp_kwargs): + super().__init__(**pp_kwargs) + self._window_material = pp_kwargs.get('window_material', None) + self._window_density = pp_kwargs.get('window_density', None) + self._window_thickness = pp_kwargs.get('window_thickness', None) + + @property + def attributes_to_serialize(self): + return [ + 'sample_material', + 'sample_density', + 'sample_thickness', + 'pinhole_material', + 'pinhole_density', + 'pinhole_thickness', + 'pinhole_diameter', + 'window_material', + 'window_density', + 'window_thickness', + ] + + @property + def type(self): + return 'HED' + + @property + def window_material(self): + return self._window_material + + @window_material.setter + def window_material(self, material): + self._window_material = material + + @property + def window_density(self): + if self._window_density is None: + return 0.0 + return self._window_density + + @window_density.setter + def window_density(self, density): + self._window_density = density + + @property + def window_thickness(self): + if self._window_thickness is None: + return 0.0 + return self._window_thickness + + @window_thickness.setter + def window_thickness(self, thickness): + self._window_thickness = thickness + + def window_absorption_length(self, energy): + return self.absorption_length(energy, 'window') + + +class HEDMPhysicsPackage(AbstractPhysicsPackage): + + def __init__(self, **pp_kwargs): + super().__init__(**pp_kwargs) + self._sample_geometry = pp_kwargs.get('sample_geometry', None) + + @property + def attributes_to_serialize(self): + return [ + 'sample_material', + 'sample_density', + 'sample_thickness', + 'sample_geometry', + 'pinhole_material', + 'pinhole_density', + 'pinhole_thickness', + 'pinhole_diameter', + ] + + @property + def sample_geometry(self): + return self._sample_geometry + + @property + def sample_diameter(self): + if self.sample_geometry == 'cylinder': + return self._sample_thickness + else: + msg = (f'sample geometry does not have diameter ' + f'associated with it.') + print(msg) + return + + @property + def type(self): + return 'HEDM' diff --git a/hexrd/resources/detector_templates/__init__.py b/hexrd/hedm/ipfcolor/__init__.py similarity index 100% rename from hexrd/resources/detector_templates/__init__.py rename to hexrd/hedm/ipfcolor/__init__.py diff --git 
a/hexrd/ipfcolor/colorspace.py b/hexrd/hedm/ipfcolor/colorspace.py similarity index 100% rename from hexrd/ipfcolor/colorspace.py rename to hexrd/hedm/ipfcolor/colorspace.py diff --git a/hexrd/ipfcolor/sphere_sector.py b/hexrd/hedm/ipfcolor/sphere_sector.py similarity index 100% rename from hexrd/ipfcolor/sphere_sector.py rename to hexrd/hedm/ipfcolor/sphere_sector.py diff --git a/hexrd/hedm/material/crystallography.py b/hexrd/hedm/material/crystallography.py new file mode 100644 index 000000000..574225e67 --- /dev/null +++ b/hexrd/hedm/material/crystallography.py @@ -0,0 +1,2255 @@ +# -*- coding: utf-8 -*- +# ============================================================================= +# Copyright (c) 2012, Lawrence Livermore National Security, LLC. +# Produced at the Lawrence Livermore National Laboratory. +# Written by Joel Bernier and others. +# LLNL-CODE-529294. +# All rights reserved. +# +# This file is part of HEXRD. For details on dowloading the source, +# see the file COPYING. +# +# Please also see the file LICENSE. +# +# This program is free software; you can redistribute it and/or modify it under +# the terms of the GNU Lesser General Public License (as published by the Free +# Software Foundation) version 2.1 dated February 1999. +# +# This program is distributed in the hope that it will be useful, but +# WITHOUT ANY WARRANTY; without even the IMPLIED WARRANTY OF MERCHANTABILITY +# or FITNESS FOR A PARTICULAR PURPOSE. See the terms and conditions of the +# GNU General Public License for more details. +# +# You should have received a copy of the GNU Lesser General Public +# License along with this program (see file LICENSE); if not, write to +# the Free Software Foundation, Inc., 59 Temple Place, Suite 330, +# Boston, MA 02111-1307 USA or visit . +# ============================================================================= +import re +import copy +import csv +import os +from math import pi +from typing import Optional, Union, Dict, List, Tuple + +import numpy as np + +from hexrd.material.unitcell import unitcell +from hexrd.deprecation import deprecated +from hexrd import constants +from hexrd.matrixutil import unitVector +from hexrd.rotations import ( + rotMatOfExpMap, + mapAngle, + applySym, + ltypeOfLaueGroup, + quatOfLaueGroup, +) +from hexrd.transforms import xfcapi +from hexrd import valunits +from hexrd.valunits import toFloat +from hexrd.constants import d2r, r2d, sqrt3by2, epsf, sqrt_epsf + +"""module vars""" + +# units +dUnit = 'angstrom' +outputDegrees = False +outputDegrees_bak = outputDegrees + + +def hklToStr(hkl: np.ndarray) -> str: + """ + Converts hkl representation to a string. + + Parameters + ---------- + hkl : np.ndarray + 3 element list of h, k, and l values (Miller indices). + + Returns + ------- + str + Space-separated string representation of h, k, and l values. + + """ + return re.sub(r'[\[\]\(\)\{\},]', '', str(hkl)) + + +def tempSetOutputDegrees(val: bool) -> None: + """ + Set the global outputDegrees flag temporarily. Can be reverted with + revertOutputDegrees(). + + Parameters + ---------- + val : bool + True to output angles in degrees, False to output angles in radians. + + Returns + ------- + None + + """ + global outputDegrees, outputDegrees_bak + outputDegrees_bak = outputDegrees + outputDegrees = val + + +def revertOutputDegrees() -> None: + """ + Revert the effect of tempSetOutputDegrees(), resetting the outputDegrees + flag to its previous value (True to output in degrees, False for radians). 
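+
+    A typical (illustrative) pairing:
+
+        tempSetOutputDegrees(True)
+        # ... angle-producing calls emit degrees here ...
+        revertOutputDegrees()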
+ + Returns + ------- + None + """ + global outputDegrees, outputDegrees_bak + outputDegrees = outputDegrees_bak + + +def cosineXform( + a: np.ndarray, b: np.ndarray, c: np.ndarray +) -> tuple[np.ndarray, np.ndarray]: + """ + Spherical trig transform to take alpha, beta, gamma to expressions + for cos(alpha*). See ref below. + + [1] R. J. Neustadt, F. W. Cagle, Jr., and J. Waser, ``Vector algebra and + the relations between direct and reciprocal lattice quantities''. Acta + Cryst. (1968), A24, 247--248 + + Parameters + ---------- + a : np.ndarray + List of alpha angle values (radians). + b : np.ndarray + List of beta angle values (radians). + c : np.ndarray + List of gamma angle values (radians). + + Returns + ------- + np.ndarray + List of cos(alpha*) values. + np.ndarray + List of sin(alpha*) values. + + """ + cosar = (np.cos(b) * np.cos(c) - np.cos(a)) / (np.sin(b) * np.sin(c)) + sinar = np.sqrt(1 - cosar**2) + return cosar, sinar + + +def processWavelength(arg: Union[valunits.valWUnit, float]) -> float: + """ + Convert an energy value to a wavelength. If argument has units of length + or energy, will convert to globally specified unit type for wavelength + (dUnit). If argument is a scalar, assumed input units are keV. + """ + if isinstance(arg, valunits.valWUnit): + # arg is a valunits.valWUnit object + if arg.isLength(): + return arg.getVal(dUnit) + elif arg.isEnergy(): + e = arg.getVal('keV') + return valunits.valWUnit( + 'wavelength', 'length', constants.keVToAngstrom(e), 'angstrom' + ).getVal(dUnit) + else: + raise RuntimeError('do not know what to do with ' + str(arg)) + else: + # !!! assuming arg is in keV + return valunits.valWUnit( + 'wavelength', 'length', constants.keVToAngstrom(arg), 'angstrom' + ).getVal(dUnit) + +def latticeParameters(lvec): + """ + Generates direct and reciprocal lattice vector components in a + crystal-relative RHON basis, X. The convention for fixing X to the + lattice is such that a || x1 and c* || x3, where a and c* are + direct and reciprocal lattice vectors, respectively. + """ + lnorm = np.sqrt(np.sum(lvec**2, 0)) + + a = lnorm[0] + b = lnorm[1] + c = lnorm[2] + + ahat = lvec[:, 0] / a + bhat = lvec[:, 1] / b + chat = lvec[:, 2] / c + + gama = np.arccos(np.dot(ahat, bhat)) + beta = np.arccos(np.dot(ahat, chat)) + alfa = np.arccos(np.dot(bhat, chat)) + if outputDegrees: + gama = r2d * gama + beta = r2d * beta + alfa = r2d * alfa + + return [a, b, c, alfa, beta, gama] + +def latticePlanes( + hkls: np.ndarray, + lparms: np.ndarray, + ltype: Optional[str] = 'cubic', + wavelength: Optional[float] = 1.54059292, + strainMag: Optional[float] = None, +) -> Dict[str, np.ndarray]: + """ + Generates lattice plane data in the direct lattice for a given set + of Miller indices. Vector components are written in the + crystal-relative RHON basis, X. The convention for fixing X to the + lattice is such that a || x1 and c* || x3, where a and c* are + direct and reciprocal lattice vectors, respectively. + + USAGE: + + planeInfo = latticePlanes(hkls, lparms, **kwargs) + + INPUTS: + + 1) hkls (3 x n float ndarray) is the array of Miller indices for + the planes of interest. The vectors are assumed to be + concatenated along the 1-axis (horizontal). + + 2) lparms (1 x m float list) is the array of lattice parameters, + where m depends on the symmetry group (see below). + + The following optional arguments are recognized: + + 3) ltype=(string) is a string representing the symmetry type of + the implied Laue group. The 11 available choices are shown + below. 
The default value is 'cubic'. Note that each group + expects a lattice parameter array of the indicated length + and order. + + latticeType lparms + ----------- ------------ + 'cubic' a + 'hexagonal' a, c + 'trigonal' a, c + 'rhombohedral' a, alpha (in degrees) + 'tetragonal' a, c + 'orthorhombic' a, b, c + 'monoclinic' a, b, c, beta (in degrees) + 'triclinic' a, b, c, alpha, beta, gamma (in degrees) + + 4) wavelength= is a value represented the wavelength in + Angstroms to calculate bragg angles for. The default value + is for Cu K-alpha radiation (1.54059292 Angstrom) + + 5) strainMag=None + + OUTPUTS: + + 1) planeInfo is a dictionary containing the following keys/items: + + normals (3, n) double array array of the components to the + unit normals for each {hkl} in + X (horizontally concatenated) + + dspacings (n, ) double array array of the d-spacings for + each {hkl} + + tThetas (n, ) double array array of the Bragg angles for + each {hkl} relative to the + specified wavelength + + NOTES: + + *) This function is effectively a wrapper to 'latticeVectors'. + See 'help(latticeVectors)' for additional info. + + *) Lattice plane d-spacings are calculated from the reciprocal + lattice vectors specified by {hkl} as shown in Appendix 1 of + [1]. + + REFERENCES: + + [1] B. D. Cullity, ``Elements of X-Ray Diffraction, 2 + ed.''. Addison-Wesley Publishing Company, Inc., 1978. ISBN + 0-201-01174-3 + + """ + location = 'latticePlanes' + + assert ( + hkls.shape[0] == 3 + ), f"hkls aren't column vectors in call to '{location}'!" + + tag = ltype + wlen = wavelength + + # get B + L = latticeVectors(lparms, tag) + + # get G-vectors -- reciprocal vectors in crystal frame + G = np.dot(L['B'], hkls) + + # magnitudes + d = 1 / np.sqrt(np.sum(G**2, 0)) + + aconv = 1.0 + if outputDegrees: + aconv = r2d + + # two thetas + sth = wlen / 2.0 / d + mask = np.abs(sth) < 1.0 + tth = np.zeros(sth.shape) + + tth[~mask] = np.nan + tth[mask] = aconv * 2.0 * np.arcsin(sth[mask]) + + p = dict(normals=unitVector(G), dspacings=d, tThetas=tth) + + if strainMag is not None: + p['tThetasLo'] = np.zeros(sth.shape) + p['tThetasHi'] = np.zeros(sth.shape) + + mask = (np.abs(wlen / 2.0 / (d * (1.0 + strainMag))) < 1.0) & ( + np.abs(wlen / 2.0 / (d * (1.0 - strainMag))) < 1.0 + ) + + p['tThetasLo'][~mask] = np.nan + p['tThetasHi'][~mask] = np.nan + + p['tThetasLo'][mask] = ( + aconv * 2 * np.arcsin(wlen / 2.0 / (d[mask] * (1.0 + strainMag))) + ) + p['tThetasHi'][mask] = ( + aconv * 2 * np.arcsin(wlen / 2.0 / (d[mask] * (1.0 - strainMag))) + ) + + return p + + +def latticeVectors( + lparms: np.ndarray, + tag: Optional[str] = 'cubic', + radians: Optional[bool] = False, +) -> Dict[str, Union[np.ndarray, float]]: + """ + Generates direct and reciprocal lattice vector components in a + crystal-relative RHON basis, X. The convention for fixing X to the + lattice is such that a || x1 and c* || x3, where a and c* are + direct and reciprocal lattice vectors, respectively. + + USAGE: + + lattice = LatticeVectors(lparms, ) + + INPUTS: + + 1) lparms (1 x n float list) is the array of lattice parameters, + where n depends on the symmetry group (see below). + + 2) tag (string) is a case-insensitive string representing the + symmetry type of the implied Laue group. The 11 available choices + are shown below. The default value is 'cubic'. Note that each + group expects a lattice parameter array of the indicated length + and order. 
+
+        latticeType      lparms
+        -----------      ------------
+        'cubic'          a
+        'hexagonal'      a, c
+        'trigonal'       a, c
+        'rhombohedral'   a, alpha (in degrees)
+        'tetragonal'     a, c
+        'orthorhombic'   a, b, c
+        'monoclinic'     a, b, c, beta (in degrees)
+        'triclinic'      a, b, c, alpha, beta, gamma (in degrees)
+
+    The following optional arguments are recognized:
+
+    3) radians= is a boolean flag indicating usage of radians rather
+    than degrees, defaults to False.
+
+    OUTPUTS:
+
+    1) lattice is a dictionary containing the following keys/items:
+
+       F         (3, 3) double array    transformation matrix taking
+                                        components in the direct
+                                        lattice (i.e. {uvw}) to the
+                                        reference, X
+
+       B         (3, 3) double array    transformation matrix taking
+                                        components in the reciprocal
+                                        lattice (i.e. {hkl}) to X
+
+       BR        (3, 3) double array    transformation matrix taking
+                                        components in the reciprocal
+                                        lattice to the Fable reference
+                                        frame (see notes)
+
+       U0        (3, 3) double array    transformation matrix
+                                        (orthogonal) taking
+                                        components in the
+                                        Fable reference frame to X
+
+       vol       double                 the unit cell volume
+
+       dparms    (6, ) double list      the direct lattice parameters:
+                                        [a b c alpha beta gamma]
+
+       rparms    (6, ) double list      the reciprocal lattice
+                                        parameters:
+                                        [a* b* c* alpha* beta* gamma*]
+
+    NOTES:
+
+    *) The conventions used for assigning a RHON basis,
+       X -> {x1, x2, x3}, to each point group are consistent with
+       those published in Appendix B of [1]. Namely: a || x1 and
+       c* || x3. This differs from the convention chosen by the Fable
+       group, where a* || x1 and c || x3 [2].
+
+    *) The unit cell angles are defined as follows:
+       alpha=acos(b'*c/|b||c|), beta=acos(c'*a/|c||a|), and
+       gamma=acos(a'*b/|a||b|).
+
+    *) The reciprocal lattice vectors are calculated using the
+       crystallographic convention, where the prefactor of 2*pi is
+       omitted. In this convention, the reciprocal lattice volume is
+       1/V.
+
+    *) Several relations from [3] were employed in the component
+       calculations.
+
+    REFERENCES:
+
+    [1] J. F. Nye, ``Physical Properties of Crystals: Their
+        Representation by Tensors and Matrices''. Oxford University
+        Press, 1985. ISBN 0198511655
+
+    [2] E. M. Lauridsen, S. Schmidt, R. M. Suter, and H. F. Poulsen,
+        ``Tracking: a method for structural characterization of grains
+        in powders or polycrystals''. J. Appl. Cryst. (2001). 34,
+        744--750
+
+    [3] R. J. Neustadt, F. W. Cagle, Jr., and J. Waser, ``Vector
+        algebra and the relations between direct and reciprocal
+        lattice quantities''. Acta Cryst.
(1968), A24, 247--248 + + + """ + + # build index for sorting out lattice parameters + lattStrings = [ + 'cubic', + 'hexagonal', + 'trigonal', + 'rhombohedral', + 'tetragonal', + 'orthorhombic', + 'monoclinic', + 'triclinic', + ] + + if radians: + aconv = 1.0 + else: + aconv = pi / 180.0 # degToRad + deg90 = pi / 2.0 + deg120 = 2.0 * pi / 3.0 + # + if tag == lattStrings[0]: + # cubic + cellparms = np.r_[np.tile(lparms[0], (3,)), deg90 * np.ones((3,))] + elif tag == lattStrings[1] or tag == lattStrings[2]: + # hexagonal | trigonal (hex indices) + cellparms = np.r_[ + lparms[0], lparms[0], lparms[1], deg90, deg90, deg120 + ] + elif tag == lattStrings[3]: + # rhombohedral + cellparms = np.r_[ + np.tile(lparms[0], (3,)), np.tile(aconv * lparms[1], (3,)) + ] + elif tag == lattStrings[4]: + # tetragonal + cellparms = np.r_[lparms[0], lparms[0], lparms[1], deg90, deg90, deg90] + elif tag == lattStrings[5]: + # orthorhombic + cellparms = np.r_[lparms[0], lparms[1], lparms[2], deg90, deg90, deg90] + elif tag == lattStrings[6]: + # monoclinic + cellparms = np.r_[ + lparms[0], lparms[1], lparms[2], deg90, aconv * lparms[3], deg90 + ] + elif tag == lattStrings[7]: + # triclinic + cellparms = np.r_[ + lparms[0], + lparms[1], + lparms[2], + aconv * lparms[3], + aconv * lparms[4], + aconv * lparms[5], + ] + else: + raise RuntimeError(f'lattice tag "{tag}" is not recognized') + + alpha, beta, gamma = cellparms[3:6] + cosalfar, sinalfar = cosineXform(alpha, beta, gamma) + + a = cellparms[0] * np.r_[1, 0, 0] + b = cellparms[1] * np.r_[np.cos(gamma), np.sin(gamma), 0] + c = ( + cellparms[2] + * np.r_[ + np.cos(beta), -cosalfar * np.sin(beta), sinalfar * np.sin(beta) + ] + ) + + ad = np.sqrt(np.sum(a**2)) + bd = np.sqrt(np.sum(b**2)) + cd = np.sqrt(np.sum(c**2)) + + # Cell volume + V = np.dot(a, np.cross(b, c)) + + # F takes components in the direct lattice to X + F = np.c_[a, b, c] + + # Reciprocal lattice vectors + astar = np.cross(b, c) / V + bstar = np.cross(c, a) / V + cstar = np.cross(a, b) / V + + # and parameters + ar = np.sqrt(np.sum(astar**2)) + br = np.sqrt(np.sum(bstar**2)) + cr = np.sqrt(np.sum(cstar**2)) + + alfar = np.arccos(np.dot(bstar, cstar) / br / cr) + betar = np.arccos(np.dot(cstar, astar) / cr / ar) + gamar = np.arccos(np.dot(astar, bstar) / ar / br) + + # B takes components in the reciprocal lattice to X + B = np.c_[astar, bstar, cstar] + + cosalfar2, sinalfar2 = cosineXform(alfar, betar, gamar) + + afable = ar * np.r_[1, 0, 0] + bfable = br * np.r_[np.cos(gamar), np.sin(gamar), 0] + cfable = ( + cr + * np.r_[ + np.cos(betar), + -cosalfar2 * np.sin(betar), + sinalfar2 * np.sin(betar), + ] + ) + + BR = np.c_[afable, bfable, cfable] + U0 = np.dot(B, np.linalg.inv(BR)) + if outputDegrees: + dparms = np.r_[ad, bd, cd, r2d * np.r_[alpha, beta, gamma]] + rparms = np.r_[ar, br, cr, r2d * np.r_[alfar, betar, gamar]] + else: + dparms = np.r_[ad, bd, cd, np.r_[alpha, beta, gamma]] + rparms = np.r_[ar, br, cr, np.r_[alfar, betar, gamar]] + + return { + 'F': F, + 'B': B, + 'BR': BR, + 'U0': U0, + 'vol': V, + 'dparms': dparms, + 'rparms': rparms, + } + +def hexagonalIndicesFromRhombohedral(hkl): + """ + converts rhombohedral hkl to hexagonal indices + """ + HKL = np.zeros((3, hkl.shape[1]), dtype='int') + + HKL[0, :] = hkl[0, :] - hkl[1, :] + HKL[1, :] = hkl[1, :] - hkl[2, :] + HKL[2, :] = hkl[0, :] + hkl[1, :] + hkl[2, :] + + return HKL + + +def rhombohedralIndicesFromHexagonal(HKL): + """ + converts hexagonal hkl to rhombohedral indices + """ + hkl = np.zeros((3, HKL.shape[1]), dtype='int') + + 
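+    # inverse of hexagonalIndicesFromRhombohedral; the division by 3 below
+    # yields integer indices only when the hexagonal reflection satisfies
+    # the obverse-setting condition -H + K + L = 3n; e.g. hexagonal
+    # (0, 0, 3) maps to rhombohedral (1, 1, 1)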
hkl[0, :] = 2 * HKL[0, :] + HKL[1, :] + HKL[2, :] + hkl[1, :] = -HKL[0, :] + HKL[1, :] + HKL[2, :] + hkl[2, :] = -HKL[0, :] - 2 * HKL[1, :] + HKL[2, :] + + hkl = hkl / 3.0 + return hkl + + +def rhombohedralParametersFromHexagonal(a_h, c_h): + """ + converts hexagonal lattice parameters (a, c) to rhombohedral + lattice parameters (a, alpha) + """ + a_r = np.sqrt(3 * a_h**2 + c_h**2) / 3.0 + alfa_r = 2 * np.arcsin(3.0 / (2 * np.sqrt(3 + (c_h / a_h) ** 2))) + if outputDegrees: + alfa_r = r2d * alfa_r + return a_r, alfa_r + + +def convert_Miller_direction_to_cartesian(uvw, a=1.0, c=1.0, normalize=False): + """ + Converts 3-index hexagonal Miller direction indices to components in the + crystal reference frame. + Parameters + ---------- + uvw : array_like + The (n, 3) array of 3-index hexagonal indices to convert. + a : scalar, optional + The `a` lattice parameter. The default value is 1. + c : scalar, optional + The `c` lattice parameter. The default value is 1. + normalize : bool, optional + Flag for whether or not to normalize output vectors + Returns + ------- + numpy.ndarray + The (n, 3) array of cartesian components associated with the input + direction indices. + Notes + ----- + 1) The [uv.w] the Miller-Bravais convention is in the hexagonal basis + {a1, a2, a3, c}. The basis for the output, {o1, o2, o3}, is + chosen such that + o1 || a1 + o3 || c + o2 = o3 ^ o1 + """ + u, v, w = np.atleast_2d(uvw).T + retval = np.vstack([1.5 * u * a, sqrt3by2 * a * (2 * v + u), w * c]) + if normalize: + return unitVector(retval).T + else: + return retval.T + + +def convert_Miller_direction_to_MillerBravias(uvw, suppress_redundant=True): + """ + Converts 3-index hexagonal Miller direction indices to 4-index + Miller-Bravais direction indices. + Parameters + ---------- + uvw : array_like + The (n, 3) array of 3-index hexagonal Miller indices to convert. + suppress_redundant : bool, optional + Flag to suppress the redundant 3rd index. The default is True. + Returns + ------- + numpy.ndarray + The (n, 3) or (n, 4) array -- depending on kwarg -- of Miller-Bravis + components associated with the input Miller direction indices. + Notes + ----- + * NOT for plane normals!!! + """ + u, v, w = np.atleast_2d(uvw).T + retval = np.vstack([(2 * u - v) / 3, (2 * v - u) / 3, w]).T + rem = np.vstack([np.mod(np.tile(i[0], 2), i[1:]) for i in retval]) + rem[abs(rem) < epsf] = np.nan + lcm = np.nanmin(rem, axis=1) + lcm[np.isnan(lcm)] = 1 + retval = retval / np.tile(lcm, (3, 1)).T + if suppress_redundant: + return retval + else: + t = np.atleast_2d(1 - np.sum(retval[:2], axis=1)).T + return np.hstack([retval[:, :2], t, np.atleast_2d(retval[:, 2]).T]) + + +def convert_MillerBravias_direction_to_Miller(UVW): + """ + Converts 4-index hexagonal Miller-Bravais direction indices to + 3-index Miller direction indices. + Parameters + ---------- + UVW : array_like + The (n, 3) array of **non-redundant** Miller-Bravais direction indices + to convert. + Returns + ------- + numpy.ndarray + The (n, 3) array of Miller direction indices associated with the + input Miller-Bravais indices. + Notes + ----- + * NOT for plane normals!!! 
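+
+    Examples
+    --------
+    A minimal illustrative sketch (not from the original source); the
+    non-redundant Miller-Bravais direction [2 -1 . 0] maps to the Miller
+    direction [3 0 0], returned column-wise:
+
+    >>> convert_MillerBravias_direction_to_Miller(np.array([[2, -1, 0]]))
+    array([[3],
+           [0],
+           [0]])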
+ """ + U, V, W = np.atleast_2d(UVW).T + return np.vstack([2 * U + V, 2 * V + U, W]) + + +class PlaneData(object): + """ + Careful with ordering: Outputs are ordered by the 2-theta for the + hkl unless you get self._hkls directly, and this order can change + with changes in lattice parameters (lparms); setting and getting + exclusions works on the current hkl ordering, not the original + ordering (in self._hkls), but exclusions are stored in the + original ordering in case the hkl ordering does change with + lattice parameters + + if not None, tThWidth takes priority over strainMag in setting + two-theta ranges; changing strainMag automatically turns off + tThWidth + """ + + def __init__(self, hkls: Optional[np.ndarray], *args, **kwargs) -> None: + """ + Constructor for PlaneData + + Parameters + ---------- + hkls : np.ndarray + Miller indices to be used in the plane data. Can be None if + args is another PlaneData object + + *args + Unnamed arguments. Could be in the format of `lparms, laueGroup, + wavelength, strainMag`, or just a `PlaneData` object. + + **kwargs + Valid keyword arguments include: + - doTThSort + - exclusions + - tThMax + - tThWidth + """ + self._doTThSort = True + self._exclusions = None + self._tThMax = None + + if len(args) == 4: + lparms, laueGroup, wavelength, strainMag = args + tThWidth = None + self._wavelength = processWavelength(wavelength) + self._lparms = self._parseLParms(lparms) + elif len(args) == 1 and isinstance(args[0], PlaneData): + other = args[0] + lparms, laueGroup, wavelength, strainMag, tThWidth = ( + other.getParams() + ) + self._wavelength = wavelength + self._lparms = lparms + self._doTThSort = other._doTThSort + self._exclusions = other._exclusions + self._tThMax = other._tThMax + if hkls is None: + hkls = other._hkls + else: + raise NotImplementedError(f'args : {args}') + + self._laueGroup = laueGroup + self._hkls = copy.deepcopy(hkls) + self._strainMag = strainMag + self._structFact = np.ones(self._hkls.shape[1]) + self.tThWidth = tThWidth + + # ... 
need to implement tThMin too + if 'doTThSort' in kwargs: + self._doTThSort = kwargs.pop('doTThSort') + if 'exclusions' in kwargs: + self._exclusions = kwargs.pop('exclusions') + if 'tThMax' in kwargs: + self._tThMax = toFloat(kwargs.pop('tThMax'), 'radians') + if 'tThWidth' in kwargs: + self.tThWidth = kwargs.pop('tThWidth') + if len(kwargs) > 0: + raise RuntimeError( + f'have unparsed keyword arguments with keys: {kwargs.keys()}' + ) + + # This is only used to calculate the structure factor if invalidated + self._unitcell: unitcell = None + + self._calc() + + def _calc(self): + symmGroup = ltypeOfLaueGroup(self._laueGroup) + self._q_sym = quatOfLaueGroup(self._laueGroup) + _, latVecOps, hklDataList = PlaneData.makePlaneData( + self._hkls, + self._lparms, + self._q_sym, + symmGroup, + self._strainMag, + self.wavelength, + ) + 'sort by tTheta' + tThs = np.array( + [hklDataList[iHKL]['tTheta'] for iHKL in range(len(hklDataList))] + ) + if self._doTThSort: + # sorted hkl -> _hkl + # _hkl -> sorted hkl + self.tThSort = np.argsort(tThs) + self.tThSortInv = np.empty(len(hklDataList), dtype=int) + self.tThSortInv[self.tThSort] = np.arange(len(hklDataList)) + self.hklDataList = [hklDataList[iHKL] for iHKL in self.tThSort] + else: + self.tThSort = np.arange(len(hklDataList)) + self.tThSortInv = np.arange(len(hklDataList)) + self.hklDataList = hklDataList + self._latVecOps = latVecOps + self.nHKLs = len(self.getHKLs()) + + def __str__(self): + s = '========== plane data ==========\n' + s += 'lattice parameters:\n ' + str(self.lparms) + '\n' + s += f'two theta width: ({str(self.tThWidth)})\n' + s += f'strain magnitude: ({str(self.strainMag)})\n' + s += f'beam energy ({str(self.wavelength)})\n' + s += 'hkls: (%d)\n' % self.nHKLs + s += str(self.getHKLs()) + return s + + def getParams(self): + """ + Getter for the parameters of the plane data. + + Returns + ------- + tuple + The parameters of the plane data. In the order of + _lparams, _laueGroup, _wavelength, _strainMag, tThWidth + + """ + return ( + self._lparms, + self._laueGroup, + self._wavelength, + self._strainMag, + self.tThWidth, + ) + + def getNhklRef(self) -> int: + """ + Get the total number of hkl's in the plane data, not ignoring + ones that are excluded in exclusions. + + Returns + ------- + int + The total number of hkl's in the plane data. + """ + return len(self.hklDataList) + + @property + def hkls(self) -> np.ndarray: + """ + hStacked Hkls of the plane data (Miller indices). + """ + return self.getHKLs().T + + @hkls.setter + def hkls(self, hkls): + raise NotImplementedError('for now, not allowing hkls to be reset') + + @property + def tThMax(self) -> Optional[float]: + """ + Maximum 2-theta value of the plane data. + + float or None + """ + return self._tThMax + + @tThMax.setter + def tThMax(self, t_th_max: Union[float, valunits.valWUnit]) -> None: + self._tThMax = toFloat(t_th_max, 'radians') + + @property + def exclusions(self) -> np.ndarray: + """ + Excluded HKL's the plane data. + + Set as type np.ndarray, as a mask of length getNhklRef(), a list of + indices to be excluded, or a list of ranges of indices. + + Read as a mask of length getNhklRef(). 
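+
+        For example (an illustrative sketch, assuming a PlaneData
+        instance pd): pd.exclusions = [0, 3] excludes the hkls at
+        indices 0 and 3 in the current ordering, while
+        pd.exclusions = [[0, 3]] excludes the contiguous range of
+        indices 0 through 2.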
+ """ + retval = np.zeros(self.getNhklRef(), dtype=bool) + if self._exclusions is not None: + # report in current hkl ordering + retval[:] = self._exclusions[self.tThSortInv] + if self._tThMax is not None: + for iHKLr, hklData in enumerate(self.hklDataList): + if hklData['tTheta'] > self._tThMax: + retval[iHKLr] = True + return retval + + @exclusions.setter + def exclusions(self, new_exclusions: Optional[np.ndarray]) -> None: + excl = np.zeros(len(self.hklDataList), dtype=bool) + if new_exclusions is not None: + exclusions = np.atleast_1d(new_exclusions) + if len(exclusions) == len(self.hklDataList): + assert ( + exclusions.dtype == 'bool' + ), 'Exclusions should be bool if full length' + # convert from current hkl ordering to _hkl ordering + excl[:] = exclusions[self.tThSort] + else: + if len(exclusions.shape) == 1: + # treat exclusions as indices + excl[self.tThSort[exclusions]] = True + elif len(exclusions.shape) == 2: + # treat exclusions as ranges of indices + for r in exclusions: + excl[self.tThSort[r[0]:r[1]]] = True + else: + raise RuntimeError( + f'Unclear behavior for shape {exclusions.shape}' + ) + self._exclusions = excl + self.nHKLs = np.sum(np.logical_not(self._exclusions)) + + def exclude( + self, + dmin: Optional[float] = None, + dmax: Optional[float] = None, + tthmin: Optional[float] = None, + tthmax: Optional[float] = None, + sfacmin: Optional[float] = None, + sfacmax: Optional[float] = None, + pintmin: Optional[float] = None, + pintmax: Optional[float] = None, + ) -> None: + """ + Set exclusions according to various parameters + + Any hkl with a value below any min or above any max will be excluded. So + to be included, an hkl needs to have values between the min and max + for all of the conditions given. + + Note that method resets the tThMax attribute to None. 
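+
+        For example (an illustrative sketch): pd.exclude(dmin=1.0,
+        sfacmin=0.05) keeps only the hkls with a lattice spacing of at
+        least 1.0 angstrom and a structure factor of at least 5% of the
+        maximum.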
+ + PARAMETERS + ---------- + dmin: float > 0 + minimum lattice spacing (angstroms) + dmax: float > 0 + maximum lattice spacing (angstroms) + tthmin: float > 0 + minimum two theta (radians) + tthmax: float > 0 + maximum two theta (radians) + sfacmin: float > 0 + minimum structure factor as a proportion of maximum + sfacmax: float > 0 + maximum structure factor as a proportion of maximum + pintmin: float > 0 + minimum powder intensity as a proportion of maximum + pintmax: float > 0 + maximum powder intensity as a proportion of maximum + """ + excl = np.zeros(self.getNhklRef(), dtype=bool) + self.exclusions = None + self.tThMax = None + + if (dmin is not None) or (dmax is not None): + d = np.array(self.getPlaneSpacings()) + if dmin is not None: + excl[d < dmin] = True + if dmax is not None: + excl[d > dmax] = True + + if (tthmin is not None) or (tthmax is not None): + tth = self.getTTh() + if tthmin is not None: + excl[tth < tthmin] = True + if tthmax is not None: + excl[tth > tthmax] = True + + if (sfacmin is not None) or (sfacmax is not None): + sfac = self.structFact + sfac = sfac / sfac.max() + if sfacmin is not None: + excl[sfac < sfacmin] = True + if sfacmax is not None: + excl[sfac > sfacmax] = True + + if (pintmin is not None) or (pintmax is not None): + pint = self.powder_intensity + pint = pint / pint.max() + if pintmin is not None: + excl[pint < pintmin] = True + if pintmax is not None: + excl[pint > pintmax] = True + + self.exclusions = excl + + def _parseLParms( + self, lparms: List[Union[valunits.valWUnit, float]] + ) -> List[float]: + lparmsDUnit = [] + for lparmThis in lparms: + if isinstance(lparmThis, valunits.valWUnit): + if lparmThis.isLength(): + lparmsDUnit.append(lparmThis.getVal(dUnit)) + elif lparmThis.isAngle(): + # plumbing set up to default to degrees + # for lattice parameters + lparmsDUnit.append(lparmThis.getVal('degrees')) + else: + raise RuntimeError( + f'Do not know what to do with {lparmThis}' + ) + else: + lparmsDUnit.append(lparmThis) + return lparmsDUnit + + @property + def lparms(self) -> List[float]: + """ + Lattice parameters of the plane data. + + Can be set as a List[float | valWUnit], but will be converted to + List[float]. + """ + return self._lparms + + @lparms.setter + def lparms(self, lparms: List[Union[valunits.valWUnit, float]]) -> None: + self._lparms = self._parseLParms(lparms) + self._calc() + + @property + def strainMag(self) -> Optional[float]: + """ + Strain magnitude of the plane data. + + float or None + """ + return self._strainMag + + @strainMag.setter + def strainMag(self, strain_mag: float) -> None: + self._strainMag = strain_mag + self.tThWidth = None + self._calc() + + @property + def wavelength(self) -> float: + """ + Wavelength of the plane data. + + Set as float or valWUnit. + + Read as float + """ + return self._wavelength + + @wavelength.setter + def wavelength(self, wavelength: Union[float, valunits.valWUnit]) -> None: + wavelength = processWavelength(wavelength) + # Do not re-compute if it is almost the same + if np.isclose(self._wavelength, wavelength): + return + + self._wavelength = wavelength + self._calc() + + def invalidate_structure_factor(self, ucell: unitcell) -> None: + """ + It can be expensive to compute the structure factor + This method just invalidates it, providing a unit cell, + so that it can be lazily computed from the unit cell. 
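+
+        For example (an illustrative note): after
+        pd.invalidate_structure_factor(ucell), the CalcXRSF computation
+        is deferred until structFact, powder_intensity, or
+        hedm_intensity is next accessed.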
+ + Parameters: + ----------- + unitcell : unitcell + The unit cell to be used to compute the structure factor + """ + self._structFact = None + self._hedm_intensity = None + self._powder_intensity = None + self._unitcell = ucell + + def _compute_sf_if_needed(self): + any_invalid = ( + self._structFact is None + or self._hedm_intensity is None + or self._powder_intensity is None + ) + if any_invalid and self._unitcell is not None: + # Compute the structure factor first. + # This can be expensive to do, so we lazily compute it when needed. + hkls = self.getHKLs(allHKLs=True) + self.structFact = self._unitcell.CalcXRSF(hkls) + + @property + def structFact(self) -> np.ndarray: + """ + Structure factors for each hkl. + + np.ndarray + """ + self._compute_sf_if_needed() + return self._structFact[~self.exclusions] + + @structFact.setter + def structFact(self, structFact: np.ndarray) -> None: + self._structFact = structFact + multiplicity = self.getMultiplicity(allHKLs=True) + tth = self.getTTh(allHKLs=True) + + hedm_intensity = ( + structFact * lorentz_factor(tth) * polarization_factor(tth) + ) + + powderI = hedm_intensity * multiplicity + + # Now scale them + hedm_intensity = 100.0 * hedm_intensity / np.nanmax(hedm_intensity) + powderI = 100.0 * powderI / np.nanmax(powderI) + + self._hedm_intensity = hedm_intensity + self._powder_intensity = powderI + + @property + def powder_intensity(self) -> np.ndarray: + """ + Powder intensity for each hkl. + """ + self._compute_sf_if_needed() + return self._powder_intensity[~self.exclusions] + + @property + def hedm_intensity(self) -> np.ndarray: + """ + HEDM (high energy x-ray diffraction microscopy) intensity for each hkl. + """ + self._compute_sf_if_needed() + return self._hedm_intensity[~self.exclusions] + + @staticmethod + def makePlaneData( + hkls: np.ndarray, + lparms: np.ndarray, + qsym: np.ndarray, + symmGroup, + strainMag, + wavelength, + ) -> Tuple[ + Dict[str, np.ndarray], Dict[str, Union[np.ndarray, float]], List[Dict] + ]: + """ + Generate lattice plane data from inputs. + + Parameters: + ----------- + hkls: np.ndarray + Miller indices, as in crystallography.latticePlanes + lparms: np.ndarray + Lattice parameters, as in crystallography.latticePlanes + qsym: np.ndarray + (4, n) containing quaternions of symmetry + symmGroup: str + Tag for the symmetry (Laue) group of the lattice. Can generate from + ltypeOfLaueGroup + strainMag: float + Swag of strain magnitudes + wavelength: float + Wavelength + + Returns: + ------- + dict: + Dictionary containing lattice plane data + dict: + Dictionary containing lattice vector operators + list: + List of dictionaries, each containing the data for one hkl + """ + + tempSetOutputDegrees(False) + latPlaneData = latticePlanes( + hkls, + lparms, + ltype=symmGroup, + strainMag=strainMag, + wavelength=wavelength, + ) + + latVecOps = latticeVectors(lparms, symmGroup) + + hklDataList = [] + for iHKL in range(len(hkls.T)): + # need transpose because of convention for hkls ordering + + """ + latVec = latPlaneData['normals'][:,iHKL] + # ... 
if not spots, may be able to work with a subset of these + latPlnNrmlList = applySym( + np.c_[latVec], qsym, csFlag=True, cullPM=False + ) + """ + # returns UN-NORMALIZED lattice plane normals + latPlnNrmls = applySym( + np.dot(latVecOps['B'], hkls[:, iHKL].reshape(3, 1)), + qsym, + csFlag=True, + cullPM=False, + ) + + # check for +/- in symmetry group + latPlnNrmlsM = applySym( + np.dot(latVecOps['B'], hkls[:, iHKL].reshape(3, 1)), + qsym, + csFlag=False, + cullPM=False, + ) + + csRefl = latPlnNrmls.shape[1] == latPlnNrmlsM.shape[1] + + # added this so that I retain the actual symmetric + # integer hkls as well + symHKLs = np.array( + np.round(np.dot(latVecOps['F'].T, latPlnNrmls)), dtype='int' + ) + + hklDataList.append( + dict( + hklID=iHKL, + hkl=hkls[:, iHKL], + tTheta=latPlaneData['tThetas'][iHKL], + dSpacings=latPlaneData['dspacings'][iHKL], + tThetaLo=latPlaneData['tThetasLo'][iHKL], + tThetaHi=latPlaneData['tThetasHi'][iHKL], + latPlnNrmls=unitVector(latPlnNrmls), + symHKLs=symHKLs, + centrosym=csRefl, + ) + ) + + revertOutputDegrees() + return latPlaneData, latVecOps, hklDataList + + @property + def laueGroup(self) -> str: + """ + This is the Schoenflies tag, describing symmetry group of the lattice. + Note that setting this with incompatible lattice parameters will + cause an error. If changing both, use set_laue_and_lparms. + + str + """ + return self._laueGroup + + @laueGroup.setter + def laueGroup(self, laueGroup: str) -> None: + self._laueGroup = laueGroup + self._calc() + + def set_laue_and_lparms( + self, laueGroup: str, lparms: List[Union[valunits.valWUnit, float]] + ) -> None: + """ + Set the Laue group and lattice parameters simultaneously + + When the Laue group changes, the lattice parameters may be + incompatible, and cause an error in self._calc(). This function + allows us to update both the Laue group and lattice parameters + simultaneously to avoid this issue. + + Parameters: + ----------- + laueGroup : str + The symmetry (Laue) group to be set + lparms : List[valunits.valWUnit | float] + Lattice parameters to be set + """ + self._laueGroup = laueGroup + self._lparms = self._parseLParms(lparms) + self._calc() + + @property + def q_sym(self) -> np.ndarray: + """ + Quaternions of symmetry for each hkl, generated from the Laue group + + np.ndarray((4, n)) + """ + return self._q_sym # rotations.quatOfLaueGroup(self._laueGroup) + + def getPlaneSpacings(self) -> List[float]: + """ + Plane spacings for each hkl. 
+ + Returns: + ------- + List[float] + List of plane spacings for each hkl + """ + dspacings = [] + for iHKLr, hklData in enumerate(self.hklDataList): + if not self._thisHKL(iHKLr): + continue + dspacings.append(hklData['dSpacings']) + return dspacings + + @property + def latVecOps(self) -> Dict[str, Union[np.ndarray, float]]: + """ + gets lattice vector operators as a new (deepcopy) + + Returns: + ------- + Dict[str, np.ndarray | float] + Dictionary containing lattice vector operators + """ + return copy.deepcopy(self._latVecOps) + + def _thisHKL(self, iHKLr: int) -> bool: + hklData = self.hklDataList[iHKLr] + if self._exclusions is not None: + if self._exclusions[self.tThSortInv[iHKLr]]: + return False + if self._tThMax is not None: + if hklData['tTheta'] > self._tThMax or np.isnan(hklData['tTheta']): + return False + return True + + def _getTThRange(self, iHKLr: int) -> Tuple[float, float]: + hklData = self.hklDataList[iHKLr] + if self.tThWidth is not None: # tThHi-tThLo < self.tThWidth + tTh = hklData['tTheta'] + tThHi = tTh + self.tThWidth * 0.5 + tThLo = tTh - self.tThWidth * 0.5 + else: + tThHi = hklData['tThetaHi'] + tThLo = hklData['tThetaLo'] + return (tThLo, tThHi) + + def getTThRanges(self, strainMag: Optional[float] = None) -> np.ndarray: + """ + Get the 2-theta ranges for included hkls + + Parameters: + ----------- + strainMag : Optional[float] + Optional swag of strain magnitude + + Returns: + ------- + np.ndarray: + hstacked array of hstacked tThLo and tThHi for each hkl (n x 2) + """ + tThRanges = [] + for iHKLr, hklData in enumerate(self.hklDataList): + if not self._thisHKL(iHKLr): + continue + if strainMag is None: + tThRanges.append(self._getTThRange(iHKLr)) + else: + hklData = self.hklDataList[iHKLr] + d = hklData['dSpacings'] + tThLo = 2.0 * np.arcsin( + self._wavelength / 2.0 / (d * (1.0 + strainMag)) + ) + tThHi = 2.0 * np.arcsin( + self._wavelength / 2.0 / (d * (1.0 - strainMag)) + ) + tThRanges.append((tThLo, tThHi)) + return np.array(tThRanges) + + def getMergedRanges( + self, cullDupl: Optional[bool] = False + ) -> Tuple[List[List[int]], List[List[float]]]: + """ + Return indices and ranges for specified planeData, merging where + there is overlap based on the tThWidth and line positions + + Parameters: + ----------- + cullDupl : (optional) bool + If True, cull duplicate 2-theta values (within sqrt_epsf). Defaults + to False. + + Returns: + -------- + List[List[int]] + List of indices for each merged range + + List[List[float]] + List of merged ranges, (n x 2) + """ + tThs = self.getTTh() + tThRanges = self.getTThRanges() + + # if you end exlcusions in a doublet (or multiple close rings) + # then this will 'fail'. May need to revisit... + nonoverlapNexts = np.hstack( + (tThRanges[:-1, 1] < tThRanges[1:, 0], True) + ) + iHKLLists = [] + mergedRanges = [] + hklsCur = [] + tThLoIdx = 0 + tThHiCur = 0.0 + for iHKL, nonoverlapNext in enumerate(nonoverlapNexts): + tThHi = tThRanges[iHKL, -1] + if not nonoverlapNext: + if cullDupl and abs(tThs[iHKL] - tThs[iHKL + 1]) < sqrt_epsf: + continue + else: + hklsCur.append(iHKL) + tThHiCur = tThHi + else: + hklsCur.append(iHKL) + tThHiCur = tThHi + iHKLLists.append(hklsCur) + mergedRanges.append([tThRanges[tThLoIdx, 0], tThHiCur]) + tThLoIdx = iHKL + 1 + hklsCur = [] + return iHKLLists, mergedRanges + + def getTTh(self, allHKLs: Optional[bool] = False) -> np.ndarray: + """ + Get the 2-theta values for each hkl. 
+
+        Parameters:
+        -----------
+        allHKLs : (optional) bool
+            If True, return all 2-theta values, even if they are excluded in
+            the current planeData. Default is False.
+
+        Returns:
+        -------
+        np.ndarray
+            Array of 2-theta values for each hkl
+        """
+        tTh = []
+        for iHKLr, hklData in enumerate(self.hklDataList):
+            if not allHKLs and not self._thisHKL(iHKLr):
+                continue
+            tTh.append(hklData['tTheta'])
+        return np.array(tTh)
+
+    def getMultiplicity(self, allHKLs: Optional[bool] = False) -> np.ndarray:
+        """
+        Get the multiplicity for each hkl (number of symHKLs).
+
+        Parameters:
+        ----------
+        allHKLs : (optional) bool
+            If True, return all multiplicities, even if they are excluded in
+            the current planeData. Defaults to False.
+
+        Returns
+        -------
+        np.ndarray
+            Array of multiplicities for each hkl
+        """
+        # ... JVB: is this incorrect?
+        multip = []
+        for iHKLr, hklData in enumerate(self.hklDataList):
+            if allHKLs or self._thisHKL(iHKLr):
+                multip.append(hklData['symHKLs'].shape[1])
+        return np.array(multip)
+
+    def getHKLID(
+        self,
+        hkl: Union[int, Tuple[int, int, int], np.ndarray],
+        master: Optional[bool] = False,
+    ) -> Union[List[int], int]:
+        """
+        Return the unique ID of a list of hkls.
+
+        Parameters
+        ----------
+        hkl : int | tuple | list | numpy.ndarray
+            The input hkl. If an int, or a list of ints, it just passes
+            through (FIXME).
+            If a tuple, treated as a single (h, k, l).
+            If a list of lists/tuples, each is treated as an (h, k, l).
+            If a numpy.ndarray, it is assumed to have shape (3, N) with the
+            N (h, k, l) vectors stacked column-wise
+
+        master : bool, optional
+            If True, return the master hklID, else return the index from the
+            external (sorted and reduced) list.
+
+        Returns
+        -------
+        hkl_ids : list
+            The list of requested hklID values associated with the input.
+
+        Notes
+        -----
+        TODO: revisit this weird API???
+
+        Changes:
+        -------
+        2020-05-21 (JVB) -- modified to handle all symmetric equivalent reprs.
+        """
+        if hasattr(hkl, '__setitem__'):  # tuple does not have __setitem__
+            if isinstance(hkl, np.ndarray):
+                # if is ndarray, assume is 3xN
+                return [self._getHKLID(x, master=master) for x in hkl.T]
+            else:
+                return [self._getHKLID(x, master=master) for x in hkl]
+        else:
+            return self._getHKLID(hkl, master=master)
+
+    def _getHKLID(
+        self,
+        hkl: Union[int, Tuple[int, int, int], np.ndarray],
+        master: Optional[bool] = False,
+    ) -> int:
+        """
+        for hkl that is a tuple, return externally visible hkl index
+        """
+        if isinstance(hkl, int):
+            return hkl
+        else:
+            hklList = self.getSymHKLs()  # !!! list, reduced by exclusions
+            intl_hklIDs = np.asarray([i['hklID'] for i in self.hklDataList])
+            intl_hklIDs_sorted = intl_hklIDs[~self.exclusions[self.tThSortInv]]
+            dHKLInv = {}
+            for iHKL, symHKLs in enumerate(hklList):
+                idx = intl_hklIDs_sorted[iHKL] if master else iHKL
+                for thisHKL in symHKLs.T:
+                    dHKLInv[tuple(thisHKL)] = idx
+            try:
+                return dHKLInv[tuple(hkl)]
+            except KeyError:
+                raise RuntimeError(
+                    f"hkl '{tuple(hkl)}' is not present in this material!"
+                )
+
+    def getHKLs(self, *hkl_ids: int, **kwargs) -> Union[List[str], np.ndarray]:
+        """
+        Returns the powder HKLs subject to specified options.
+
+        Parameters
+        ----------
+        *hkl_ids : int
+            Optional list of specific master hklIDs.
+        **kwargs : dict
+            One or more of the following keyword arguments:
+            asStr : bool
+                If True, return a list of strings. The default is False.
+            thisTTh : scalar | None
+                If not None, only return hkls overlapping the specified
+                2-theta (in radians). The default is None.
+            allHKLs : bool
+                If True, then ignore exclusions. The default is False.
+
+        Raises
+        ------
+        TypeError
+            If an unknown kwarg is passed.
+        RuntimeError
+            If an invalid hklID is passed.
+
+        Returns
+        -------
+        hkls : list | numpy.ndarray
+            Either a list of hkls as strings (if asStr=True) or a vstacked
+            array of hkls.
+
+        Notes
+        -----
+        !!! the shape of the return value when asStr=False is the _transpose_
+            of the typical return value for self.get_hkls() and self.hkls!
+            This _may_ change to avoid confusion, but going to leave it for
+            now so as not to break anything.
+
+        2022/08/05 JVB:
+            - Added functionality to handle optional hklID args
+            - Updated docstring
+        """
+        # kwarg parsing
+        opts = dict(asStr=False, thisTTh=None, allHKLs=False)
+        if len(kwargs) > 0:
+            # check keys
+            for k, v in kwargs.items():
+                if k not in opts:
+                    raise TypeError(
+                        f"getHKLs() got an unexpected keyword argument '{k}'"
+                    )
+            opts.update(kwargs)
+
+        hkls = []
+        if len(hkl_ids) == 0:
+            for iHKLr, hklData in enumerate(self.hklDataList):
+                if not opts['allHKLs']:
+                    if not self._thisHKL(iHKLr):
+                        continue
+                if opts['thisTTh'] is not None:
+                    tThLo, tThHi = self._getTThRange(iHKLr)
+                    if opts['thisTTh'] < tThHi and opts['thisTTh'] > tThLo:
+                        hkls.append(hklData['hkl'])
+                else:
+                    hkls.append(hklData['hkl'])
+        else:
+            # !!! changing behavior here; if the hkl_id is invalid, raises
+            #     RuntimeError, and if allHKLs=True and the hkl_id is
+            #     excluded, it also raises a RuntimeError
+            all_hkl_ids = np.asarray([i['hklID'] for i in self.hklDataList])
+            sorted_excl = self.exclusions[self.tThSortInv]
+            idx = np.zeros(len(self.hklDataList), dtype=int)
+            for i, hkl_id in enumerate(hkl_ids):
+                # find ordinal index of current hklID
+                try:
+                    idx[i] = int(np.where(all_hkl_ids == hkl_id)[0])
+                except TypeError:
+                    raise RuntimeError(
+                        f"Requested hklID '{hkl_id}' is invalid!"
+                    )
+                if sorted_excl[idx[i]] and not opts['allHKLs']:
+                    raise RuntimeError(
+                        f"Requested hklID '{hkl_id}' is excluded!"
+                    )
+                hkls.append(self.hklDataList[idx[i]]['hkl'])
+
+        # handle output kwarg
+        if opts['asStr']:
+            return list(map(hklToStr, np.array(hkls)))
+        else:
+            return np.array(hkls)
+
+    def getSymHKLs(
+        self,
+        asStr: Optional[bool] = False,
+        withID: Optional[bool] = False,
+        indices: Optional[List[int]] = None,
+    ) -> Union[List[List[str]], List[np.ndarray]]:
+        """
+        Return all symmetry HKLs.
+
+        Parameters
+        ----------
+        asStr : bool, optional
+            If True, return the symmetry HKLs as strings. The default is
+            False.
+        withID : bool, optional
+            If True, return the symmetry HKLs with the hklID. The default is
+            False. Does nothing if asStr is True.
+        indices : list[int], optional
+            Optional list of indices of hkls to include.
+
+        Returns
+        -------
+        sym_hkls : list of list of strings, or list of numpy.ndarray
+            List of symmetry HKLs for each HKL, either as strings or as a
+            vstacked array.
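+
+        Examples
+        --------
+        An illustrative sketch (not from the original source); with pd a
+        PlaneData instance, one list entry of symmetric equivalents is
+        returned per included hkl:
+
+        >>> sym = pd.getSymHKLs(asStr=True)  # doctest: +SKIP
+        >>> len(sym) == pd.nHKLs  # doctest: +SKIP
+        True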
+        """
+        sym_hkls = []
+        hkl_index = 0
+        if indices is not None:
+            indB = np.zeros(self.nHKLs, dtype=bool)
+            indB[np.array(indices)] = True
+        else:
+            indB = np.ones(self.nHKLs, dtype=bool)
+        for iHKLr, hklData in enumerate(self.hklDataList):
+            if not self._thisHKL(iHKLr):
+                continue
+            if indB[hkl_index]:
+                hkls = hklData['symHKLs']
+                if asStr:
+                    sym_hkls.append(list(map(hklToStr, np.array(hkls).T)))
+                elif withID:
+                    sym_hkls.append(
+                        np.vstack(
+                            [
+                                np.tile(hklData['hklID'], (1, hkls.shape[1])),
+                                hkls,
+                            ]
+                        )
+                    )
+                else:
+                    sym_hkls.append(np.array(hkls))
+            hkl_index += 1
+        return sym_hkls
+
+    @staticmethod
+    def makeScatteringVectors(
+        hkls: np.ndarray,
+        rMat_c: np.ndarray,
+        bMat: np.ndarray,
+        wavelength: float,
+        chiTilt: Optional[float] = None,
+    ) -> Tuple[np.ndarray, np.ndarray, np.ndarray]:
+        """
+        Static method for calculating g-vectors and scattering vector angles
+        for specified hkls, subject to the Bragg conditions specified by
+        lattice vectors, orientation matrix, and wavelength
+
+        Parameters
+        ----------
+        hkls : np.ndarray
+            (3, n) array of hkls.
+        rMat_c : np.ndarray
+            (3, 3) rotation matrix from the crystal to the sample frame.
+        bMat : np.ndarray
+            (3, 3) COB from reciprocal lattice frame to the crystal frame.
+        wavelength : float
+            X-ray wavelength in Angstroms.
+        chiTilt : float, optional
+            0 <= chiTilt <= 90 degrees, defaults to 0
+
+        Returns
+        -------
+        gVec_s : np.ndarray
+            (3, n) array of g-vectors (reciprocal lattice) in the sample frame.
+        oangs0 : np.ndarray
+            (3, n) array containing the feasible (2-theta, eta, ome) triplets
+            for each input hkl (first solution)
+        oangs1 : np.ndarray
+            (3, n) array containing the feasible (2-theta, eta, ome) triplets
+            for each input hkl (second solution)
+
+        FIXME: must do testing on strained bMat
+        """
+        # arg munging
+        chi = float(chiTilt) if chiTilt is not None else 0.0
+        rMat_c = rMat_c.squeeze()
+
+        # these are the reciprocal lattice vectors in the SAMPLE FRAME
+        # ** NOTE **
+        #   if strained, assumes that you handed it a bMat calculated from
+        #   strained [a, b, c] in the CRYSTAL FRAME
+        gVec_s = np.dot(rMat_c, np.dot(bMat, hkls))
+
+        dim0 = gVec_s.shape[0]
+        if dim0 != 3:
+            raise ValueError(f'Number of lattice plane normal dims is {dim0}')
+
+        # call model from transforms now
+        oangs0, oangs1 = xfcapi.oscill_angles_of_hkls(
+            hkls.T, chi, rMat_c, bMat, wavelength
+        )
+
+        return gVec_s, oangs0.T, oangs1.T
+
+    def _makeScatteringVectors(
+        self,
+        rMat: np.ndarray,
+        bMat: Optional[np.ndarray] = None,
+        chiTilt: Optional[float] = None,
+    ) -> Tuple[List[np.ndarray], List[np.ndarray], List[np.ndarray]]:
+        """
+        modeled after QFromU.m
+        """
+
+        if bMat is None:
+            bMat = self._latVecOps['B']
+
+        Qs_vec = []
+        Qs_ang0 = []
+        Qs_ang1 = []
+        for iHKLr, hklData in enumerate(self.hklDataList):
+            if not self._thisHKL(iHKLr):
+                continue
+            thisQs, thisAng0, thisAng1 = PlaneData.makeScatteringVectors(
+                hklData['symHKLs'],
+                rMat,
+                bMat,
+                self._wavelength,
+                chiTilt=chiTilt,
+            )
+            Qs_vec.append(thisQs)
+            Qs_ang0.append(thisAng0)
+            Qs_ang1.append(thisAng1)
+
+        return Qs_vec, Qs_ang0, Qs_ang1
+
+    def calcStructFactor(self, atominfo):
+        """
+        Calculates unit cell structure factors as a function of hkl
+        USAGE:
+        FSquared = calcStructFactor(atominfo,hkls,B)
+        INPUTS:
+        1) atominfo (m x 4 float ndarray) the first three columns of the
+        matrix contain fractional atom positions [uvw] of atoms in the unit
+        cell.
The last column contains the number of electrons for a given atom + 2) hkls (3 x n float ndarray) is the array of Miller indices for + the planes of interest. The vectors are assumed to be + concatenated along the 1-axis (horizontal) + 3) B (3 x 3 float ndarray) is a matrix of reciprocal lattice basis + vectors,where each column contains a reciprocal lattice basis vector + ({g}=[B]*{hkl}) + OUTPUTS: + 1) FSquared (n x 1 float ndarray) array of structure factors, + one for each hkl passed into the function + """ + r = atominfo[:, 0:3] + elecNum = atominfo[:, 3] + hkls = self.hkls + B = self.latVecOps['B'] + sinThOverLamdaList, ffDataList = LoadFormFactorData() + FSquared = np.zeros(hkls.shape[1]) + + for jj in np.arange(0, hkls.shape[1]): + # ???: probably have other functions for this + # Calculate G for each hkl + # Calculate magnitude of G for each hkl + G = ( + hkls[0, jj] * B[:, 0] + + hkls[1, jj] * B[:, 1] + + hkls[2, jj] * B[:, 2] + ) + magG = np.sqrt(G[0] ** 2 + G[1] ** 2 + G[2] ** 2) + + # Begin calculating form factor + F = 0 + for ii in np.arange(0, r.shape[0]): + ff = RetrieveAtomicFormFactor( + elecNum[ii], magG, sinThOverLamdaList, ffDataList + ) + exparg = complex( + 0.0, + 2.0 + * np.pi + * ( + hkls[0, jj] * r[ii, 0] + + hkls[1, jj] * r[ii, 1] + + hkls[2, jj] * r[ii, 2] + ), + ) + F += ff * np.exp(exparg) + + """ + F = sum_atoms(ff(Q)*e^(2*pi*i(hu+kv+lw))) + """ + FSquared[jj] = np.real(F * np.conj(F)) + + return FSquared + + # OLD DEPRECATED PLANE_DATA STUFF ==================================== + @deprecated(new_func="len(self.hkls.T)", removal_date="2025-08-01") + def getNHKLs(self): + return len(self.getHKLs()) + + @deprecated(new_func="self.exclusions", removal_date="2025-08-01") + def get_exclusions(self): + return self.exclusions + + @deprecated(new_func="self.exclusions=...", removal_date="2025-08-01") + def set_exclusions(self, exclusions): + self.exclusions = exclusions + + @deprecated(new_func="rotations.ltypeOfLaueGroup(self.laueGroup)", + removal_date="2025-08-01") + def getLatticeType(self): + return ltypeOfLaueGroup(self.laueGroup) + + @deprecated(new_func="self.q_sym", removal_date="2025-08-01") + def getQSym(self): + return self.q_sym + + +@deprecated(removal_date='2025-01-01') +def getFriedelPair(tth0, eta0, *ome0, **kwargs): + """ + Get the diffractometer angular coordinates in degrees for + the Friedel pair of a given reflection (min angular distance). + + AUTHORS: + + J. V. Bernier -- 10 Nov 2009 + + USAGE: + + ome1, eta1 = getFriedelPair(tth0, eta0, *ome0, + display=False, + units='degrees', + convention='hexrd') + + INPUTS: + + 1) tth0 is a list (or ndarray) of 1 or n the bragg angles (2theta) for + the n reflections (tiled to match eta0 if only 1 is given). + + 2) eta0 is a list (or ndarray) of 1 or n azimuthal coordinates for the n + reflections (tiled to match tth0 if only 1 is given). + + 3) ome0 is a list (or ndarray) of 1 or n reference oscillation + angles for the n reflections (denoted omega in [1]). This argument + is optional. 
+
+    4) Keyword arguments may be one of the following:
+
+    Keyword           Values|{default}        Action
+    --------------    --------------          --------------
+    'display'         True|{False}            toggles display to cmd line
+    'units'           'radians'|{'degrees'}   sets units for input angles
+    'convention'      'fable'|{'hexrd'}       sets conventions defining
+                                              the angles (see below)
+    'chiTilt'         None                    the inclination (about Xlab) of
+                                              the oscillation axis
+
+    OUTPUTS:
+
+    1) ome1 contains the oscillation angle coordinates of the
+    Friedel pairs associated with the n input reflections, relative to ome0
+    (i.e. the returned ome1 equals the computed pair angle plus ome0).
+    Output is in DEGREES!
+
+    2) eta1 contains the azimuthal coordinates of the Friedel
+    pairs associated with the n input reflections. Output units are
+    controlled via the module variable 'outputDegrees'
+
+    NOTES:
+
+    !!!: The outputs ome1, eta1 are written using the selected convention,
+         but the units are always degrees. May change this to work with
+         Nathan's global...
+
+    !!!: In the 'fable' convention [1], {XYZ} form a RHON basis where X is
+         downstream, Z is vertical, and eta is CCW with +Z defining eta = 0.
+
+    !!!: In the 'hexrd' convention [2], {XYZ} form a RHON basis where Z is
+         upstream, Y is vertical, and eta is CCW with +X defining eta = 0.
+
+    REFERENCES:
+
+    [1] E. M. Lauridsen, S. Schmidt, R. M. Suter, and H. F. Poulsen,
+        ``Tracking: a method for structural characterization of grains in
+        powders or polycrystals''. J. Appl. Cryst. (2001). 34, 744--750
+
+    [2] J. V. Bernier, M. P. Miller, J. -S. Park, and U. Lienert,
+        ``Quantitative Stress Analysis of Recrystallized OFHC Cu Subject
+        to Deformed In Situ'', J. Eng. Mater. Technol. (2008). 130.
+        DOI:10.1115/1.2870234
+    """
+
+    dispFlag = False
+    fableFlag = False
+    chi = None
+    c1 = 1.0
+    c2 = pi / 180.0
+
+    eta0 = np.atleast_1d(eta0)
+    tth0 = np.atleast_1d(tth0)
+    ome0 = np.atleast_1d(ome0)
+
+    if eta0.ndim != 1:
+        raise RuntimeError('azimuthal input must be 1-D')
+
+    npts = len(eta0)
+
+    if tth0.ndim != 1:
+        raise RuntimeError('Bragg angle input must be 1-D')
+    else:
+        if len(tth0) != npts:
+            if len(tth0) == 1:
+                tth0 *= np.ones(npts)
+            elif npts == 1:
+                npts = len(tth0)
+                eta0 *= np.ones(npts)
+            else:
+                raise RuntimeError(
+                    'the azimuthal and Bragg angle inputs are inconsistent'
+                )
+
+    if len(ome0) == 0:
+        ome0 = np.zeros(npts)  # dummy ome0
+    elif len(ome0) == 1 and npts > 1:
+        ome0 *= np.ones(npts)
+    else:
+        if len(ome0) != npts:
+            raise RuntimeError(
+                'your oscillation angle input is inconsistent; '
+                + f'it has length {len(ome0)} while it should be {npts}'
+            )
+
+    # keyword args processing
+    kwarglen = len(kwargs)
+    if kwarglen > 0:
+        argkeys = list(kwargs.keys())
+        for i in range(kwarglen):
+            if argkeys[i] == 'display':
+                dispFlag = kwargs[argkeys[i]]
+            elif argkeys[i] == 'convention':
+                if kwargs[argkeys[i]].lower() == 'fable':
+                    fableFlag = True
+            elif argkeys[i] == 'units':
+                if kwargs[argkeys[i]] == 'radians':
+                    c1 = 180.0 / pi
+                    c2 = 1.0
+            elif argkeys[i] == 'chiTilt':
+                if kwargs[argkeys[i]] is not None:
+                    chi = kwargs[argkeys[i]]
+
+    # a little talkback...
+ if dispFlag: + if fableFlag: + print('\nUsing Fable angle convention\n') + else: + print('\nUsing image-based angle convention\n') + + # mapped eta input + # - in DEGREES, thanks to c1 + eta0 = mapAngle(c1 * eta0, [-180, 180], units='degrees') + if fableFlag: + eta0 = 90 - eta0 + + # must put args into RADIANS + # - eta0 is in DEGREES, + # - the others are in whatever was entered, hence c2 + eta0 = d2r * eta0 + tht0 = c2 * tth0 / 2 + if chi is not None: + chi = c2 * chi + else: + chi = 0 + + """ + SYSTEM SOLVE + + + cos(chi)cos(eta)cos(theta)sin(x) - cos(chi)sin(theta)cos(x) \ + = sin(theta) - sin(chi)sin(eta)cos(theta) + + + Identity: a sin x + b cos x = sqrt(a**2 + b**2) sin (x + alpha) + + / + | atan(b/a) for a > 0 + alpha < + | pi + atan(b/a) for a < 0 + \ + + => sin (x + alpha) = c / sqrt(a**2 + b**2) + + must use both branches for sin(x) = n: + x = u (+ 2k*pi) | x = pi - u (+ 2k*pi) + """ + cchi = np.cos(chi) + schi = np.sin(chi) + ceta = np.cos(eta0) + seta = np.sin(eta0) + ctht = np.cos(tht0) + stht = np.sin(tht0) + + nchi = np.c_[0.0, cchi, schi].T + + gHat0_l = -np.vstack([ceta * ctht, seta * ctht, stht]) + + a = cchi * ceta * ctht + b = -cchi * stht + c = stht + schi * seta * ctht + + # form solution + abMag = np.sqrt(a * a + b * b) + assert np.all(abMag > 0), "Beam vector specification is infeasible!" + phaseAng = np.arctan2(b, a) + rhs = c / abMag + rhs[abs(rhs) > 1.0] = np.nan + rhsAng = np.arcsin(rhs) + + # write ome angle output arrays (NaNs persist here) + ome1 = rhsAng - phaseAng + ome2 = np.pi - rhsAng - phaseAng + + ome1 = mapAngle(ome1, [-np.pi, np.pi], units='radians') + ome2 = mapAngle(ome2, [-np.pi, np.pi], units='radians') + + ome_stack = np.vstack([ome1, ome2]) + + min_idx = np.argmin(abs(ome_stack), axis=0) + + ome_min = ome_stack[min_idx, list(range(len(ome1)))] + eta_min = np.nan * np.ones_like(ome_min) + + # mark feasible reflections + goodOnes = ~np.isnan(ome_min) + + numGood = np.sum(goodOnes) + tmp_eta = np.empty(numGood) + tmp_gvec = gHat0_l[:, goodOnes] + for i in range(numGood): + rchi = rotMatOfExpMap(np.tile(ome_min[goodOnes][i], (3, 1)) * nchi) + gHat_l = np.dot(rchi, tmp_gvec[:, i].reshape(3, 1)) + tmp_eta[i] = np.arctan2(gHat_l[1], gHat_l[0]) + eta_min[goodOnes] = tmp_eta + + # everybody back to DEGREES! + # - ome1 is in RADIANS here + # - convert and put into [-180, 180] + ome1 = mapAngle( + mapAngle(r2d * ome_min, [-180, 180], units='degrees') + c1 * ome0, + [-180, 180], + units='degrees', + ) + + # put eta1 in [-180, 180] + eta1 = mapAngle(r2d * eta_min, [-180, 180], units='degrees') + + if not outputDegrees: + ome1 *= d2r + eta1 *= d2r + + return ome1, eta1 + + +def getDparms( + lp: np.ndarray, lpTag: str, radians: Optional[bool] = True +) -> np.ndarray: + """ + Utility routine for getting dparms, that is the lattice parameters + without symmetry -- 'triclinic' + + Parameters + ---------- + lp : np.ndarray + Parsed lattice parameters + lpTag : str + Tag for the symmetry group of the lattice (from Laue group) + radians : bool, optional + Whether or not to use radians for angles, default is True + + Returns + ------- + np.ndarray + The lattice parameters without symmetry. + """ + latVecOps = latticeVectors(lp, tag=lpTag, radians=radians) + return latVecOps['dparms'] + + +def LoadFormFactorData(): + """ + Script to read in a csv file containing information relating the + magnitude of Q (sin(th)/lambda) to atomic form factor + Notes: + Atomic form factor data gathered from the International Tables of + Crystallography: + P. J. Brown, A. G. Fox, E. 
N. Maslen, M. A. O'Keefe and B. T. M. Willis,
+    "Chapter 6.1. Intensity of diffracted intensities", International Tables
+    for Crystallography (2006). Vol. C, ch. 6.1, pp. 554-595
+    """
+
+    dir1 = os.path.split(valunits.__file__)
+    dataloc = os.path.join(dir1[0], 'data', 'FormFactorVsQ.csv')
+
+    data = np.zeros((62, 99), float)
+
+    # NOTE: the 'rU' (universal newlines) mode used here previously was
+    # removed in Python 3.11 (and was marked broken by DP); open with
+    # newline='' as recommended for the csv module instead
+    jj = 0
+    with open(dataloc, 'r', newline='') as csvfile:
+        datareader = csv.reader(csvfile, dialect=csv.excel)
+        for row in datareader:
+            ii = 0
+            for val in row:
+                data[jj, ii] = float(val)
+                ii += 1
+            jj += 1
+
+    sinThOverLamdaList = data[:, 0]
+    ffDataList = data[:, 1:]
+
+    return sinThOverLamdaList, ffDataList
+
+
+def RetrieveAtomicFormFactor(elecNum, magG, sinThOverLamdaList, ffDataList):
+    """Interpolates between tabulated data to find the atomic form factor
+    for an atom with elecNum electrons for a given magnitude of Q
+    USAGE:
+    ff = RetrieveAtomicFormFactor(elecNum,magG,sinThOverLamdaList,ffDataList)
+    INPUTS:
+    1) elecNum, (1 x 1 float) number of electrons for atom of interest
+    2) magG (1 x 1 float) magnitude of G
+    3) sinThOverLamdaList (n x 1 float ndarray) form factor data is tabulated
+    in terms of sin(theta)/lambda (A^-1).
+    4) ffDataList (n x m float ndarray) form factor data is tabulated in terms
+    of sin(theta)/lambda (A^-1). Each column corresponds to a different
+    number of electrons
+    OUTPUTS:
+    1) ff (1 x 1 float) atomic form factor for the atom and hkl of interest
+    NOTES:
+    Data should be calculated in terms of G at some point
+    """
+    sinThOverLambda = 0.5 * magG
+    # lambda = 2*d*sin(th)
+    # lambda = 2*sin(th)/G
+    # 1/2*G = sin(th)/lambda
+
+    ff = np.interp(
+        sinThOverLambda, sinThOverLamdaList, ffDataList[:, (elecNum - 1)]
+    )
+
+    return ff
+
+
+def lorentz_factor(tth: np.ndarray) -> np.ndarray:
+    """
+    05/26/2022 SS adding Lorentz factor computation
+    to the detector so that it can be compensated for in the
+    intensity correction
+
+    Parameters
+    ----------
+    tth: np.ndarray
+        2-theta of every pixel in radians
+
+    Returns
+    -------
+    np.ndarray
+        Lorentz factor for each pixel
+    """
+
+    theta = 0.5 * tth
+
+    cth = np.cos(theta)
+    sth2 = np.sin(theta) ** 2
+
+    return 1.0 / (4.0 * cth * sth2)
+
+
+def polarization_factor(
+    tth: np.ndarray,
+    unpolarized: Optional[bool] = True,
+    eta: Optional[np.ndarray] = None,
+    f_hor: Optional[float] = None,
+    f_vert: Optional[float] = None,
+) -> np.ndarray:
+    """
+    06/14/2021 SS adding Lorentz polarization factor computation
+    to the detector so that it can be compensated for in the
+    intensity correction
+
+    05/26/2022 decoupling Lorentz factor from polarization factor
+
+    parameters: tth two theta of every pixel in radians
+                if unpolarized is True, all subsequent arguments are optional
+                eta azimuthal angle of every pixel
+                f_hor fraction of horizontal polarization
+                (~1 for XFELs)
+                f_vert fraction of vertical polarization
+                (~0 for XFELs)
+                notice f_hor + f_vert = 1
+
+    FIXME: if called with unpolarized=False but without eta, f_hor, and
+    f_vert, those arguments default to None in the current implementation,
+    which will throw an error.
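+
+    Example (an illustrative check, not from the original source): for an
+    unpolarized beam the factor is (1 + cos(tth)**2) / 2, so
+
+    >>> polarization_factor(np.array([0.0, np.pi / 2]))
+    array([1. , 0.5])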
+ """ + + ctth2 = np.cos(tth) ** 2 + + if unpolarized: + return (1 + ctth2) / 2 + + seta2 = np.sin(eta) ** 2 + ceta2 = np.cos(eta) ** 2 + return f_hor * (seta2 + ceta2 * ctth2) + f_vert * (ceta2 + seta2 * ctth2) diff --git a/hexrd/hedm/material/unitcell.py b/hexrd/hedm/material/unitcell.py new file mode 100644 index 000000000..26f3a2fef --- /dev/null +++ b/hexrd/hedm/material/unitcell.py @@ -0,0 +1,1989 @@ +import importlib.resources +import numpy as np +from numba import njit +from hexrd import constants +from hexrd.material import spacegroup, symbols, symmetry +from hexrd.ipfcolor import sphere_sector, colorspace +from hexrd.valunits import valWUnit +import hexrd.resources +import warnings +import h5py +from pathlib import Path +from scipy.interpolate import interp1d +import time + +eps = constants.sqrt_epsf +ENERGY_ID = 0 +REAL_F1_ID = 1 +IMAG_F2_ID = 2 +MU_ID = 3 +COH_INCOH_ID = 4 +MU_K_ID = 6 +WAV_ID = 7 + +''' calculate dot product of two vectors in any space 'd' 'r' or 'c' ''' + + +@njit(cache=True, nogil=True) +def _calclength(u, mat): + return np.sqrt(np.dot(u, np.dot(mat, u))) + + +@njit(cache=True, nogil=True) +def _calcstar(v, sym, mat): + vsym = np.atleast_2d(v) + for s in sym: + vp = np.dot(np.ascontiguousarray(s), v) + # check if this is new + isnew = True + for vec in vsym: + vv = vp - vec + dist = _calclength(vv, mat) + if dist < 1E-3: + isnew = False + break + if isnew: + vp = np.atleast_2d(vp) + vsym = np.vstack((vsym, vp)) + + return vsym + + +class unitcell: + + ''' + >> @AUTHOR: Saransh Singh, Lawrence Livermore National Lab, saransh1@llnl.gov + >> @DATE: 10/09/2018 SS 1.0 original + @DATE: 10/15/2018 SS 1.1 added space group handling + >> @DETAILS: this is the unitcell class + + ''' + + # initialize the unitcell class + # need lattice parameters and space group data from HDF5 file + def __init__(self, lp, sgnum, + atomtypes, charge, + atominfo, + U, dmin, beamenergy, + sgsetting=0): + + self._tstart = time.time() + self.pref = 0.4178214 + + self.atom_type = atomtypes + self.chargestates = charge + self.atom_pos = atominfo + + self._dmin = dmin + + self.lparms = lp + + self.U = U + ''' + initialize interpolation from table for anomalous scattering + ''' + self.InitializeInterpTable() + + ''' + sets x-ray energy + calculate wavelength + also calculates anomalous form factors for xray scattering + ''' + self.voltage = beamenergy * 1000.0 + ''' + calculate symmetry + ''' + self.sgsetting = sgsetting + self.sgnum = sgnum + + self._tstop = time.time() + self.tinit = self._tstop - self._tstart + + def GetPgLg(self): + ''' + simple subroutine to get point and laue groups + to maintain consistency for planedata initialization + in the materials class + ''' + for k in list(_pgDict.keys()): + if self.sgnum in k: + pglg = _pgDict[k] + self._pointGroup = pglg[0] + self._laueGroup = pglg[1] + self._supergroup = pglg[2] + self._supergroup_laue = pglg[3] + + def CalcWavelength(self): + # wavelength in nm + self.wavelength = constants.cPlanck * \ + constants.cLight / \ + constants.cCharge / \ + self.voltage + self.wavelength *= 1e9 + + def calcBetaij(self): + + self.betaij = np.zeros([3, 3, self.atom_ntype]) + for i in range(self.U.shape[0]): + U = self.U[i, :] + self.betaij[:, :, i] = np.array([[U[0], U[3], U[4]], + [U[3], U[1], U[5]], + [U[4], U[5], U[2]]]) + + self.betaij[:, :, i] *= 2. 
* np.pi**2 * self._aij + + def calcmatrices(self): + + a = self.a + b = self.b + c = self.c + + alpha = np.radians(self.alpha) + beta = np.radians(self.beta) + gamma = np.radians(self.gamma) + + ca = np.cos(alpha) + cb = np.cos(beta) + cg = np.cos(gamma) + sa = np.sin(alpha) + sb = np.sin(beta) + sg = np.sin(gamma) + tg = np.tan(gamma) + + ''' + direct metric tensor + ''' + self._dmt = np.array([[a**2, a*b*cg, a*c*cb], + [a*b*cg, b**2, b*c*ca], + [a*c*cb, b*c*ca, c**2]]) + self._vol = np.sqrt(np.linalg.det(self.dmt)) + + if self.vol < 1e-5: + warnings.warn('unitcell volume is suspiciously small') + + ''' + reciprocal metric tensor + ''' + self._rmt = np.linalg.inv(self.dmt) + + ''' + direct structure matrix + ''' + self._dsm = np.array([[a, b*cg, c*cb], + [0., b*sg, -c*(cb*cg - ca)/sg], + [0., 0., self.vol/(a*b*sg)]]) + + self._dsm[np.abs(self._dsm) < eps] = 0. + + ''' + reciprocal structure matrix + ''' + self._rsm = np.array([[1./a, 0., 0.], + [-1./(a*tg), 1./(b*sg), 0.], + [b*c*(cg*ca - cb)/(self.vol*sg), + a*c*(cb*cg - ca)/(self.vol*sg), + a*b*sg/self.vol]]) + + self._rsm[np.abs(self._rsm) < eps] = 0. + + ast = self.CalcLength([1, 0, 0], 'r') + bst = self.CalcLength([0, 1, 0], 'r') + cst = self.CalcLength([0, 0, 1], 'r') + + self._aij = np.array([[ast**2, ast*bst, ast*cst], + [bst*ast, bst**2, bst*cst], + [cst*ast, cst*bst, cst**2]]) + + ''' transform between any crystal space to any other space. + choices are 'd' (direct), 'r' (reciprocal) and 'c' (cartesian)''' + + def TransSpace(self, v_in, inspace, outspace): + if inspace == outspace: + return v_in + if inspace == 'd': + if outspace == 'r': + v_out = np.dot(v_in, self.dmt) + elif outspace == 'c': + v_out = np.dot(self.dsm, v_in) + else: + raise ValueError( + 'inspace in "d" but outspace can\'t be identified') + + elif inspace == 'r': + if outspace == 'd': + v_out = np.dot(v_in, self.rmt) + elif outspace == 'c': + v_out = np.dot(self.rsm, v_in) + else: + raise ValueError( + 'inspace in "r" but outspace can\'t be identified') + + elif inspace == 'c': + if outspace == 'r': + v_out = np.dot(v_in, self.dsm) + elif outspace == 'd': + v_out = np.dot(v_in, self.rsm) + else: + raise ValueError( + 'inspace in "c" but outspace can\'t be identified') + + else: + raise ValueError('incorrect inspace argument') + + return v_out + + ''' calculate dot product of two vectors in any space 'd' 'r' or 'c' ''' + + def CalcDot(self, u, v, space): + + if space == 'd': + dot = np.dot(u, np.dot(self.dmt, v)) + elif space == 'r': + dot = np.dot(u, np.dot(self.rmt, v)) + elif space == 'c': + dot = np.dot(u, v) + else: + raise ValueError('space is unidentified') + + return dot + + def CalcLength(self, u, space): + + if space == 'd': + mat = self.dmt + # vlen = np.sqrt(np.dot(u, np.dot(self.dmt, u))) + elif space == 'r': + mat = self.rmt + # vlen = np.sqrt(np.dot(u, np.dot(self.rmt, u))) + elif space == 'c': + mat = np.eye(3) + # vlen = np.linalg.norm(u) + else: + raise ValueError('incorrect space argument') + + uu = np.array(u).astype(np.float64) + return _calclength(uu, mat) + + ''' normalize vector in any space 'd' 'r' or 'c' ''' + + def NormVec(self, u, space): + ulen = self.CalcLength(u, space) + return u/ulen + + ''' calculate angle between two vectors in any space''' + + def CalcAngle(self, u, v, space): + + ulen = self.CalcLength(u, space) + vlen = self.CalcLength(v, space) + + dot = self.CalcDot(u, v, space)/ulen/vlen + if np.isclose(np.abs(dot), 1.0): + dot = np.sign(dot) + angle = np.arccos(dot) + + return angle + + ''' calculate cross product 
between two vectors in any space. + + cross product of two vectors in direct space is a vector in + reciprocal space + + cross product of two vectors in reciprocal space is a vector in + direct space + + the outspace specifies if a conversion needs to be made + + @NOTE: iv is the switch (0/1) which will either turn division + by volume of the unit cell on or off.''' + + def CalcCross(self, p, q, inspace, outspace, vol_divide=False): + iv = 0 + if vol_divide: + vol = self.vol + else: + vol = 1.0 + + pxq = np.array([p[1]*q[2]-p[2]*q[1], + p[2]*q[0]-p[0]*q[2], + p[0]*q[1]-p[1]*q[0]]) + + if inspace == 'd': + ''' + cross product vector is in reciprocal space + and can be converted to direct or cartesian space + ''' + pxq *= vol + + if outspace == 'r': + pass + elif outspace == 'd': + pxq = self.TransSpace(pxq, 'r', 'd') + elif outspace == 'c': + pxq = self.TransSpace(pxq, 'r', 'c') + else: + raise ValueError( + 'inspace is ''d'' but outspace is unidentified') + + elif inspace == 'r': + ''' + cross product vector is in direct space and + can be converted to any other space + ''' + pxq /= vol + if outspace == 'r': + pxq = self.TransSpace(pxq, 'd', 'r') + elif outspace == 'd': + pass + elif outspace == 'c': + pxq = self.TransSpace(pxq, 'd', 'c') + else: + raise ValueError( + 'inspace is ''r'' but outspace is unidentified') + + elif inspace == 'c': + ''' + cross product is already in cartesian space so no + volume factor is involved. can be converted to any + other space too + ''' + if outspace == 'r': + pxq = self.TransSpace(pxq, 'c', 'r') + elif outspace == 'd': + pxq = self.TransSpace(pxq, 'c', 'd') + elif outspace == 'c': + pass + else: + raise ValueError( + 'inspace is ''c'' but outspace is unidentified') + + else: + raise ValueError('inspace is unidentified') + + return pxq + + def GenerateRecipPGSym(self): + + self.SYM_PG_r = self.SYM_PG_d[0, :, :] + self.SYM_PG_r = np.broadcast_to(self.SYM_PG_r, [1, 3, 3]) + + self.SYM_PG_r_laue = self.SYM_PG_d[0, :, :] + self.SYM_PG_r_laue = np.broadcast_to(self.SYM_PG_r_laue, [1, 3, 3]) + + for i in range(1, self.npgsym): + g = self.SYM_PG_d[i, :, :] + g = np.dot(self.dmt, np.dot(g, self.rmt)) + g = np.round(np.broadcast_to(g, [1, 3, 3])) + self.SYM_PG_r = np.concatenate((self.SYM_PG_r, g)) + + for i in range(1, self.SYM_PG_d_laue.shape[0]): + g = self.SYM_PG_d_laue[i, :, :] + g = np.dot(self.dmt, np.dot(g, self.rmt)) + g = np.round(np.broadcast_to(g, [1, 3, 3])) + self.SYM_PG_r_laue = np.concatenate((self.SYM_PG_r_laue, g)) + + self.SYM_PG_r = self.SYM_PG_r.astype(np.int32) + self.SYM_PG_r_laue = self.SYM_PG_r_laue.astype(np.int32) + + def GenerateCartesianPGSym(self): + ''' + use the direct point group symmetries to generate the + symmetry operations in the cartesian frame. this is used + to reduce directions to the standard stereographi tringle + ''' + self.SYM_PG_c = [] + self.SYM_PG_c_laue = [] + + for sop in self.SYM_PG_d: + self.SYM_PG_c.append(np.dot(self.dsm, np.dot(sop, self.rsm.T))) + + self.SYM_PG_c = np.array(self.SYM_PG_c) + self.SYM_PG_c[np.abs(self.SYM_PG_c) < eps] = 0. + + if self._pointGroup == self._laueGroup: + self.SYM_PG_c_laue = self.SYM_PG_c + else: + for sop in self.SYM_PG_d_laue: + self.SYM_PG_c_laue.append( + np.dot(self.dsm, np.dot(sop, self.rsm.T))) + self.SYM_PG_c_laue = np.array(self.SYM_PG_c_laue) + self.SYM_PG_c_laue[np.abs(self.SYM_PG_c_laue) < eps] = 0. 
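+
+        # NOTE: the loops above take each direct-space operator g to the
+        # cartesian frame via the similarity transform g_c = dsm @ g @ rsm.T
+        # (a change of basis from lattice to orthonormal coordinates);
+        # entries with magnitude below eps are zeroed to suppress round-off.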
+ + ''' + use the point group symmetry of the supergroup + to generate the equivalent operations in the + cartesian reference frame + + SS 11/23/2020 added supergroup symmetry operations + SS 11/24/2020 fix monoclinic groups separately since + the supergroup for monoclinic is orthorhombic + ''' + supergroup = self._supergroup + sym_supergroup = symmetry.GeneratePGSYM(supergroup) + + supergroup_laue = self._supergroup_laue + sym_supergroup_laue = symmetry.GeneratePGSYM(supergroup_laue) + + if self.latticeType in ('monoclinic', 'triclinic'): + ''' + for monoclinic groups c2 and c2h, the supergroups are + orthorhombic, so no need to convert from direct to + cartesian as they are identical + ''' + self.SYM_PG_supergroup = sym_supergroup + self.SYM_PG_supergroup_laue = sym_supergroup_laue + + else: + + self.SYM_PG_supergroup = [] + self.SYM_PG_supergroup_laue = [] + + for sop in sym_supergroup: + self.SYM_PG_supergroup.append( + np.dot(self.dsm, np.dot(sop, self.rsm.T))) + + self.SYM_PG_supergroup = np.array(self.SYM_PG_supergroup) + self.SYM_PG_supergroup[np.abs(self.SYM_PG_supergroup) < eps] = 0. + + for sop in sym_supergroup_laue: + self.SYM_PG_supergroup_laue.append( + np.dot(self.dsm, np.dot(sop, self.rsm.T))) + + self.SYM_PG_supergroup_laue = np.array(self.SYM_PG_supergroup_laue) + self.SYM_PG_supergroup_laue[np.abs( + self.SYM_PG_supergroup_laue) < eps] = 0. + + ''' + the standard setting for the monoclinic system has the b-axis aligned + with the 2-fold axis. this needs to be accounted for when reduction to + the standard stereographic triangle is performed. the siplest way is to + rotate all symmetry elements by 90 about the x-axis + + the supergroups for the monoclinic groups are orthorhombic so they need + not be rotated as they have the c* axis already aligned with the z-axis + SS 12/10/2020 + ''' + if self.latticeType == 'monoclinic': + + om = np.array([[1., 0., 0.], [0., 0., 1.], [0., -1., 0.]]) + + for i, s in enumerate(self.SYM_PG_c): + ss = np.dot(om, np.dot(s, om.T)) + self.SYM_PG_c[i, :, :] = ss + + for i, s in enumerate(self.SYM_PG_c_laue): + ss = np.dot(om, np.dot(s, om.T)) + self.SYM_PG_c_laue[i, :, :] = ss + ''' + for the triclinic group c1, the supergroups are the monoclinic group m + therefore we need to rotate the mirror to be perpendicular to the z-axis + same shouldn't be done for the group ci, since the supergroup is just the + triclinic group c1!! + SS 12/10/2020 + ''' + if self._pointGroup == 'c1': + om = np.array([[1., 0., 0.], [0., 0., 1.], [0., -1., 0.]]) + + for i, s in enumerate(self.SYM_PG_supergroup): + ss = np.dot(om, np.dot(s, om.T)) + self.SYM_PG_supergroup[i, :, :] = ss + + for i, s in enumerate(self.SYM_PG_supergroup_laue): + ss = np.dot(om, np.dot(s, om.T)) + self.SYM_PG_supergroup_laue[i, :, :] = ss + + def CalcOrbit(self, v, reduceToUC=True): + """ + @date 03/04/2021 SS 1.0 original + + @details calculate the equivalent position for the + space group symmetry. this function will replace the + code in the CalcPositions subroutine. 
+ + @params v is the factional coordinates in direct space + reduceToUC reduces the position to the + fundamental fractional unit cell (0-1) + """ + + asym_pos = [] + n = 1 + if v.shape[0] != 3: + raise RuntimeError("fractional coordinate in not 3-d") + r = v + # using wigner-sietz notation + r = np.hstack((r, 1.)) + + asym_pos = np.broadcast_to(r[0:3], [1, 3]) + + for symmat in self.SYM_SG: + # get new position + rnew = np.dot(symmat, r) + rr = rnew[0:3] + + if reduceToUC: + # reduce to fundamental unitcell with fractional + # coordinates between 0-1 + rr = np.modf(rr)[0] + rr[rr < 0.] += 1. + rr[np.abs(rr) < 1.0E-6] = 0. + + # check if this is new + isnew = True + for j in range(n): + v = rr - asym_pos[j] + dist = self.CalcLength(v, 'd') + if dist < 1E-3: + isnew = False + break + + # if its new add this to the list + if isnew: + asym_pos = np.vstack((asym_pos, rr)) + n += 1 + + numat = n + + return asym_pos, numat + + def CalcStar(self, v, space, applyLaue=False): + ''' + this function calculates the symmetrically equivalent hkls (or uvws) + for the reciprocal (or direct) point group symmetry. + ''' + if space == 'd': + mat = self.dmt.astype(np.float64) + if applyLaue: + sym = self.SYM_PG_d_laue.astype(np.float64) + else: + sym = self.SYM_PG_d.astype(np.float64) + elif space == 'r': + mat = self.rmt.astype(np.float64) + if applyLaue: + sym = self.SYM_PG_r_laue.astype(np.float64) + else: + sym = self.SYM_PG_r.astype(np.float64) + elif space == 'c': + mat = np.eye(3) + if applyLaue: + sym = self.SYM_PG_c_laue.astype(np.float64) + else: + sym = self.SYM_PG_c.astype(np.float64) + else: + raise ValueError('CalcStar: unrecognized space.') + + vv = np.array(v).astype(np.float64) + return _calcstar(vv, sym, mat) + + def CalcPositions(self): + ''' + calculate the asymmetric positions in the fundamental unitcell + used for structure factor calculations + ''' + numat = [] + asym_pos = [] + + for i in range(self.atom_ntype): + + v = self.atom_pos[i, 0:3] + apos, n = self.CalcOrbit(v) + + asym_pos.append(apos) + numat.append(n) + + self.numat = np.array(numat) + self.asym_pos = asym_pos + + def remove_duplicate_atoms(self, + atom_pos=None, + tol=1e-3): + """ + @date 03/04/2021 SS 1.0 original + + @details it was requested that a functionality be + added which can remove duplicate atoms from the + atom_pos field such that no two atoms are closer that + the distance specified by "tol" (lets assume its in A) + steps involved are as follows: + 1. get the star (or orbit) oe each point in atom_pos + 2. if any points in the orbits are within tol, then + remove the second point (the first point will be + preserved by convention) + 3. update the densities, interptables for structure factors + etc. 
+ + @params tol tolerance of distance between points specified + in A + """ + + if atom_pos is None: + atom_pos = self.atom_pos + + atom_pos_fixed = [] + idx = [] + """ + go through the atom_pos and remove the atoms that are duplicate + """ + for i in range(atom_pos.shape[0]): + pos = atom_pos[i, 0:3] + occ = atom_pos[i, 3] + v1, n1 = self.CalcOrbit(pos) + if i == 0: + atom_pos_fixed.append(np.hstack([pos, occ])) + idx.append(i) + else: + isclose = False + for j, uniqpos in enumerate(atom_pos_fixed): + pos2 = uniqpos[0:3] + occ2 = uniqpos[3] + # cases with fractional occupancy on same site + if (np.all(np.isclose(pos, pos2)) and + (occ+occ2 <= 1.)): + atom_pos_fixed.append(np.hstack([pos, occ])) + idx.append(i) + isclose = True + break + else: + v2, n2 = self.CalcOrbit(pos2) + for v in v2: + vv = np.tile(v, [v1.shape[0], 1]) + vv = vv - v1 + + for vvv in vv: + # check if distance less than tol + # the factor of 10 is for A --> nm + if self.CalcLength(vvv, 'd') < tol/10.: + # if true then its a repeated atom + isclose = True + break + + if isclose: + break + if isclose: + break + if not isclose: + atom_pos_fixed.append(np.hstack([pos, occ])) + idx.append(i) + + idx = np.array(idx) + atom_pos_fixed = np.array(atom_pos_fixed) + atom_type = self.atom_type[idx] + chargestates = [self.chargestates[i] for i in idx] + + if self.aniU: + U = self.U[idx, :] + else: + U = self.U[idx] + + self.atom_type = atom_type + self.chargestates = chargestates + self.atom_pos = atom_pos_fixed + + self.U = U + ''' + initialize interpolation from table for anomalous scattering + ''' + self.InitializeInterpTable() + self.CalcPositions() + self.CalcDensity() + self.calc_absorption_length() + + def CalcDensity(self): + ''' + calculate density, average atomic weight (avA) + and average atomic number(avZ) + ''' + self.avA = 0.0 + self.avZ = 0.0 + + for i in range(self.atom_ntype): + ''' + atype is atom type i.e. 
atomic number + numat is the number of atoms of atype + atom_pos(i,3) has the occupation factor + ''' + atype = self.atom_type[i] + numat = self.numat[i] + occ = self.atom_pos[i, 3] + + # -1 due to 0 indexing in python + self.avA += numat * constants.atom_weights[atype-1] * occ + + self.avZ += numat * atype + + self.density = self.avA / (self.vol * 1.0E-21 * constants.cAvogadro) + + av_natom = np.dot(self.numat, self.atom_pos[:, 3]) + + self.avA /= av_natom + self.avZ /= np.sum(self.numat) + + ''' calculate the maximum index of diffraction vector along + each of the three reciprocal + basis vectors ''' + + def init_max_g_index(self): + """ + added 03/17/2021 SS + """ + self.ih = 1 + self.ik = 1 + self.il = 1 + + def CalcMaxGIndex(self): + self.init_max_g_index() + + while (1.0 / self.CalcLength( + np.array([self.ih, 0, 0], + dtype=np.float64), 'r') > self.dmin): + self.ih = self.ih + 1 + + while (1.0 / self.CalcLength( + np.array([0, self.ik, 0], + dtype=np.float64), 'r') > self.dmin): + self.ik = self.ik + 1 + + while (1.0 / self.CalcLength( + np.array([0, 0, self.il], + dtype=np.float64), 'r') > self.dmin): + self.il = self.il + 1 + + def InitializeInterpTable(self): + + f_anomalous_data = [] + self.pe_cs = {} + data = ( + importlib.resources.files(hexrd.resources) + .joinpath('Anomalous.h5') + .open('rb') + ) + with h5py.File(data, 'r') as fid: + for i in range(0, self.atom_ntype): + + Z = self.atom_type[i] + elem = constants.ptableinverse[Z] + + if Z <= 92: + gid = fid.get('/'+elem) + data = np.array(gid.get('data')) + self.pe_cs[elem] = interp1d(data[:, WAV_ID], + data[:, MU_ID]+data[:,COH_INCOH_ID]) + data = data[:, [WAV_ID, REAL_F1_ID, IMAG_F2_ID]] + f_anomalous_data.append(data) + else: + wav = np.linspace(1.16E2, 2.86399992e-03, 189) + zs = np.ones_like(wav)*Z + zrs = np.zeros_like(wav) + data_zs = np.vstack((wav, zs, zrs)).T + self.pe_cs[elem] = interp1d(wav, zrs) + f_anomalous_data.append(data_zs) + + n = max([x.shape[0] for x in f_anomalous_data]) + self.f_anomalous_data = np.zeros([self.atom_ntype, n, 3]) + self.f_anomalous_data_sizes = np.zeros( + [self.atom_ntype, ], dtype=np.int32) + + for i in range(self.atom_ntype): + nd = f_anomalous_data[i].shape[0] + self.f_anomalous_data_sizes[i] = nd + self.f_anomalous_data[i, :nd, :] = f_anomalous_data[i] + + def CalcXRSF(self, hkl): + from hexrd.wppf.xtal import _calcxrsf + ''' + the 1E-2 is to convert to A^-2 + since the fitting is done in those units + ''' + fNT = np.zeros([self.atom_ntype, ]) + frel = np.zeros([self.atom_ntype, ]) + scatfac = np.zeros([self.atom_ntype, 11]) + f_anomalous_data = self.f_anomalous_data + + hkl2d = np.atleast_2d(hkl).astype(np.float64) + nref = hkl2d.shape[0] + + multiplicity = np.ones([nref, ]) + w_int = 1.0 + + occ = self.atom_pos[:, 3] + aniU = self.aniU + if aniU: + betaij = self.betaij + else: + betaij = self.U + + self.asym_pos_arr = np.zeros([self.numat.max(), self.atom_ntype, 3]) + for i in range(0, self.atom_ntype): + nn = self.numat[i] + self.asym_pos_arr[:nn, i, :] = self.asym_pos[i] + + self.numat = np.zeros(self.atom_ntype, dtype=np.int32) + for i in range(0, self.atom_ntype): + self.numat[i] = self.asym_pos[i].shape[0] + Z = self.atom_type[i] + elem = constants.ptableinverse[Z] + scatfac[i, :] = constants.scatfac[elem] + if Z <= 92: + frel[i] = constants.frel[elem] + fNT[i] = constants.fNT[elem] + + sf, sf_raw = _calcxrsf(hkl2d, + nref, + multiplicity, + w_int, + self.wavelength, + self.rmt.astype(np.float64), + self.atom_type, + self.atom_ntype, + betaij, + occ, + self.asym_pos_arr, + 
self.numat, + scatfac, + fNT, + frel, + f_anomalous_data, + self.f_anomalous_data_sizes) + + return sf_raw + + """ + molecular mass calculates the molar weight of the unit cell + since the unitcell can have multiple formular units, this + might be greater than the molecular weight + """ + + def calc_unitcell_mass(self): + a_mass = constants.atom_weights[self.atom_type-1] + return np.sum(a_mass*self.numat) + + """ + calculate the number density in 1/micron^3 + number density = density * Avogadro / unitcell mass + the 1e-12 factor converts from 1/cm^3 to 1/micron^3 + """ + + def calc_number_density(self): + M = self.calc_unitcell_mass() + Na = constants.cAvogadro + + return 1e-12 * self.density * Na / M + + def calc_absorption_cross_sec(self): + + abs_cs_total = 0. + for i in range(self.atom_ntype): + Z = self.atom_type[i] + elem = constants.ptableinverse[Z] + abs_cs_total += self.pe_cs[elem](self.wavelength) *\ + self.numat[i]/np.sum(self.numat) + return abs_cs_total + + """ + calculate the absorption coefficient which is + calculated using the sum of photoeffect, compton and + rayleigh cross ections. the pair and triplet production + cross sections etc are not applicable in the energy range + of interest and therefore neglected. + + attenuation coeff = sigma_total * density + + attenuation_length = 1/attenuation_coeff + + NOTE: units will be microns!! + + """ + + def calc_absorption_length(self): + # re = 2.8179403e-9 # in microns + # N = self.calc_number_density() + abs_cs_total = self.calc_absorption_cross_sec() + + # the 1e4 factor converts wavelength from cm -> micron + self.absorption_length = 1e4/(abs_cs_total*self.density) + + """ + calculate bragg angle for a reflection. returns Nan if + the reflections is not possible for the voltage/wavelength + """ + + def CalcBraggAngle(self, hkl): + glen = self.CalcLength(hkl, 'r') + sth = self.wavelength * glen * 0.5 + return np.arcsin(sth) + + def ChooseSymmetric(self, hkllist, InversionSymmetry=True): + ''' + this function takes a list of hkl vectors and + picks out a subset of the list picking only one + of the symmetrically equivalent one. The convention + is to choose the hkl with the most positive components. + ''' + mask = np.ones(hkllist.shape[0], dtype=bool) + laue = InversionSymmetry + + for i, g in enumerate(hkllist): + if mask[i]: + + geqv = self.CalcStar(g, 'r', applyLaue=laue) + + for r in geqv[1:, ]: + rid = np.where(np.all(r == hkllist, axis=1)) + mask[rid] = False + + hkl = hkllist[mask, :].astype(np.int32) + + hkl_max = [] + + for g in hkl: + geqv = self.CalcStar(g, 'r', applyLaue=laue) + loc = np.argmax(np.sum(geqv, axis=1)) + gmax = geqv[loc, :] + hkl_max.append(gmax) + + return np.array(hkl_max).astype(np.int32) + + def SortHKL(self, hkllist): + ''' + this function sorts the hkllist by increasing |g| + i.e. decreasing d-spacing. If two vectors are same + length, then they are ordered with increasing + priority to l, k and h + ''' + glen = [] + for g in hkllist: + glen.append(np.round(self.CalcLength(g, 'r'), 8)) + + # glen = np.atleast_2d(np.array(glen,dtype=float)).T + dtype = [('glen', float), ('max', int), ('sum', int), + ('h', int), ('k', int), ('l', int)] + + a = [] + for i, gl in enumerate(glen): + g = hkllist[i, :] + a.append((gl, np.max(g), np.sum(g), g[0], g[1], g[2])) + a = np.array(a, dtype=dtype) + + isort = np.argsort(a, order=['glen', 'max', 'sum', 'l', 'k', 'h']) + return hkllist[isort, :] + + def getHKLs(self, dmin): + ''' + this function generates the symetrically unique set of + hkls up to a given dmin. 
+        dmin is in nm
+        '''
+        '''
+        always have the centrosymmetric condition because of
+        Friedel's law for xrays so only 4 of the 8 octants
+        are sampled for unique hkls. By convention we will
+        ignore all l < 0
+        '''
+
+        hmin = -self.ih-1
+        hmax = self.ih
+        kmin = -self.ik-1
+        kmax = self.ik
+        lmin = -1
+        lmax = self.il
+
+        hkllist = np.array([[ih, ik, il] for ih in np.arange(hmax, hmin, -1)
+                            for ik in np.arange(kmax, kmin, -1)
+                            for il in np.arange(lmax, lmin, -1)])
+
+        hkl_allowed = spacegroup.Allowed_HKLs(self.sgnum, hkllist)
+
+        hkl = []
+        dsp = []
+
+        hkl_dsp = []
+
+        for g in hkl_allowed:
+
+            # ignore [0 0 0] as it is the direct beam
+            if np.sum(np.abs(g)) != 0:
+
+                dspace = 1./self.CalcLength(g, 'r')
+
+                if dspace >= dmin:
+                    hkl_dsp.append(g)
+
+        '''
+        we now have a list of g vectors which are all within the dmin range;
+        the systematic absences due to lattice centering and glide
+        planes/screw axes have been taken care of
+
+        the next order of business is to go through the list and only pick
+        out one of the symmetrically equivalent hkls from the list.
+        '''
+        hkl_dsp = np.array(hkl_dsp).astype(np.int32)
+        '''
+        the inversionsymmetry switch enforces the application of the inversion
+        symmetry regardless of whether the crystal has the symmetry or not
+        this is necessary in the case of xrays due to Friedel's law
+        '''
+        hkl = self.ChooseSymmetric(hkl_dsp, InversionSymmetry=True)
+
+        '''
+        finally sort in order of decreasing dspacing
+        '''
+        self.hkls = self.SortHKL(hkl)
+
+        return self.hkls
+    '''
+    set some properties for the unitcell class. only the lattice
+    parameters, space group and asymmetric positions can change,
+    but all the dependent parameters will be automatically updated
+    '''
+
+    def Required_lp(self, p):
+        return _rqpDict[self.latticeType][1](p)
+
+    def Required_C(self, C):
+        return np.array([C[x] for x in _StiffnessDict[self._laueGroup][0]])
+
+    def MakeStiffnessMatrix(self, inp_Cvals):
+        if len(inp_Cvals) != len(_StiffnessDict[self._laueGroup][0]):
+            x = len(_StiffnessDict[self._laueGroup][0])
+            msg = (f"number of constants entered is not correct."
+                   f" need a total of {x} independent constants.")
+            raise IOError(msg)
+
+        # initialize all zeros and fill the supplied values
+        C = np.zeros([6, 6])
+        for i, x in enumerate(_StiffnessDict[self._laueGroup][0]):
+
+            C[x] = inp_Cvals[i]
+
+        # enforce the equality constraints
+        C = _StiffnessDict[self._laueGroup][1](C)
+
+        # finally fill the lower triangular matrix
+        for i in range(6):
+            for j in range(i):
+                C[i, j] = C[j, i]
+
+        self.stiffness = C
+        self.compliance = np.linalg.inv(C)
+
+    def inside_spheretriangle(self, conn, dir3, hemisphere, switch):
+        '''
+        check if a direction is inside a spherical triangle
+        the logic used is as follows:
+        if the determinants of [A B x], [A x C] and [x B C] all have
+        the same sign, then the point x is inside the triangle
+        formed by A, B and C
+
+        returns a mask with inside as True and outside as False
+
+        11/23/2020 SS switch is now a string specifying which
+        symmetry group to use for reducing directions
+        11/23/2020 SS catching cases when vertices are empty
+        '''
+
+        '''
+        first get the vertices of the triangles for the requested symmetry
+        '''
+        vertex = self.sphere_sector.vertices[switch]
+        # if switch == 'pg':
+        #     vertex = self.sphere_sector.vertices
+
+        # elif switch == 'laue':
+        #     vertex = self.sphere_sector.vertices_laue
+
+        # elif switch == 'super':
+        #     vertex = self.sphere_sector.vertices_supergroup
+
+        # elif switch == 'superlaue':
+        #     vertex = self.sphere_sector.vertices_supergroup_laue
+
+        A = np.atleast_2d(vertex[:, conn[0]]).T
+        B = np.atleast_2d(vertex[:, conn[1]]).T
+        C = np.atleast_2d(vertex[:, conn[2]]).T
+
+        mask = []
+        for x in dir3:
+
+            x2 = np.atleast_2d(x).T
+            d1 = np.linalg.det(np.hstack((A, B, x2)))
+            d2 = np.linalg.det(np.hstack((A, x2, C)))
+            d3 = np.linalg.det(np.hstack((x2, B, C)))
+            '''
+            catching cases very close to the FZ boundary when the
+            determinant can be a very small positive or negative
+            number
+            '''
+            if np.abs(d1) < eps:
+                d1 = 0.
+            if np.abs(d2) < eps:
+                d2 = 0.
+            if np.abs(d3) < eps:
+                d3 = 0.
+
+            ss = np.unique(np.sign([d1, d2, d3]))
+            if hemisphere == 'upper':
+                if np.all(ss >= 0.):
+                    mask.append(True)
+                else:
+                    mask.append(False)
+
+            elif hemisphere == 'both':
+                if len(ss) == 1:
+                    mask.append(True)
+                elif len(ss) == 2:
+                    if 0 in ss:
+                        mask.append(True)
+                    else:
+                        mask.append(False)
+                elif len(ss) == 3:
+                    mask.append(False)
+
+        mask = np.array(mask)
+        return mask
+
+    '''
+    @AUTHOR Saransh Singh, Lawrence Livermore National Lab, saransh1@llnl.gov
+
+    @date 10/28/2020 SS 1.0 original
+          11/23/2020 SS 1.1 the laueswitch has been changed from a boolean
+          variable to a string input with three possible values
+    @params dir3 : n x 3 array of directions to reduce
+            switch switch to decide which symmetry group to use. one of four:
+            (a) 'pg' use the cartesian point group symmetry
+            (b) 'laue' use the laue symmetry
+            (c) 'super' use the supergroup symmetry used in coloring
+            (d) 'superlaue' use the supergroup of the laue group
+
+    @detail this subroutine takes a direction vector and uses the point group
+    symmetry of the unitcell to reduce it to the fundamental stereographic
+    triangle for that point group. this function is used in generating the IPF
+    color legend for orientations. for now we are assuming dir3 is an nx3
+    array of directions.
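+
+    example (hypothetical usage; assumes `uc` is an initialized unitcell and
+    the inputs are row vectors, which are normalized internally):
+        dir3 = np.array([[0., 0., 1.], [1., 1., 1.]])
+        dir3_red = uc.reduce_dirvector(dir3, switch='laue')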
+ ''' + + def reduce_dirvector(self, dir3, switch='pg'): + ''' + check if the dimensions of the dir3 array is to spec + ''' + idx = np.arange(dir3.shape[0], dtype=np.int32) + dir3 = np.ascontiguousarray(np.atleast_2d(dir3)) + if dir3.ndim != 2: + raise RuntimeError("reduce_dirvector: invalid shape of dir3 array") + + ''' + check if the direction vector is a unit vector or not. + if it is not normalize it to get a unit vector. the dir vector + is in the sample frame, so by default it is assumed to be in a + orthonormal cartesian frame. this defines the normalization as + just division by the L2 norm + ''' + eps = constants.sqrt_epsf + + if np.all(np.abs(np.linalg.norm(dir3, axis=1) - 1.0) < eps): + dir3n = dir3 + else: + if np.all(np.linalg.norm(dir3) > eps): + dir3n = dir3/np.tile(np.linalg.norm(dir3, axis=1), [3, 1]).T + else: + raise RuntimeError( + "atleast one of the input direction seems \ + to be a null vector") + + ''' + we need both the symmetry reductions for the point group and laue group + this will be used later on in the coloring routines to determine if the + points needs to be moved to the southern hemisphere or not + ''' + dir3_copy = np.copy(dir3n) + dir3_reduced = np.array([]) + idx_copy = np.copy(idx) + idx_red = np.array([], dtype=np.int32) + ''' + laue switch is used to determine which set of symmetry operations to + loop over + ''' + hemisphere = self.sphere_sector.hemisphere[switch] + ntriangle = self.sphere_sector.ntriangle[switch] + connectivity = self.sphere_sector.connectivity[switch] + + if switch == 'pg': + sym = self.SYM_PG_c + + elif switch == 'super': + sym = self.SYM_PG_supergroup + + elif switch == 'laue': + sym = self.SYM_PG_c_laue + + elif switch == 'superlaue': + sym = self.SYM_PG_supergroup_laue + + for sop in sym: + + if dir3_copy.size != 0: + + dir3_sym = np.dot(sop, dir3_copy.T).T + + mask = np.zeros(dir3_sym.shape[0]).astype(bool) + + if ntriangle == 0: + if hemisphere == 'both': + mask = np.ones(dir3_sym.shape[0], dtype=bool) + elif hemisphere == 'upper': + mask = dir3_sym[:, 2] >= 0. + else: + for ii in range(ntriangle): + tmpmask = self.inside_spheretriangle( + connectivity[:, ii], dir3_sym, + hemisphere, switch) + mask = np.logical_or(mask, tmpmask) + + if np.sum(mask) > 0: + if dir3_reduced.size != 0: + dir3_reduced = np.vstack( + (dir3_reduced, dir3_sym[mask, :])) + idx_red = np.hstack((idx_red, idx[mask])) + else: + dir3_reduced = np.copy(dir3_sym[mask, :]) + idx_red = np.copy(idx[mask]) + + dir3_copy = dir3_copy[np.logical_not(mask), :] + idx = idx[np.logical_not(mask)] + else: + break + dir3_r = np.zeros(dir3_reduced.shape) + dir3_r[idx_red, :] = dir3_reduced + + return dir3_r + + def color_directions(self, dir3, laueswitch): + ''' + @AUTHOR Saransh Singh, Lawrence Livermore National Lab, saransh1@llnl.gov + @DATE 11/12/2020 SS 1.0 original + @PARAM dir3 is crystal direction obtained by multiplying inverse of + crystal orientation with reference direction + laueswitch perform reducion based on lauegroup or the point group + + @DETAIL this is the routine which makes the calls to sphere_sector + class which correctly color the orientations for this crystal class. the + logic is as follows: + + 1. reduce direction to fundamental zone of point group + 2. reduce to fundamental zone of super group + 3. If both are same, then color (hsl) assigned by polar and azimuth + 4. 
If different, then barycenter lightness is replaced by 1-L (equivalent to
+        replacing the barycenter by pi-theta)
+        '''
+
+        if laueswitch:
+            '''
+            this is the case where we color orientations based on the laue
+            group of the crystal. this is always going to be the case with
+            x-rays, which introduce inversion symmetry. For other probes,
+            this is not the case.
+            '''
+            dir3_red = self.reduce_dirvector(dir3, switch='laue')
+            dir3_red_supergroup = self.reduce_dirvector(
+                dir3, switch='superlaue')
+            switch = 'superlaue'
+
+        else:
+            '''
+            follow the logic in the function description
+            '''
+            dir3_red = self.reduce_dirvector(dir3, switch='pg')
+            dir3_red_supergroup = self.reduce_dirvector(dir3, switch='super')
+            switch = 'super'
+
+        mask = np.linalg.norm(dir3_red - dir3_red_supergroup, axis=1) < eps
+        hsl = self.sphere_sector.get_color(dir3_red_supergroup, mask, switch)
+
+        rgb = colorspace.hsl2rgb(hsl)
+        return rgb
+
+    def color_orientations(self,
+                           rmats,
+                           ref_dir=np.array([0., 0., 1.]),
+                           laueswitch=True):
+        '''
+        @AUTHOR Saransh Singh, Lawrence Livermore National Lab, saransh1@llnl.gov
+        @DATE 11/12/2020 SS 1.0 original
+        @PARAM rmats rotation matrices of size nx3x3
+        ref_dir reference direction of the sample frame along which all
+        crystal directions are colored
+        laueswitch should we use laue group for coloring or not
+        @DETAIL this is a simple routine which takes orientations as rotation
+        matrices and a reference sample direction ([0 0 1] by default) and
+        returns the directions in the crystal reference frame. Note that the
+        crystal orientation is defined as the orientation which takes the
+        """SAMPLE""" reference frame TO the """CRYSTAL""" frame.
+        Since we are computing the conversion from crystal to sample, we will
+        need to INVERT these matrices. Thankfully, this is just a transpose.
+        '''
+
+        '''
+        first make sure that the rotation matrix is of size nx3x3
+        '''
+        if rmats.ndim == 2:
+            rmats = np.atleast_3d(rmats).T
+        else:
+            assert rmats.ndim == 3, "rotation matrices need to \
+                be nx3x3. Please check size."
+
+        '''
+        obtain the direction vectors by simple matrix multiplication of the
+        transpose of the rotation matrix with the reference direction
+        '''
+        dir3 = []
+        for r in rmats:
+            dir3.append(np.dot(r.T, ref_dir))
+
+        dir3 = np.array(dir3)
+        '''
+        finally get the rgb colors
+        '''
+        rgb = self.color_directions(dir3, laueswitch)
+        return rgb
+
+    def is_editable(self, lp_name):
+        """
+        @author Saransh Singh, Lawrence Livermore National Lab
+        @date 03/17/2021 SS 1.0 original
+        @details check if a certain field in the lattice parameters
+        is editable. this depends on the space group number or the
+        lattice class
+        """
+
+        _lpnamelist = list(_lpname)
+        index = _lpnamelist.index(lp_name)
+        editable_fields = _rqpDict[self.latticeType][0]
+        return index in editable_fields
+
+    def convert_lp_to_valunits(self, lp):
+        """
+        added 03/17/2021 SS
+        """
+        lp_valunit = []
+        for i in range(6):
+            if i < 3:
+                lp_valunit.append(
+                    valWUnit('lp', 'length', lp[i], 'nm'))
+
+            else:
+                lp_valunit.append(
+                    valWUnit('lp', 'angle', lp[i], 'degrees'))
+
+        return lp_valunit
+
+    def fill_correct_lp_vals(self, lp, val, lp_name):
+        """
+        added 03/17/2021 SS
+        """
+        index = list(_lpname).index(lp_name)
+        lp[index] = val
+        lp_red = [lp[i] for i in
+                  _rqpDict[self.latticeType][0]]
+        lp = _rqpDict[self.latticeType][1](lp_red)
+        lp_valunit = self.convert_lp_to_valunits(lp)
+        return lp_valunit
+
+    @property
+    def compliance(self):
+        # Compliance in TPa⁻¹. Stiffness is in GPa.
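+        # Worked example (hypothetical numbers): for a cubic crystal with
+        # C11 = 250, C12 = 150, C44 = 100 GPa, stiffness/1e3 is in TPa, so
+        # the inverse below gives the compliance in TPa⁻¹ (S11 ~ 7.3).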
+ if not hasattr(self, 'stiffness'): + raise AttributeError('Stiffness not set on unit cell') + + return np.linalg.inv(self.stiffness / 1.e3) + + @compliance.setter + def compliance(self, v): + # Compliance in TPa⁻¹. Stiffness is in GPa. + self.stiffness = np.linalg.inv(v) * 1.e3 + + # lattice constants as properties + + @property + def lparms(self): + return [self.a, self.b, + self.c, self.alpha, self.beta, + self.gamma] + + @lparms.setter + def lparms(self, lp): + """ + set the lattice parameters here + """ + self._a = lp[0].getVal("nm") + self._b = lp[1].getVal("nm") + self._c = lp[2].getVal("nm") + self._alpha = lp[3].getVal("degrees") + self._beta = lp[4].getVal("degrees") + self._gamma = lp[5].getVal("degrees") + self.calcmatrices() + self.init_max_g_index() + self.CalcMaxGIndex() + if hasattr(self, 'numat'): + self.CalcDensity() + + @property + def lparms_reduced(self): + lp = self.lparms + lp_red = [lp[i] for i in + _rqpDict[self.latticeType][0]] + return lp_red + + @property + def a(self): + return self._a + + @a.setter + def a(self, val): + if self.is_editable("a"): + lp = self.lparms + lp_valunit = self.fill_correct_lp_vals( + lp, val, "a") + self.lparms = lp_valunit + else: + msg = (f"not an editable field" + f" for this space group") + raise RuntimeError(msg) + + @property + def b(self): + return self._b + + @b.setter + def b(self, val): + if self.is_editable("b"): + lp = self.lparms + lp_valunit = self.fill_correct_lp_vals( + lp, val, "b") + self.lparms = lp_valunit + else: + msg = (f"not an editable field" + f" for this space group") + raise RuntimeError(msg) + + @property + def c(self): + return self._c + + @c.setter + def c(self, val): + if self.is_editable("c"): + lp = self.lparms + lp_valunit = self.fill_correct_lp_vals( + lp, val, "c") + self.lparms = lp_valunit + else: + msg = (f"not an editable field" + f" for this space group") + raise RuntimeError(msg) + + @property + def alpha(self): + return self._alpha + + @alpha.setter + def alpha(self, val): + if self.is_editable("alpha"): + lp = self.lparms + lp_valunit = self.fill_correct_lp_vals( + lp, val, "alpha") + self.lparms = lp_valunit + else: + msg = (f"not an editable field" + f" for this space group") + raise RuntimeError(msg) + + @property + def beta(self): + return self._beta + + @beta.setter + def beta(self, val): + if self.is_editable("beta"): + lp = self.lparms + lp_valunit = self.fill_correct_lp_vals( + lp, val, "beta") + self.lparms = lp_valunit + else: + msg = (f"not an editable field" + f" for this space group") + raise RuntimeError(msg) + + @property + def gamma(self): + return self._gamma + + @gamma.setter + def gamma(self, val): + if self.is_editable("gamma"): + lp = self.lparms + lp_valunit = self.fill_correct_lp_vals( + lp, val, "gamma") + self.lparms = lp_valunit + else: + msg = (f"not an editable field" + f" for this space group") + raise RuntimeError(msg) + + @property + def dmin(self): + return self._dmin + + @dmin.setter + def dmin(self, v): + if self._dmin == v: + return + self._dmin = v + # Update the Max G Index + self.CalcMaxGIndex() + + @property + def U(self): + return self._U + + @U.setter + def U(self, Uarr): + self._U = Uarr + self.aniU = False + if Uarr.ndim > 1: + self.aniU = True + self.calcBetaij() + + @property + def voltage(self): + return self._voltage + + @voltage.setter + def voltage(self, v): + self._voltage = v + self.CalcWavelength() + + @property + def wavelength(self): + return self._mlambda + + @wavelength.setter + def wavelength(self, mlambda): + self._mlambda = mlambda + 
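+    # Usage sketch (hypothetical values, not part of the original source):
+    # setting a lattice parameter or the beam energy cascades through all
+    # dependent quantities, e.g.
+    #
+    #     uc.a = 0.3615        # nm; recomputes metric tensors, density, ...
+    #     uc.voltage = 8.04e3  # volts (~8 keV beam); recomputes uc.wavelength
+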
+ # space group number + @property + def sgnum(self): + return self._sym_sgnum + + @sgnum.setter + def sgnum(self, val): + if not(isinstance(val, int)): + raise ValueError('space group should be integer') + if not((val >= 1) and (val <= 230)): + raise ValueError('space group number should be between 1 and 230.') + + self._sym_sgnum = val + self.sg_hmsymbol = symbols.pstr_spacegroup[val-1].strip() + + self.SYM_SG, self.SYM_PG_d, self.SYM_PG_d_laue, \ + self.centrosymmetric, self.symmorphic = \ + symmetry.GenerateSGSym(self.sgnum, self.sgsetting) + + self.latticeType = symmetry.latticeType(self.sgnum) + + self.nsgsym = self.SYM_SG.shape[0] + self.npgsym = self.SYM_PG_d.shape[0] + + self.GenerateRecipPGSym() + + ''' + asymmetric positions due to space group symmetry + used for structure factor calculations + ''' + self.CalcPositions() + self.GetPgLg() + + ''' + SS 11/10/2020 added cartesian PG sym for reducing directions + to standard stereographic triangle + ''' + self.GenerateCartesianPGSym() + + ''' + SS 11/11/2020 adding the sphere_sector class initialization here + ''' + self.sphere_sector = sphere_sector.sector(self._pointGroup, + self._laueGroup, + self._supergroup, + self._supergroup_laue) + self.CalcDensity() + self.calc_absorption_length() + + @property + def pgnum(self): + return constants.SYM_PG_to_PGNUM[self.point_group] + + @property + def point_group(self): + return self._pointGroup + + @property + def atom_pos(self): + return self._atom_pos + + @atom_pos.setter + def atom_pos(self, val): + """ + SS 03/08/2021 fixing some issues with + updating asymmetric positions after + updating atominfo + fixing + """ + if hasattr(self, 'atom_type'): + if self.atom_ntype != val.shape[0]: + msg = (f"incorrect number of atom positions." + f" number of atom type = {self.atom_ntype} " + f" and number of" + f" atom positions = {val.shape[0]}.") + raise ValueError(msg) + + self._atom_pos = val + """ + update only if its not the first time + """ + if hasattr(self, 'asym_pos'): + self.CalcPositions() + + if hasattr(self, 'density'): + self.CalcDensity() + self.calc_absorption_length() + + @property + def atom_ntype(self): + return self.atom_type.shape[0] + + # asymmetric positions in unit cell + @property + def asym_pos(self): + return self._asym_pos + + @asym_pos.setter + def asym_pos(self, val): + assert(type(val) == list),\ + 'input type to asymmetric positions should be list' + self._asym_pos = val + + @property + def numat(self): + return self._numat + + @numat.setter + def numat(self, val): + assert(val.shape[0] == + self.atom_ntype), 'shape of numat is not consistent' + self._numat = val + + # direct metric tensor is read only + @property + def dmt(self): + return self._dmt + + # reciprocal metric tensor is read only + @property + def rmt(self): + return self._rmt + + # direct structure matrix is read only + @property + def dsm(self): + return self._dsm + + # reciprocal structure matrix is read only + @property + def rsm(self): + return self._rsm + + @property + def num_atom(self): + return np.sum(self.numat) + + @property + def vol(self): + return self._vol + + @property + def vol_per_atom(self): + # vol per atom in A^3 + return 1e3*self.vol/self.num_atom + + +_rqpDict = { + 'triclinic': (tuple(range(6)), lambda p: p), # all 6 + # note beta + 'monoclinic': ((0, 1, 2, 4), lambda p: (p[0], p[1], p[2], 90, p[3], 90)), + 'orthorhombic': ((0, 1, 2), lambda p: (p[0], p[1], p[2], 90, 90, 90)), + 'tetragonal': ((0, 2), lambda p: (p[0], p[0], p[1], 90, 90, 90)), + 'trigonal': ((0, 2), lambda p: 
(p[0], p[0], p[1], 90, 90, 120)), + 'hexagonal': ((0, 2), lambda p: (p[0], p[0], p[1], 90, 90, 120)), + 'cubic': ((0,), lambda p: (p[0], p[0], p[0], 90, 90, 90)), +} + +_lpname = np.array(['a', 'b', 'c', 'alpha', 'beta', 'gamma']) + +laue_1 = 'ci' +laue_2 = 'c2h' +laue_3 = 'd2h' +laue_4 = 'c4h' +laue_5 = 'd4h' +laue_6 = 's6' +laue_7 = 'd3d' +laue_8 = 'c6h' +laue_9 = 'd6h' +laue_10 = 'th' +laue_11 = 'oh' + + +''' +these supergroups are the three exceptions to the coloring scheme +the point groups are not topological and can't have no discontinuities +in the IPF coloring scheme. they are -1, -3 and -4 point groups. +''' +supergroup_00 = 'c1' +supergroup_01 = 'c4' +supergroup_02 = 'c3' + +supergroup_1 = 'cs' +supergroup_2 = 'c2v' +supergroup_3 = 'd2h' +supergroup_4 = 'c4v' +supergroup_5 = 'd4h' +supergroup_6 = 'c3v' +supergroup_7 = 'c6v' +supergroup_8 = 'd3h' +supergroup_9 = 'd6h' +supergroup_10 = 'td' +supergroup_11 = 'oh' + + +def _sgrange(min, max): return tuple(range(min, max + 1)) # inclusive range + +''' +11/20/2020 SS added supergroup to the list which is used +for coloring the fundamental zone IPF +''' +_pgDict = { + _sgrange(1, 1): ('c1', laue_1, + supergroup_1, supergroup_00), # Triclinic + _sgrange(2, 2): ('ci', laue_1, \ + supergroup_00, supergroup_00), # laue 1 + _sgrange(3, 5): ('c2', laue_2, \ + supergroup_2, supergroup_3), # Monoclinic + _sgrange(6, 9): ('cs', laue_2, \ + supergroup_1, supergroup_3), + _sgrange(10, 15): ('c2h', laue_2, \ + supergroup_3, supergroup_3), # laue 2 + _sgrange(16, 24): ('d2', laue_3, \ + supergroup_3, supergroup_3), # Orthorhombic + _sgrange(25, 46): ('c2v', laue_3, \ + supergroup_2, supergroup_3), + _sgrange(47, 74): ('d2h', laue_3, \ + supergroup_3, supergroup_3), # laue 3 + _sgrange(75, 80): ('c4', laue_4, \ + supergroup_4, supergroup_5), # Tetragonal + _sgrange(81, 82): ('s4', laue_4, \ + supergroup_01, supergroup_5), + _sgrange(83, 88): ('c4h', laue_4, \ + supergroup_5, supergroup_5), # laue 4 + _sgrange(89, 98): ('d4', laue_5, \ + supergroup_5, supergroup_5), + _sgrange(99, 110): ('c4v', laue_5, \ + supergroup_4, supergroup_5), + _sgrange(111, 122): ('d2d', laue_5, \ + supergroup_5, supergroup_5), + _sgrange(123, 142): ('d4h', laue_5, \ + supergroup_5, supergroup_5), # laue 5 + # Trigonal # laue 6 [also c3i] + _sgrange(143, 146): ('c3', laue_6, \ + supergroup_6, supergroup_02), + _sgrange(147, 148): ('s6', laue_6, \ + supergroup_02, supergroup_02), + _sgrange(149, 155): ('d3', laue_7, \ + supergroup_7, supergroup_9), + _sgrange(156, 161): ('c3v', laue_7, \ + supergroup_6, supergroup_9), + _sgrange(162, 167): ('d3d', laue_7, \ + supergroup_9, supergroup_9), # laue 7 + _sgrange(168, 173): ('c6', laue_8, \ + supergroup_7, supergroup_9), # Hexagonal + _sgrange(174, 174): ('c3h', laue_8, \ + supergroup_7, supergroup_9), + _sgrange(175, 176): ('c6h', laue_8, \ + supergroup_9, supergroup_9), # laue 8 + _sgrange(177, 182): ('d6', laue_9, \ + supergroup_9, supergroup_9), + _sgrange(183, 186): ('c6v', laue_9, \ + supergroup_7, supergroup_9), + _sgrange(187, 190): ('d3h', laue_9, \ + supergroup_9, supergroup_9), + _sgrange(191, 194): ('d6h', laue_9, \ + supergroup_9, supergroup_9), # laue 9 + _sgrange(195, 199): ('t', laue_10, \ + supergroup_10, supergroup_11), # Cubic + _sgrange(200, 206): ('th', laue_10, \ + supergroup_11, supergroup_11), # laue 10 + _sgrange(207, 214): ('o', laue_11, \ + supergroup_11, supergroup_11), + _sgrange(215, 220): ('td', laue_11, \ + supergroup_10, supergroup_11), + _sgrange(221, 230): ('oh', laue_11, \ + supergroup_11, 
supergroup_11) # laue 11 +} + +''' +this dictionary has the mapping from laue group to number of elastic +constants needed in the voight 6x6 stiffness matrix. the compliance +matrix is just the inverse of the stiffness matrix +taken from International Tables for Crystallography Volume H +Powder diffraction +Edited by C. J. Gilmore, J. A. Kaduk and H. Schenk +''' +# independent components for the triclinic laue group +type1 = [] +for i in range(6): + for j in range(i, 6): + type1.append((i, j)) +type1 = tuple(type1) + +# independent components for the monoclinic laue group +# C14 = C15 = C24 = C25 = C34 = C35 = C46 = C56 = 0 +type2 = list(type1) +type2.remove((0, 3)) +type2.remove((0, 4)) +type2.remove((1, 3)) +type2.remove((1, 4)) +type2.remove((2, 3)) +type2.remove((2, 4)) +type2.remove((3, 5)) +type2.remove((4, 5)) +type2 = tuple(type2) + +# independent components for the orthorhombic laue group +# Above, plus C16 = C26 = C36 = C45 = 0 +type3 = list(type2) +type3.remove((0, 5)) +type3.remove((1, 5)) +type3.remove((2, 5)) +type3.remove((3, 4)) +type3 = tuple(type3) + +# independent components for the cyclic tetragonal laue group +# monoclinic, plus C36 = C45 = 0, C22 = C11, C23 = C13, C26 = −C16, C55 = C44 +type4 = list(type2) +type4.remove((2, 5)) +type4.remove((3, 4)) +type4.remove((1, 1)) +type4.remove((1, 2)) +type4.remove((1, 5)) +type4.remove((4, 4)) +type4 = tuple(type4) + +# independent components for the dihedral tetragonal laue group +# Above, plus C16 = 0 +type5 = list(type4) +type5.remove((0, 5)) +type5 = tuple(type5) + +# independent components for the trigonal laue group +# C16 = C26 = C34 = C35 = C36 = C45 = 0, C22 = C11, C23 = C13, C24 = −C14, +# C25 = −C15, C46 = −C15, C55 = C44, C56 = C14, C66 = (C11 − C12)/2 +type6 = list(type1) +type6.remove((0, 5)) +type6.remove((1, 5)) +type6.remove((2, 3)) +type6.remove((2, 4)) +type6.remove((2, 5)) +type6.remove((3, 4)) +type6.remove((1, 1)) +type6.remove((1, 2)) +type6.remove((1, 3)) +type6.remove((1, 4)) +type6.remove((3, 5)) +type6.remove((4, 4)) +type6.remove((4, 5)) +type6.remove((5, 5)) +type6 = tuple(type6) + +# independent components for the rhombohedral laue group +# Above, plus C15 = 0 +type7 = list(type6) +type7.remove((0, 4)) +type7 = tuple(type7) + +# independent components for the hexagonal laue group +# Above, plus C14 = 0 +type8 = list(type7) +type8.remove((0, 3)) +type8 = tuple(type8) + +# independent components for the cubic laue group +# As for dihedral tetragonal, plus C13 = C12, C33 = C11, C66 = C44 +type9 = list(type5) +type9.remove((0, 2)) +type9.remove((2, 2)) +type9.remove((5, 5)) + +''' +these lambda functions take care of the equality constrains in the +matrices. 
if there are no equality constraints, then the identity +function is used +C22 = C11, C23 = C13, C24 = −C14, +# C25 = −C15, C46 = −C15, C55 = C44, C56 = C14, C66 = (C11 − C12)/2 +''' + + +def identity(x): return x + + +def C_cyclictet_eq(x): + x[1, 1] = x[0, 0] + x[1, 2] = x[0, 2] + x[1, 5] = -x[0, 5] + x[4, 4] = x[3, 3] + return x + + +def C_trigonal_eq(x): + x[1, 1] = x[0, 0] + x[1, 2] = x[0, 2] + x[1, 3] = -x[0, 3] + x[1, 4] = -x[0, 4] + x[3, 5] = -x[0, 4] + x[4, 4] = x[3, 3] + x[4, 5] = x[0, 3] + x[5, 5] = 0.5*(x[0, 0]-x[0, 1]) + return x + + +def C_cubic_eq(x): + x[0, 2] = x[0, 1] + x[2, 2] = x[0, 0] + x[5, 5] = x[3, 3] + x[1, 1] = x[0, 0] + x[1, 2] = x[0, 2] + x[1, 5] = -x[0, 5] + x[4, 4] = x[3, 3] + return x + + +_StiffnessDict = { + # triclinic, all 21 components in upper triangular matrix needed + laue_1: [type1, identity], + laue_2: [type2, identity], # monoclinic, 13 components needed + laue_3: [type3, identity], # orthorhombic, 9 components needed + laue_4: [type4, C_cyclictet_eq], # cyclic tetragonal, 7 components needed + # dihedral tetragonal, 6 components needed + laue_5: [type5, C_cyclictet_eq], + laue_6: [type6, C_trigonal_eq], # trigonal I, 7 components + laue_7: [type7, C_trigonal_eq], # rhombohedral, 6 components + laue_8: [type8, C_trigonal_eq], # cyclic hexagonal, 5 components needed + laue_9: [type8, C_trigonal_eq], # dihedral hexagonal, 5 components + laue_10: [type9, C_cubic_eq], # cubic, 3 components + laue_11: [type9, C_cubic_eq] # cubic, 3 components +} diff --git a/hexrd/resources/instrument_templates/__init__.py b/hexrd/hedm/preprocess/__init__.py similarity index 100% rename from hexrd/resources/instrument_templates/__init__.py rename to hexrd/hedm/preprocess/__init__.py diff --git a/hexrd/preprocess/argument_classes_factory.py b/hexrd/hedm/preprocess/argument_classes_factory.py similarity index 100% rename from hexrd/preprocess/argument_classes_factory.py rename to hexrd/hedm/preprocess/argument_classes_factory.py diff --git a/hexrd/preprocess/preprocessors.py b/hexrd/hedm/preprocess/preprocessors.py similarity index 100% rename from hexrd/preprocess/preprocessors.py rename to hexrd/hedm/preprocess/preprocessors.py diff --git a/hexrd/preprocess/profiles.py b/hexrd/hedm/preprocess/profiles.py similarity index 100% rename from hexrd/preprocess/profiles.py rename to hexrd/hedm/preprocess/profiles.py diff --git a/hexrd/preprocess/yaml_internals.py b/hexrd/hedm/preprocess/yaml_internals.py similarity index 100% rename from hexrd/preprocess/yaml_internals.py rename to hexrd/hedm/preprocess/yaml_internals.py diff --git a/hexrd/sampleOrientations/__init__.py b/hexrd/hedm/sampleOrientations/__init__.py similarity index 100% rename from hexrd/sampleOrientations/__init__.py rename to hexrd/hedm/sampleOrientations/__init__.py diff --git a/hexrd/sampleOrientations/conversions.py b/hexrd/hedm/sampleOrientations/conversions.py similarity index 100% rename from hexrd/sampleOrientations/conversions.py rename to hexrd/hedm/sampleOrientations/conversions.py diff --git a/hexrd/sampleOrientations/rfz.py b/hexrd/hedm/sampleOrientations/rfz.py similarity index 100% rename from hexrd/sampleOrientations/rfz.py rename to hexrd/hedm/sampleOrientations/rfz.py diff --git a/hexrd/sampleOrientations/sampleRFZ.py b/hexrd/hedm/sampleOrientations/sampleRFZ.py similarity index 100% rename from hexrd/sampleOrientations/sampleRFZ.py rename to hexrd/hedm/sampleOrientations/sampleRFZ.py diff --git a/hexrd/xrdutil/__init__.py b/hexrd/hedm/xrdutil/__init__.py similarity index 100% rename from 
hexrd/xrdutil/__init__.py
rename to hexrd/hedm/xrdutil/__init__.py
diff --git a/hexrd/hedm/xrdutil/utils.py b/hexrd/hedm/xrdutil/utils.py
new file mode 100644
index 000000000..2cbae2b6f
--- /dev/null
+++ b/hexrd/hedm/xrdutil/utils.py
@@ -0,0 +1,1516 @@
+#! /usr/bin/env python3
+# ============================================================
+# Copyright (c) 2012, Lawrence Livermore National Security, LLC.
+# Produced at the Lawrence Livermore National Laboratory.
+# Written by Joel Bernier and others.
+# LLNL-CODE-529294.
+# All rights reserved.
+#
+# This file is part of HEXRD. For details on downloading the source,
+# see the file COPYING.
+#
+# Please also see the file LICENSE.
+#
+# This program is free software; you can redistribute it and/or modify it under
+# the terms of the GNU Lesser General Public License (as published by the Free
+# Software Foundation) version 2.1 dated February 1999.
+#
+# This program is distributed in the hope that it will be useful, but
+# WITHOUT ANY WARRANTY; without even the IMPLIED WARRANTY OF MERCHANTABILITY
+# or FITNESS FOR A PARTICULAR PURPOSE. See the terms and conditions of the
+# GNU General Public License for more details.
+#
+# You should have received a copy of the GNU Lesser General Public
+# License along with this program (see file LICENSE); if not, write to
+# the Free Software Foundation, Inc., 59 Temple Place, Suite 330,
+# Boston, MA 02111-1307 USA or visit .
+# ============================================================
+
+
+from typing import Optional, Union, Any, Generator
+from hexrd.material.crystallography import PlaneData
+from hexrd.distortion.distortionabc import DistortionABC
+
+import numba
+import numpy as np
+
+from hexrd import constants
+from hexrd import matrixutil as mutil
+from hexrd import rotations as rot
+from hexrd import gridutil as gutil
+
+from hexrd.material.crystallography import processWavelength, PlaneData
+
+from hexrd.transforms import xfcapi
+from hexrd.valunits import valWUnit
+
+from hexrd import distortion as distortion_pkg
+
+from hexrd.deprecation import deprecated
+
+
+simlp = 'hexrd.instrument.hedm_instrument.HEDMInstrument.simulate_laue_pattern'
+
+# =============================================================================
+# PARAMETERS
+# =============================================================================
+
+distortion_key = 'distortion'
+
+d2r = piby180 = constants.d2r
+r2d = constants.r2d
+
+epsf = constants.epsf  # ~2.2e-16
+ten_epsf = 10 * epsf  # ~2.2e-15
+sqrt_epsf = constants.sqrt_epsf  # ~1.5e-8
+
+bHat_l_DFLT = constants.beam_vec.flatten()
+eHat_l_DFLT = constants.eta_vec.flatten()
+
+nans_1x2 = np.nan * np.ones((1, 2))
+
+# =============================================================================
+# CLASSES
+# =============================================================================
+
+
+class EtaOmeMaps(object):
+    """
+    find-orientations loads pickled eta-ome data, but CollapseOmeEta is not
+    pickleable, because it holds a list of ReadGE, each of which holds a
+    reference to an open file object, which is not pickleable.
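+
+    Example (hypothetical file name; a minimal usage sketch):
+
+        eta_ome = EtaOmeMaps('analysis_eta-ome_maps.npz')
+        eta_ome.save_eta_ome_maps('eta-ome_maps_copy.npz')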
+    """
+
+    def __init__(self, ome_eta_archive: str):
+        ome_eta: np.ndarray = np.load(ome_eta_archive, allow_pickle=True)
+
+        planeData_args = ome_eta['planeData_args']
+        planeData_hkls = ome_eta['planeData_hkls']
+        self.planeData = PlaneData(planeData_hkls, *planeData_args)
+        self.planeData.exclusions = ome_eta['planeData_excl']
+        self.dataStore = ome_eta['dataStore']
+        self.iHKLList = ome_eta['iHKLList']
+        self.etaEdges = ome_eta['etaEdges']
+        self.omeEdges = ome_eta['omeEdges']
+        self.etas = ome_eta['etas']
+        self.omegas = ome_eta['omegas']
+
+    def save_eta_ome_maps(self, filename: str) -> None:
+        """
+        eta_ome.dataStore
+        eta_ome.planeData
+        eta_ome.iHKLList
+        eta_ome.etaEdges
+        eta_ome.omeEdges
+        eta_ome.etas
+        eta_ome.omegas
+        """
+        args = np.array(self.planeData.getParams(), dtype=object)[:4]
+        args[2] = valWUnit('wavelength', 'length', args[2], 'angstrom')
+        hkls = np.vstack([i['hkl'] for i in self.planeData.hklDataList]).T
+        save_dict = {
+            'dataStore': self.dataStore,
+            'etas': self.etas,
+            'etaEdges': self.etaEdges,
+            'iHKLList': self.iHKLList,
+            'omegas': self.omegas,
+            'omeEdges': self.omeEdges,
+            'planeData_args': args,
+            'planeData_hkls': hkls,
+            'planeData_excl': self.planeData.exclusions,
+        }
+        np.savez_compressed(filename, **save_dict)
+
+
+# =============================================================================
+# FUNCTIONS
+# =============================================================================
+
+
+def _zproject(x: np.ndarray, y: np.ndarray):
+    return np.cos(x) * np.sin(y) - np.sin(x) * np.cos(y)
+
+
+def zproject_sph_angles(
+    invecs: np.ndarray,
+    chi: float = 0.0,
+    method: str = 'stereographic',
+    source: str = 'd',
+    use_mask: bool = False,
+    invert_z: bool = False,
+    rmat: Optional[np.ndarray] = None,
+) -> Union[np.ndarray, tuple[np.ndarray, np.ndarray]]:
+    """
+    Projects spherical angles to a 2-d mapping.
+
+    Parameters
+    ----------
+    invecs : array_like
+        The (n, 3) array of input points, interpreted via the 'source' kwarg.
+    chi : scalar, optional
+        The inclination angle of the sample frame. The default is 0..
+    method : str, optional
+        Mapping type spec, either 'stereographic' or 'equal-area'.
+        The default is 'stereographic'.
+    source : str, optional
+        The type specifier of the input vectors, either 'd', 'q', or 'g'.
+        'd' signifies unit diffraction vectors as (2theta, eta, omega),
+        'q' specifies unit scattering vectors as (2theta, eta, omega),
+        'g' specifies unit vectors in the sample frame as (x, y, z).
+        The default is 'd'.
+    use_mask : bool, optional
+        If True, trim points not on the +z hemisphere (polar angles > 90).
+        The default is False.
+    invert_z : bool, optional
+        If True, invert the Z-coordinates of the unit vectors calculated from
+        the input angles. The default is False.
+    rmat : numpy.ndarray, shape=(3, 3), optional
+        Array representing a change of basis (rotation) to apply to the
+        calculated unit vectors. The default is None.
+
+    Raises
+    ------
+    RuntimeError
+        If method not in ('stereographic', 'equal-area').
+
+    Returns
+    -------
+    numpy.ndarray or tuple
+        If use_mask = False, the array of n mapped input points with shape
+        (n, 2). If use_mask = True, the first element is the ndarray of
+        mapped points with shape (<=n, 2), and the second is a bool array
+        with shape (n,) marking the points that fell on the upper hemisphere.
+
+    Notes
+    -----
+    CAVEAT: +Z axis projections only!!!
+    TODO: check mask application.
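+
+    Example (hypothetical angles; a minimal sketch):
+    >>> angs = np.radians([[30., 0., 0.], [30., 90., 0.]])  # (tth, eta, ome)
+    >>> ppts = zproject_sph_angles(angs, method='stereographic', source='d')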
+ """ + assert isinstance(source, str), "source kwarg must be a string" + + invecs = np.atleast_2d(invecs) + if source.lower() == 'd': + spts_s = xfcapi.angles_to_dvec(invecs, chi=chi) + elif source.lower() == 'q': + spts_s = xfcapi.angles_to_gvec(invecs, chi=chi) + elif source.lower() == 'g': + spts_s = invecs + + if rmat is not None: + spts_s = np.dot(spts_s, rmat.T) + + if invert_z: + spts_s[:, 2] = -spts_s[:, 2] + + # filter based on hemisphere + if use_mask: + pzi = spts_s[:, 2] <= 0 + spts_s = spts_s[pzi, :] + + if method.lower() == 'stereographic': + ppts = np.vstack( + [ + spts_s[:, 0] / (1.0 - spts_s[:, 2]), + spts_s[:, 1] / (1.0 - spts_s[:, 2]), + ] + ).T + elif method.lower() == 'equal-area': + chords = spts_s + np.tile([0, 0, 1], (len(spts_s), 1)) + scl = np.tile(mutil.rowNorm(chords), (2, 1)).T + ucrd = mutil.unitVector( + np.hstack([chords[:, :2], np.zeros((len(spts_s), 1))]).T + ) + + ppts = ucrd[:2, :].T * scl + else: + raise RuntimeError(f"method '{method}' not recognized") + + if use_mask: + return ppts, pzi + else: + return ppts + + +def make_polar_net( + ndiv: int = 24, projection: str = 'stereographic', max_angle: float = 120.0 +) -> np.ndarray: + """ + TODO: options for generating net boundaries; fixed to Z proj. + """ + ndiv_tth = int(np.floor(0.5 * ndiv)) + 1 + wtths = np.radians( + np.linspace(0, 1, num=ndiv_tth, endpoint=True) * max_angle + ) + wetas = np.radians(np.linspace(-1, 1, num=ndiv + 1, endpoint=True) * 180.0) + weta_gen = np.radians(np.linspace(-1, 1, num=181, endpoint=True) * 180.0) + pts = [] + for eta in wetas: + net_ang = np.vstack( + [[wtths[0], wtths[-1]], np.tile(eta, 2), np.zeros(2)] + ).T + pts.append(zproject_sph_angles(net_ang, method=projection, source='d')) + pts.append(np.nan * np.ones((1, 2))) + for tth in wtths[1:]: + net_ang = np.vstack( + [tth * np.ones_like(weta_gen), weta_gen, np.zeros_like(weta_gen)] + ).T + pts.append(zproject_sph_angles(net_ang, method=projection, source='d')) + pts.append(nans_1x2) + + return np.vstack(pts) + + +validateAngleRanges = xfcapi.validate_angle_ranges + + +@deprecated(removal_date='2025-01-01') +def simulateOmeEtaMaps( + omeEdges, + etaEdges, + planeData, + expMaps, + chi=0.0, + etaTol=None, + omeTol=None, + etaRanges=None, + omeRanges=None, + bVec=constants.beam_vec, + eVec=constants.eta_vec, + vInv=constants.identity_6x1, +): + """ + Simulate spherical maps. + + Parameters + ---------- + omeEdges : TYPE + DESCRIPTION. + etaEdges : TYPE + DESCRIPTION. + planeData : TYPE + DESCRIPTION. + expMaps : (3, n) ndarray + DESCRIPTION. + chi : TYPE, optional + DESCRIPTION. The default is 0.. + etaTol : TYPE, optional + DESCRIPTION. The default is None. + omeTol : TYPE, optional + DESCRIPTION. The default is None. + etaRanges : TYPE, optional + DESCRIPTION. The default is None. + omeRanges : TYPE, optional + DESCRIPTION. The default is None. + bVec : TYPE, optional + DESCRIPTION. The default is [0, 0, -1]. + eVec : TYPE, optional + DESCRIPTION. The default is [1, 0, 0]. + vInv : TYPE, optional + DESCRIPTION. The default is [1, 1, 1, 0, 0, 0]. + + Returns + ------- + eta_ome : TYPE + DESCRIPTION. + + Notes + ----- + all angular info is entered in degrees + + ??? might want to creat module-level angluar unit flag + ??? 
might want to allow resvers delta omega + + """ + # convert to radians + etaEdges = np.radians(np.sort(etaEdges)) + omeEdges = np.radians(np.sort(omeEdges)) + + omeIndices = list(range(len(omeEdges))) + etaIndices = list(range(len(etaEdges))) + + i_max = omeIndices[-1] + j_max = etaIndices[-1] + + etaMin = etaEdges[0] + etaMax = etaEdges[-1] + omeMin = omeEdges[0] + omeMax = omeEdges[-1] + if omeRanges is None: + omeRanges = [ + [omeMin, omeMax], + ] + + if etaRanges is None: + etaRanges = [ + [etaMin, etaMax], + ] + + # signed deltas IN RADIANS + del_ome = omeEdges[1] - omeEdges[0] + del_eta = etaEdges[1] - etaEdges[0] + + delOmeSign = np.sign(del_eta) + + # tolerances are in degrees (easier) + if omeTol is None: + omeTol = abs(del_ome) + else: + omeTol = np.radians(omeTol) + if etaTol is None: + etaTol = abs(del_eta) + else: + etaTol = np.radians(etaTol) + + # pixel dialtions + dpix_ome = round(omeTol / abs(del_ome)) + dpix_eta = round(etaTol / abs(del_eta)) + + i_dil, j_dil = np.meshgrid( + np.arange(-dpix_ome, dpix_ome + 1), np.arange(-dpix_eta, dpix_eta + 1) + ) + + # get symmetrically expanded hkls from planeData + sym_hkls = planeData.getSymHKLs() + nhkls = len(sym_hkls) + + # make things C-contiguous for use in xfcapi functions + expMaps = np.array(expMaps.T, order='C') + nOrs = len(expMaps) + + bMat = np.array(planeData.latVecOps['B'], order='C') + wlen = planeData.wavelength + + bVec = np.array(bVec.flatten(), order='C') + eVec = np.array(eVec.flatten(), order='C') + vInv = np.array(vInv.flatten(), order='C') + + eta_ome = np.zeros((nhkls, max(omeIndices), max(etaIndices)), order='C') + for iHKL in range(nhkls): + these_hkls = np.ascontiguousarray(sym_hkls[iHKL].T, dtype=float) + for iOr in range(nOrs): + rMat_c = xfcapi.make_rmat_of_expmap(expMaps[iOr, :]) + angList = np.vstack( + xfcapi.oscill_angles_of_hkls( + these_hkls, + chi, + rMat_c, + bMat, + wlen, + beam_vec=bVec, + eta_vec=eVec, + v_inv=vInv, + ) + ) + if not np.all(np.isnan(angList)): + # + angList[:, 1] = rot.mapAngle( + angList[:, 1], [etaEdges[0], etaEdges[0] + 2 * np.pi] + ) + angList[:, 2] = rot.mapAngle( + angList[:, 2], [omeEdges[0], omeEdges[0] + 2 * np.pi] + ) + # + # do eta ranges + angMask_eta = np.zeros(len(angList), dtype=bool) + for etas in etaRanges: + angMask_eta = np.logical_or( + angMask_eta, + xfcapi.validate_angle_ranges( + angList[:, 1], etas[0], etas[1] + ), + ) + + # do omega ranges + ccw = True + angMask_ome = np.zeros(len(angList), dtype=bool) + for omes in omeRanges: + if omes[1] - omes[0] < 0: + ccw = False + angMask_ome = np.logical_or( + angMask_ome, + xfcapi.validate_angle_ranges( + angList[:, 2], omes[0], omes[1], ccw=ccw + ), + ) + + # mask angles list, hkls + angMask = np.logical_and(angMask_eta, angMask_ome) + + culledTTh = angList[angMask, 0] + culledEta = angList[angMask, 1] + culledOme = angList[angMask, 2] + + for iTTh in range(len(culledTTh)): + culledEtaIdx = np.where(etaEdges - culledEta[iTTh] > 0)[0] + if len(culledEtaIdx) > 0: + culledEtaIdx = culledEtaIdx[0] - 1 + if culledEtaIdx < 0: + culledEtaIdx = None + else: + culledEtaIdx = None + culledOmeIdx = np.where(omeEdges - culledOme[iTTh] > 0)[0] + if len(culledOmeIdx) > 0: + if delOmeSign > 0: + culledOmeIdx = culledOmeIdx[0] - 1 + else: + culledOmeIdx = culledOmeIdx[-1] + if culledOmeIdx < 0: + culledOmeIdx = None + else: + culledOmeIdx = None + + if culledEtaIdx is not None and culledOmeIdx is not None: + if dpix_ome > 0 or dpix_eta > 0: + i_sup = omeIndices[culledOmeIdx] + np.array( + [i_dil.flatten()], dtype=int + ) + 
j_sup = etaIndices[culledEtaIdx] + np.array( + [j_dil.flatten()], dtype=int + ) + + # catch shit that falls off detector... + # maybe make this fancy enough to wrap at 2pi? + idx_mask = np.logical_and( + np.logical_and(i_sup >= 0, i_sup < i_max), + np.logical_and(j_sup >= 0, j_sup < j_max), + ) + eta_ome[iHKL, i_sup[idx_mask], j_sup[idx_mask]] = ( + 1.0 + ) + else: + eta_ome[ + iHKL, + omeIndices[culledOmeIdx], + etaIndices[culledEtaIdx], + ] = 1.0 + return eta_ome + + +def _fetch_hkls_from_planedata(pd: PlaneData): + return np.hstack(pd.getSymHKLs(withID=True)).T + + +def _filter_hkls_eta_ome( + hkls: np.ndarray, + angles: np.ndarray, + eta_range: list[tuple[float]], + ome_range: list[tuple[float]], + return_mask: bool = False, +) -> Union[ + tuple[np.ndarray, np.ndarray], tuple[np.ndarray, np.ndarray, np.ndarray] +]: + """ + given a set of hkls and angles, filter them by the + eta and omega ranges + """ + angMask_eta = np.zeros(len(angles), dtype=bool) + for etas in eta_range: + angMask_eta = np.logical_or( + angMask_eta, xfcapi.validate_angle_ranges( + angles[:, 1], etas[0], etas[1] + ) + ) + + ccw = True + angMask_ome = np.zeros(len(angles), dtype=bool) + for omes in ome_range: + if omes[1] - omes[0] < 0: + ccw = False + angMask_ome = np.logical_or( + angMask_ome, + xfcapi.validate_angle_ranges( + angles[:, 2], omes[0], omes[1], ccw=ccw + ), + ) + + angMask = np.logical_and(angMask_eta, angMask_ome) + + allAngs = angles[angMask, :] + allHKLs = np.vstack([hkls, hkls])[angMask, :] + + if return_mask: + return allAngs, allHKLs, angMask + else: + return allAngs, allHKLs + + +def _project_on_detector_plane( + allAngs: np.ndarray, + rMat_d: np.ndarray, + rMat_c: np.ndarray, + chi: float, + tVec_d: np.ndarray, + tVec_c: np.ndarray, + tVec_s: np.ndarray, + distortion: DistortionABC, + beamVec: np.ndarray = constants.beam_vec, +) -> tuple[np.ndarray, np.ndarray, np.ndarray]: + """ + utility routine for projecting a list of (tth, eta, ome) onto the + detector plane parameterized by the args + """ + gVec_cs = xfcapi.angles_to_gvec( + allAngs, chi=chi, rmat_c=rMat_c, beam_vec=beamVec + ) + + rMat_ss = xfcapi.make_sample_rmat(chi, allAngs[:, 2]) + + tmp_xys = xfcapi.gvec_to_xy( + gVec_cs, + rMat_d, + rMat_ss, + rMat_c, + tVec_d, + tVec_s, + tVec_c, + beam_vec=beamVec, + ) + + valid_mask = ~(np.isnan(tmp_xys[:, 0]) | np.isnan(tmp_xys[:, 1])) + + det_xy = np.atleast_2d(tmp_xys[valid_mask, :]) + + # apply distortion if specified + if distortion is not None: + det_xy = distortion.apply_inverse(det_xy) + + return det_xy, rMat_ss, valid_mask + + +def _project_on_detector_cylinder( + allAngs: np.ndarray, + chi: float, + tVec_d: np.ndarray, + caxis: np.ndarray, + paxis: np.ndarray, + radius: float, + physical_size: np.ndarray, + angle_extent: float, + distortion: DistortionABC = None, + beamVec: np.ndarray = constants.beam_vec, + etaVec: np.ndarray = constants.eta_vec, + tVec_s: np.ndarray = constants.zeros_3x1, + rmat_s: np.ndarray = constants.identity_3x3, + tVec_c: np.ndarray = constants.zeros_3x1, +) -> tuple[np.ndarray, np.ndarray, np.ndarray]: + """ + utility routine for projecting a list of (tth, eta, ome) onto the + detector plane parameterized by the args. 
this function does the + computation for a cylindrical detector + """ + dVec_cs = xfcapi.angles_to_dvec( + allAngs, chi=chi, rmat_c=np.eye(3), beam_vec=beamVec, eta_vec=etaVec + ) + + rMat_ss = np.tile(rmat_s, [allAngs.shape[0], 1, 1]) + + tmp_xys, valid_mask = _dvecToDetectorXYcylinder( + dVec_cs, + tVec_d, + caxis, + paxis, + radius, + physical_size, + angle_extent, + tVec_s=tVec_s, + rmat_s=rmat_s, + tVec_c=tVec_c, + ) + + det_xy = np.atleast_2d(tmp_xys[valid_mask, :]) + + # apply distortion if specified + if distortion is not None: + det_xy = distortion.apply_inverse(det_xy) + + return det_xy, rMat_ss, valid_mask + + +def _dvecToDetectorXYcylinder( + dVec_cs: np.ndarray, + tVec_d: np.ndarray, + caxis: np.ndarray, + paxis: np.ndarray, + radius: float, + physical_size: np.ndarray, + angle_extent: float, + tVec_s: np.ndarray = constants.zeros_3x1, + tVec_c: np.ndarray = constants.zeros_3x1, + rmat_s: np.ndarray = constants.identity_3x3, +) -> tuple[np.ndarray, np.ndarray]: + + cvec = _unitvec_to_cylinder( + dVec_cs, + caxis, + paxis, + radius, + tVec_d, + tVec_s=tVec_s, + tVec_c=tVec_c, + rmat_s=rmat_s, + ) + + cvec_det, valid_mask = _clip_to_cylindrical_detector( + cvec, + tVec_d, + caxis, + paxis, + radius, + physical_size, + angle_extent, + tVec_s=tVec_s, + tVec_c=tVec_c, + rmat_s=rmat_s, + ) + + xy_det = _dewarp_from_cylinder( + cvec_det, + tVec_d, + caxis, + paxis, + radius, + tVec_s=tVec_s, + tVec_c=tVec_c, + rmat_s=rmat_s, + ) + + return xy_det, valid_mask + + +def _unitvec_to_cylinder( + uvw: np.ndarray, + caxis: np.ndarray, + paxis: np.ndarray, + radius: float, + tvec: np.ndarray, + tVec_s: np.ndarray = constants.zeros_3x1, + tVec_c: np.ndarray = constants.zeros_3x1, + rmat_s: np.ndarray = constants.identity_3x3, +) -> np.ndarray: + """ + get point where unitvector uvw + intersect the cylindrical detector. + this will give points which are + outside the actual panel. 
the points + will be clipped to the panel later + + Parameters + ---------- + uvw : numpy.ndarray + unit vectors stacked row wise (nx3) shape + + Returns + ------- + numpy.ndarray + (x,y,z) vectors point which intersect with + the cylinder with (nx3) shape + """ + naxis = np.cross(caxis, paxis) + naxis = naxis / np.linalg.norm(naxis) + + tvec_c_l = np.dot(rmat_s, tVec_c) + + delta = tvec - (radius * naxis + np.squeeze(tVec_s) + np.squeeze(tvec_c_l)) + num = uvw.shape[0] + cx = np.atleast_2d(caxis).T + + delta_t = np.tile(delta, [num, 1]) + + t1 = np.dot(uvw, delta.T) + t2 = np.squeeze(np.dot(uvw, cx)) + t3 = np.squeeze(np.dot(delta, cx)) + t4 = np.dot(uvw, cx) + + A = np.squeeze(1 - t4**2) + B = t1 - t2 * t3 + C = radius**2 - np.linalg.norm(delta) ** 2 + t3**2 + + mask = np.abs(A) < 1e-10 + beta = np.zeros( + [ + num, + ] + ) + + beta[~mask] = (B[~mask] + np.sqrt(B[~mask] ** 2 + A[~mask] * C)) / A[~mask] + + beta[mask] = np.nan + return np.tile(beta, [3, 1]).T * uvw + + +def _clip_to_cylindrical_detector( + uvw: np.ndarray, + tVec_d: np.ndarray, + caxis: np.ndarray, + paxis: np.ndarray, + radius: float, + physical_size: np.ndarray, + angle_extent: float, + tVec_s: np.ndarray = constants.zeros_3x1, + tVec_c: np.ndarray = constants.zeros_3x1, + rmat_s: np.ndarray = constants.identity_3x3, +) -> tuple[np.ndarray, np.ndarray]: + """ + takes in the intersection points uvw + with the cylindrical detector and + prunes out points which don't actually + hit the actual panel + + Parameters + ---------- + uvw : numpy.ndarray + unit vectors stacked row wise (nx3) shape + + Returns + ------- + numpy.ndarray + (x,y,z) vectors point which fall on panel + with (mx3) shape + """ + # first get rid of points which are above + # or below the detector + naxis = np.cross(caxis, paxis) + num = uvw.shape[0] + + cx = np.atleast_2d(caxis).T + nx = np.atleast_2d(naxis).T + + tvec_c_l = np.dot(rmat_s, tVec_c) + + delta = tVec_d - ( + radius * naxis + np.squeeze(tVec_s) + np.squeeze(tvec_c_l) + ) + + delta_t = np.tile(delta, [num, 1]) + + uvwp = uvw - delta_t + dp = np.dot(uvwp, cx) + + uvwpxy = uvwp - np.tile(dp, [1, 3]) * np.tile(cx, [1, num]).T + + size = physical_size + tvec = np.atleast_2d(tVec_d).T + + # ycomp = uvwp - np.tile(tVec_d,[num, 1]) + mask1 = np.squeeze(np.abs(dp) > size[0] * 0.5) + uvwp[mask1, :] = np.nan + + # next get rid of points that fall outside + # the polar angle range + + ang = np.dot(uvwpxy, nx) / radius + ang[np.abs(ang) > 1.0] = np.sign(ang[np.abs(ang) > 1.0]) + + ang = np.arccos(ang) + mask2 = np.squeeze(ang >= angle_extent) + mask = np.logical_or(mask1, mask2) + res = uvw.copy() + res[mask, :] = np.nan + + return res, ~mask + + +def _dewarp_from_cylinder( + uvw: np.ndarray, + tVec_d: np.ndarray, + caxis: np.ndarray, + paxis: np.ndarray, + radius: float, + tVec_s: np.ndarray = constants.zeros_3x1, + tVec_c: np.ndarray = constants.zeros_3x1, + rmat_s: np.ndarray = constants.identity_3x3, +): + """ + routine to convert cylindrical coordinates + to cartesian coordinates in image frame + """ + naxis = np.cross(caxis, paxis) + naxis = naxis / np.linalg.norm(naxis) + + cx = np.atleast_2d(caxis).T + px = np.atleast_2d(paxis).T + nx = np.atleast_2d(naxis).T + num = uvw.shape[0] + + tvec_c_l = np.dot(rmat_s, tVec_c) + + delta = tVec_d - ( + radius * naxis + np.squeeze(tVec_s) + np.squeeze(tvec_c_l) + ) + + delta_t = np.tile(delta, [num, 1]) + + uvwp = uvw - delta_t + + uvwpxy = uvwp - np.tile(np.dot(uvwp, cx), [1, 3]) * np.tile(cx, [1, num]).T + + sgn = np.sign(np.dot(uvwpxy, px)) + sgn[sgn == 
0.0] = 1.0 + ang = np.dot(uvwpxy, nx) / radius + ang[np.abs(ang) > 1.0] = np.sign(ang[np.abs(ang) > 1.0]) + ang = np.arccos(ang) + xcrd = np.squeeze(radius * ang * sgn) + ycrd = np.squeeze(np.dot(uvwp, cx)) + return np.vstack((xcrd, ycrd)).T + + +def _warp_to_cylinder( + cart: np.ndarray, + tVec_d: np.ndarray, + radius: float, + caxis: np.ndarray, + paxis: np.ndarray, + tVec_s: np.ndarray = constants.zeros_3x1, + rmat_s: np.ndarray = constants.identity_3x3, + tVec_c: np.ndarray = constants.zeros_3x1, + normalize: bool = True, +) -> np.ndarray: + """ + routine to convert cartesian coordinates + in image frame to cylindrical coordinates + """ + tvec = np.atleast_2d(tVec_d).T + if tVec_s.ndim == 1: + tVec_s = np.atleast_2d(tVec_s).T + if tVec_c.ndim == 1: + tVec_c = np.atleast_2d(tVec_c).T + num = cart.shape[0] + naxis = np.cross(paxis, caxis) + x = cart[:, 0] + y = cart[:, 1] + th = x / radius + xp = radius * np.sin(th) + xn = radius * (1 - np.cos(th)) + + ccomp = np.tile(y, [3, 1]).T * np.tile(caxis, [num, 1]) + pcomp = np.tile(xp, [3, 1]).T * np.tile(paxis, [num, 1]) + ncomp = np.tile(xn, [3, 1]).T * np.tile(naxis, [num, 1]) + cart3d = pcomp + ccomp + ncomp + + tVec_c_l = np.dot(rmat_s, tVec_c) + + res = cart3d + np.tile(tvec - tVec_s - tVec_c_l, [1, num]).T + + if normalize: + return res / np.tile(np.linalg.norm(res, axis=1), [3, 1]).T + else: + return res + + +def _dvec_to_angs( + dvecs: np.ndarray, bvec: np.ndarray, evec: np.ndarray +) -> tuple[np.ndarray, np.ndarray]: + """ + convert diffraction vectors to (tth, eta) + angles in the 'eta' frame + dvecs is assumed to have (nx3) shape + """ + num = dvecs.shape[0] + exb = np.cross(evec, bvec) + exb = exb / np.linalg.norm(exb) + bxexb = np.cross(bvec, exb) + bxexb = bxexb / np.linalg.norm(bxexb) + + dp = np.dot(bvec, dvecs.T) + dp[np.abs(dp) > 1.0] = np.sign(dp[np.abs(dp) > 1.0]) + tth = np.arccos(dp) + + dvecs_p = dvecs - np.tile(dp, [3, 1]).T * np.tile(bvec, [num, 1]) + + dpx = np.dot(bxexb, dvecs_p.T) + dpy = np.dot(exb, dvecs_p.T) + eta = np.arctan2(dpy, dpx) + + return tth, eta + + +def simulateGVecs( + pd: PlaneData, + detector_params: np.ndarray, + grain_params: np.ndarray, + ome_range: list[tuple[float]] = [ + (-np.pi, np.pi), + ], + ome_period: tuple[float] = (-np.pi, np.pi), + eta_range: list[tuple[float]] = [ + (-np.pi, np.pi), + ], + panel_dims: list[tuple[float]] = [(-204.8, -204.8), (204.8, 204.8)], + pixel_pitch: tuple[float] = (0.2, 0.2), + distortion: DistortionABC = None, + beam_vector: np.ndarray = constants.beam_vec, +) -> tuple[np.ndarray, np.ndarray, np.ndarray, np.ndarray, np.ndarray]: + """ + returns valid_ids, valid_hkl, valid_ang, valid_xy, ang_ps + + panel_dims are [(xmin, ymin), (xmax, ymax)] in mm + + pixel_pitch is [row_size, column_size] in mm + + simulate the monochormatic scattering for a specified + + - space group + - wavelength + - orientation + - strain + - position + - detector parameters + - oscillation axis tilt (chi) + + subject to + + - omega (oscillation) ranges (list of (min, max) tuples) + - eta (azimuth) ranges + + pd................a hexrd.crystallography.PlaneData instance + detector_params...a (10,) ndarray containing the tilt angles (3), + translation (3), chi (1), and sample frame translation + (3) parameters + grain_params......a (12,) ndarray containing the exponential map (3), + translation (3), and inverse stretch tensor compnents + in Mandel-Voigt notation (6). 
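+
+    a minimal input-assembly sketch (all values below are placeholders,
+    and pd is assumed to be an existing PlaneData instance):
+
+        tilt, tvec_d = np.zeros(3), np.r_[0.0, 0.0, -1000.0]
+        chi, tvec_s = 0.0, np.zeros(3)
+        detector_params = np.hstack([tilt, tvec_d, chi, tvec_s])
+        grain_params = np.hstack(
+            [np.zeros(3), np.zeros(3), [1.0, 1.0, 1.0, 0.0, 0.0, 0.0]]
+        )
+        out = simulateGVecs(pd, detector_params, grain_params)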
+ + * currently only one panel is supported, but this will likely change soon + """ + bMat = pd.latVecOps['B'] + wlen = pd.wavelength + full_hkls = _fetch_hkls_from_planedata(pd) + + # extract variables for convenience + rMat_d = xfcapi.make_detector_rmat(detector_params[:3]) + tVec_d = np.ascontiguousarray(detector_params[3:6]) + chi = detector_params[6] + tVec_s = np.ascontiguousarray(detector_params[7:10]) + rMat_c = xfcapi.make_rmat_of_expmap(grain_params[:3]) + tVec_c = np.ascontiguousarray(grain_params[3:6]) + vInv_s = np.ascontiguousarray(grain_params[6:12]) + beam_vector = np.ascontiguousarray(beam_vector) + + # first find valid G-vectors + angList = np.vstack( + xfcapi.oscill_angles_of_hkls( + full_hkls[:, 1:], chi, rMat_c, bMat, wlen, v_inv=vInv_s, + beam_vec=beam_vector + ) + ) + allAngs, allHKLs = _filter_hkls_eta_ome( + full_hkls, angList, eta_range, ome_range + ) + + if len(allAngs) == 0: + valid_ids = [] + valid_hkl = [] + valid_ang = [] + valid_xy = [] + ang_ps = [] + else: + # ??? preallocate for speed? + det_xy, rMat_ss, _ = _project_on_detector_plane( + allAngs, rMat_d, rMat_c, chi, tVec_d, tVec_c, tVec_s, distortion, + beamVec=beam_vector + ) + + on_panel = np.logical_and( + np.logical_and( + det_xy[:, 0] >= panel_dims[0][0], + det_xy[:, 0] <= panel_dims[1][0], + ), + np.logical_and( + det_xy[:, 1] >= panel_dims[0][1], + det_xy[:, 1] <= panel_dims[1][1], + ), + ) + + op_idx = np.where(on_panel)[0] + + valid_ang = allAngs[op_idx, :] + valid_ang[:, 2] = xfcapi.mapAngle(valid_ang[:, 2], ome_period) + valid_ids = allHKLs[op_idx, 0] + valid_hkl = allHKLs[op_idx, 1:] + valid_xy = det_xy[op_idx, :] + ang_ps = angularPixelSize( + valid_xy, + pixel_pitch, + rMat_d, + # Provide only the first sample rotation matrix to angularPixelSize + # Perhaps this is something that can be improved in the future? + rMat_ss[0], + tVec_d, + tVec_s, + tVec_c, + distortion=distortion, + beamVec=beam_vector, + ) + + return valid_ids, valid_hkl, valid_ang, valid_xy, ang_ps + + +@deprecated(new_func=simlp, removal_date='2025-01-01') +def simulateLauePattern( + hkls, + bMat, + rmat_d, + tvec_d, + panel_dims, + panel_buffer=5, + minEnergy=8, + maxEnergy=24, + rmat_s=np.eye(3), + grain_params=None, + distortion=None, + beamVec=None, +): + + if beamVec is None: + beamVec = constants.beam_vec + + # parse energy ranges + multipleEnergyRanges = False + if hasattr(maxEnergy, '__len__'): + assert len(maxEnergy) == len( + minEnergy + ), 'energy cutoff ranges must have the same length' + multipleEnergyRanges = True + lmin = [processWavelength(e) for e in maxEnergy] + lmax = [processWavelength(e) for e in minEnergy] + else: + lmin = processWavelength(maxEnergy) + lmax = processWavelength(minEnergy) + + # process crystal rmats and inverse stretches + if grain_params is None: + grain_params = np.atleast_2d( + [0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 1.0, 1.0, 1.0, 0.0, 0.0, 0.0] + ) + + n_grains = len(grain_params) + + # dummy translation vector... 
make input + tvec_s = np.zeros((3, 1)) + + # number of hkls + nhkls_tot = hkls.shape[1] + + # unit G-vectors in crystal frame + ghat_c = mutil.unitVector(np.dot(bMat, hkls)) + + # pre-allocate output arrays + xy_det = np.nan * np.ones((n_grains, nhkls_tot, 2)) + hkls_in = np.nan * np.ones((n_grains, 3, nhkls_tot)) + angles = np.nan * np.ones((n_grains, nhkls_tot, 2)) + dspacing = np.nan * np.ones((n_grains, nhkls_tot)) + energy = np.nan * np.ones((n_grains, nhkls_tot)) + + """ + LOOP OVER GRAINS + """ + + for iG, gp in enumerate(grain_params): + rmat_c = xfcapi.make_rmat_of_expmap(gp[:3]) + tvec_c = gp[3:6].reshape(3, 1) + vInv_s = mutil.vecMVToSymm(gp[6:].reshape(6, 1)) + + # stretch them: V^(-1) * R * Gc + ghat_s_str = mutil.unitVector(np.dot(vInv_s, np.dot(rmat_c, ghat_c))) + ghat_c_str = np.dot(rmat_c.T, ghat_s_str) + + # project + dpts = xfcapi.gvec_to_xy( + ghat_c_str.T, + rmat_d, + rmat_s, + rmat_c, + tvec_d, + tvec_s, + tvec_c, + beam_vec=beamVec, + ).T + + # check intersections with detector plane + canIntersect = ~np.isnan(dpts[0, :]) + npts_in = sum(canIntersect) + + if np.any(canIntersect): + dpts = dpts[:, canIntersect].reshape(2, npts_in) + dhkl = hkls[:, canIntersect].reshape(3, npts_in) + + rmat_b = xfcapi.make_beam_rmat(beamVec, constants.eta_vec) + + # back to angles + tth_eta, gvec_l = xfcapi.xy_to_gvec( + dpts.T, rmat_d, rmat_s, tvec_d, tvec_s, tvec_c, rmat_b=rmat_b + ) + tth_eta = np.vstack(tth_eta).T + + # warp measured points + if distortion is not None: + dpts = distortion.apply_inverse(dpts) + + # plane spacings and energies + dsp = 1.0 / mutil.columnNorm(np.dot(bMat, dhkl)) + wlen = 2 * dsp * np.sin(0.5 * tth_eta[:, 0]) + + # find on spatial extent of detector + xTest = np.logical_and( + dpts[0, :] >= -0.5 * panel_dims[1] + panel_buffer, + dpts[0, :] <= 0.5 * panel_dims[1] - panel_buffer, + ) + yTest = np.logical_and( + dpts[1, :] >= -0.5 * panel_dims[0] + panel_buffer, + dpts[1, :] <= 0.5 * panel_dims[0] - panel_buffer, + ) + + onDetector = np.logical_and(xTest, yTest) + if multipleEnergyRanges: + validEnergy = np.zeros(len(wlen), dtype=bool) + for i in range(len(lmin)): + validEnergy = validEnergy | np.logical_and( + wlen >= lmin[i], wlen <= lmax[i] + ) + else: + validEnergy = np.logical_and(wlen >= lmin, wlen <= lmax) + + # index for valid reflections + keepers = np.where(np.logical_and(onDetector, validEnergy))[0] + + # assign output arrays + xy_det[iG][keepers, :] = dpts[:, keepers].T + hkls_in[iG][:, keepers] = dhkl[:, keepers] + angles[iG][keepers, :] = tth_eta[keepers, :] + dspacing[iG, keepers] = dsp[keepers] + energy[iG, keepers] = processWavelength(wlen[keepers]) + return xy_det, hkls_in, angles, dspacing, energy + + +@numba.njit(nogil=True, cache=True) +def _expand_pixels( + original: np.ndarray, w: float, h: float, result: np.ndarray +) -> np.ndarray: + hw = 0.5 * w + hh = 0.5 * h + for el in range(len(original)): + x, y = original[el, 0], original[el, 1] + result[el * 4 + 0, 0] = x - hw + result[el * 4 + 0, 1] = y - hh + result[el * 4 + 1, 0] = x + hw + result[el * 4 + 1, 1] = y - hh + result[el * 4 + 2, 0] = x + hw + result[el * 4 + 2, 1] = y + hh + result[el * 4 + 3, 0] = x - hw + result[el * 4 + 3, 1] = y + hh + + return result + + +@numba.njit(nogil=True, cache=True) +def _compute_max( + tth: np.ndarray, eta: np.ndarray, result: np.ndarray +) -> np.ndarray: + period = 2.0 * np.pi + hperiod = np.pi + for el in range(0, len(tth), 4): + max_tth = np.abs(tth[el + 0] - tth[el + 3]) + eta_diff = eta[el + 0] - eta[el + 3] + max_eta = 
np.abs(np.remainder(eta_diff + hperiod, period) - hperiod) + for i in range(3): + curr_tth = np.abs(tth[el + i] - tth[el + i + 1]) + eta_diff = eta[el + i] - eta[el + i + 1] + curr_eta = np.abs( + np.remainder(eta_diff + hperiod, period) - hperiod + ) + max_tth = np.maximum(curr_tth, max_tth) + max_eta = np.maximum(curr_eta, max_eta) + result[el // 4, 0] = max_tth + result[el // 4, 1] = max_eta + + return result + + +def angularPixelSize( + xy_det: np.ndarray, + xy_pixelPitch: tuple[float], + rMat_d: np.ndarray, + rMat_s: np.ndarray, + tVec_d: np.ndarray, + tVec_s: np.ndarray, + tVec_c: np.ndarray, + distortion: DistortionABC = None, + beamVec: np.ndarray = None, + etaVec: np.ndarray = None, +) -> np.ndarray: + """ + Calculate angular pixel sizes on a detector. + + * choices to beam vector and eta vector specs have been supressed + * assumes xy_det in UNWARPED configuration + """ + xy_det = np.atleast_2d(xy_det) + if distortion is not None: # !!! check this logic + xy_det = distortion.apply(xy_det) + if beamVec is None: + beamVec = constants.beam_vec + if etaVec is None: + etaVec = constants.eta_vec + + # Verify that rMat_s is only 2D (a single matrix). + # Arrays of matrices were previously provided, which `xy_to_gvec` + # cannot currently handle. + if rMat_s.ndim != 2: + msg = ( + f'rMat_s should have 2 dimensions, but has {rMat_s.ndim} ' + 'dimensions instead' + ) + raise ValueError(msg) + + xy_expanded = np.empty((len(xy_det) * 4, 2), dtype=xy_det.dtype) + xy_expanded = _expand_pixels( + xy_det, xy_pixelPitch[0], xy_pixelPitch[1], xy_expanded + ) + + rmat_b = xfcapi.make_beam_rmat(beamVec, etaVec) + + gvec_space, _ = xfcapi.xy_to_gvec( + xy_expanded, + rMat_d, + rMat_s, + tVec_d, + tVec_s, + tVec_c, + rmat_b=rmat_b, + ) + result = np.empty_like(xy_det) + return _compute_max(gvec_space[0], gvec_space[1], result) + + +def make_reflection_patches( + instr_cfg: dict[str, Any], + tth_eta: np.ndarray, + ang_pixel_size: np.ndarray, + omega: Optional[np.ndarray] = None, + tth_tol: float = 0.2, + eta_tol: float = 1.0, + rmat_c: np.ndarray = np.eye(3), + tvec_c: np.ndarray = np.zeros((3, 1)), + npdiv: int = 1, + quiet: bool = False, # TODO: Remove this parameter - it isn't used + compute_areas_func: np.ndarray = gutil.compute_areas, +) -> Generator[ + tuple[ + np.ndarray, np.ndarray, np.ndarray, np.ndarray, np.ndarray, np.ndarray + ], + None, + None, +]: + """Make angular patches on a detector. + + panel_dims are [(xmin, ymin), (xmax, ymax)] in mm + + pixel_pitch is [row_size, column_size] in mm + + FIXME: DISTORTION HANDING IS STILL A KLUDGE!!! + + patches are: + + delta tth + d ------------- ... ------------- + e | x | x | x | ... | x | x | x | + l ------------- ... ------------- + t . + a . + . + e ------------- ... ------------- + t | x | x | x | ... | x | x | x | + a ------------- ... 
------------- + + outputs are: + (tth_vtx, eta_vtx), + (x_vtx, y_vtx), + connectivity, + subpixel_areas, + (x_center, y_center), + (i_row, j_col) + """ + + # detector quantities + rmat_d = xfcapi.make_rmat_of_expmap( + np.r_[instr_cfg['detector']['transform']['tilt']] + ) + tvec_d = np.r_[instr_cfg['detector']['transform']['translation']] + pixel_size = instr_cfg['detector']['pixels']['size'] + + frame_nrows = instr_cfg['detector']['pixels']['rows'] + frame_ncols = instr_cfg['detector']['pixels']['columns'] + + panel_dims = ( + -0.5 * np.r_[frame_ncols * pixel_size[1], frame_nrows * pixel_size[0]], + 0.5 * np.r_[frame_ncols * pixel_size[1], frame_nrows * pixel_size[0]], + ) + row_edges = ( + np.arange(frame_nrows + 1)[::-1] * pixel_size[1] + panel_dims[0][1] + ) + col_edges = np.arange(frame_ncols + 1) * pixel_size[0] + panel_dims[0][0] + + # handle distortion + distortion = None + if distortion_key in instr_cfg['detector']: + distortion_cfg = instr_cfg['detector'][distortion_key] + if distortion_cfg is not None: + try: + func_name = distortion_cfg['function_name'] + dparams = distortion_cfg['parameters'] + distortion = distortion_pkg.get_mapping(func_name, dparams) + except KeyError: + raise RuntimeError("problem with distortion specification") + + # sample frame + chi = instr_cfg['oscillation_stage']['chi'] + tvec_s = np.r_[instr_cfg['oscillation_stage']['translation']] + bvec = np.r_[instr_cfg['beam']['vector']] + + # data to loop + # ??? WOULD IT BE CHEAPER TO CARRY ZEROS OR USE CONDITIONAL? + if omega is None: + full_angs = np.hstack([tth_eta, np.zeros((len(tth_eta), 1))]) + else: + full_angs = np.hstack([tth_eta, omega.reshape(len(tth_eta), 1)]) + + for angs, pix in zip(full_angs, ang_pixel_size): + # calculate bin edges for patch based on local angular pixel size + # tth + ntths, tth_edges = gutil.make_tolerance_grid( + bin_width=np.degrees(pix[0]), + window_width=tth_tol, + num_subdivisions=npdiv, + ) + + # eta + netas, eta_edges = gutil.make_tolerance_grid( + bin_width=np.degrees(pix[1]), + window_width=eta_tol, + num_subdivisions=npdiv, + ) + + # FOR ANGULAR MESH + conn = gutil.cellConnectivity(netas, ntths, origin='ll') + + # meshgrid args are (cols, rows), a.k.a (fast, slow) + m_tth, m_eta = np.meshgrid(tth_edges, eta_edges) + npts_patch = m_tth.size + + # calculate the patch XY coords from the (tth, eta) angles + # !!! will CHEAT and ignore the small perturbation the different + # omega angle values causes and simply use the central value + gVec_angs_vtx = np.tile(angs, (npts_patch, 1)) + np.radians( + np.vstack( + [m_tth.flatten(), m_eta.flatten(), np.zeros(npts_patch)] + ).T + ) + + xy_eval_vtx, _, _ = _project_on_detector_plane( + gVec_angs_vtx, + rmat_d, + rmat_c, + chi, + tvec_d, + tvec_c, + tvec_s, + distortion, + beamVec=bvec, + ) + + areas = compute_areas_func(xy_eval_vtx, conn) + + # EVALUATION POINTS + # !!! 
for lack of a better option will use centroids
+        tth_eta_cen = gutil.cellCentroids(
+            np.atleast_2d(gVec_angs_vtx[:, :2]), conn
+        )
+
+        gVec_angs = np.hstack(
+            [tth_eta_cen, np.tile(angs[2], (len(tth_eta_cen), 1))]
+        )
+
+        xy_eval, _, _ = _project_on_detector_plane(
+            gVec_angs,
+            rmat_d,
+            rmat_c,
+            chi,
+            tvec_d,
+            tvec_c,
+            tvec_s,
+            distortion,
+            beamVec=bvec,
+        )
+
+        row_indices = gutil.cellIndices(row_edges, xy_eval[:, 1])
+        col_indices = gutil.cellIndices(col_edges, xy_eval[:, 0])
+
+        yield (
+            (
+                (
+                    gVec_angs_vtx[:, 0].reshape(m_tth.shape),
+                    gVec_angs_vtx[:, 1].reshape(m_tth.shape),
+                ),
+                (
+                    xy_eval_vtx[:, 0].reshape(m_tth.shape),
+                    xy_eval_vtx[:, 1].reshape(m_tth.shape),
+                ),
+                conn,
+                areas.reshape(netas, ntths),
+                (
+                    xy_eval[:, 0].reshape(netas, ntths),
+                    xy_eval[:, 1].reshape(netas, ntths),
+                ),
+                (
+                    row_indices.reshape(netas, ntths),
+                    col_indices.reshape(netas, ntths),
+                ),
+            )
+        )
+
+
+def extract_detector_transformation(
+    detector_params: Union[dict[str, Any], np.ndarray]
+) -> tuple[np.ndarray, np.ndarray, float, np.ndarray]:
+    """
+    Construct arrays from detector parameters.
+
+    Goes from a 10-vector of detector parameters OR an instrument config
+    dictionary (from the YAML spec) to affine transformation arrays.
+
+    Parameters
+    ----------
+    detector_params : dict or array_like
+        Either an instrument config dictionary or an array of at least 10
+        detector parameters: tilt angles (3), translation (3), chi (1),
+        and sample frame translation (3).
+
+    Returns
+    -------
+    rMat_d : numpy.ndarray
+        The (3, 3) COB matrix for the detector frame.
+    tVec_d : numpy.ndarray
+        The (3, ) translation vector for the detector frame.
+    chi : float
+        The inclination angle of the oscillation stage.
+    tVec_s : numpy.ndarray
+        The (3, ) translation vector for the sample frame.
+
+    """
+    # extract variables for convenience
+    if isinstance(detector_params, dict):
+        rMat_d = xfcapi.make_rmat_of_expmap(
+            np.array(detector_params['detector']['transform']['tilt'])
+        )
+        tVec_d = np.r_[detector_params['detector']['transform']['translation']]
+        chi = detector_params['oscillation_stage']['chi']
+        tVec_s = np.r_[detector_params['oscillation_stage']['translation']]
+    else:
+        assert len(detector_params) >= 10, \
+            "list of detector parameters must have length >= 10"
+        rMat_d = xfcapi.make_rmat_of_expmap(detector_params[:3])
+        tVec_d = np.ascontiguousarray(detector_params[3:6])
+        chi = detector_params[6]
+        tVec_s = np.ascontiguousarray(detector_params[7:10])
+    return rMat_d, tVec_d, chi, tVec_s
diff --git a/hexrd/fitting/calibration/laue.py b/hexrd/laue/fitting/calibration/laue.py
similarity index 100%
rename from hexrd/fitting/calibration/laue.py
rename to hexrd/laue/fitting/calibration/laue.py
diff --git a/hexrd/laue/instrument/detector.py b/hexrd/laue/instrument/detector.py
new file mode 100644
index 000000000..db4f95d1a
--- /dev/null
+++ b/hexrd/laue/instrument/detector.py
@@ -0,0 +1,2086 @@
+from abc import abstractmethod
+import copy
+import os
+from typing import Optional
+
+from hexrd.instrument.constants import (
+    COATING_DEFAULT, FILTER_DEFAULTS, PHOSPHOR_DEFAULT
+)
+from hexrd.instrument.physics_package import AbstractPhysicsPackage
+import numpy as np
+import numba
+
+from hexrd import constants as ct
+from hexrd import distortion as distortion_pkg
+from hexrd import matrixutil as mutil
+from hexrd import xrdutil
+from hexrd.rotations import mapAngle
+
+from hexrd.material import crystallography
+from hexrd.material.crystallography import PlaneData
+
+from hexrd.transforms.xfcapi import (
+    xy_to_gvec,
+    gvec_to_xy,
+    make_beam_rmat,
+    make_rmat_of_expmap,
+    oscill_angles_of_hkls,
+    angles_to_dvec,
+)
+
+from hexrd.utils.decorators import memoize
+from hexrd.gridutil import cellIndices
+from hexrd.instrument import detector_coatings
+from hexrd.material.utils import (
+    calculate_linear_absorption_length,
+    
calculate_incoherent_scattering) + +distortion_registry = distortion_pkg.Registry() + +max_workers_DFLT = max(1, os.cpu_count() - 1) + +beam_energy_DFLT = 65.351 + +# Memoize these, so each detector can avoid re-computing if nothing +# has changed. +_lorentz_factor = memoize(crystallography.lorentz_factor) +_polarization_factor = memoize(crystallography.polarization_factor) + + +class Detector: + """ + Base class for 2D detectors with functions and properties + common to planar and cylindrical detectors. This class + will be inherited by both those classes. + """ + + __pixelPitchUnit = 'mm' + + # Abstract methods that must be redefined in derived classes + @property + @abstractmethod + def detector_type(self): + raise NotImplementedError + + @abstractmethod + def cart_to_angles( + self, + xy_data, + rmat_s=None, + tvec_s=None, + tvec_c=None, + apply_distortion=False, + ): + """ + Transform cartesian coordinates to angular. + + Parameters + ---------- + xy_data : TYPE + The (n, 2) array of n (x, y) coordinates to be transformed in + either the raw or ideal cartesian plane (see `apply_distortion` + kwarg below). + rmat_s : array_like, optional + The (3, 3) COB matrix for the sample frame. The default is None. + tvec_s : array_like, optional + The (3, ) translation vector for the sample frame. + The default is None. + tvec_c : array_like, optional + The (3, ) translation vector for the crystal frame. + The default is None. + apply_distortion : bool, optional + If True, apply distortion to the inpout cartesian coordinates. + The default is False. + + Returns + ------- + tth_eta : TYPE + DESCRIPTION. + g_vec : TYPE + DESCRIPTION. + + """ + raise NotImplementedError + + @abstractmethod + def angles_to_cart( + self, + tth_eta, + rmat_s=None, + tvec_s=None, + rmat_c=None, + tvec_c=None, + apply_distortion=False, + ): + """ + Transform angular coordinates to cartesian. + + Parameters + ---------- + tth_eta : array_like + The (n, 2) array of n (tth, eta) coordinates to be transformed. + rmat_s : array_like, optional + The (3, 3) COB matrix for the sample frame. The default is None. + tvec_s : array_like, optional + The (3, ) translation vector for the sample frame. + The default is None. + rmat_c : array_like, optional + (3, 3) COB matrix for the crystal frame. + The default is None. + tvec_c : array_like, optional + The (3, ) translation vector for the crystal frame. + The default is None. + apply_distortion : bool, optional + If True, apply distortion to take cartesian coordinates to the + "warped" configuration. The default is False. + + Returns + ------- + xy_det : array_like + The (n, 2) array on the n input coordinates in the . + + """ + raise NotImplementedError + + @abstractmethod + def cart_to_dvecs(self, xy_data): + """Convert cartesian coordinates to dvectors""" + raise NotImplementedError + + @abstractmethod + def pixel_angles(self, origin=ct.zeros_3): + raise NotImplementedError + + @abstractmethod + def pixel_tth_gradient(self, origin=ct.zeros_3): + raise NotImplementedError + + @abstractmethod + def pixel_eta_gradient(self, origin=ct.zeros_3): + raise NotImplementedError + + @abstractmethod + def calc_filter_coating_transmission(self, energy): + pass + + @property + @abstractmethod + def beam_position(self): + """ + returns the coordinates of the beam in the cartesian detector + frame {Xd, Yd, Zd}. NaNs if no intersection. 
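+
+        in concrete subclasses this is typically computed by
+        intersecting the beam vector with the detector surface.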
+ """ + raise NotImplementedError + + @property + def extra_config_kwargs(self): + return {} + + # End of abstract methods + + def __init__( + self, + rows=2048, + cols=2048, + pixel_size=(0.2, 0.2), + tvec=np.r_[0.0, 0.0, -1000.0], + tilt=ct.zeros_3, + name='default', + bvec=ct.beam_vec, + xrs_dist=None, + evec=ct.eta_vec, + saturation_level=None, + panel_buffer=None, + tth_distortion=None, + roi=None, + group=None, + distortion=None, + max_workers=max_workers_DFLT, + detector_filter: Optional[detector_coatings.Filter] = None, + detector_coating: Optional[detector_coatings.Coating] = None, + phosphor: Optional[detector_coatings.Phosphor] = None, + ): + """ + Instantiate a PlanarDetector object. + + Parameters + ---------- + rows : TYPE, optional + DESCRIPTION. The default is 2048. + cols : TYPE, optional + DESCRIPTION. The default is 2048. + pixel_size : TYPE, optional + DESCRIPTION. The default is (0.2, 0.2). + tvec : TYPE, optional + DESCRIPTION. The default is np.r_[0., 0., -1000.]. + tilt : TYPE, optional + DESCRIPTION. The default is ct.zeros_3. + name : TYPE, optional + DESCRIPTION. The default is 'default'. + bvec : TYPE, optional + DESCRIPTION. The default is ct.beam_vec. + evec : TYPE, optional + DESCRIPTION. The default is ct.eta_vec. + saturation_level : TYPE, optional + DESCRIPTION. The default is None. + panel_buffer : TYPE, optional + If a scalar or len(2) array_like, the interpretation is a border + in mm. If an array with shape (nrows, ncols), interpretation is a + boolean with True marking valid pixels. The default is None. + roi : TYPE, optional + DESCRIPTION. The default is None. + group : TYPE, optional + DESCRIPTION. The default is None. + distortion : TYPE, optional + DESCRIPTION. The default is None. + detector_filter : detector_coatings.Filter, optional + filter specifications including material type, + density and thickness. Used for absorption correction + calculations. + detector_coating : detector_coatings.Coating, optional + coating specifications including material type, + density and thickness. Used for absorption correction + calculations. + phosphor : detector_coatings.Phosphor, optional + phosphor specifications including material type, + density and thickness. Used for absorption correction + calculations. + + Returns + ------- + None. 
+ + """ + self._name = name + + self._rows = rows + self._cols = cols + + self._pixel_size_row = pixel_size[0] + self._pixel_size_col = pixel_size[1] + + self._saturation_level = saturation_level + + self._panel_buffer = panel_buffer + + self._tth_distortion = tth_distortion + + if roi is None: + self._roi = roi + else: + assert len(roi) == 2, "roi is set via (start_row, start_col)" + self._roi = ( + (roi[0], roi[0] + self._rows), + (roi[1], roi[1] + self._cols), + ) + + self._tvec = np.array(tvec).flatten() + self._tilt = np.array(tilt).flatten() + + self._bvec = np.array(bvec).flatten() + self._xrs_dist = xrs_dist + + self._evec = np.array(evec).flatten() + + self._distortion = distortion + + self.max_workers = max_workers + + self.group = group + + if detector_filter is None: + detector_filter = detector_coatings.Filter( + **FILTER_DEFAULTS.TARDIS) + self.filter = detector_filter + + if detector_coating is None: + detector_coating = detector_coatings.Coating(**COATING_DEFAULT) + self.coating = detector_coating + + if phosphor is None: + phosphor = detector_coatings.Phosphor(**PHOSPHOR_DEFAULT) + self.phosphor = phosphor + + # detector ID + @property + def name(self): + return self._name + + @name.setter + def name(self, s): + assert isinstance(s, str), "requires string input" + self._name = s + + @property + def lmfit_name(self): + # lmfit requires underscores instead of dashes + return self.name.replace('-', '_') + + # properties for physical size of rectangular detector + @property + def rows(self): + return self._rows + + @rows.setter + def rows(self, x): + assert isinstance(x, int) + self._rows = x + + @property + def cols(self): + return self._cols + + @cols.setter + def cols(self, x): + assert isinstance(x, int) + self._cols = x + + @property + def pixel_size_row(self): + return self._pixel_size_row + + @pixel_size_row.setter + def pixel_size_row(self, x): + self._pixel_size_row = float(x) + + @property + def pixel_size_col(self): + return self._pixel_size_col + + @pixel_size_col.setter + def pixel_size_col(self, x): + self._pixel_size_col = float(x) + + @property + def pixel_area(self): + return self.pixel_size_row * self.pixel_size_col + + @property + def saturation_level(self): + return self._saturation_level + + @saturation_level.setter + def saturation_level(self, x): + if x is not None: + assert np.isreal(x) + self._saturation_level = x + + @property + def panel_buffer(self): + return self._panel_buffer + + @panel_buffer.setter + def panel_buffer(self, x): + """if not None, a buffer in mm (x, y)""" + if x is not None: + assert len(x) == 2 or x.ndim == 2 + self._panel_buffer = x + + @property + def tth_distortion(self): + return self._tth_distortion + + @tth_distortion.setter + def tth_distortion(self, x): + """if not None, a buffer in mm (x, y)""" + if x is not None: + assert x.ndim == 2 and x.shape == self.shape + self._tth_distortion = x + + @property + def roi(self): + return self._roi + + @roi.setter + def roi(self, vertex_array): + """ + !!! 
vertex array must be (r0, c0) + """ + if vertex_array is not None: + assert ( + len(vertex_array) == 2 + ), "roi is set via (start_row, start_col)" + self._roi = ( + (vertex_array[0], vertex_array[0] + self.rows), + (vertex_array[1], vertex_array[1] + self.cols), + ) + + @property + def row_dim(self): + return self.rows * self.pixel_size_row + + @property + def col_dim(self): + return self.cols * self.pixel_size_col + + @property + def row_pixel_vec(self): + return self.pixel_size_row * ( + 0.5 * (self.rows - 1) - np.arange(self.rows) + ) + + @property + def row_edge_vec(self): + return _row_edge_vec(self.rows, self.pixel_size_row) + + @property + def col_pixel_vec(self): + return self.pixel_size_col * ( + np.arange(self.cols) - 0.5 * (self.cols - 1) + ) + + @property + def col_edge_vec(self): + return _col_edge_vec(self.cols, self.pixel_size_col) + + @property + def corner_ul(self): + return np.r_[-0.5 * self.col_dim, 0.5 * self.row_dim] + + @property + def corner_ll(self): + return np.r_[-0.5 * self.col_dim, -0.5 * self.row_dim] + + @property + def corner_lr(self): + return np.r_[0.5 * self.col_dim, -0.5 * self.row_dim] + + @property + def corner_ur(self): + return np.r_[0.5 * self.col_dim, 0.5 * self.row_dim] + + @property + def shape(self): + return (self.rows, self.cols) + + @property + def tvec(self): + return self._tvec + + @tvec.setter + def tvec(self, x): + x = np.array(x).flatten() + assert len(x) == 3, 'input must have length = 3' + self._tvec = x + + @property + def tilt(self): + return self._tilt + + @tilt.setter + def tilt(self, x): + assert len(x) == 3, 'input must have length = 3' + self._tilt = np.array(x).squeeze() + + @property + def bvec(self): + return self._bvec + + @bvec.setter + def bvec(self, x): + x = np.array(x).flatten() + assert ( + len(x) == 3 and sum(x * x) > 1 - ct.sqrt_epsf + ), 'input must have length = 3 and have unit magnitude' + self._bvec = x + + @property + def xrs_dist(self): + return self._xrs_dist + + @xrs_dist.setter + def xrs_dist(self, x): + assert x is None or np.isscalar( + x + ), f"'source_distance' must be None or scalar; you input '{x}'" + self._xrs_dist = x + + @property + def evec(self): + return self._evec + + @evec.setter + def evec(self, x): + x = np.array(x).flatten() + assert ( + len(x) == 3 and sum(x * x) > 1 - ct.sqrt_epsf + ), 'input must have length = 3 and have unit magnitude' + self._evec = x + + @property + def distortion(self): + return self._distortion + + @distortion.setter + def distortion(self, x): + if x is not None: + registry = distortion_registry.distortion_registry + check_arg = np.zeros(len(registry), dtype=bool) + for i, dcls in enumerate(registry.values()): + check_arg[i] = isinstance(x, dcls) + assert np.any(check_arg), 'input distortion is not in registry!' + self._distortion = x + + @property + def rmat(self): + return make_rmat_of_expmap(self.tilt) + + @property + def normal(self): + return self.rmat[:, 2] + + # ...memoize??? + @property + def pixel_coords(self): + pix_i, pix_j = np.meshgrid( + self.row_pixel_vec, self.col_pixel_vec, indexing='ij' + ) + return pix_i, pix_j + + # ========================================================================= + # METHODS + # ========================================================================= + + def pixel_Q(self, energy: np.floating, + origin: np.ndarray = ct.zeros_3) -> np.ndarray: + '''get the equivalent momentum transfer + for the angles. 
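+
+        the momentum transfer is computed from the pixel-wise two-theta
+        as Q = 4*pi*sin(tth/2)/lam, with lam the incident wavelength
+        in angstrom.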
+
+        Parameters
+        ----------
+        energy: float
+            incident photon energy in keV
+        origin: np.ndarray
+            origin of diffraction volume
+
+        Returns
+        -------
+        np.ndarray
+            pixel wise Q in A^-1
+
+        '''
+        lam = ct.keVToAngstrom(energy)
+        tth, _ = self.pixel_angles(origin=origin)
+        return 4.*np.pi*np.sin(tth*0.5)/lam
+
+    def pixel_compton_energy_loss(
+        self,
+        energy: np.floating,
+        origin: np.ndarray = ct.zeros_3,
+    ) -> np.ndarray:
+        '''Inelastic Compton scattering leads
+        to an energy loss of the incident photons.
+        Compute the final energy of the photons
+        for each pixel.
+
+        Parameters
+        ----------
+        energy: float
+            incident photon energy in keV
+        origin: np.ndarray
+            origin of diffraction volume
+
+        Returns
+        -------
+        np.ndarray
+            pixel wise energy of inelastically
+            scattered photons in keV
+        '''
+        energy = np.asarray(energy)
+        tth, _ = self.pixel_angles(origin=origin)
+        ang_fact = (1 - np.cos(tth))
+        beta = energy/ct.cRestmasskeV
+        return energy/(1 + beta*ang_fact)
+
+    def pixel_compton_attenuation_length(
+        self,
+        energy: np.floating,
+        density: np.floating,
+        formula: str,
+        origin: np.ndarray = ct.zeros_3,
+    ) -> np.ndarray:
+        '''Each pixel intercepts inelastically
+        scattered photons of a different energy,
+        so the attenuation length and the transmission
+        for these photons differ as well. This function
+        calculates the attenuation length for each pixel
+        on the detector.
+
+        Parameters
+        ----------
+        energy: float
+            incident photon energy in keV
+        density: float
+            density of material in g/cc
+        formula: str
+            formula of the scattering material
+        origin: np.ndarray
+            origin of diffraction volume
+
+        Returns
+        -------
+        np.ndarray
+            pixel wise attenuation length of Compton
+            scattered photons
+        '''
+        pixel_energy = self.pixel_compton_energy_loss(energy, origin=origin)
+
+        pixel_attenuation_length = calculate_linear_absorption_length(
+            density,
+            formula,
+            pixel_energy.flatten(),
+        )
+        return pixel_attenuation_length.reshape(self.shape)
+
+    def compute_compton_scattering_intensity(
+        self,
+        energy: np.floating,
+        rMat_s: np.ndarray,
+        physics_package: AbstractPhysicsPackage,
+        origin: np.ndarray = ct.zeros_3,
+    ) -> tuple[np.ndarray, np.ndarray, np.ndarray]:
+        '''Compute the theoretical Compton scattering
+        signal on the detector. This value is corrected
+        for the transmission of Compton scattered photons
+        and normalized before being subtracted from the
+        raw intensity.
+
+        Parameters
+        ----------
+        energy: float
+            energy of incident photon
+        rMat_s: np.ndarray
+            rotation matrix of sample orientation
+        physics_package: AbstractPhysicsPackage
+            physics package information
+        origin: np.ndarray
+            origin of diffraction volume
+
+        Returns
+        -------
+        compton_intensity: np.ndarray
+            transmission corrected compton scattering
+            intensity
+        t_s: np.ndarray
+            transmission of the physics package for the
+            Compton scattered photons
+        t_w: np.ndarray
+            transmission of the window for the Compton
+            scattered photons
+        '''
+        q = self.pixel_Q(energy, origin=origin)
+        inc_s = calculate_incoherent_scattering(
+            physics_package.sample_material,
+            q.flatten()).reshape(self.shape)
+
+        inc_w = calculate_incoherent_scattering(
+            physics_package.window_material,
+            q.flatten()).reshape(self.shape)
+
+        t_s = self.calc_compton_physics_package_transmission(
+            energy, rMat_s, physics_package)
+
+        t_w = self.calc_compton_window_transmission(
+            energy, rMat_s, physics_package)
+
+        return inc_s * t_s + inc_w * t_w, t_s, t_w
+
+    def polarization_factor(self, f_hor, f_vert, unpolarized=False):
+        """
+        Calculate the polarization factor for every pixel.
+
+        Parameters
+        ----------
+        f_hor : float
+            the fraction of horizontal polarization. for XFELs
+            this is close to 1.
+        f_vert : float
+            the fraction of vertical polarization, which is ~0 for XFELs.
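+            note that f_hor + f_vert must sum to 1.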
+ + Raises + ------ + RuntimeError + DESCRIPTION. + + Returns + ------- + TYPE + DESCRIPTION. + + """ + s = f_hor + f_vert + if np.abs(s - 1) > ct.sqrt_epsf: + msg = ( + "sum of fraction of " + "horizontal and vertical polarizations " + "must be equal to 1." + ) + raise RuntimeError(msg) + + if f_hor < 0 or f_vert < 0: + msg = ( + "fraction of polarization in horizontal " + "or vertical directions can't be negative." + ) + raise RuntimeError(msg) + + tth, eta = self.pixel_angles() + kwargs = { + 'tth': tth, + 'eta': eta, + 'f_hor': f_hor, + 'f_vert': f_vert, + 'unpolarized': unpolarized, + } + + return _polarization_factor(**kwargs) + + def lorentz_factor(self): + """ + calculate the lorentz factor for every pixel + + Parameters + ---------- + None + + Raises + ------ + None + + Returns + ------- + numpy.ndarray + returns an array the same size as the detector panel + with each element containg the lorentz factor of the + corresponding pixel + """ + tth, eta = self.pixel_angles() + return _lorentz_factor(tth) + + def config_dict( + self, + chi=0, + tvec=ct.zeros_3, + beam_energy=beam_energy_DFLT, + beam_vector=ct.beam_vec, + sat_level=None, + panel_buffer=None, + style='yaml', + ): + """ + Return a dictionary of detector parameters. + + Optional instrument level parameters. This is a convenience function + to work with the APIs in several functions in xrdutil. + + Parameters + ---------- + chi : float, optional + DESCRIPTION. The default is 0. + tvec : array_like (3,), optional + DESCRIPTION. The default is ct.zeros_3. + beam_energy : float, optional + DESCRIPTION. The default is beam_energy_DFLT. + beam_vector : aray_like (3,), optional + DESCRIPTION. The default is ct.beam_vec. + sat_level : scalar, optional + DESCRIPTION. The default is None. + panel_buffer : scalar, array_like (2,), optional + DESCRIPTION. The default is None. + + Returns + ------- + config_dict : dict + DESCRIPTION. 
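+
+        Examples
+        --------
+        A sketch, assuming panel is a concrete detector instance with a
+        saturation level set (the name is hypothetical):
+
+        >>> cfg = panel.config_dict(chi=0.0, style='yaml')
+        >>> sorted(cfg)
+        ['beam', 'detector', 'oscillation_stage']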
+ + """ + assert style.lower() in ['yaml', 'hdf5'], ( + "style must be either 'yaml', or 'hdf5'; you gave '%s'" % style + ) + + config_dict = {} + + # ===================================================================== + # DETECTOR PARAMETERS + # ===================================================================== + # transform and pixels + # + # assign local vars; listify if necessary + tilt = self.tilt + translation = self.tvec + roi = ( + None + if self.roi is None + else np.array([self.roi[0][0], self.roi[1][0]]).flatten() + ) + if style.lower() == 'yaml': + tilt = tilt.tolist() + translation = translation.tolist() + tvec = tvec.tolist() + roi = None if roi is None else roi.tolist() + + det_dict = dict( + detector_type=self.detector_type, + transform=dict( + tilt=tilt, + translation=translation, + ), + pixels=dict( + rows=int(self.rows), + columns=int(self.cols), + size=[float(self.pixel_size_row), float(self.pixel_size_col)], + ), + ) + + if roi is not None: + # Only add roi if it is not None + det_dict['pixels']['roi'] = roi + + if self.group is not None: + # Only add group if it is not None + det_dict['group'] = self.group + + # distortion + if self.distortion is not None: + dparams = self.distortion.params + if style.lower() == 'yaml': + dparams = dparams.tolist() + dist_d = dict( + function_name=self.distortion.maptype, parameters=dparams + ) + det_dict['distortion'] = dist_d + + # saturation level + if sat_level is None: + sat_level = self.saturation_level + det_dict['saturation_level'] = float(sat_level) + + # panel buffer + if panel_buffer is None: + # could be none, a 2-element list, or a 2-d array (rows, cols) + panel_buffer = copy.deepcopy(self.panel_buffer) + # !!! now we have to do some style-dependent munging of panel_buffer + if isinstance(panel_buffer, np.ndarray): + if panel_buffer.ndim == 1: + assert len(panel_buffer) == 2, "length of 1-d buffer must be 2" + # if here is a 2-element array + if style.lower() == 'yaml': + panel_buffer = panel_buffer.tolist() + elif panel_buffer.ndim == 2: + if style.lower() == 'yaml': + # !!! can't practically write array-like buffers to YAML + # so forced to clobber + print("clobbering panel buffer array in yaml-ready output") + panel_buffer = [0.0, 0.0] + else: + raise RuntimeError( + "panel buffer ndim must be 1 or 2; you specified %d" + % panel_buffer.ndmin + ) + elif panel_buffer is None: + # still None on self + # !!! this gets handled by unwrap_dict_to_h5 now + + # if style.lower() == 'hdf5': + # # !!! can't write None to hdf5; substitute with zeros + # panel_buffer = np.r_[0., 0.] + pass + det_dict['buffer'] = panel_buffer + + det_dict.update(self.extra_config_kwargs) + + # ===================================================================== + # SAMPLE STAGE PARAMETERS + # ===================================================================== + stage_dict = dict(chi=chi, translation=tvec) + + # ===================================================================== + # BEAM PARAMETERS + # ===================================================================== + # !!! 
make_reflection_patches is still using the vector + # azim, pola = calc_angles_from_beam_vec(beam_vector) + # beam_dict = dict( + # energy=beam_energy, + # vector=dict( + # azimuth=azim, + # polar_angle=pola + # ) + # ) + beam_dict = dict(energy=beam_energy, vector=beam_vector) + + config_dict['detector'] = det_dict + config_dict['oscillation_stage'] = stage_dict + config_dict['beam'] = beam_dict + + return config_dict + + def cartToPixel(self, xy_det, pixels=False, apply_distortion=False): + """ + Coverts cartesian coordinates to pixel coordinates + + Parameters + ---------- + xy_det : array_like + The (n, 2) vstacked array of (x, y) pairs in the reference + cartesian frame (possibly subject to distortion). + pixels : bool, optional + If True, return discrete pixel indices; otherwise fractional pixel + coordinates are returned. The default is False. + apply_distortion : bool, optional + If True, apply self.distortion to the input (if applicable). + The default is False. + + Returns + ------- + ij_det : array_like + The (n, 2) array of vstacked (i, j) coordinates in the pixel + reference frame where i is the (slow) row dimension and j is the + (fast) column dimension. + + """ + xy_det = np.atleast_2d(xy_det) + if apply_distortion and self.distortion is not None: + xy_det = self.distortion.apply(xy_det) + + npts = len(xy_det) + + tmp_ji = xy_det - np.tile(self.corner_ul, (npts, 1)) + i_pix = -tmp_ji[:, 1] / self.pixel_size_row - 0.5 + j_pix = tmp_ji[:, 0] / self.pixel_size_col - 0.5 + + ij_det = np.vstack([i_pix, j_pix]).T + if pixels: + # Hide any runtime warnings in this conversion. Their output values + # will certainly be off the detector, which is fine. + with np.errstate(invalid='ignore'): + ij_det = np.array(np.round(ij_det), dtype=int) + + return ij_det + + def pixelToCart(self, ij_det): + """ + Convert vstacked array or list of [i,j] pixel indices + (or UL corner-based points) and convert to (x,y) in the + cartesian frame {Xd, Yd, Zd} + """ + ij_det = np.atleast_2d(ij_det) + + x = (ij_det[:, 1] + 0.5) * self.pixel_size_col + self.corner_ll[0] + y = ( + self.rows - ij_det[:, 0] - 0.5 + ) * self.pixel_size_row + self.corner_ll[1] + return np.vstack([x, y]).T + + def angularPixelSize(self, xy, rMat_s=None, tVec_s=None, tVec_c=None): + """ + Notes + ----- + !!! assumes xy are in raw (distorted) frame, if applicable + """ + # munge kwargs + if rMat_s is None: + rMat_s = ct.identity_3x3 + if tVec_s is None: + tVec_s = ct.zeros_3x1 + if tVec_c is None: + tVec_c = ct.zeros_3x1 + + # FIXME: perhaps not necessary, but safe... + xy = np.atleast_2d(xy) + + ''' + # --------------------------------------------------------------------- + # TODO: needs testing and memoized gradient arrays! 
+ # --------------------------------------------------------------------- + # need origin arg + origin = np.dot(rMat_s, tVec_c).flatten() + tVec_s.flatten() + + # get pixel indices + i_crds = cellIndices(self.row_edge_vec, xy[:, 1]) + j_crds = cellIndices(self.col_edge_vec, xy[:, 0]) + + ptth_grad = self.pixel_tth_gradient(origin=origin)[i_crds, j_crds] + peta_grad = self.pixel_eta_gradient(origin=origin)[i_crds, j_crds] + + return np.vstack([ptth_grad, peta_grad]).T + ''' + # call xrdutil function + ang_ps = xrdutil.angularPixelSize( + xy, + (self.pixel_size_row, self.pixel_size_col), + self.rmat, + rMat_s, + self.tvec, + tVec_s, + tVec_c, + distortion=self.distortion, + beamVec=self.bvec, + etaVec=self.evec, + ) + return ang_ps + + def clip_to_panel(self, xy, buffer_edges=True): + """ + if self.roi is not None, uses it by default + + TODO: check if need shape kwarg + TODO: optimize ROI search better than list comprehension below + TODO: panel_buffer can be a 2-d boolean mask, but needs testing + + """ + xy = np.atleast_2d(xy) + + ''' + # !!! THIS LOGIC IS OBSOLETE + if self.roi is not None: + ij_crds = self.cartToPixel(xy, pixels=True) + ii, jj = polygon(self.roi[:, 0], self.roi[:, 1], + shape=(self.rows, self.cols)) + on_panel_rows = [i in ii for i in ij_crds[:, 0]] + on_panel_cols = [j in jj for j in ij_crds[:, 1]] + on_panel = np.logical_and(on_panel_rows, on_panel_cols) + else: + ''' + xlim = 0.5 * self.col_dim + ylim = 0.5 * self.row_dim + if buffer_edges and self.panel_buffer is not None: + if self.panel_buffer.ndim == 2: + pix = self.cartToPixel(xy, pixels=True) + + roff = np.logical_or(pix[:, 0] < 0, pix[:, 0] >= self.rows) + coff = np.logical_or(pix[:, 1] < 0, pix[:, 1] >= self.cols) + + idx = np.logical_or(roff, coff) + + on_panel = np.full(pix.shape[0], False) + valid_pix = pix[~idx, :] + on_panel[~idx] = self.panel_buffer[ + valid_pix[:, 0], valid_pix[:, 1] + ] + else: + xlim -= self.panel_buffer[0] + ylim -= self.panel_buffer[1] + on_panel_x = np.logical_and( + xy[:, 0] >= -xlim, xy[:, 0] <= xlim + ) + on_panel_y = np.logical_and( + xy[:, 1] >= -ylim, xy[:, 1] <= ylim + ) + on_panel = np.logical_and(on_panel_x, on_panel_y) + elif not buffer_edges or self.panel_buffer is None: + on_panel_x = np.logical_and(xy[:, 0] >= -xlim, xy[:, 0] <= xlim) + on_panel_y = np.logical_and(xy[:, 1] >= -ylim, xy[:, 1] <= ylim) + on_panel = np.logical_and(on_panel_x, on_panel_y) + return xy[on_panel, :], on_panel + + def interpolate_nearest(self, xy, img, pad_with_nans=True): + """ + TODO: revisit normalization in here? + + """ + is_2d = img.ndim == 2 + right_shape = img.shape[0] == self.rows and img.shape[1] == self.cols + assert ( + is_2d and right_shape + ), "input image must be 2-d with shape (%d, %d)" % ( + self.rows, + self.cols, + ) + + # initialize output with nans + if pad_with_nans: + int_xy = np.nan * np.ones(len(xy)) + else: + int_xy = np.zeros(len(xy)) + + # clip away points too close to or off the edges of the detector + xy_clip, on_panel = self.clip_to_panel(xy, buffer_edges=True) + + # get pixel indices of clipped points + i_src = cellIndices(self.row_pixel_vec, xy_clip[:, 1]) + j_src = cellIndices(self.col_pixel_vec, xy_clip[:, 0]) + + # next interpolate across cols + int_vals = img[i_src, j_src] + int_xy[on_panel] = int_vals + return int_xy + + def interpolate_bilinear(self, xy, img, pad_with_nans=True, + clip_to_panel=True, + on_panel: Optional[np.ndarray] = None): + """ + Interpolate an image array at the specified cartesian points. 
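+
+        Bilinear interpolation weights the four pixel centers surrounding
+        each point, first along columns within the two bounding rows and
+        then between rows; points off the panel are padded per
+        pad_with_nans.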
+ + Parameters + ---------- + xy : array_like, (n, 2) + Array of cartesian coordinates in the image plane at which + to evaluate intensity. + img : array_like + 2-dimensional image array. + pad_with_nans : bool, optional + Toggle for assigning NaN to points that fall off the detector. + The default is True. + on_panel : np.ndarray, optional + If you want to skip clip_to_panel() for performance reasons, + just provide an array of which pixels are on the panel. + + Returns + ------- + int_xy : array_like, (n,) + The array of interpolated intensities at each of the n input + coordinates. + + Notes + ----- + TODO: revisit normalization in here? + """ + + is_2d = img.ndim == 2 + right_shape = img.shape[0] == self.rows and img.shape[1] == self.cols + assert ( + is_2d and right_shape + ), "input image must be 2-d with shape (%d, %d)" % ( + self.rows, + self.cols, + ) + + # initialize output with nans + if pad_with_nans: + int_xy = np.nan * np.ones(len(xy)) + else: + int_xy = np.zeros(len(xy)) + + if on_panel is None: + # clip away points too close to or off the edges of the detector + xy_clip, on_panel = self.clip_to_panel(xy, buffer_edges=True) + else: + xy_clip = xy[on_panel] + + # grab fractional pixel indices of clipped points + ij_frac = self.cartToPixel(xy_clip) + + # get floors/ceils from array of pixel _centers_ + # and fix indices running off the pixel centers + # !!! notice we already clipped points to the panel! + i_floor = cellIndices(self.row_pixel_vec, xy_clip[:, 1]) + i_floor_img = _fix_indices(i_floor, 0, self.rows - 1) + + j_floor = cellIndices(self.col_pixel_vec, xy_clip[:, 0]) + j_floor_img = _fix_indices(j_floor, 0, self.cols - 1) + + # ceilings from floors + i_ceil = i_floor + 1 + i_ceil_img = _fix_indices(i_ceil, 0, self.rows - 1) + + j_ceil = j_floor + 1 + j_ceil_img = _fix_indices(j_ceil, 0, self.cols - 1) + + # first interpolate at top/bottom rows + row_floor_int = (j_ceil - ij_frac[:, 1]) * img[ + i_floor_img, j_floor_img + ] + (ij_frac[:, 1] - j_floor) * img[i_floor_img, j_ceil_img] + row_ceil_int = (j_ceil - ij_frac[:, 1]) * img[ + i_ceil_img, j_floor_img + ] + (ij_frac[:, 1] - j_floor) * img[i_ceil_img, j_ceil_img] + + # next interpolate across cols + int_vals = (i_ceil - ij_frac[:, 0]) * row_floor_int + ( + ij_frac[:, 0] - i_floor + ) * row_ceil_int + int_xy[on_panel] = int_vals + return int_xy + + def make_powder_rings( + self, + pd, + merge_hkls=False, + delta_tth=None, + delta_eta=10.0, + eta_period=None, + eta_list=None, + rmat_s=ct.identity_3x3, + tvec_s=ct.zeros_3, + tvec_c=ct.zeros_3, + full_output=False, + tth_distortion=None, + ): + """ + Generate points on Debye_Scherrer rings over the detector. + + !!! it is assuming that rmat_s is built from (chi, ome) as it the case + for HEDM! + + Parameters + ---------- + pd : TYPE + DESCRIPTION. + merge_hkls : TYPE, optional + DESCRIPTION. The default is False. + delta_tth : TYPE, optional + DESCRIPTION. The default is None. + delta_eta : TYPE, optional + DESCRIPTION. The default is 10.. + eta_period : TYPE, optional + DESCRIPTION. The default is None. + eta_list : TYPE, optional + DESCRIPTION. The default is None. + rmat_s : TYPE, optional + DESCRIPTION. The default is ct.identity_3x3. + tvec_s : TYPE, optional + DESCRIPTION. The default is ct.zeros_3. + tvec_c : TYPE, optional + DESCRIPTION. The default is ct.zeros_3. + full_output : TYPE, optional + DESCRIPTION. The default is False. + tth_distortion : special class, optional + Special distortion class. The default is None. 
+
+        Raises
+        ------
+        RuntimeError
+            DESCRIPTION.
+
+        Returns
+        -------
+        TYPE
+            DESCRIPTION.
+
+        """
+        if tth_distortion is not None:
+            tnorms = mutil.rowNorm(np.vstack([tvec_s, tvec_c]))
+            assert np.all(tnorms < ct.sqrt_epsf), \
+                "If using a distortion function, translations must be zero"
+
+        # in case you want to give it tth angles directly
+        if isinstance(pd, PlaneData):
+            pd = PlaneData(None, pd)
+            if delta_tth is not None:
+                pd.tThWidth = np.radians(delta_tth)
+            else:
+                delta_tth = np.degrees(pd.tThWidth)
+
+            # !!! conversions, meh...
+            del_eta = np.radians(delta_eta)
+
+            # do merging if asked
+            if merge_hkls:
+                _, tth_ranges = pd.getMergedRanges(cullDupl=True)
+                tth = np.average(tth_ranges, axis=1)
+            else:
+                tth_ranges = pd.getTThRanges()
+                tth = pd.getTTh()
+            tth_pm = tth_ranges - np.tile(tth, (2, 1)).T
+            sector_vertices = np.vstack(
+                [
+                    [
+                        i[0],
+                        -del_eta,
+                        i[0],
+                        del_eta,
+                        i[1],
+                        del_eta,
+                        i[1],
+                        -del_eta,
+                        0.0,
+                        0.0,
+                    ]
+                    for i in tth_pm
+                ]
+            )
+        else:
+            # Okay, we have an array-like tth specification
+            tth = np.array(pd).flatten()
+            if delta_tth is None:
+                raise RuntimeError(
+                    "If supplying a 2theta list as first arg, "
+                    + "must supply a delta_tth"
+                )
+            tth_pm = 0.5 * delta_tth * np.r_[-1.0, 1.0]
+            tth_ranges = np.radians([i + tth_pm for i in tth])  # !!! units
+            sector_vertices = np.tile(
+                0.5
+                * np.radians(
+                    [
+                        -delta_tth,
+                        -delta_eta,
+                        -delta_tth,
+                        delta_eta,
+                        delta_tth,
+                        delta_eta,
+                        delta_tth,
+                        -delta_eta,
+                        0.0,
+                        0.0,
+                    ]
+                ),
+                (len(tth), 1),
+            )
+            # !! conversions, meh...
+            tth = np.radians(tth)
+            del_eta = np.radians(delta_eta)
+
+        # for generating rings, make eta vector in correct period
+        if eta_period is None:
+            eta_period = (-np.pi, np.pi)
+
+        if eta_list is None:
+            neta = int(360.0 / float(delta_eta))
+            # this is the vector of ETA EDGES
+            eta_edges = mapAngle(
+                np.radians(delta_eta * np.linspace(0.0, neta, num=neta + 1))
+                + eta_period[0],
+                eta_period,
+            )
+
+            # get eta bin centers from edges
+            """
+            # !!! this way is probably overkill, since we have delta eta
+            eta_centers = np.average(
+                np.vstack([eta[:-1], eta[1:]]),
+                axis=0)
+            """
+            # !!! should be safe as eta_edges are monotonic
+            eta_centers = eta_edges[:-1] + 0.5 * del_eta
+        else:
+            eta_centers = np.radians(eta_list).flatten()
+            neta = len(eta_centers)
+            eta_edges = (
+                np.tile(eta_centers, (2, 1))
+                + np.tile(0.5 * del_eta * np.r_[-1, 1], (neta, 1)).T
+            ).T.flatten()
+
+        # get chi and ome from rmat_s
+        # !!! API ambiguity
+        # !!! this assumes rmat_s was made from the composition
+        # !!! rmat_s = R(Xl, chi) * R(Yl, ome)
+        ome = np.arctan2(rmat_s[0, 2], rmat_s[0, 0])
+
+        # make list of angle tuples
+        angs = [
+            np.vstack([i * np.ones(neta), eta_centers, ome * np.ones(neta)])
+            for i in tth
+        ]
+
+        # need xy coords and pixel sizes
+        valid_ang = []
+        valid_xy = []
+        map_indices = []
+        npp = 5  # [ll, ul, ur, lr, center]
+        for i_ring in range(len(angs)):
+            # expand angles to patch vertices
+            these_angs = angs[i_ring].T
+
+            # push to vertices to see who falls off
+            # FIXME: clipping is not checking if masked regions are on the
+            #        patch interior
+            patch_vertices = (
+                np.tile(these_angs[:, :2], (1, npp))
+                + np.tile(sector_vertices[i_ring], (neta, 1))
+            ).reshape(npp * neta, 2)
+
+            # find vertices that all fall on the panel
+            # !!! note API ambiguity regarding rmat_s above
+            all_xy = self.angles_to_cart(
+                patch_vertices,
+                rmat_s=rmat_s,
+                tvec_s=tvec_s,
+                rmat_c=None,
+                tvec_c=tvec_c,
+                apply_distortion=True,
+            )
+
+            _, on_panel = self.clip_to_panel(all_xy)
+
+            # all vertices must be on...
+
+            patch_is_on = np.all(on_panel.reshape(neta, npp), axis=1)
+            patch_xys = all_xy.reshape(neta, npp, 2)[patch_is_on]
+
+            # !!! Have to apply after clipping; distortion can get wonky near
+            #     the edge of the panel, and it is assumed to be <~1 deg
+            # !!! The tth_ranges are NOT correct!
+            if tth_distortion is not None:
+                patch_valid_angs = tth_distortion.apply(
+                    self.angles_to_cart(these_angs[patch_is_on, :2]),
+                    return_nominal=True,
+                )
+                patch_valid_xys = self.angles_to_cart(
+                    patch_valid_angs, apply_distortion=True
+                )
+            else:
+                patch_valid_angs = these_angs[patch_is_on, :2]
+                patch_valid_xys = patch_xys[:, -1, :].squeeze()
+
+            # form output arrays
+            valid_ang.append(patch_valid_angs)
+            valid_xy.append(patch_valid_xys)
+            map_indices.append(patch_is_on)
+        # ??? is this option necessary?
+        if full_output:
+            return valid_ang, valid_xy, tth_ranges, map_indices, eta_edges
+        else:
+            return valid_ang, valid_xy, tth_ranges
+
+    def map_to_plane(self, pts, rmat, tvec):
+        """
+        Map detector points to specified plane.
+
+        Parameters
+        ----------
+        pts : TYPE
+            DESCRIPTION.
+        rmat : TYPE
+            DESCRIPTION.
+        tvec : TYPE
+            DESCRIPTION.
+
+        Returns
+        -------
+        TYPE
+            DESCRIPTION.
+
+        Notes
+        -----
+        by convention:
+
+        n * (u*pts_l - tvec) = 0
+
+        [pts]_l = rmat*[pts]_m + tvec
+
+        """
+        # arg munging
+        pts = np.atleast_2d(pts)
+        npts = len(pts)
+
+        # map plane normal & translation vector, LAB FRAME
+        nvec_map_lab = rmat[:, 2].reshape(3, 1)
+        tvec_map_lab = np.atleast_2d(tvec).reshape(3, 1)
+        tvec_d_lab = np.atleast_2d(self.tvec).reshape(3, 1)
+
+        # put pts as 3-d in panel CS and transform to 3-d lab coords
+        pts_det = np.hstack([pts, np.zeros((npts, 1))])
+        pts_lab = np.dot(self.rmat, pts_det.T) + tvec_d_lab
+
+        # scaling along pts vectors to hit map plane
+        u = np.dot(nvec_map_lab.T, tvec_map_lab) / np.dot(
+            nvec_map_lab.T, pts_lab
+        )
+
+        # pts on map plane, in LAB FRAME
+        pts_map_lab = np.tile(u, (3, 1)) * pts_lab
+
+        return np.dot(rmat.T, pts_map_lab - tvec_map_lab)[:2, :].T
+
+    def simulate_rotation_series(
+        self,
+        plane_data,
+        grain_param_list,
+        eta_ranges=[
+            (-np.pi, np.pi),
+        ],
+        ome_ranges=[
+            (-np.pi, np.pi),
+        ],
+        ome_period=(-np.pi, np.pi),
+        chi=0.0,
+        tVec_s=ct.zeros_3,
+        wavelength=None,
+    ):
+        """
+        Simulate a monochromatic rotation series for a list of grains.
+
+        Parameters
+        ----------
+        plane_data : TYPE
+            DESCRIPTION.
+        grain_param_list : TYPE
+            DESCRIPTION.
+        eta_ranges : TYPE, optional
+            DESCRIPTION. The default is [(-np.pi, np.pi), ].
+        ome_ranges : TYPE, optional
+            DESCRIPTION. The default is [(-np.pi, np.pi), ].
+        ome_period : TYPE, optional
+            DESCRIPTION. The default is (-np.pi, np.pi).
+        chi : TYPE, optional
+            DESCRIPTION. The default is 0..
+        tVec_s : TYPE, optional
+            DESCRIPTION. The default is ct.zeros_3.
+        wavelength : TYPE, optional
+            DESCRIPTION. The default is None.
+
+        Returns
+        -------
+        valid_ids : TYPE
+            DESCRIPTION.
+        valid_hkls : TYPE
+            DESCRIPTION.
+        valid_angs : TYPE
+            DESCRIPTION.
+        valid_xys : TYPE
+            DESCRIPTION.
+        ang_pixel_size : TYPE
+            DESCRIPTION.
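+
+        Notes
+        -----
+        Minimal call sketch (grain parameter layout per the code below;
+        names are illustrative):
+
+            gparm = np.hstack([expmap, tvec_c, vinv_s])  # (12,)
+            ids, hkls, angs, xys, px_sizes = panel.simulate_rotation_series(
+                plane_data, [gparm])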
+ + """ + # grab B-matrix from plane data + bMat = plane_data.latVecOps['B'] + + # reconcile wavelength + # * added sanity check on exclusions here; possible to + # * make some reflections invalid (NaN) + if wavelength is None: + wavelength = plane_data.wavelength + else: + if plane_data.wavelength != wavelength: + plane_data.wavelength = ct.keVToAngstrom(wavelength) + assert not np.any( + np.isnan(plane_data.getTTh()) + ), "plane data exclusions incompatible with wavelength" + + # vstacked G-vector id, h, k, l + full_hkls = xrdutil._fetch_hkls_from_planedata(plane_data) + + """ LOOP OVER GRAINS """ + valid_ids = [] + valid_hkls = [] + valid_angs = [] + valid_xys = [] + ang_pixel_size = [] + for gparm in grain_param_list: + + # make useful parameters + rMat_c = make_rmat_of_expmap(gparm[:3]) + tVec_c = gparm[3:6] + vInv_s = gparm[6:] + + # All possible bragg conditions as vstacked [tth, eta, ome] + # for each omega solution + angList = np.vstack( + oscill_angles_of_hkls( + full_hkls[:, 1:], + chi, + rMat_c, + bMat, + wavelength, + v_inv=vInv_s, + beam_vec=self.bvec, + ) + ) + + # filter by eta and omega ranges + # ??? get eta range from detector? + allAngs, allHKLs = xrdutil._filter_hkls_eta_ome( + full_hkls, angList, eta_ranges, ome_ranges + ) + allAngs[:, 2] = mapAngle(allAngs[:, 2], ome_period) + + # find points that fall on the panel + det_xy, rMat_s, on_plane = xrdutil._project_on_detector_plane( + allAngs, + self.rmat, + rMat_c, + chi, + self.tvec, + tVec_c, + tVec_s, + self.distortion, + self.bvec, + ) + xys_p, on_panel = self.clip_to_panel(det_xy) + valid_xys.append(xys_p) + + # filter angs and hkls that are on the detector plane + # !!! check this -- seems unnecessary but the results of + # _project_on_detector_plane() can have len < the input? + # the output of _project_on_detector_plane has been modified to + # hand back the index array to remedy this JVB 2020-05-27 + if np.any(~on_plane): + allAngs = np.atleast_2d(allAngs[on_plane, :]) + allHKLs = np.atleast_2d(allHKLs[on_plane, :]) + + # grab hkls and gvec ids for this panel + valid_hkls.append(allHKLs[on_panel, 1:]) + valid_ids.append(allHKLs[on_panel, 0]) + + # reflection angles (voxel centers) and pixel size in (tth, eta) + valid_angs.append(allAngs[on_panel, :]) + ang_pixel_size.append(self.angularPixelSize(xys_p)) + return valid_ids, valid_hkls, valid_angs, valid_xys, ang_pixel_size + + def simulate_laue_pattern( + self, + crystal_data, + minEnergy=5.0, + maxEnergy=35.0, + rmat_s=None, + tvec_s=None, + grain_params=None, + beam_vec=None, + ): + """ """ + if isinstance(crystal_data, PlaneData): + + plane_data = crystal_data + + # grab the expanded list of hkls from plane_data + hkls = np.hstack(plane_data.getSymHKLs()) + + # and the unit plane normals (G-vectors) in CRYSTAL FRAME + gvec_c = np.dot(plane_data.latVecOps['B'], hkls) + + # Filter out g-vectors going in the wrong direction. `gvec_to_xy()` used + # to do this, but not anymore. + to_keep = np.dot(gvec_c.T, self.bvec) <= 0 + + hkls = hkls[:, to_keep] + gvec_c = gvec_c[:, to_keep] + elif len(crystal_data) == 2: + # !!! 
should clean this up + hkls = np.array(crystal_data[0]) + bmat = crystal_data[1] + gvec_c = np.dot(bmat, hkls) + else: + raise RuntimeError( + f'argument list not understood: {crystal_data=}' + ) + nhkls_tot = hkls.shape[1] + + # parse energy ranges + # TODO: allow for spectrum parsing + multipleEnergyRanges = False + if hasattr(maxEnergy, '__len__'): + assert len(maxEnergy) == len( + minEnergy + ), 'energy cutoff ranges must have the same length' + multipleEnergyRanges = True + lmin = [] + lmax = [] + for i in range(len(maxEnergy)): + lmin.append(ct.keVToAngstrom(maxEnergy[i])) + lmax.append(ct.keVToAngstrom(minEnergy[i])) + else: + lmin = ct.keVToAngstrom(maxEnergy) + lmax = ct.keVToAngstrom(minEnergy) + + # parse grain parameters kwarg + if grain_params is None: + grain_params = np.atleast_2d( + np.hstack([np.zeros(6), ct.identity_6x1]) + ) + n_grains = len(grain_params) + + # sample rotation + if rmat_s is None: + rmat_s = ct.identity_3x3 + + # dummy translation vector... make input + if tvec_s is None: + tvec_s = ct.zeros_3 + + # beam vector + if beam_vec is None: + beam_vec = ct.beam_vec + + # ========================================================================= + # LOOP OVER GRAINS + # ========================================================================= + + # pre-allocate output arrays + xy_det = np.nan * np.ones((n_grains, nhkls_tot, 2)) + hkls_in = np.nan * np.ones((n_grains, 3, nhkls_tot)) + angles = np.nan * np.ones((n_grains, nhkls_tot, 2)) + dspacing = np.nan * np.ones((n_grains, nhkls_tot)) + energy = np.nan * np.ones((n_grains, nhkls_tot)) + for iG, gp in enumerate(grain_params): + rmat_c = make_rmat_of_expmap(gp[:3]) + tvec_c = gp[3:6].reshape(3, 1) + vInv_s = mutil.vecMVToSymm(gp[6:].reshape(6, 1)) + + # stretch them: V^(-1) * R * Gc + gvec_s_str = np.dot(vInv_s, np.dot(rmat_c, gvec_c)) + ghat_c_str = mutil.unitVector(np.dot(rmat_c.T, gvec_s_str)) + + # project + dpts = gvec_to_xy( + ghat_c_str.T, + self.rmat, + rmat_s, + rmat_c, + self.tvec, + tvec_s, + tvec_c, + beam_vec=beam_vec, + ) + + # check intersections with detector plane + canIntersect = ~np.isnan(dpts[:, 0]) + npts_in = sum(canIntersect) + + if np.any(canIntersect): + dpts = dpts[canIntersect, :].reshape(npts_in, 2) + dhkl = hkls[:, canIntersect].reshape(3, npts_in) + + rmat_b = make_beam_rmat(beam_vec, ct.eta_vec) + # back to angles + tth_eta, gvec_l = xy_to_gvec( + dpts, + self.rmat, + rmat_s, + self.tvec, + tvec_s, + tvec_c, + rmat_b=rmat_b, + ) + tth_eta = np.vstack(tth_eta).T + + # warp measured points + if self.distortion is not None: + dpts = self.distortion.apply_inverse(dpts) + + # plane spacings and energies + dsp = 1.0 / mutil.rowNorm(gvec_s_str[:, canIntersect].T) + wlen = 2 * dsp * np.sin(0.5 * tth_eta[:, 0]) + + # clip to detector panel + _, on_panel = self.clip_to_panel(dpts, buffer_edges=True) + + if multipleEnergyRanges: + validEnergy = np.zeros(len(wlen), dtype=bool) + for i in range(len(lmin)): + in_energy_range = np.logical_and( + wlen >= lmin[i], wlen <= lmax[i] + ) + validEnergy = validEnergy | in_energy_range + else: + validEnergy = np.logical_and(wlen >= lmin, wlen <= lmax) + + # index for valid reflections + keepers = np.where(np.logical_and(on_panel, validEnergy))[0] + + # assign output arrays + xy_det[iG][keepers, :] = dpts[keepers, :] + hkls_in[iG][:, keepers] = dhkl[:, keepers] + angles[iG][keepers, :] = tth_eta[keepers, :] + dspacing[iG, keepers] = dsp[keepers] + energy[iG, keepers] = ct.keVToAngstrom(wlen[keepers]) + return xy_det, hkls_in, angles, dspacing, energy + + 
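+    # A minimal usage sketch for simulate_laue_pattern (illustrative only;
+    # assumes `panel` is a Detector and `plane_data` a PlaneData instance):
+    #
+    #     xy, hkls, angs, dsp, nrg = panel.simulate_laue_pattern(
+    #         plane_data, minEnergy=5.0, maxEnergy=35.0)
+    #
+    # Each output has one entry per grain; NaN entries mark reflections that
+    # missed the panel or fell outside the energy cutoffs.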
@staticmethod + def update_memoization_sizes(all_panels): + funcs = [ + _polarization_factor, + _lorentz_factor, + ] + + min_size = len(all_panels) + return Detector.increase_memoization_sizes(funcs, min_size) + + @staticmethod + def increase_memoization_sizes(funcs, min_size): + for f in funcs: + cache_info = f.cache_info() + if cache_info['maxsize'] < min_size: + f.set_cache_maxsize(min_size) + + def calc_physics_package_transmission(self, energy: np.floating, + rMat_s: np.array, + physics_package: AbstractPhysicsPackage) -> np.float64: + """get the transmission from the physics package + need to consider HED and HEDM samples separately + """ + bvec = self.bvec + sample_normal = np.dot(rMat_s, [0., 0., np.sign(bvec[2])]) + seca = 1./np.dot(bvec, sample_normal) + + tth, eta = self.pixel_angles() + angs = np.vstack((tth.flatten(), eta.flatten(), + np.zeros(tth.flatten().shape))).T + + dvecs = angles_to_dvec(angs, beam_vec=bvec) + + cosb = np.dot(dvecs, sample_normal) + '''angles for which secb <= 0 or close are diffracted beams + almost parallel to the sample surface or backscattered, we + can mask out these values by setting secb to nan + ''' + mask = np.logical_or( + cosb < 0, + np.isclose( + cosb, + 0., + atol=5E-2, + ) + ) + cosb[mask] = np.nan + secb = 1./cosb.reshape(self.shape) + + T_sample = self.calc_transmission_sample( + seca, secb, energy, physics_package) + T_window = self.calc_transmission_window( + secb, energy, physics_package) + + transmission_physics_package = T_sample * T_window + return transmission_physics_package + + def calc_compton_physics_package_transmission( + self, + energy: np.floating, + rMat_s: np.array, + physics_package: AbstractPhysicsPackage, + ) -> np.ndarray: + '''calculate the attenuation of inelastically + scattered photons. since these photons lose energy, + the attenuation length is angle dependent ergo a separate + routine than elastically scattered absorption. + ''' + bvec = self.bvec + sample_normal = np.dot(rMat_s, [0., 0., np.sign(bvec[2])]) + seca = 1./np.dot(bvec, sample_normal) + + tth, eta = self.pixel_angles() + angs = np.vstack((tth.flatten(), eta.flatten(), + np.zeros(tth.flatten().shape))).T + + dvecs = angles_to_dvec(angs, beam_vec=bvec) + + cosb = np.dot(dvecs, sample_normal) + '''angles for which secb <= 0 or close are diffracted beams + almost parallel to the sample surface or backscattered, we + can mask out these values by setting secb to nan + ''' + mask = np.logical_or( + cosb < 0, + np.isclose( + cosb, + 0., + atol=5E-2, + ) + ) + cosb[mask] = np.nan + secb = 1./cosb.reshape(self.shape) + + T_sample = self.calc_compton_transmission( + seca, secb, energy, + physics_package, 'sample') + T_window = self.calc_compton_transmission_window( + secb, energy, physics_package) + + return T_sample * T_window + + def calc_compton_window_transmission( + self, + energy: np.floating, + rMat_s: np.ndarray, + physics_package: AbstractPhysicsPackage, + ) -> np.ndarray: + '''calculate the attenuation of inelastically + scattered photons just fropm the window. + since these photons lose energy, the attenuation length + is angle dependent ergo a separate routine than + elastically scattered absorption. 
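+
+        Note: the window factor computed downstream by
+        calc_compton_transmission has the form
+            T = (exp(-x1) - exp(-x2)) / (x2 - x1),
+        with x1 = mu*t*sec(a) for the incoming beam and
+        x2 = mu_prime*t*sec(b) for the scattered beam.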
+ ''' + bvec = self.bvec + sample_normal = np.dot(rMat_s, [0., 0., np.sign(bvec[2])]) + seca = 1./np.dot(bvec, sample_normal) + + tth, eta = self.pixel_angles() + angs = np.vstack((tth.flatten(), eta.flatten(), + np.zeros(tth.flatten().shape))).T + + dvecs = angles_to_dvec(angs, beam_vec=bvec) + + cosb = np.dot(dvecs, sample_normal) + '''angles for which secb <= 0 or close are diffracted beams + almost parallel to the sample surface or backscattered, we + can mask out these values by setting secb to nan + ''' + mask = np.logical_or( + cosb < 0, + np.isclose( + cosb, + 0., + atol=5E-2, + ) + ) + cosb[mask] = np.nan + secb = 1./cosb.reshape(self.shape) + + T_window = self.calc_compton_transmission( + seca, secb, energy, + physics_package, 'window') + T_sample = self.calc_compton_transmission_sample( + seca, energy, physics_package) + + return T_sample * T_window + + def calc_transmission_sample(self, seca: np.array, + secb: np.array, energy: np.floating, + physics_package: AbstractPhysicsPackage) -> np.array: + thickness_s = physics_package.sample_thickness # in microns + if np.isclose(thickness_s, 0): + return np.ones(self.shape) + + # in microns^-1 + mu_s = 1./physics_package.sample_absorption_length(energy) + x = (mu_s*thickness_s) + pre = 1./x/(secb - seca) + num = np.exp(-x*seca) - np.exp(-x*secb) + return pre * num + + def calc_transmission_window(self, secb: np.array, energy: np.floating, + physics_package: AbstractPhysicsPackage) -> np.array: + material_w = physics_package.window_material + thickness_w = physics_package.window_thickness # in microns + if material_w is None or np.isclose(thickness_w, 0): + return np.ones(self.shape) + + # in microns^-1 + mu_w = 1./physics_package.window_absorption_length(energy) + return np.exp(-thickness_w*mu_w*secb) + + def calc_compton_transmission( + self, + seca: np.ndarray, + secb: np.ndarray, + energy: np.floating, + physics_package: AbstractPhysicsPackage, + pp_layer: str, + ) -> np.ndarray: + + if pp_layer == 'sample': + formula = physics_package.sample_material + density = physics_package.sample_density + thickness = physics_package.sample_thickness + mu = 1./physics_package.sample_absorption_length(energy) + mu_prime = 1. 
+                / self.pixel_compton_attenuation_length(
+                    energy, density, formula,
+                )
+        elif pp_layer == 'window':
+            formula = physics_package.window_material
+            if formula is None:
+                return np.ones(self.shape)
+
+            density = physics_package.window_density
+            thickness = physics_package.window_thickness
+            # use the window (not sample) attenuation length here
+            mu = 1./physics_package.window_absorption_length(energy)
+            mu_prime = 1./self.pixel_compton_attenuation_length(
+                energy, density, formula)
+
+        if thickness <= 0:
+            return np.ones(self.shape)
+
+        x1 = mu*thickness*seca
+        x2 = mu_prime*thickness*secb
+        num = (np.exp(-x1) - np.exp(-x2))
+        return -num/(x1 - x2)
+
+    def calc_compton_transmission_sample(
+        self,
+        seca: np.ndarray,
+        energy: np.floating,
+        physics_package: AbstractPhysicsPackage,
+    ) -> np.ndarray:
+        thickness_s = physics_package.sample_thickness  # in microns
+
+        mu_s = 1./physics_package.sample_absorption_length(
+            energy)
+        return np.exp(-mu_s*thickness_s*seca)
+
+    def calc_compton_transmission_window(
+        self,
+        secb: np.ndarray,
+        energy: np.floating,
+        physics_package: AbstractPhysicsPackage,
+    ) -> np.ndarray:
+        formula = physics_package.window_material
+        if formula is None:
+            return np.ones(self.shape)
+
+        density = physics_package.window_density  # in g/cc
+        thickness_w = physics_package.window_thickness  # in microns
+
+        mu_w_prime = 1./self.pixel_compton_attenuation_length(
+            energy, density, formula)
+        return np.exp(-mu_w_prime*thickness_w*secb)
+
+    def calc_effective_pinhole_area(self, physics_package: AbstractPhysicsPackage) -> np.array:
+        """get the effective pinhole area correction
+        """
+        if (np.isclose(physics_package.pinhole_diameter, 0)
+                or np.isclose(physics_package.pinhole_thickness, 0)):
+            return np.ones(self.shape)
+
+        hod = (physics_package.pinhole_thickness /
+               physics_package.pinhole_diameter)
+        bvec = self.bvec
+
+        tth, eta = self.pixel_angles()
+        angs = np.vstack((tth.flatten(), eta.flatten(),
+                          np.zeros(tth.flatten().shape))).T
+        dvecs = angles_to_dvec(angs, beam_vec=bvec)
+
+        cth = -dvecs[:, 2].reshape(self.shape)
+        tanth = np.tan(np.arccos(cth))
+        f = hod*tanth
+        f[np.abs(f) > 1.]
= np.nan + asinf = np.arcsin(f) + return 2 / np.pi * cth * (np.pi / 2 - asinf - f * np.cos(asinf)) + + def calc_transmission_generic(self, + secb: np.array, + thickness: np.floating, + absorption_length: np.floating) -> np.array: + if np.isclose(thickness, 0): + return np.ones(self.shape) + + mu = 1./absorption_length # in microns^-1 + return np.exp(-thickness*mu*secb) + + def calc_transmission_phosphor(self, + secb: np.array, + thickness: np.floating, + readout_length: np.floating, + absorption_length: np.floating, + energy: np.floating, + pre_U0: np.floating) -> np.array: + if np.isclose(thickness, 0): + return np.ones(self.shape) + + f1 = absorption_length*thickness + f2 = absorption_length*readout_length + arg = (secb + 1/f2) + return pre_U0 * energy*((1.0 - np.exp(-f1*arg))/arg) + +# ============================================================================= +# UTILITY METHODS +# ============================================================================= + + +def _fix_indices(idx, lo, hi): + nidx = np.array(idx) + off_lo = nidx < lo + off_hi = nidx > hi + nidx[off_lo] = lo + nidx[off_hi] = hi + return nidx + + +def _row_edge_vec(rows, pixel_size_row): + return pixel_size_row * (0.5 * rows - np.arange(rows + 1)) + + +def _col_edge_vec(cols, pixel_size_col): + return pixel_size_col * (np.arange(cols + 1) - 0.5 * cols) + + +# FIXME find a better place for this, and maybe include loop over pixels +@numba.njit(nogil=True, cache=True) +def _solid_angle_of_triangle(vtx_list): + norms = np.sqrt(np.sum(vtx_list * vtx_list, axis=1)) + norms_prod = norms[0] * norms[1] * norms[2] + scalar_triple_product = np.dot( + vtx_list[0], np.cross(vtx_list[2], vtx_list[1]) + ) + denominator = ( + norms_prod + + norms[0] * np.dot(vtx_list[1], vtx_list[2]) + + norms[1] * np.dot(vtx_list[2], vtx_list[0]) + + norms[2] * np.dot(vtx_list[0], vtx_list[1]) + ) + + return 2.0 * np.arctan2(scalar_triple_product, denominator) diff --git a/hexrd/laue/instrument/hedm_instrument.py b/hexrd/laue/instrument/hedm_instrument.py new file mode 100644 index 000000000..1d768b47c --- /dev/null +++ b/hexrd/laue/instrument/hedm_instrument.py @@ -0,0 +1,2747 @@ +# -*- coding: utf-8 -*- +# ============================================================================= +# Copyright (c) 2012, Lawrence Livermore National Security, LLC. +# Produced at the Lawrence Livermore National Laboratory. +# Written by Joel Bernier and others. +# LLNL-CODE-529294. +# All rights reserved. +# +# This file is part of HEXRD. For details on dowloading the source, +# see the file COPYING. +# +# Please also see the file LICENSE. +# +# This program is free software; you can redistribute it and/or modify it under +# the terms of the GNU Lesser General Public License (as published by the Free +# Software Foundation) version 2.1 dated February 1999. +# +# This program is distributed in the hope that it will be useful, but +# WITHOUT ANY WARRANTY; without even the IMPLIED WARRANTY OF MERCHANTABILITY +# or FITNESS FOR A PARTICULAR PURPOSE. See the terms and conditions of the +# GNU General Public License for more details. +# +# You should have received a copy of the GNU Lesser General Public +# License along with this program (see file LICENSE); if not, write to +# the Free Software Foundation, Inc., 59 Temple Place, Suite 330, +# Boston, MA 02111-1307 USA or visit . 
+# ============================================================================= +""" +Created on Fri Dec 9 13:05:27 2016 + +@author: bernier2 +""" +from contextlib import contextmanager +import copy +import logging +import os +from concurrent.futures import ProcessPoolExecutor, ThreadPoolExecutor +from functools import partial +from typing import Optional + +from tqdm import tqdm + +import yaml + +import h5py + +import numpy as np + +from io import IOBase + +from scipy import ndimage +from scipy.linalg import logm +from skimage.measure import regionprops + +from hexrd import constants +from hexrd.imageseries import ImageSeries +from hexrd.imageseries.process import ProcessedImageSeries +from hexrd.imageseries.omega import OmegaImageSeries +from hexrd.fitting.utils import fit_ring +from hexrd.gridutil import make_tolerance_grid +from hexrd import matrixutil as mutil +from hexrd.transforms.xfcapi import ( + angles_to_gvec, + gvec_to_xy, + make_sample_rmat, + make_rmat_of_expmap, + unit_vector, +) +from hexrd import xrdutil +from hexrd.material.crystallography import PlaneData +from hexrd import constants as ct +from hexrd.rotations import mapAngle +from hexrd import distortion as distortion_pkg +from hexrd.utils.concurrent import distribute_tasks +from hexrd.utils.hdf5 import unwrap_dict_to_h5, unwrap_h5_to_dict +from hexrd.utils.yaml import NumpyToNativeDumper +from hexrd.valunits import valWUnit +from hexrd.wppf import LeBail + +from .cylindrical_detector import CylindricalDetector +from .detector import ( + beam_energy_DFLT, + Detector, + max_workers_DFLT, +) +from .planar_detector import PlanarDetector + +from skimage.draw import polygon +from skimage.util import random_noise +from hexrd.wppf import wppfsupport + +try: + from fast_histogram import histogram1d + fast_histogram = True +except ImportError: + from numpy import histogram as histogram1d + fast_histogram = False + +logger = logging.getLogger() +logger.setLevel('INFO') + +# ============================================================================= +# PARAMETERS +# ============================================================================= + +instrument_name_DFLT = 'instrument' + +beam_vec_DFLT = ct.beam_vec +source_distance_DFLT = np.inf + +eta_vec_DFLT = ct.eta_vec + +panel_id_DFLT = 'generic' +nrows_DFLT = 2048 +ncols_DFLT = 2048 +pixel_size_DFLT = (0.2, 0.2) + +tilt_params_DFLT = np.zeros(3) +t_vec_d_DFLT = np.r_[0., 0., -1000.] + +chi_DFLT = 0. +t_vec_s_DFLT = np.zeros(3) + +multi_ims_key = ct.shared_ims_key +ims_classes = (ImageSeries, ProcessedImageSeries, OmegaImageSeries) + +buffer_key = 'buffer' +distortion_key = 'distortion' + +# ============================================================================= +# UTILITY METHODS +# ============================================================================= + + +def generate_chunks(nrows, ncols, base_nrows, base_ncols, + row_gap=0, col_gap=0): + """ + Generate chunking data for regularly tiled composite detectors. + + Parameters + ---------- + nrows : int + DESCRIPTION. + ncols : int + DESCRIPTION. + base_nrows : int + DESCRIPTION. + base_ncols : int + DESCRIPTION. + row_gap : int, optional + DESCRIPTION. The default is 0. + col_gap : int, optional + DESCRIPTION. The default is 0. + + Returns + ------- + rects : array_like + The (nrows*ncols, ) list of ROI specs (see Notes). + labels : array_like + The (nrows*ncols, ) list of ROI (i, j) matrix indexing labels 'i_j'. 
+
+    Notes
+    -----
+    ProcessedImageSeries needs a (2, 2) array for the 'rect' kwarg:
+        [[row_start, row_stop],
+         [col_start, col_stop]]
+    """
+    row_starts = np.array([i*(base_nrows + row_gap) for i in range(nrows)])
+    col_starts = np.array([i*(base_ncols + col_gap) for i in range(ncols)])
+    rr = np.vstack([row_starts, row_starts + base_nrows])
+    cc = np.vstack([col_starts, col_starts + base_ncols])
+    rects = []
+    labels = []
+    for i in range(nrows):
+        for j in range(ncols):
+            this_rect = np.array(
+                [[rr[0, i], rr[1, i]],
+                 [cc[0, j], cc[1, j]]]
+            )
+            rects.append(this_rect)
+            labels.append('%d_%d' % (i, j))
+    return rects, labels
+
+
+def chunk_instrument(instr, rects, labels, use_roi=False):
+    """
+    Generate chunked config for regularly tiled composite detectors.
+
+    Parameters
+    ----------
+    instr : TYPE
+        DESCRIPTION.
+    rects : TYPE
+        DESCRIPTION.
+    labels : TYPE
+        DESCRIPTION.
+
+    Returns
+    -------
+    new_icfg_dict : TYPE
+        DESCRIPTION.
+
+    """
+    icfg_dict = instr.write_config()
+    new_icfg_dict = dict(beam=icfg_dict['beam'],
+                         oscillation_stage=icfg_dict['oscillation_stage'],
+                         detectors={})
+    for panel_id, panel in instr.detectors.items():
+        pcfg_dict = panel.config_dict(instr.chi, instr.tvec)['detector']
+
+        for pnum, pdata in enumerate(zip(rects, labels)):
+            rect, label = pdata
+            panel_name = f'{panel_id}_{label}'
+
+            row_col_dim = np.diff(rect)  # (2, 1)
+            shape = tuple(row_col_dim.flatten())
+            center = (rect[:, 0].reshape(2, 1) + 0.5*row_col_dim)
+
+            sp_tvec = np.concatenate(
+                [panel.pixelToCart(center.T).flatten(), np.zeros(1)]
+            )
+
+            tvec = np.dot(panel.rmat, sp_tvec) + panel.tvec
+
+            # new config dict
+            tmp_cfg = copy.deepcopy(pcfg_dict)
+
+            # fix sizes
+            tmp_cfg['pixels']['rows'] = shape[0]
+            tmp_cfg['pixels']['columns'] = shape[1]
+            if use_roi:
+                tmp_cfg['pixels']['roi'] = (rect[0][0], rect[1][0])
+
+            # update tvec
+            tmp_cfg['transform']['translation'] = tvec.tolist()
+
+            new_icfg_dict['detectors'][panel_name] = copy.deepcopy(tmp_cfg)
+
+            if panel.panel_buffer is not None:
+                if panel.panel_buffer.ndim == 2:  # have a mask array!
+                    submask = panel.panel_buffer[
+                        rect[0, 0]:rect[0, 1], rect[1, 0]:rect[1, 1]
+                    ]
+                    new_icfg_dict['detectors'][panel_name]['buffer'] = submask
+    return new_icfg_dict
+
+
+def _parse_imgser_dict(imgser_dict, det_key, roi=None):
+    """
+    Associates a dict of imageseries to the target panel(s).
+
+    Parameters
+    ----------
+    imgser_dict : dict
+        The input dict of imageseries.  Either `det_key` is in imgser_dict, or
+        the shared key is.  Entries can be an ImageSeries object or a 2- or 3-d
+        ndarray of images.
+    det_key : str
+        The target detector key.
+    roi : tuple or None, optional
+        The roi of the target images.  Format is
+            ((row_start, row_stop), (col_start, col_stop))
+        The stops are used in the normal sense of a slice.  The default is None.
+
+    Raises
+    ------
+    RuntimeError
+        If neither `det_key` nor the shared key is in the input imgser_dict;
+        also, if the shared key is specified but the roi is None.
+
+    Returns
+    -------
+    ims : hexrd.imageseries
+        The desired imageseries object.
+
+    """
+    # grab imageseries for this detector
+    try:
+        ims = imgser_dict[det_key]
+    except KeyError:
+        matched_det_keys = [det_key in k for k in imgser_dict]
+        if multi_ims_key in imgser_dict:
+            images_in = imgser_dict[multi_ims_key]
+        elif np.any(matched_det_keys):
+            if sum(matched_det_keys) != 1:
+                raise RuntimeError(
+                    f"multiple entries found for '{det_key}'"
+                )
+            # use boolean array to index the proper key
+            # !!! these should be in the same order
+            img_keys = np.asarray(list(imgser_dict.keys()))
+            matched_det_key = img_keys[matched_det_keys][0]  # !!! only one
+            images_in = imgser_dict[matched_det_key]
+        else:
+            raise RuntimeError(
+                f"neither '{det_key}' nor '{multi_ims_key}' found "
+                + 'in imageseries input'
+            )
+
+        # have images now
+        if roi is None:
+            raise RuntimeError(
+                "roi must be specified to use shared imageseries"
+            )
+
+        if isinstance(images_in, ims_classes):
+            # input is an imageseries of some kind
+            ims = ProcessedImageSeries(images_in, [('rectangle', roi), ])
+            if isinstance(images_in, OmegaImageSeries):
+                # if it was an OmegaImageSeries, must re-cast
+                ims = OmegaImageSeries(ims)
+        elif isinstance(images_in, np.ndarray):
+            # 2- or 3-d array of images
+            ndim = images_in.ndim
+            if ndim == 2:
+                ims = images_in[roi[0][0]:roi[0][1], roi[1][0]:roi[1][1]]
+            elif ndim == 3:
+                nrows = roi[0][1] - roi[0][0]
+                ncols = roi[1][1] - roi[1][0]
+                n_images = len(images_in)
+                ims = np.empty((n_images, nrows, ncols),
+                               dtype=images_in.dtype)
+                for i, image in enumerate(images_in):
+                    ims[i, :, :] = \
+                        image[roi[0][0]:roi[0][1], roi[1][0]:roi[1][1]]
+            else:
+                raise RuntimeError(
+                    f"image input dim must be 2 or 3; you gave {ndim}"
+                )
+    return ims
+
+
+def calc_beam_vec(azim, pola):
+    """
+    Calculate unit beam propagation vector from
+    spherical coordinate spec in DEGREES.
+
+    ...MAY CHANGE; THIS IS ALSO LOCATED IN XRDUTIL!
+    """
+    tht = np.radians(azim)
+    phi = np.radians(pola)
+    bv = np.r_[
+        np.sin(phi)*np.cos(tht),
+        np.cos(phi),
+        np.sin(phi)*np.sin(tht)]
+    return -bv
+
+
+def calc_angles_from_beam_vec(bvec):
+    """
+    Return the azimuth and polar angle from a beam
+    vector
+    """
+    bvec = np.atleast_1d(bvec).flatten()
+    nvec = unit_vector(-bvec)
+    azim = float(
+        np.degrees(np.arctan2(nvec[2], nvec[0]))
+    )
+    pola = float(np.degrees(np.arccos(nvec[1])))
+    return azim, pola
+
+
+def migrate_instrument_config(instrument_config):
+    """utility function to generate old instrument config dictionary"""
+    cfg_list = []
+    for detector_id in instrument_config['detectors']:
+        cfg_list.append(
+            dict(
+                detector=instrument_config['detectors'][detector_id],
+                oscillation_stage=instrument_config['oscillation_stage'],
+            )
+        )
+    return cfg_list
+
+
+def angle_in_range(angle, ranges, ccw=True, units='degrees'):
+    """
+    Return the index of the first wedge the angle is found in
+
+    WARNING: always clockwise; assumes wedges are not overlapping
+    NOTE: the `ccw` kwarg is currently unused
+    """
+    tau = 360.
+    if units.lower() == 'radians':
+        tau = 2*np.pi
+    w = np.nan
+    for i, wedge in enumerate(ranges):
+        amin = wedge[0]
+        amax = wedge[1]
+        check = amin + np.mod(angle - amin, tau)
+        if check < amax:
+            w = i
+            break
+    return w
+
+
+# ???: move to gridutil?
+def centers_of_edge_vec(edges):
+    assert np.asarray(edges).ndim == 1, "edges must be 1-d"
+    return np.average(np.vstack([edges[:-1], edges[1:]]), axis=0)
+
+
+def max_tth(instr):
+    """
+    Return the maximum Bragg angle (in radians) subtended by the instrument.
+
+    Parameters
+    ----------
+    instr : hexrd.instrument.HEDMInstrument instance
+        the instrument class to evaluate.
+
+    Returns
+    -------
+    tth_max : float
+        The maximum observable Bragg angle by the instrument in radians.
+    """
+    tth_max = 0.
+    for det in instr.detectors.values():
+        ptth, peta = det.pixel_angles()
+        tth_max = max(np.max(ptth), tth_max)
+    return tth_max
+
+
+def pixel_resolution(instr):
+    """
+    Return the minimum, median, and maximum angular
+    resolution of the instrument.
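+
+    Statistics are taken over the per-pixel angular sizes returned by
+    Detector.angularPixelSize, evaluated for every pixel on every panel.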
+ + Parameters + ---------- + instr : HEDMInstrument instance + An instrument. + + Returns + ------- + tth_stats : float + min/median/max tth resolution in radians. + eta_stats : TYPE + min/median/max eta resolution in radians. + + """ + max_tth = np.inf + max_eta = np.inf + min_tth = -np.inf + min_eta = -np.inf + ang_ps_full = [] + for panel in instr.detectors.values(): + angps = panel.angularPixelSize( + np.stack( + panel.pixel_coords, + axis=0 + ).reshape(2, np.cumprod(panel.shape)[-1]).T + ) + ang_ps_full.append(angps) + max_tth = min(max_tth, np.min(angps[:, 0])) + max_eta = min(max_eta, np.min(angps[:, 1])) + min_tth = max(min_tth, np.max(angps[:, 0])) + min_eta = max(min_eta, np.max(angps[:, 1])) + med_tth, med_eta = np.median(np.vstack(ang_ps_full), axis=0).flatten() + return (min_tth, med_tth, max_tth), (min_eta, med_eta, max_eta) + + +def max_resolution(instr): + """ + Return the maximum angular resolution of the instrument. + + Parameters + ---------- + instr : HEDMInstrument instance + An instrument. + + Returns + ------- + max_tth : float + Maximum tth resolution in radians. + max_eta : TYPE + maximum eta resolution in radians. + + """ + max_tth = np.inf + max_eta = np.inf + for panel in instr.detectors.values(): + angps = panel.angularPixelSize( + np.stack( + panel.pixel_coords, + axis=0 + ).reshape(2, np.cumprod(panel.shape)[-1]).T + ) + max_tth = min(max_tth, np.min(angps[:, 0])) + max_eta = min(max_eta, np.min(angps[:, 1])) + return max_tth, max_eta + + +def _gaussian_dist(x, cen, fwhm): + sigm = fwhm/(2*np.sqrt(2*np.log(2))) + return np.exp(-0.5*(x - cen)**2/sigm**2) + + +def _sigma_to_fwhm(sigm): + return sigm*ct.sigma_to_fwhm + + +def _fwhm_to_sigma(fwhm): + return fwhm/ct.sigma_to_fwhm + + +# ============================================================================= +# CLASSES +# ============================================================================= + + +class HEDMInstrument(object): + """ + Abstraction of XRD instrument. + + * Distortion needs to be moved to a class with registry; tuple unworkable + * where should reference eta be defined? 
+      currently set to default config
+    """
+
+    def __init__(self, instrument_config=None,
+                 image_series=None, eta_vector=None,
+                 instrument_name=None, tilt_calibration_mapping=None,
+                 max_workers=max_workers_DFLT,
+                 physics_package=None,
+                 active_beam_name: Optional[str] = None):
+        self._id = instrument_name_DFLT
+
+        self._active_beam_name = active_beam_name
+        self._beam_dict = {}
+
+        if eta_vector is None:
+            self._eta_vector = eta_vec_DFLT
+        else:
+            self._eta_vector = eta_vector
+
+        self.max_workers = max_workers
+
+        self.physics_package = physics_package
+
+        if instrument_config is None:
+            # Default instrument
+            if instrument_name is not None:
+                self._id = instrument_name
+            self._num_panels = 1
+            self._create_default_beam()
+
+            # FIXME: must add cylindrical
+            self._detectors = {
+                panel_id_DFLT: PlanarDetector(
+                    rows=nrows_DFLT, cols=ncols_DFLT,
+                    pixel_size=pixel_size_DFLT,
+                    tvec=t_vec_d_DFLT,
+                    tilt=tilt_params_DFLT,
+                    bvec=self.beam_vector,
+                    xrs_dist=self.source_distance,
+                    evec=self._eta_vector,
+                    distortion=None,
+                    roi=None, group=None,
+                    max_workers=self.max_workers),
+            }
+
+            self._tvec = t_vec_s_DFLT
+            self._chi = chi_DFLT
+        else:
+            if isinstance(instrument_config, h5py.File):
+                tmp = {}
+                unwrap_h5_to_dict(instrument_config, tmp)
+                instrument_config = tmp['instrument']
+            elif not isinstance(instrument_config, dict):
+                raise RuntimeError(
+                    "instrument_config must be either an HDF5 file object "
+                    + "or a dictionary. You gave a %s"
+                    % type(instrument_config)
+                )
+            if instrument_name is None:
+                if 'id' in instrument_config:
+                    self._id = instrument_config['id']
+            else:
+                self._id = instrument_name
+
+            self._num_panels = len(instrument_config['detectors'])
+
+            if instrument_config.get('physics_package', None) is not None:
+                self.physics_package = instrument_config['physics_package']
+
+            xrs_config = instrument_config['beam']
+            is_single_beam = (
+                'energy' in xrs_config and
+                'vector' in xrs_config
+            )
+            if is_single_beam:
+                # Assume single beam.
Load the same way as multibeam + self._create_default_beam() + xrs_config = {self.active_beam_name: xrs_config} + + # Multi beam load + for beam_name, beam in xrs_config.items(): + self._beam_dict[beam_name] = { + 'energy': beam['energy'], + 'vector': calc_beam_vec( + beam['vector']['azimuth'], + beam['vector']['polar_angle'], + ), + 'distance': beam.get('source_distance', np.inf), + } + + # Set the active beam name if not set already + if self._active_beam_name is None: + self._active_beam_name = next(iter(self._beam_dict)) + + # now build detector dict + detectors_config = instrument_config['detectors'] + det_dict = dict.fromkeys(detectors_config) + for det_id, det_info in detectors_config.items(): + det_group = det_info.get('group') # optional detector group + pixel_info = det_info['pixels'] + affine_info = det_info['transform'] + detector_type = det_info.get('detector_type', 'planar') + filter = det_info.get('filter', None) + coating = det_info.get('coating', None) + phosphor = det_info.get('phosphor', None) + try: + saturation_level = det_info['saturation_level'] + except KeyError: + saturation_level = 2**16 + shape = (pixel_info['rows'], pixel_info['columns']) + + panel_buffer = None + if buffer_key in det_info: + det_buffer = det_info[buffer_key] + if det_buffer is not None: + if isinstance(det_buffer, np.ndarray): + if det_buffer.ndim == 2: + if det_buffer.shape != shape: + msg = ( + f'Buffer shape for {det_id} ' + f'({det_buffer.shape}) does not match ' + f'detector shape ({shape})' + ) + raise BufferShapeMismatchError(msg) + else: + assert len(det_buffer) == 2 + panel_buffer = det_buffer + elif isinstance(det_buffer, list): + panel_buffer = np.asarray(det_buffer) + elif np.isscalar(det_buffer): + panel_buffer = det_buffer*np.ones(2) + else: + raise RuntimeError( + "panel buffer spec invalid for %s" % det_id + ) + + # optional roi + roi = pixel_info.get('roi') + + # handle distortion + distortion = None + if distortion_key in det_info: + distortion_cfg = det_info[distortion_key] + if distortion_cfg is not None: + try: + func_name = distortion_cfg['function_name'] + dparams = distortion_cfg['parameters'] + distortion = distortion_pkg.get_mapping( + func_name, dparams + ) + except KeyError: + raise RuntimeError( + "problem with distortion specification" + ) + if detector_type.lower() not in DETECTOR_TYPES: + msg = f'Unknown detector type: {detector_type}' + raise NotImplementedError(msg) + + DetectorClass = DETECTOR_TYPES[detector_type.lower()] + kwargs = dict( + name=det_id, + rows=pixel_info['rows'], + cols=pixel_info['columns'], + pixel_size=pixel_info['size'], + panel_buffer=panel_buffer, + saturation_level=saturation_level, + tvec=affine_info['translation'], + tilt=affine_info['tilt'], + bvec=self.beam_vector, + xrs_dist=self.source_distance, + evec=self._eta_vector, + distortion=distortion, + roi=roi, + group=det_group, + max_workers=self.max_workers, + detector_filter=filter, + detector_coating=coating, + phosphor=phosphor, + ) + + if DetectorClass is CylindricalDetector: + # Add cylindrical detector kwargs + kwargs['radius'] = det_info.get('radius', 49.51) + + det_dict[det_id] = DetectorClass(**kwargs) + + self._detectors = det_dict + + self._tvec = np.r_[ + instrument_config['oscillation_stage']['translation'] + ] + self._chi = instrument_config['oscillation_stage']['chi'] + + # grab angles from beam vec + # !!! these are in DEGREES! 
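+            # e.g. the default beam vector [0., 0., -1.] gives
+            # azim = 90.0 and pola = 90.0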
+            azim, pola = calc_angles_from_beam_vec(self.beam_vector)
+
+        self.update_memoization_sizes()
+
+    @property
+    def mean_detector_center(self) -> np.ndarray:
+        """Return the mean center for all detectors"""
+        centers = np.array([panel.tvec for panel in self.detectors.values()])
+        return centers.sum(axis=0) / len(centers)
+
+    def mean_group_center(self, group: str) -> np.ndarray:
+        """Return the mean center for detectors belonging to a group"""
+        centers = np.array([
+            x.tvec for x in self.detectors_in_group(group).values()
+        ])
+        return centers.sum(axis=0) / len(centers)
+
+    @property
+    def detector_groups(self) -> list[str]:
+        groups = []
+        for panel in self.detectors.values():
+            group = panel.group
+            if group is not None and group not in groups:
+                groups.append(group)
+
+        return groups
+
+    def detectors_in_group(self, group: str) -> dict[str, Detector]:
+        return {k: v for k, v in self.detectors.items() if v.group == group}
+
+    # properties for physical size of rectangular detector
+    @property
+    def id(self):
+        return self._id
+
+    @property
+    def num_panels(self):
+        return self._num_panels
+
+    @property
+    def detectors(self):
+        return self._detectors
+
+    @property
+    def detector_parameters(self):
+        pdict = {}
+        for key, panel in self.detectors.items():
+            pdict[key] = panel.config_dict(
+                self.chi, self.tvec,
+                beam_energy=self.beam_energy,
+                beam_vector=self.beam_vector,
+                style='hdf5'
+            )
+        return pdict
+
+    @property
+    def tvec(self):
+        return self._tvec
+
+    @tvec.setter
+    def tvec(self, x):
+        x = np.array(x).flatten()
+        assert len(x) == 3, 'input must have length = 3'
+        self._tvec = x
+
+    @property
+    def chi(self):
+        return self._chi
+
+    @chi.setter
+    def chi(self, x):
+        self._chi = float(x)
+
+    @property
+    def beam_energy(self) -> float:
+        return self.active_beam['energy']
+
+    @beam_energy.setter
+    def beam_energy(self, x: float):
+        self.active_beam['energy'] = float(x)
+        self.beam_dict_modified()
+
+    @property
+    def beam_wavelength(self):
+        return ct.keVToAngstrom(self.beam_energy)
+
+    @property
+    def has_multi_beam(self) -> bool:
+        return len(self.beam_dict) > 1
+
+    @property
+    def beam_dict(self) -> dict:
+        return self._beam_dict
+
+    def _create_default_beam(self):
+        name = 'XRS1'
+        self._beam_dict[name] = {
+            'energy': beam_energy_DFLT,
+            'vector': beam_vec_DFLT.copy(),
+            'distance': np.inf,
+        }
+
+        if self._active_beam_name is None:
+            self._active_beam_name = name
+
+    @property
+    def beam_names(self) -> list[str]:
+        return list(self.beam_dict)
+
+    def xrs_beam_energy(self, beam_name: Optional[str]) -> float:
+        if beam_name is None:
+            beam_name = self.active_beam_name
+
+        return self.beam_dict[beam_name]['energy']
+
+    @property
+    def active_beam_name(self) -> str:
+        return self._active_beam_name
+
+    @active_beam_name.setter
+    def active_beam_name(self, name: str):
+        # validate the requested name, not the current one
+        if name not in self.beam_dict:
+            raise RuntimeError(
+                f'"{name}" is not present in "{self.beam_names}"'
+            )
+
+        self._active_beam_name = name
+
+        # Update anything beam related where we need to
+        self._update_panel_beams()
+
+    def beam_dict_modified(self):
+        # A function to call to indicate that the beam dict was modified.
+        # Update anything beam related where we need to
+        self._update_panel_beams()
+
+    @property
+    def active_beam(self) -> dict:
+        return self.beam_dict[self.active_beam_name]
+
+    def _update_panel_beams(self):
+        # FIXME: maybe we shouldn't store these on the panels?
+        # Might be hard to fix, though...
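+        # Illustrative multi-beam usage (beam names assumed to exist in the
+        # loaded config):
+        #     instr.active_beam_name = 'XRS2'  # re-syncs panels via this loop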
+ for panel in self.detectors.values(): + panel.bvec = self.beam_vector + panel.xrs_dist = self.source_distance + + @property + def beam_vector(self) -> np.ndarray: + return self.active_beam['vector'] + + @beam_vector.setter + def beam_vector(self, x: np.ndarray): + x = np.array(x).flatten() + if len(x) == 3: + assert sum(x*x) > 1-ct.sqrt_epsf, \ + 'input must have length = 3 and have unit magnitude' + bvec = x + elif len(x) == 2: + bvec = calc_beam_vec(*x) + else: + raise RuntimeError("input must be a unit vector or angle pair") + + # Modify the beam vector for the active beam dict + self.active_beam['vector'] = bvec + self.beam_dict_modified() + + @property + def source_distance(self): + return self.active_beam['distance'] + + @source_distance.setter + def source_distance(self, x): + assert np.isscalar(x), \ + f"'source_distance' must be a scalar; you input '{x}'" + self.active_beam['distance'] = x + self.beam_dict_modified() + + @property + def eta_vector(self): + return self._eta_vector + + @eta_vector.setter + def eta_vector(self, x): + x = np.array(x).flatten() + assert len(x) == 3 and sum(x*x) > 1-ct.sqrt_epsf, \ + 'input must have length = 3 and have unit magnitude' + self._eta_vector = x + # ...maybe change dictionary item behavior for 3.x compatibility? + for detector_id in self.detectors: + panel = self.detectors[detector_id] + panel.evec = self._eta_vector + + # ========================================================================= + # METHODS + # ========================================================================= + + def write_config(self, file=None, style='yaml', calibration_dict={}): + """ WRITE OUT YAML FILE """ + # initialize output dictionary + assert style.lower() in ['yaml', 'hdf5'], \ + "style must be either 'yaml', or 'hdf5'; you gave '%s'" % style + + par_dict = {} + + par_dict['id'] = self.id + + # Multi beam writer + beam_dict = {} + for beam_name, beam in self.beam_dict.items(): + azim, polar = calc_angles_from_beam_vec(beam['vector']) + beam_dict[beam_name] = { + 'energy': beam['energy'], + 'vector': { + 'azimuth': azim, + 'polar_angle': polar, + }, + } + if beam['distance'] != np.inf: + beam_dict[beam_name]['source_distance'] = beam['distance'] + + if len(beam_dict) == 1: + # Just write it out a single beam (classical way) + beam_dict = next(iter(beam_dict.values())) + + par_dict['beam'] = beam_dict + + if calibration_dict: + par_dict['calibration_crystal'] = calibration_dict + + ostage = dict( + chi=self.chi, + translation=self.tvec.tolist() + ) + par_dict['oscillation_stage'] = ostage + + det_dict = dict.fromkeys(self.detectors) + for det_name, detector in self.detectors.items(): + # grab panel config + # !!! don't need beam or tvec + # !!! 
have vetted style + pdict = detector.config_dict(chi=self.chi, tvec=self.tvec, + beam_energy=self.beam_energy, + beam_vector=self.beam_vector, + style=style) + det_dict[det_name] = pdict['detector'] + par_dict['detectors'] = det_dict + + # handle output file if requested + if file is not None: + if style.lower() == 'yaml': + with open(file, 'w') as f: + yaml.dump(par_dict, stream=f, Dumper=NumpyToNativeDumper) + else: + def _write_group(file): + instr_grp = file.create_group('instrument') + unwrap_dict_to_h5(instr_grp, par_dict, asattr=False) + + # hdf5 + if isinstance(file, str): + with h5py.File(file, 'w') as f: + _write_group(f) + elif isinstance(file, h5py.File): + _write_group(file) + else: + raise TypeError("Unexpected file type.") + + return par_dict + + def extract_polar_maps(self, plane_data, imgser_dict, + active_hkls=None, threshold=None, + tth_tol=None, eta_tol=0.25): + """ + Extract eta-omega maps from an imageseries. + + Quick and dirty way to histogram angular patch data for make + pole figures suitable for fiber generation + + TODO: streamline projection code + TODO: normalization + !!!: images must be non-negative! + !!!: plane_data is NOT a copy! + """ + if tth_tol is not None: + plane_data.tThWidth = np.radians(tth_tol) + else: + tth_tol = np.degrees(plane_data.tThWidth) + + # make rings clipped to panel + # !!! eta_idx has the same length as plane_data.exclusions + # each entry are the integer indices into the bins + # !!! eta_edges is the list of eta bin EDGES; same for all + # detectors, so calculate it once + # !!! grab first panel + panel = next(iter(self.detectors.values())) + pow_angs, pow_xys, tth_ranges, eta_idx, eta_edges = \ + panel.make_powder_rings( + plane_data, merge_hkls=False, + delta_eta=eta_tol, full_output=True + ) + + if active_hkls is not None: + assert hasattr(active_hkls, '__len__'), \ + "active_hkls must be an iterable with __len__" + + # need to re-cast for element-wise operations + active_hkls = np.array(active_hkls) + + # these are all active reflection unique hklIDs + active_hklIDs = plane_data.getHKLID( + plane_data.hkls, master=True + ) + + # find indices + idx = np.zeros_like(active_hkls, dtype=int) + for i, input_hklID in enumerate(active_hkls): + try: + idx[i] = np.where(active_hklIDs == input_hklID)[0] + except ValueError: + raise RuntimeError(f"hklID '{input_hklID}' is invalid") + tth_ranges = tth_ranges[idx] + + delta_eta = eta_edges[1] - eta_edges[0] + ncols_eta = len(eta_edges) - 1 + + ring_maps_panel = dict.fromkeys(self.detectors) + for i_d, det_key in enumerate(self.detectors): + print("working on detector '%s'..." 
% det_key) + + # grab panel + panel = self.detectors[det_key] + # native_area = panel.pixel_area # pixel ref area + + # pixel angular coords for the detector panel + ptth, peta = panel.pixel_angles() + + # grab imageseries for this detector + ims = _parse_imgser_dict(imgser_dict, det_key, roi=panel.roi) + + # grab omegas from imageseries and squawk if missing + try: + omegas = ims.metadata['omega'] + except KeyError: + raise RuntimeError( + f"imageseries for '{det_key}' has no omega info" + ) + + # initialize maps and assing by row (omega/frame) + nrows_ome = len(omegas) + + # init map with NaNs + shape = (len(tth_ranges), nrows_ome, ncols_eta) + ring_maps = np.full(shape, np.nan) + + # Generate ring parameters once, and re-use them for each image + ring_params = [] + for tthr in tth_ranges: + kwargs = { + 'tthr': tthr, + 'ptth': ptth, + 'peta': peta, + 'eta_edges': eta_edges, + 'delta_eta': delta_eta, + } + ring_params.append(_generate_ring_params(**kwargs)) + + # Divide up the images among processes + tasks = distribute_tasks(len(ims), self.max_workers) + func = partial(_run_histograms, ims=ims, tth_ranges=tth_ranges, + ring_maps=ring_maps, ring_params=ring_params, + threshold=threshold) + + max_workers = self.max_workers + if max_workers == 1 or len(tasks) == 1: + # Just execute it serially. + for task in tasks: + func(task) + else: + with ThreadPoolExecutor(max_workers=max_workers) as executor: + # Evaluate the results via `list()`, so that if an + # exception is raised in a thread, it will be re-raised + # and visible to the user. + list(executor.map(func, tasks)) + + ring_maps_panel[det_key] = ring_maps + + return ring_maps_panel, eta_edges + + def extract_line_positions(self, plane_data, imgser_dict, + tth_tol=None, eta_tol=1., npdiv=2, + eta_centers=None, + collapse_eta=True, collapse_tth=False, + do_interpolation=True, do_fitting=False, + tth_distortion=None, fitting_kwargs=None): + """ + Perform annular interpolation on diffraction images. + + Provides data for extracting the line positions from powder diffraction + images, pole figure patches from imageseries, or Bragg peaks from + Laue diffraction images. + + Parameters + ---------- + plane_data : hexrd.crystallography.PlaneData object or array_like + Object determining the 2theta positions for the integration + sectors. If PlaneData, this will be all non-excluded reflections, + subject to merging within PlaneData.tThWidth. If array_like, + interpreted as a list of 2theta angles IN DEGREES. + imgser_dict : dict + Dictionary of powder diffraction images, one for each detector. + tth_tol : scalar, optional + The radial (i.e. 2theta) width of the integration sectors + IN DEGREES. This arg is required if plane_data is array_like. + The default is None. + eta_tol : scalar, optional + The azimuthal (i.e. eta) width of the integration sectors + IN DEGREES. The default is 1. + npdiv : int, optional + The number of oversampling pixel subdivision (see notes). + The default is 2. + eta_centers : array_like, optional + The desired azimuthal sector centers. The default is None. If + None, then bins are distrubted sequentially from (-180, 180). + collapse_eta : bool, optional + Flag for summing sectors in eta. The default is True. + collapse_tth : bool, optional + Flag for summing sectors in 2theta. The default is False. + do_interpolation : bool, optional + If True, perform bilinear interpolation. The default is True. + do_fitting : bool, optional + If True, then perform spectrum fitting, and append the results + to the returned data. 
collapse_eta must also be True for this
+            to have any effect. The default is False.
+        tth_distortion : special class, optional
+            for the special case of pinhole camera distortions. See
+            hexrd.xrdutil.phutil.SampleLayerDistortion (only type supported)
+        fitting_kwargs : dict, optional
+            kwargs passed to hexrd.fitting.utils.fit_ring if do_fitting is True
+
+        Raises
+        ------
+        RuntimeError
+            DESCRIPTION.
+
+        Returns
+        -------
+        panel_data : dict
+            Dictionary over the detectors with the following structure:
+                [list over (merged) 2theta ranges]
+                  [list over valid eta sectors]
+                    [angle data,
+                     bin intensities,
+                     fitting results]
+
+        Notes
+        -----
+        TODO: May change the array_like input units to degrees.
+        TODO: rename function.
+
+        """
+
+        if fitting_kwargs is None:
+            fitting_kwargs = {}
+
+        # =====================================================================
+        # LOOP OVER DETECTORS
+        # =====================================================================
+        logger.info("Interpolating ring data")
+        pbar_dets = partial(tqdm, total=self.num_panels, desc="Detector",
+                            position=self.num_panels)
+
+        # Split up the workers among the detectors
+        max_workers_per_detector = max(1, self.max_workers // self.num_panels)
+
+        kwargs = {
+            'plane_data': plane_data,
+            'tth_tol': tth_tol,
+            'eta_tol': eta_tol,
+            'eta_centers': eta_centers,
+            'npdiv': npdiv,
+            'collapse_tth': collapse_tth,
+            'collapse_eta': collapse_eta,
+            'do_interpolation': do_interpolation,
+            'do_fitting': do_fitting,
+            'fitting_kwargs': fitting_kwargs,
+            'tth_distortion': tth_distortion,
+            'max_workers': max_workers_per_detector,
+        }
+        func = partial(_extract_detector_line_positions, **kwargs)
+
+        def make_instr_cfg(panel):
+            return panel.config_dict(
+                chi=self.chi, tvec=self.tvec,
+                beam_energy=self.beam_energy,
+                beam_vector=self.beam_vector,
+                style='hdf5'
+            )
+
+        images = []
+        for detector_id, panel in self.detectors.items():
+            images.append(_parse_imgser_dict(imgser_dict, detector_id,
+                                             roi=panel.roi))
+
+        panels = [self.detectors[k] for k in self.detectors]
+        instr_cfgs = [make_instr_cfg(x) for x in panels]
+        pbp_array = np.arange(self.num_panels)
+        iter_args = zip(panels, instr_cfgs, images, pbp_array)
+        with ProcessPoolExecutor(mp_context=constants.mp_context,
+                                 max_workers=self.num_panels) as executor:
+            results = list(pbar_dets(executor.map(func, iter_args)))
+
+        panel_data = {}
+        for det, res in zip(self.detectors, results):
+            panel_data[det] = res
+
+        return panel_data
+
+    def simulate_powder_pattern(self,
+                                mat_list,
+                                params=None,
+                                bkgmethod=None,
+                                origin=None,
+                                noise=None):
+        """
+        Generate powder diffraction images from specified materials.
+
+        Parameters
+        ----------
+        mat_list : array_like (n, )
+            List of Material classes.
+        params : dict, optional
+            Dictionary of LeBail parameters (see Notes). The default is None.
+        bkgmethod : dict, optional
+            Background function specification. The default is None.
+        origin : array_like (3,), optional
+            Vector describing the origin of the diffraction volume.
+            The default is None, which is equivalent to [0, 0, 0].
+        noise : str, optional
+            Flag describing the type of noise to be applied.
+            The default is None.
+
+        Returns
+        -------
+        img_dict : dict
+            Dictionary of diffraction images over the detectors.
+
+        Notes
+        -----
+        TODO: add more controls for noise function.
+        TODO: modify hooks to LeBail parameters.
+        TODO: add optional volume fraction weights for phases in mat_list
+        """
+        """
+        >> @AUTHOR: Saransh Singh, Lawrence Livermore National Lab,
+                    saransh1@llnl.gov
+        >> @DATE: 01/22/2021 SS 1.0 original
+        >> @DETAILS: adding hook to WPPF class. this changes the input list
+                     significantly
+        """
+        if origin is None:
+            origin = self.tvec
+        origin = np.asarray(origin).squeeze()
+        assert len(origin) == 3, \
+            "origin must be a 3-element sequence"
+
+        if bkgmethod is None:
+            bkgmethod = {'chebyshev': 3}
+
+        '''
+        if params is None, fill in some sane default values
+        only the first value is used. the rest of the values are
+        the upper, lower bounds and vary flag for refinement which
+        are not used but required for interfacing with WPPF
+
+        zero_error : zero shift error
+        U, V, W : Cagliotti parameters
+        P, X, Y : Lorentzian parameters
+        eta1, eta2, eta3 : Mixing parameters
+        '''
+        if params is None:
+            # params = {'zero_error': [0.0, -1., 1., True],
+            #           'U': [2e-1, -1., 1., True],
+            #           'V': [2e-2, -1., 1., True],
+            #           'W': [2e-2, -1., 1., True],
+            #           'X': [2e-1, -1., 1., True],
+            #           'Y': [2e-1, -1., 1., True]
+            #           }
+            params = wppfsupport._generate_default_parameters_LeBail(
+                mat_list,
+                1,
+                bkgmethod,
+            )
+        '''
+        use the material list to obtain the dictionary of initial intensities
+        we need to make sure that the intensities are properly scaled by the
+        Lorentz polarization factor. since the calculation is done in the
+        LeBail class, all that means is the initial intensity needs that
+        factor in there
+        '''
+        img_dict = dict.fromkeys(self.detectors)
+
+        # find min and max tth over all panels
+        tth_mi = np.inf
+        tth_ma = 0.
+        ptth_dict = dict.fromkeys(self.detectors)
+        for det_key, panel in self.detectors.items():
+            ptth, peta = panel.pixel_angles(origin=origin)
+            tth_mi = min(tth_mi, ptth.min())
+            tth_ma = max(tth_ma, ptth.max())
+            ptth_dict[det_key] = ptth
+
+        '''
+        now make a list of two theta and dummy ones for the experimental
+        spectrum; this is never really used, so any values should be okay. We
+        could also pass the integrated detector image if we would like to
+        simulate some realistic background. But that's for another day.
+        '''
+        # convert angles to degrees because that's what the WPPF expects
+        tth_mi = np.degrees(tth_mi)
+        tth_ma = np.degrees(tth_ma)
+
+        # get tth angular resolution for instrument
+        ang_res = max_resolution(self)
+
+        # !!! calc nsteps by oversampling
+        nsteps = int(np.ceil(2*(tth_ma - tth_mi)/np.degrees(ang_res[0])))
+
+        # evaluation vector for LeBail
+        tth = np.linspace(tth_mi, tth_ma, nsteps)
+
+        expt = np.vstack([tth, np.ones_like(tth)]).T
+
+        wavelength = [
+            valWUnit('lp', 'length', self.beam_wavelength, 'angstrom'),
+            1.
+ ] + + ''' + now go through the material list and get the intensity dictionary + ''' + intensity = {} + for mat in mat_list: + + multiplicity = mat.planeData.getMultiplicity() + + tth = mat.planeData.getTTh() + + LP = (1 + np.cos(tth)**2) / \ + np.cos(0.5*tth)/np.sin(0.5*tth)**2 + + intensity[mat.name] = {} + intensity[mat.name]['synchrotron'] = \ + mat.planeData.structFact * LP * multiplicity + + kwargs = { + 'expt_spectrum': expt, + 'params': params, + 'phases': mat_list, + 'wavelength': { + 'synchrotron': wavelength + }, + 'bkgmethod': bkgmethod, + 'intensity_init': intensity, + 'peakshape': 'pvtch' + } + + self.WPPFclass = LeBail(**kwargs) + + self.simulated_spectrum = self.WPPFclass.spectrum_sim + self.background = self.WPPFclass.background + + ''' + now that we have the simulated intensities, its time to get the + two theta for the detector pixels and interpolate what the intensity + for each pixel should be + ''' + + img_dict = dict.fromkeys(self.detectors) + for det_key, panel in self.detectors.items(): + ptth = ptth_dict[det_key] + + img = np.interp(np.degrees(ptth), + self.simulated_spectrum.x, + self.simulated_spectrum.y + self.background.y) + + if noise is None: + img_dict[det_key] = img + + else: + # Rescale to be between 0 and 1 so random_noise() will work + prev_max = img.max() + img /= prev_max + + if noise.lower() == 'poisson': + im_noise = random_noise(img, + mode='poisson', + clip=True) + mi = im_noise.min() + ma = im_noise.max() + if ma > mi: + im_noise = (im_noise - mi)/(ma - mi) + + elif noise.lower() == 'gaussian': + im_noise = random_noise(img, mode='gaussian', clip=True) + + elif noise.lower() == 'salt': + im_noise = random_noise(img, mode='salt') + + elif noise.lower() == 'pepper': + im_noise = random_noise(img, mode='pepper') + + elif noise.lower() == 's&p': + im_noise = random_noise(img, mode='s&p') + + elif noise.lower() == 'speckle': + im_noise = random_noise(img, mode='speckle', clip=True) + + # Now scale back up + img_dict[det_key] = im_noise * prev_max + + return img_dict + + def simulate_laue_pattern(self, crystal_data, + minEnergy=5., maxEnergy=35., + rmat_s=None, grain_params=None): + """ + Simulate Laue diffraction over the instrument. + + Parameters + ---------- + crystal_data : TYPE + DESCRIPTION. + minEnergy : TYPE, optional + DESCRIPTION. The default is 5.. + maxEnergy : TYPE, optional + DESCRIPTION. The default is 35.. + rmat_s : TYPE, optional + DESCRIPTION. The default is None. + grain_params : TYPE, optional + DESCRIPTION. The default is None. + + Returns + ------- + results : TYPE + DESCRIPTION. + + xy_det, hkls_in, angles, dspacing, energy + + TODO: revisit output; dict, or concatenated list? + """ + results = dict.fromkeys(self.detectors) + for det_key, panel in self.detectors.items(): + results[det_key] = panel.simulate_laue_pattern( + crystal_data, + minEnergy=minEnergy, maxEnergy=maxEnergy, + rmat_s=rmat_s, tvec_s=self.tvec, + grain_params=grain_params, + beam_vec=self.beam_vector) + return results + + def simulate_rotation_series(self, plane_data, grain_param_list, + eta_ranges=[(-np.pi, np.pi), ], + ome_ranges=[(-np.pi, np.pi), ], + ome_period=(-np.pi, np.pi), + wavelength=None): + """ + Simulate a monochromatic rotation series over the instrument. + + Parameters + ---------- + plane_data : TYPE + DESCRIPTION. + grain_param_list : TYPE + DESCRIPTION. + eta_ranges : TYPE, optional + DESCRIPTION. The default is [(-np.pi, np.pi), ]. + ome_ranges : TYPE, optional + DESCRIPTION. The default is [(-np.pi, np.pi), ]. 
+        ome_period : TYPE, optional
+            DESCRIPTION. The default is (-np.pi, np.pi).
+        wavelength : TYPE, optional
+            DESCRIPTION. The default is None.
+
+        Returns
+        -------
+        results : TYPE
+            DESCRIPTION.
+
+        TODO: revisit output; dict, or concatenated list?
+        """
+        results = dict.fromkeys(self.detectors)
+        for det_key, panel in self.detectors.items():
+            results[det_key] = panel.simulate_rotation_series(
+                plane_data, grain_param_list,
+                eta_ranges=eta_ranges,
+                ome_ranges=ome_ranges,
+                ome_period=ome_period,
+                chi=self.chi, tVec_s=self.tvec,
+                wavelength=wavelength)
+        return results
+
+    def pull_spots(self, plane_data, grain_params,
+                   imgser_dict,
+                   tth_tol=0.25, eta_tol=1., ome_tol=1.,
+                   npdiv=2, threshold=10,
+                   eta_ranges=[(-np.pi, np.pi), ],
+                   ome_period=None,
+                   dirname='results', filename=None, output_format='text',
+                   return_spot_list=False,
+                   quiet=True, check_only=False,
+                   interp='nearest'):
+        """
+        Extract reflection info from a rotation series.
+
+        Input must be encoded as an OmegaImageseries object.
+
+        Parameters
+        ----------
+        plane_data : TYPE
+            DESCRIPTION.
+        grain_params : TYPE
+            DESCRIPTION.
+        imgser_dict : TYPE
+            DESCRIPTION.
+        tth_tol : TYPE, optional
+            DESCRIPTION. The default is 0.25.
+        eta_tol : TYPE, optional
+            DESCRIPTION. The default is 1..
+        ome_tol : TYPE, optional
+            DESCRIPTION. The default is 1..
+        npdiv : TYPE, optional
+            DESCRIPTION. The default is 2.
+        threshold : TYPE, optional
+            DESCRIPTION. The default is 10.
+        eta_ranges : TYPE, optional
+            DESCRIPTION. The default is [(-np.pi, np.pi), ].
+        ome_period : TYPE, optional
+            DESCRIPTION. The default is None, in which case the period is
+            inferred from the omegas of the first imageseries.
+        dirname : TYPE, optional
+            DESCRIPTION. The default is 'results'.
+        filename : TYPE, optional
+            DESCRIPTION. The default is None.
+        output_format : TYPE, optional
+            DESCRIPTION. The default is 'text'.
+        return_spot_list : TYPE, optional
+            DESCRIPTION. The default is False.
+        quiet : TYPE, optional
+            DESCRIPTION. The default is True.
+        check_only : TYPE, optional
+            DESCRIPTION. The default is False.
+        interp : TYPE, optional
+            DESCRIPTION. The default is 'nearest'.
+
+        Returns
+        -------
+        compl : TYPE
+            DESCRIPTION.
+        output : TYPE
+            DESCRIPTION.
+
+        """
+        # grain parameters
+        rMat_c = make_rmat_of_expmap(grain_params[:3])
+        tVec_c = grain_params[3:6]
+
+        # grab omega ranges from first imageseries
+        #
+        # WARNING: all imageseries AND all wedges within are assumed to have
+        # the same omega values; put in a check that they are all the same???
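+        # A minimal sketch of such a check (hypothetical; not part of the
+        # current implementation) would compare the omega metadata of every
+        # imageseries against the first:
+        #
+        #     ref_omegas = next(iter(imgser_dict.values())).metadata['omega']
+        #     for key, oims in imgser_dict.items():
+        #         if not np.allclose(oims.metadata['omega'], ref_omegas):
+        #             raise RuntimeError(f"omega mismatch for '{key}'")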
+ oims0 = next(iter(imgser_dict.values())) + ome_ranges = [np.radians([i['ostart'], i['ostop']]) + for i in oims0.omegawedges.wedges] + if ome_period is None: + ims = next(iter(imgser_dict.values())) + ostart = ims.omega[0, 0] + ome_period = np.radians(ostart + np.r_[0., 360.]) + + # delta omega in DEGREES grabbed from first imageseries in the dict + delta_ome = oims0.omega[0, 1] - oims0.omega[0, 0] + + # make omega grid for frame expansion around reference frame + # in DEGREES + ndiv_ome, ome_del = make_tolerance_grid( + delta_ome, ome_tol, 1, adjust_window=True, + ) + + # generate structuring element for connected component labeling + if ndiv_ome == 1: + label_struct = ndimage.generate_binary_structure(2, 2) + else: + label_struct = ndimage.generate_binary_structure(3, 3) + + # simulate rotation series + sim_results = self.simulate_rotation_series( + plane_data, [grain_params, ], + eta_ranges=eta_ranges, + ome_ranges=ome_ranges, + ome_period=ome_period) + + # patch vertex generator (global for instrument) + tol_vec = 0.5*np.radians( + [-tth_tol, -eta_tol, + -tth_tol, eta_tol, + tth_tol, eta_tol, + tth_tol, -eta_tol]) + + # prepare output if requested + if filename is not None and output_format.lower() == 'hdf5': + this_filename = os.path.join(dirname, filename) + writer = GrainDataWriter_h5( + os.path.join(dirname, filename), + self.write_config(), grain_params) + + # ===================================================================== + # LOOP OVER PANELS + # ===================================================================== + iRefl = 0 + next_invalid_peak_id = -100 + compl = [] + output = dict.fromkeys(self.detectors) + for detector_id, panel in self.detectors.items(): + # initialize text-based output writer + if filename is not None and output_format.lower() == 'text': + output_dir = os.path.join( + dirname, detector_id + ) + os.makedirs(output_dir, exist_ok=True) + this_filename = os.path.join( + output_dir, filename + ) + writer = PatchDataWriter(this_filename) + + # grab panel + instr_cfg = panel.config_dict( + self.chi, self.tvec, + beam_energy=self.beam_energy, + beam_vector=self.beam_vector, + style='hdf5' + ) + native_area = panel.pixel_area # pixel ref area + + # pull out the OmegaImageSeries for this panel from input dict + ome_imgser = _parse_imgser_dict(imgser_dict, + detector_id, + roi=panel.roi) + + # extract simulation results + sim_results_p = sim_results[detector_id] + hkl_ids = sim_results_p[0][0] + hkls_p = sim_results_p[1][0] + ang_centers = sim_results_p[2][0] + xy_centers = sim_results_p[3][0] + ang_pixel_size = sim_results_p[4][0] + + # now verify that full patch falls on detector... + # ???: strictly necessary? + # + # patch vertex array from sim + nangs = len(ang_centers) + patch_vertices = ( + np.tile(ang_centers[:, :2], (1, 4)) + + np.tile(tol_vec, (nangs, 1)) + ).reshape(4*nangs, 2) + ome_dupl = np.tile( + ang_centers[:, 2], (4, 1) + ).T.reshape(len(patch_vertices), 1) + + # find vertices that all fall on the panel + det_xy, rmats_s, on_plane = xrdutil._project_on_detector_plane( + np.hstack([patch_vertices, ome_dupl]), + panel.rmat, rMat_c, self.chi, + panel.tvec, tVec_c, self.tvec, + panel.distortion) + _, on_panel = panel.clip_to_panel(det_xy, buffer_edges=True) + + # all vertices must be on... + patch_is_on = np.all(on_panel.reshape(nangs, 4), axis=1) + patch_xys = det_xy.reshape(nangs, 4, 2)[patch_is_on] + + # re-filter... 
+ hkl_ids = hkl_ids[patch_is_on] + hkls_p = hkls_p[patch_is_on, :] + ang_centers = ang_centers[patch_is_on, :] + xy_centers = xy_centers[patch_is_on, :] + ang_pixel_size = ang_pixel_size[patch_is_on, :] + + # TODO: add polygon testing right here! + # done + if check_only: + patch_output = [] + for i_pt, angs in enumerate(ang_centers): + # the evaluation omegas; + # expand about the central value using tol vector + ome_eval = np.degrees(angs[2]) + ome_del + + # ...vectorize the omega_to_frame function to avoid loop? + frame_indices = [ + ome_imgser.omega_to_frame(ome)[0] for ome in ome_eval + ] + if -1 in frame_indices: + if not quiet: + msg = """ + window for (%d %d %d) falls outside omega range + """ % tuple(hkls_p[i_pt, :]) + print(msg) + continue + else: + these_vertices = patch_xys[i_pt] + ijs = panel.cartToPixel(these_vertices) + ii, jj = polygon(ijs[:, 0], ijs[:, 1]) + contains_signal = False + for i_frame in frame_indices: + contains_signal = contains_signal or np.any( + ome_imgser[i_frame][ii, jj] > threshold + ) + compl.append(contains_signal) + patch_output.append((ii, jj, frame_indices)) + else: + # make the tth,eta patches for interpolation + patches = xrdutil.make_reflection_patches( + instr_cfg, + ang_centers[:, :2], ang_pixel_size, + omega=ang_centers[:, 2], + tth_tol=tth_tol, eta_tol=eta_tol, + rmat_c=rMat_c, tvec_c=tVec_c, + npdiv=npdiv, quiet=True) + + # GRAND LOOP over reflections for this panel + patch_output = [] + for i_pt, patch in enumerate(patches): + + # strip relevant objects out of current patch + vtx_angs, vtx_xy, conn, areas, xy_eval, ijs = patch + + prows, pcols = areas.shape + nrm_fac = areas/float(native_area) + nrm_fac = nrm_fac / np.min(nrm_fac) + + # grab hkl info + hkl = hkls_p[i_pt, :] + hkl_id = hkl_ids[i_pt] + + # edge arrays + tth_edges = vtx_angs[0][0, :] + delta_tth = tth_edges[1] - tth_edges[0] + eta_edges = vtx_angs[1][:, 0] + delta_eta = eta_edges[1] - eta_edges[0] + + # need to reshape eval pts for interpolation + xy_eval = np.vstack([xy_eval[0].flatten(), + xy_eval[1].flatten()]).T + + # the evaluation omegas; + # expand about the central value using tol vector + ome_eval = np.degrees(ang_centers[i_pt, 2]) + ome_del + + # ???: vectorize the omega_to_frame function to avoid loop? + frame_indices = [ + ome_imgser.omega_to_frame(ome)[0] for ome in ome_eval + ] + + if -1 in frame_indices: + if not quiet: + msg = """ + window for (%d%d%d) falls outside omega range + """ % tuple(hkl) + print(msg) + continue + else: + # initialize spot data parameters + # !!! 
maybe change these to nan to not fuck up writer + peak_id = next_invalid_peak_id + sum_int = np.nan + max_int = np.nan + meas_angs = np.nan*np.ones(3) + meas_xy = np.nan*np.ones(2) + + # quick check for intensity + contains_signal = False + patch_data_raw = [] + for i_frame in frame_indices: + tmp = ome_imgser[i_frame][ijs[0], ijs[1]] + contains_signal = contains_signal or np.any( + tmp > threshold + ) + patch_data_raw.append(tmp) + patch_data_raw = np.stack(patch_data_raw, axis=0) + compl.append(contains_signal) + + if contains_signal: + # initialize patch data array for intensities + if interp.lower() == 'bilinear': + patch_data = np.zeros( + (len(frame_indices), prows, pcols)) + for i, i_frame in enumerate(frame_indices): + patch_data[i] = \ + panel.interpolate_bilinear( + xy_eval, + ome_imgser[i_frame], + pad_with_nans=False + ).reshape(prows, pcols) # * nrm_fac + elif interp.lower() == 'nearest': + patch_data = patch_data_raw # * nrm_fac + else: + msg = "interpolation option " + \ + "'%s' not understood" + raise RuntimeError(msg % interp) + + # now have interpolated patch data... + labels, num_peaks = ndimage.label( + patch_data > threshold, structure=label_struct + ) + slabels = np.arange(1, num_peaks + 1) + + if num_peaks > 0: + peak_id = iRefl + props = regionprops(labels, patch_data) + coms = np.vstack( + [x.weighted_centroid for x in props]) + if num_peaks > 1: + center = np.r_[patch_data.shape]*0.5 + center_t = np.tile(center, (num_peaks, 1)) + com_diff = coms - center_t + closest_peak_idx = np.argmin( + np.sum(com_diff**2, axis=1) + ) + else: + closest_peak_idx = 0 + coms = coms[closest_peak_idx] + # meas_omes = \ + # ome_edges[0] + (0.5 + coms[0])*delta_ome + meas_omes = \ + ome_eval[0] + coms[0]*delta_ome + meas_angs = np.hstack( + [tth_edges[0] + (0.5 + coms[2])*delta_tth, + eta_edges[0] + (0.5 + coms[1])*delta_eta, + mapAngle( + np.radians(meas_omes), ome_period + ) + ] + ) + + # intensities + # - summed is 'integrated' over interpolated + # data + # - max is max of raw input data + sum_int = np.sum( + patch_data[ + labels == slabels[closest_peak_idx] + ] + ) + max_int = np.max( + patch_data_raw[ + labels == slabels[closest_peak_idx] + ] + ) + # ???: Should this only use labeled pixels? + # Those are segmented from interpolated data, + # not raw; likely ok in most cases. + + # need MEASURED xy coords + # FIXME: overload angles_to_cart? + gvec_c = angles_to_gvec( + meas_angs, + chi=self.chi, + rmat_c=rMat_c, + beam_vec=self.beam_vector) + rMat_s = make_sample_rmat( + self.chi, meas_angs[2] + ) + meas_xy = gvec_to_xy( + gvec_c, + panel.rmat, rMat_s, rMat_c, + panel.tvec, self.tvec, tVec_c, + beam_vec=self.beam_vector) + if panel.distortion is not None: + meas_xy = panel.distortion.apply_inverse( + np.atleast_2d(meas_xy) + ).flatten() + # FIXME: why is this suddenly necessary??? + meas_xy = meas_xy.squeeze() + else: + patch_data = patch_data_raw + + if peak_id < 0: + # The peak is invalid. + # Decrement the next invalid peak ID. 
+ next_invalid_peak_id -= 1 + + # write output + if filename is not None: + if output_format.lower() == 'text': + writer.dump_patch( + peak_id, hkl_id, hkl, sum_int, max_int, + ang_centers[i_pt], meas_angs, + xy_centers[i_pt], meas_xy) + elif output_format.lower() == 'hdf5': + xyc_arr = xy_eval.reshape( + prows, pcols, 2 + ).transpose(2, 0, 1) + writer.dump_patch( + detector_id, iRefl, peak_id, hkl_id, hkl, + tth_edges, eta_edges, np.radians(ome_eval), + xyc_arr, ijs, frame_indices, patch_data, + ang_centers[i_pt], xy_centers[i_pt], + meas_angs, meas_xy) + + if return_spot_list: + # Full output + xyc_arr = xy_eval.reshape( + prows, pcols, 2 + ).transpose(2, 0, 1) + _patch_output = [ + detector_id, iRefl, peak_id, hkl_id, hkl, + tth_edges, eta_edges, np.radians(ome_eval), + xyc_arr, ijs, frame_indices, patch_data, + ang_centers[i_pt], xy_centers[i_pt], + meas_angs, meas_xy + ] + else: + # Trimmed output + _patch_output = [ + peak_id, hkl_id, hkl, sum_int, max_int, + ang_centers[i_pt], meas_angs, meas_xy + ] + patch_output.append(_patch_output) + iRefl += 1 + output[detector_id] = patch_output + if filename is not None and output_format.lower() == 'text': + writer.close() + if filename is not None and output_format.lower() == 'hdf5': + writer.close() + return compl, output + + def update_memoization_sizes(self): + # Resize all known memoization functions to have a cache at least + # the size of the number of detectors. + all_panels = list(self.detectors.values()) + PlanarDetector.update_memoization_sizes(all_panels) + CylindricalDetector.update_memoization_sizes(all_panels) + + def calc_transmission(self, rMat_s: np.ndarray = None) -> dict[str, np.ndarray]: + """calculate the transmission from the + filter and polymer coating. the inverse of this + number is the intensity correction that needs + to be applied. 
actual computation is done inside + the detector class + """ + if rMat_s is None: + rMat_s = ct.identity_3x3 + + energy = self.beam_energy + transmissions = {} + for det_name, det in self.detectors.items(): + transmission_filter, transmission_phosphor = ( + det.calc_filter_coating_transmission(energy)) + + transmission = transmission_filter * transmission_phosphor + + if self.physics_package is not None: + transmission_physics_package = ( + det.calc_physics_package_transmission( + energy, rMat_s, self.physics_package)) + effective_pinhole_area = det.calc_effective_pinhole_area( + self.physics_package) + + transmission = ( + transmission * + transmission_physics_package * + effective_pinhole_area + ) + + transmissions[det_name] = transmission + return transmissions + +# ============================================================================= +# UTILITIES +# ============================================================================= + + +class PatchDataWriter(object): + """Class for dumping Bragg reflection data.""" + + def __init__(self, filename): + self._delim = ' ' + header_items = ( + '# ID', 'PID', + 'H', 'K', 'L', + 'sum(int)', 'max(int)', + 'pred tth', 'pred eta', 'pred ome', + 'meas tth', 'meas eta', 'meas ome', + 'pred X', 'pred Y', + 'meas X', 'meas Y' + ) + self._header = self._delim.join([ + self._delim.join(np.tile('{:<6}', 5)).format(*header_items[:5]), + self._delim.join(np.tile('{:<12}', 2)).format(*header_items[5:7]), + self._delim.join(np.tile('{:<23}', 10)).format(*header_items[7:17]) + ]) + if isinstance(filename, IOBase): + self.fid = filename + else: + self.fid = open(filename, 'w') + print(self._header, file=self.fid) + + def __del__(self): + self.close() + + def close(self): + self.fid.close() + + def dump_patch(self, peak_id, hkl_id, + hkl, spot_int, max_int, + pangs, mangs, pxy, mxy): + """ + !!! maybe need to check that last four inputs are arrays + """ + if mangs is None: + spot_int = np.nan + max_int = np.nan + mangs = np.nan*np.ones(3) + mxy = np.nan*np.ones(2) + + res = [int(peak_id), int(hkl_id)] \ + + np.array(hkl, dtype=int).tolist() \ + + [spot_int, max_int] \ + + pangs.tolist() \ + + mangs.tolist() \ + + pxy.tolist() \ + + mxy.tolist() + + output_str = self._delim.join( + [self._delim.join(np.tile('{:<6d}', 5)).format(*res[:5]), + self._delim.join(np.tile('{:<12e}', 2)).format(*res[5:7]), + self._delim.join(np.tile('{:<23.16e}', 10)).format(*res[7:])] + ) + print(output_str, file=self.fid) + return output_str + + +class GrainDataWriter(object): + """Class for dumping grain data.""" + + def __init__(self, filename=None, array=None): + """Writes to either file or np array + + Array must be initialized with number of rows to be written. 
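+
+        A hypothetical usage sketch (names are illustrative only; the
+        21-column layout is fixed by dump_grain):
+
+            results = np.empty((num_grains, 21))
+            gw = GrainDataWriter(array=results)
+            gw.dump_grain(grain_id, completeness, chisq, grain_params)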
+ """ + if filename is None and array is None: + raise RuntimeError( + 'GrainDataWriter must be specified with filename or array') + + self.array = None + self.fid = None + + # array supersedes filename + if array is not None: + assert array.shape[1] == 21, \ + f'grain data table must have 21 columns not {array.shape[21]}' + self.array = array + self._array_row = 0 + return + + self._delim = ' ' + header_items = ( + '# grain ID', 'completeness', 'chi^2', + 'exp_map_c[0]', 'exp_map_c[1]', 'exp_map_c[2]', + 't_vec_c[0]', 't_vec_c[1]', 't_vec_c[2]', + 'inv(V_s)[0,0]', 'inv(V_s)[1,1]', 'inv(V_s)[2,2]', + 'inv(V_s)[1,2]*sqrt(2)', + 'inv(V_s)[0,2]*sqrt(2)', + 'inv(V_s)[0,1]*sqrt(2)', + 'ln(V_s)[0,0]', 'ln(V_s)[1,1]', 'ln(V_s)[2,2]', + 'ln(V_s)[1,2]', 'ln(V_s)[0,2]', 'ln(V_s)[0,1]' + ) + self._header = self._delim.join( + [self._delim.join( + np.tile('{:<12}', 3) + ).format(*header_items[:3]), + self._delim.join( + np.tile('{:<23}', len(header_items) - 3) + ).format(*header_items[3:])] + ) + if isinstance(filename, IOBase): + self.fid = filename + else: + self.fid = open(filename, 'w') + print(self._header, file=self.fid) + + def __del__(self): + self.close() + + def close(self): + if self.fid is not None: + self.fid.close() + + def dump_grain(self, grain_id, completeness, chisq, + grain_params): + assert len(grain_params) == 12, \ + "len(grain_params) must be 12, not %d" % len(grain_params) + + # extract strain + emat = logm(np.linalg.inv(mutil.vecMVToSymm(grain_params[6:]))) + evec = mutil.symmToVecMV(emat, scale=False) + + res = [int(grain_id), completeness, chisq] \ + + grain_params.tolist() \ + + evec.tolist() + + if self.array is not None: + row = self._array_row + assert row < self.array.shape[0], \ + f'invalid row {row} in array table' + self.array[row] = res + self._array_row += 1 + return res + + # (else) format and write to file + output_str = self._delim.join( + [self._delim.join( + ['{:<12d}', '{:<12f}', '{:<12e}'] + ).format(*res[:3]), + self._delim.join( + np.tile('{:<23.16e}', len(res) - 3) + ).format(*res[3:])] + ) + print(output_str, file=self.fid) + return output_str + + +class GrainDataWriter_h5(object): + """Class for dumping grain results to an HDF5 archive. 
+ + TODO: add material spec + """ + + def __init__(self, filename, instr_cfg, grain_params, use_attr=False): + if isinstance(filename, h5py.File): + self.fid = filename + else: + self.fid = h5py.File(filename + ".hdf5", "w") + icfg = dict(instr_cfg) + + # add instrument groups and attributes + self.instr_grp = self.fid.create_group('instrument') + unwrap_dict_to_h5(self.instr_grp, icfg, asattr=use_attr) + + # add grain group + self.grain_grp = self.fid.create_group('grain') + rmat_c = make_rmat_of_expmap(grain_params[:3]) + tvec_c = np.array(grain_params[3:6]).flatten() + vinv_s = np.array(grain_params[6:]).flatten() + vmat_s = np.linalg.inv(mutil.vecMVToSymm(vinv_s)) + + if use_attr: # attribute version + self.grain_grp.attrs.create('rmat_c', rmat_c) + self.grain_grp.attrs.create('tvec_c', tvec_c) + self.grain_grp.attrs.create('inv(V)_s', vinv_s) + self.grain_grp.attrs.create('vmat_s', vmat_s) + else: # dataset version + self.grain_grp.create_dataset('rmat_c', data=rmat_c) + self.grain_grp.create_dataset('tvec_c', data=tvec_c) + self.grain_grp.create_dataset('inv(V)_s', data=vinv_s) + self.grain_grp.create_dataset('vmat_s', data=vmat_s) + + data_key = 'reflection_data' + self.data_grp = self.fid.create_group(data_key) + + for det_key in self.instr_grp['detectors'].keys(): + self.data_grp.create_group(det_key) + + # FIXME: throws exception when called after close method + # def __del__(self): + # self.close() + + def close(self): + self.fid.close() + + def dump_patch(self, panel_id, + i_refl, peak_id, hkl_id, hkl, + tth_edges, eta_edges, ome_centers, + xy_centers, ijs, frame_indices, + spot_data, pangs, pxy, mangs, mxy, gzip=1): + """ + to be called inside loop over patches + + default GZIP level for data arrays is 1 + """ + fi = np.array(frame_indices, dtype=int) + + panel_grp = self.data_grp[panel_id] + spot_grp = panel_grp.create_group("spot_%05d" % i_refl) + spot_grp.attrs.create('peak_id', int(peak_id)) + spot_grp.attrs.create('hkl_id', int(hkl_id)) + spot_grp.attrs.create('hkl', np.array(hkl, dtype=int)) + spot_grp.attrs.create('predicted_angles', pangs) + spot_grp.attrs.create('predicted_xy', pxy) + if mangs is None: + mangs = np.nan*np.ones(3) + spot_grp.attrs.create('measured_angles', mangs) + if mxy is None: + mxy = np.nan*np.ones(3) + spot_grp.attrs.create('measured_xy', mxy) + + # get centers crds from edge arrays + # FIXME: export full coordinate arrays, or just center vectors??? + # + # ome_crd, eta_crd, tth_crd = np.meshgrid( + # ome_centers, + # centers_of_edge_vec(eta_edges), + # centers_of_edge_vec(tth_edges), + # indexing='ij') + # + # ome_dim, eta_dim, tth_dim = spot_data.shape + + # !!! 
for now just exporting center vectors for spot_data
+        tth_crd = centers_of_edge_vec(tth_edges)
+        eta_crd = centers_of_edge_vec(eta_edges)
+
+        shuffle_data = True  # reduces size by 20%
+        spot_grp.create_dataset('tth_crd', data=tth_crd,
+                                compression="gzip", compression_opts=gzip,
+                                shuffle=shuffle_data)
+        spot_grp.create_dataset('eta_crd', data=eta_crd,
+                                compression="gzip", compression_opts=gzip,
+                                shuffle=shuffle_data)
+        spot_grp.create_dataset('ome_crd', data=ome_centers,
+                                compression="gzip", compression_opts=gzip,
+                                shuffle=shuffle_data)
+        spot_grp.create_dataset('xy_centers', data=xy_centers,
+                                compression="gzip", compression_opts=gzip,
+                                shuffle=shuffle_data)
+        spot_grp.create_dataset('ij_centers', data=ijs,
+                                compression="gzip", compression_opts=gzip,
+                                shuffle=shuffle_data)
+        spot_grp.create_dataset('frame_indices', data=fi,
+                                compression="gzip", compression_opts=gzip,
+                                shuffle=shuffle_data)
+        spot_grp.create_dataset('intensities', data=spot_data,
+                                compression="gzip", compression_opts=gzip,
+                                shuffle=shuffle_data)
+        return
+
+
+class GenerateEtaOmeMaps(object):
+    """
+    eta-ome map class derived from new image_series and YAML config
+
+    ...for now...
+
+    must provide:
+
+      self.dataStore
+      self.planeData
+      self.iHKLList
+      self.etaEdges  # IN RADIANS
+      self.omeEdges  # IN RADIANS
+      self.etas      # IN RADIANS
+      self.omegas    # IN RADIANS
+
+    """
+
+    def __init__(self, image_series_dict, instrument, plane_data,
+                 active_hkls=None, eta_step=0.25, threshold=None,
+                 ome_period=(0, 360)):
+        """
+        image_series must be OmegaImageSeries class
+        instrument_params must be a dict (loaded from yaml spec)
+        active_hkls must be a list (required for now)
+
+        FIXME: get rid of omega period; should get it from imageseries
+        """
+
+        self._planeData = plane_data
+
+        # ???: change name of iHKLList?
+        # ???: can we change the behavior of iHKLList?
+        if active_hkls is None:
+            self._iHKLList = plane_data.getHKLID(
+                plane_data.hkls, master=True
+            )
+            n_rings = len(self._iHKLList)
+        else:
+            assert hasattr(active_hkls, '__len__'), \
+                "active_hkls must be an iterable with __len__"
+            self._iHKLList = active_hkls
+            n_rings = len(active_hkls)
+
+        # grab a det key and corresponding imageseries (first will do)
+        # !!! assuming that the imageseries for all panels
+        #     have the same length and omegas
+        det_key, this_det_ims = next(iter(image_series_dict.items()))
+
+        # handle omegas
+        # !!! for multi-wedge, enforcing monotonicity
+        # !!! wedges also cannot overlap or span more than 360
+        omegas_array = this_det_ims.metadata['omega']  # !!! DEGREES
+        delta_ome = omegas_array[0][-1] - omegas_array[0][0]
+        frame_mask = None
+        ome_period = omegas_array[0, 0] + np.r_[0., 360.]  # !!! be careful
+        if this_det_ims.omegawedges.nwedges > 1:
+            delta_omes = [(i['ostop'] - i['ostart'])/i['nsteps']
+                          for i in this_det_ims.omegawedges.wedges]
+            check_wedges = mutil.uniqueVectors(np.atleast_2d(delta_omes),
+                                               tol=1e-6).squeeze()
+            assert check_wedges.size == 1, \
+                "all wedges must have the same delta omega to 1e-6"
+            # grab representative delta ome
+            # !!! assuming positive delta consistent with OmegaImageSeries
+            delta_ome = delta_omes[0]
+
+            # grab full-range start/stop
+            # !!! be sure to map to the same period to enable arithmetic
+            # ??? safer to do this way rather than just pulling from
+            #     the omegas attribute?
+            owedges = this_det_ims.omegawedges.wedges
+            ostart = owedges[0]['ostart']  # !!!
DEGREES + ostop = float( + mapAngle(owedges[-1]['ostop'], ome_period, units='degrees') + ) + # compute total nsteps + # FIXME: need check for roundoff badness + nsteps = int((ostop - ostart)/delta_ome) + ome_edges_full = np.linspace( + ostart, ostop, num=nsteps+1, endpoint=True + ) + omegas_array = np.vstack( + [ome_edges_full[:-1], ome_edges_full[1:]] + ).T + ome_centers = np.average(omegas_array, axis=1) + + # use OmegaImageSeries method to determine which bins have data + # !!! this array has -1 outside a wedge + # !!! again assuming the valid frame order increases monotonically + frame_mask = np.array( + [this_det_ims.omega_to_frame(ome)[0] != -1 + for ome in ome_centers] + ) + + # ???: need to pass a threshold? + eta_mapping, etas = instrument.extract_polar_maps( + plane_data, image_series_dict, + active_hkls=active_hkls, threshold=threshold, + tth_tol=None, eta_tol=eta_step) + + # for convenience grab map shape from first + map_shape = next(iter(eta_mapping.values())).shape[1:] + + # pack all detectors with masking + # FIXME: add omega masking + data_store = [] + for i_ring in range(n_rings): + # first handle etas + full_map = np.zeros(map_shape, dtype=float) + nan_mask_full = np.zeros( + (len(eta_mapping), map_shape[0], map_shape[1]) + ) + i_p = 0 + for det_key, eta_map in eta_mapping.items(): + nan_mask = ~np.isnan(eta_map[i_ring]) + nan_mask_full[i_p] = nan_mask + full_map[nan_mask] += eta_map[i_ring][nan_mask] + i_p += 1 + re_nan_these = np.sum(nan_mask_full, axis=0) == 0 + full_map[re_nan_these] = np.nan + + # now omegas + if frame_mask is not None: + # !!! must expand row dimension to include + # skipped omegas + tmp = np.ones((len(frame_mask), map_shape[1]))*np.nan + tmp[frame_mask, :] = full_map + full_map = tmp + data_store.append(full_map) + self._dataStore = data_store + + # set required attributes + self._omegas = mapAngle( + np.radians(np.average(omegas_array, axis=1)), + np.radians(ome_period) + ) + self._omeEdges = mapAngle( + np.radians(np.r_[omegas_array[:, 0], omegas_array[-1, 1]]), + np.radians(ome_period) + ) + + # !!! must avoid the case where omeEdges[0] = omeEdges[-1] for the + # indexer to work properly + if abs(self._omeEdges[0] - self._omeEdges[-1]) <= ct.sqrt_epsf: + # !!! 
SIGNED delta ome
+            del_ome = np.radians(omegas_array[0, 1] - omegas_array[0, 0])
+            self._omeEdges[-1] = self._omeEdges[-2] + del_ome
+
+        # handle etas
+        # WARNING: unlike the omegas in imageseries metadata,
+        # these are in RADIANS and represent bin centers
+        self._etaEdges = etas
+        self._etas = self._etaEdges[:-1] + 0.5*np.radians(eta_step)
+
+    @property
+    def dataStore(self):
+        return self._dataStore
+
+    @property
+    def planeData(self):
+        return self._planeData
+
+    @property
+    def iHKLList(self):
+        return np.atleast_1d(self._iHKLList).flatten()
+
+    @property
+    def etaEdges(self):
+        return self._etaEdges
+
+    @property
+    def omeEdges(self):
+        return self._omeEdges
+
+    @property
+    def etas(self):
+        return self._etas
+
+    @property
+    def omegas(self):
+        return self._omegas
+
+    def save(self, filename):
+        xrdutil.EtaOmeMaps.save_eta_ome_maps(self, filename)
+
+
+def _generate_ring_params(tthr, ptth, peta, eta_edges, delta_eta):
+    # mark pixels in the spec'd tth range
+    pixels_in_tthr = np.logical_and(
+        ptth >= tthr[0], ptth <= tthr[1]
+    )
+
+    # catch case where ring isn't on detector
+    if not np.any(pixels_in_tthr):
+        return None
+
+    pixel_ids = np.where(pixels_in_tthr)
+
+    # grab relevant eta coords using histogram
+    pixel_etas = peta[pixel_ids]
+    reta_hist = histogram(pixel_etas, eta_edges)
+    bins_on_detector = np.where(reta_hist)[0]
+
+    return pixel_etas, eta_edges, pixel_ids, bins_on_detector
+
+
+def run_fast_histogram(x, bins, weights=None):
+    return histogram1d(x, len(bins) - 1, (bins[0], bins[-1]),
+                       weights=weights)
+
+
+def run_numpy_histogram(x, bins, weights=None):
+    # numpy fallback; np.histogram returns (counts, edges)
+    return np.histogram(x, bins=bins, weights=weights)[0]
+
+
+histogram = run_fast_histogram if fast_histogram else run_numpy_histogram
+
+
+def _run_histograms(rows, ims, tth_ranges, ring_maps, ring_params, threshold):
+    for i_row in range(*rows):
+        image = ims[i_row]
+
+        # handle threshold if specified
+        if threshold is not None:
+            # !!! NaNs get preserved
+            image = np.array(image)
+            image[image < threshold] = 0.
+
+        for i_r, tthr in enumerate(tth_ranges):
+            this_map = ring_maps[i_r]
+            params = ring_params[i_r]
+            if not params:
+                # We are supposed to skip this ring...
+                continue
+
+            # Unpack the params
+            pixel_etas, eta_edges, pixel_ids, bins_on_detector = params
+            result = histogram(pixel_etas, eta_edges, weights=image[pixel_ids])
+
+            # Note that this preserves nan values for bins not on the detector.
+            this_map[i_row, bins_on_detector] = result[bins_on_detector]
+
+
+def _extract_detector_line_positions(iter_args, plane_data, tth_tol,
+                                     eta_tol, eta_centers, npdiv,
+                                     collapse_tth, collapse_eta,
+                                     do_interpolation, do_fitting,
+                                     fitting_kwargs, tth_distortion,
+                                     max_workers):
+    panel, instr_cfg, images, pbp = iter_args
+
+    if images.ndim == 2:
+        images = np.tile(images, (1, 1, 1))
+    elif images.ndim != 3:
+        raise RuntimeError("images must be 2- or 3-d")
+
+    # make rings
+    # !!! adding tth_distortion pass-through; comes in as dict over panels
+    tth_distr_cls = None
+    if tth_distortion is not None:
+        tth_distr_cls = tth_distortion[panel.name]
+
+    pow_angs, pow_xys, tth_ranges = panel.make_powder_rings(
+        plane_data, merge_hkls=True,
+        delta_tth=tth_tol, delta_eta=eta_tol,
+        eta_list=eta_centers, tth_distortion=tth_distr_cls)
+
+    tth_tols = np.degrees(np.hstack([i[1] - i[0] for i in tth_ranges]))
+
+    # !!!
this is only needed if doing fitting + if isinstance(plane_data, PlaneData): + tth_idx, tth_ranges = plane_data.getMergedRanges(cullDupl=True) + tth_ref = plane_data.getTTh() + tth0 = [np.degrees(tth_ref[i]) for i in tth_idx] + else: + tth0 = plane_data + + # ================================================================= + # LOOP OVER RING SETS + # ================================================================= + pbar_rings = partial(tqdm, total=len(pow_angs), desc="Ringset", + position=pbp) + + kwargs = { + 'instr_cfg': instr_cfg, + 'panel': panel, + 'eta_tol': eta_tol, + 'npdiv': npdiv, + 'collapse_tth': collapse_tth, + 'collapse_eta': collapse_eta, + 'images': images, + 'do_interpolation': do_interpolation, + 'do_fitting': do_fitting, + 'fitting_kwargs': fitting_kwargs, + 'tth_distortion': tth_distr_cls, + } + func = partial(_extract_ring_line_positions, **kwargs) + iter_arg = zip(pow_angs, pow_xys, tth_tols, tth0) + with ProcessPoolExecutor(mp_context=constants.mp_context, + max_workers=max_workers) as executor: + return list(pbar_rings(executor.map(func, iter_arg))) + + +def _extract_ring_line_positions(iter_args, instr_cfg, panel, eta_tol, npdiv, + collapse_tth, collapse_eta, images, + do_interpolation, do_fitting, fitting_kwargs, + tth_distortion): + """ + Extracts data for a single Debye-Scherrer ring . + + Parameters + ---------- + iter_args : tuple + (angs [radians], + xys [mm], + tth_tol [deg], + this_tth0 [deg]) + instr_cfg : TYPE + DESCRIPTION. + panel : TYPE + DESCRIPTION. + eta_tol : TYPE + DESCRIPTION. + npdiv : TYPE + DESCRIPTION. + collapse_tth : TYPE + DESCRIPTION. + collapse_eta : TYPE + DESCRIPTION. + images : TYPE + DESCRIPTION. + do_interpolation : TYPE + DESCRIPTION. + do_fitting : TYPE + DESCRIPTION. + fitting_kwargs : TYPE + DESCRIPTION. + tth_distortion : TYPE + DESCRIPTION. + + Yields + ------ + patch_data : TYPE + DESCRIPTION. + + """ + # points are already checked to fall on detector + angs, xys, tth_tol, this_tth0 = iter_args + + # SS 01/31/25 noticed some nans in xys even after clipping + # going to do another round of masking to get rid of those + nan_mask = ~np.logical_or(np.isnan(xys), np.isnan(angs)) + nan_mask = np.logical_or.reduce(nan_mask, 1) + if angs.ndim > 1 and xys.ndim > 1: + angs = angs[nan_mask,:] + xys = xys[nan_mask, :] + + n_images = len(images) + native_area = panel.pixel_area + + # make the tth,eta patches for interpolation + patches = xrdutil.make_reflection_patches( + instr_cfg, angs, panel.angularPixelSize(xys), + tth_tol=tth_tol, eta_tol=eta_tol, npdiv=npdiv, quiet=True) + + # loop over patches + # FIXME: fix initialization + if collapse_tth: + patch_data = np.zeros((len(angs), n_images)) + else: + patch_data = [] + for i_p, patch in enumerate(patches): + # strip relevant objects out of current patch + vtx_angs, vtx_xys, conn, areas, xys_eval, ijs = patch + + # need to reshape eval pts for interpolation + xy_eval = np.vstack([ + xys_eval[0].flatten(), + xys_eval[1].flatten()]).T + + _, on_panel = panel.clip_to_panel(xy_eval) + + if np.any(~on_panel): + continue + + if collapse_tth: + ang_data = (vtx_angs[0][0, [0, -1]], + vtx_angs[1][[0, -1], 0]) + elif collapse_eta: + # !!! 
yield the tth bin centers + tth_centers = np.average( + np.vstack( + [vtx_angs[0][0, :-1], vtx_angs[0][0, 1:]] + ), + axis=0 + ) + ang_data = (tth_centers, + angs[i_p][-1]) + if do_fitting: + fit_data = [] + else: + ang_data = vtx_angs + + prows, pcols = areas.shape + area_fac = areas/float(native_area) + + # interpolate + if not collapse_tth: + ims_data = [] + for j_p in np.arange(len(images)): + # catch interpolation type + image = images[j_p] + if do_interpolation: + p_img = panel.interpolate_bilinear( + xy_eval, + image, + ).reshape(prows, pcols)*area_fac + else: + p_img = image[ijs[0], ijs[1]]*area_fac + + # catch flat spectrum data, which will cause + # fitting to fail. + # ???: best here, or make fitting handle it? + mxval = np.max(p_img) + mnval = np.min(p_img) + if mxval == 0 or (1. - mnval/mxval) < 0.01: + continue + + # catch collapsing options + if collapse_tth: + patch_data[i_p, j_p] = np.average(p_img) + # ims_data.append(np.sum(p_img)) + else: + if collapse_eta: + lineout = np.average(p_img, axis=0) + ims_data.append(lineout) + if do_fitting: + if tth_distortion is not None: + # must correct tth0 + tmp = tth_distortion.apply( + panel.angles_to_cart( + np.vstack( + [np.radians(this_tth0), + np.tile(ang_data[-1], len(this_tth0))] + ).T + ), + return_nominal=True) + pk_centers = np.degrees(tmp[:, 0]) + else: + pk_centers = this_tth0 + kwargs = { + 'tth_centers': np.degrees(tth_centers), + 'lineout': lineout, + 'tth_pred': pk_centers, + **fitting_kwargs, + } + result = fit_ring(**kwargs) + fit_data.append(result) + else: + ims_data.append(p_img) + if not collapse_tth: + output = [ang_data, ims_data] + if do_fitting: + output.append(fit_data) + patch_data.append(output) + + return patch_data + + +DETECTOR_TYPES = { + 'planar': PlanarDetector, + 'cylindrical': CylindricalDetector, +} + + +class BufferShapeMismatchError(RuntimeError): + # This is raised when the buffer shape does not match the detector shape + pass + + +@contextmanager +def switch_xray_source(instr: HEDMInstrument, xray_source: Optional[str]): + if xray_source is None: + # If the x-ray source is None, leave it as the current active one + yield + return + + prev_beam_name = instr.active_beam_name + instr.active_beam_name = xray_source + try: + yield + finally: + instr.active_beam_name = prev_beam_name diff --git a/hexrd/laue/material/crystallography.py b/hexrd/laue/material/crystallography.py new file mode 100644 index 000000000..574225e67 --- /dev/null +++ b/hexrd/laue/material/crystallography.py @@ -0,0 +1,2255 @@ +# -*- coding: utf-8 -*- +# ============================================================================= +# Copyright (c) 2012, Lawrence Livermore National Security, LLC. +# Produced at the Lawrence Livermore National Laboratory. +# Written by Joel Bernier and others. +# LLNL-CODE-529294. +# All rights reserved. +# +# This file is part of HEXRD. For details on dowloading the source, +# see the file COPYING. +# +# Please also see the file LICENSE. +# +# This program is free software; you can redistribute it and/or modify it under +# the terms of the GNU Lesser General Public License (as published by the Free +# Software Foundation) version 2.1 dated February 1999. +# +# This program is distributed in the hope that it will be useful, but +# WITHOUT ANY WARRANTY; without even the IMPLIED WARRANTY OF MERCHANTABILITY +# or FITNESS FOR A PARTICULAR PURPOSE. See the terms and conditions of the +# GNU General Public License for more details. 
+# +# You should have received a copy of the GNU Lesser General Public +# License along with this program (see file LICENSE); if not, write to +# the Free Software Foundation, Inc., 59 Temple Place, Suite 330, +# Boston, MA 02111-1307 USA or visit . +# ============================================================================= +import re +import copy +import csv +import os +from math import pi +from typing import Optional, Union, Dict, List, Tuple + +import numpy as np + +from hexrd.material.unitcell import unitcell +from hexrd.deprecation import deprecated +from hexrd import constants +from hexrd.matrixutil import unitVector +from hexrd.rotations import ( + rotMatOfExpMap, + mapAngle, + applySym, + ltypeOfLaueGroup, + quatOfLaueGroup, +) +from hexrd.transforms import xfcapi +from hexrd import valunits +from hexrd.valunits import toFloat +from hexrd.constants import d2r, r2d, sqrt3by2, epsf, sqrt_epsf + +"""module vars""" + +# units +dUnit = 'angstrom' +outputDegrees = False +outputDegrees_bak = outputDegrees + + +def hklToStr(hkl: np.ndarray) -> str: + """ + Converts hkl representation to a string. + + Parameters + ---------- + hkl : np.ndarray + 3 element list of h, k, and l values (Miller indices). + + Returns + ------- + str + Space-separated string representation of h, k, and l values. + + """ + return re.sub(r'[\[\]\(\)\{\},]', '', str(hkl)) + + +def tempSetOutputDegrees(val: bool) -> None: + """ + Set the global outputDegrees flag temporarily. Can be reverted with + revertOutputDegrees(). + + Parameters + ---------- + val : bool + True to output angles in degrees, False to output angles in radians. + + Returns + ------- + None + + """ + global outputDegrees, outputDegrees_bak + outputDegrees_bak = outputDegrees + outputDegrees = val + + +def revertOutputDegrees() -> None: + """ + Revert the effect of tempSetOutputDegrees(), resetting the outputDegrees + flag to its previous value (True to output in degrees, False for radians). + + Returns + ------- + None + """ + global outputDegrees, outputDegrees_bak + outputDegrees = outputDegrees_bak + + +def cosineXform( + a: np.ndarray, b: np.ndarray, c: np.ndarray +) -> tuple[np.ndarray, np.ndarray]: + """ + Spherical trig transform to take alpha, beta, gamma to expressions + for cos(alpha*). See ref below. + + [1] R. J. Neustadt, F. W. Cagle, Jr., and J. Waser, ``Vector algebra and + the relations between direct and reciprocal lattice quantities''. Acta + Cryst. (1968), A24, 247--248 + + Parameters + ---------- + a : np.ndarray + List of alpha angle values (radians). + b : np.ndarray + List of beta angle values (radians). + c : np.ndarray + List of gamma angle values (radians). + + Returns + ------- + np.ndarray + List of cos(alpha*) values. + np.ndarray + List of sin(alpha*) values. + + """ + cosar = (np.cos(b) * np.cos(c) - np.cos(a)) / (np.sin(b) * np.sin(c)) + sinar = np.sqrt(1 - cosar**2) + return cosar, sinar + + +def processWavelength(arg: Union[valunits.valWUnit, float]) -> float: + """ + Convert an energy value to a wavelength. If argument has units of length + or energy, will convert to globally specified unit type for wavelength + (dUnit). If argument is a scalar, assumed input units are keV. 
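+
+    For example, a scalar input of 12.398 keV maps to a wavelength of
+    roughly 1.0 angstrom, since hc ~= 12.398 keV-angstrom (this example
+    assumes the default dUnit of 'angstrom'):
+
+        processWavelength(12.398)  # -> ~1.0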
+ """ + if isinstance(arg, valunits.valWUnit): + # arg is a valunits.valWUnit object + if arg.isLength(): + return arg.getVal(dUnit) + elif arg.isEnergy(): + e = arg.getVal('keV') + return valunits.valWUnit( + 'wavelength', 'length', constants.keVToAngstrom(e), 'angstrom' + ).getVal(dUnit) + else: + raise RuntimeError('do not know what to do with ' + str(arg)) + else: + # !!! assuming arg is in keV + return valunits.valWUnit( + 'wavelength', 'length', constants.keVToAngstrom(arg), 'angstrom' + ).getVal(dUnit) + +def latticeParameters(lvec): + """ + Generates direct and reciprocal lattice vector components in a + crystal-relative RHON basis, X. The convention for fixing X to the + lattice is such that a || x1 and c* || x3, where a and c* are + direct and reciprocal lattice vectors, respectively. + """ + lnorm = np.sqrt(np.sum(lvec**2, 0)) + + a = lnorm[0] + b = lnorm[1] + c = lnorm[2] + + ahat = lvec[:, 0] / a + bhat = lvec[:, 1] / b + chat = lvec[:, 2] / c + + gama = np.arccos(np.dot(ahat, bhat)) + beta = np.arccos(np.dot(ahat, chat)) + alfa = np.arccos(np.dot(bhat, chat)) + if outputDegrees: + gama = r2d * gama + beta = r2d * beta + alfa = r2d * alfa + + return [a, b, c, alfa, beta, gama] + +def latticePlanes( + hkls: np.ndarray, + lparms: np.ndarray, + ltype: Optional[str] = 'cubic', + wavelength: Optional[float] = 1.54059292, + strainMag: Optional[float] = None, +) -> Dict[str, np.ndarray]: + """ + Generates lattice plane data in the direct lattice for a given set + of Miller indices. Vector components are written in the + crystal-relative RHON basis, X. The convention for fixing X to the + lattice is such that a || x1 and c* || x3, where a and c* are + direct and reciprocal lattice vectors, respectively. + + USAGE: + + planeInfo = latticePlanes(hkls, lparms, **kwargs) + + INPUTS: + + 1) hkls (3 x n float ndarray) is the array of Miller indices for + the planes of interest. The vectors are assumed to be + concatenated along the 1-axis (horizontal). + + 2) lparms (1 x m float list) is the array of lattice parameters, + where m depends on the symmetry group (see below). + + The following optional arguments are recognized: + + 3) ltype=(string) is a string representing the symmetry type of + the implied Laue group. The 11 available choices are shown + below. The default value is 'cubic'. Note that each group + expects a lattice parameter array of the indicated length + and order. + + latticeType lparms + ----------- ------------ + 'cubic' a + 'hexagonal' a, c + 'trigonal' a, c + 'rhombohedral' a, alpha (in degrees) + 'tetragonal' a, c + 'orthorhombic' a, b, c + 'monoclinic' a, b, c, beta (in degrees) + 'triclinic' a, b, c, alpha, beta, gamma (in degrees) + + 4) wavelength= is a value represented the wavelength in + Angstroms to calculate bragg angles for. The default value + is for Cu K-alpha radiation (1.54059292 Angstrom) + + 5) strainMag=None + + OUTPUTS: + + 1) planeInfo is a dictionary containing the following keys/items: + + normals (3, n) double array array of the components to the + unit normals for each {hkl} in + X (horizontally concatenated) + + dspacings (n, ) double array array of the d-spacings for + each {hkl} + + tThetas (n, ) double array array of the Bragg angles for + each {hkl} relative to the + specified wavelength + + NOTES: + + *) This function is effectively a wrapper to 'latticeVectors'. + See 'help(latticeVectors)' for additional info. 
+ + *) Lattice plane d-spacings are calculated from the reciprocal + lattice vectors specified by {hkl} as shown in Appendix 1 of + [1]. + + REFERENCES: + + [1] B. D. Cullity, ``Elements of X-Ray Diffraction, 2 + ed.''. Addison-Wesley Publishing Company, Inc., 1978. ISBN + 0-201-01174-3 + + """ + location = 'latticePlanes' + + assert ( + hkls.shape[0] == 3 + ), f"hkls aren't column vectors in call to '{location}'!" + + tag = ltype + wlen = wavelength + + # get B + L = latticeVectors(lparms, tag) + + # get G-vectors -- reciprocal vectors in crystal frame + G = np.dot(L['B'], hkls) + + # magnitudes + d = 1 / np.sqrt(np.sum(G**2, 0)) + + aconv = 1.0 + if outputDegrees: + aconv = r2d + + # two thetas + sth = wlen / 2.0 / d + mask = np.abs(sth) < 1.0 + tth = np.zeros(sth.shape) + + tth[~mask] = np.nan + tth[mask] = aconv * 2.0 * np.arcsin(sth[mask]) + + p = dict(normals=unitVector(G), dspacings=d, tThetas=tth) + + if strainMag is not None: + p['tThetasLo'] = np.zeros(sth.shape) + p['tThetasHi'] = np.zeros(sth.shape) + + mask = (np.abs(wlen / 2.0 / (d * (1.0 + strainMag))) < 1.0) & ( + np.abs(wlen / 2.0 / (d * (1.0 - strainMag))) < 1.0 + ) + + p['tThetasLo'][~mask] = np.nan + p['tThetasHi'][~mask] = np.nan + + p['tThetasLo'][mask] = ( + aconv * 2 * np.arcsin(wlen / 2.0 / (d[mask] * (1.0 + strainMag))) + ) + p['tThetasHi'][mask] = ( + aconv * 2 * np.arcsin(wlen / 2.0 / (d[mask] * (1.0 - strainMag))) + ) + + return p + + +def latticeVectors( + lparms: np.ndarray, + tag: Optional[str] = 'cubic', + radians: Optional[bool] = False, +) -> Dict[str, Union[np.ndarray, float]]: + """ + Generates direct and reciprocal lattice vector components in a + crystal-relative RHON basis, X. The convention for fixing X to the + lattice is such that a || x1 and c* || x3, where a and c* are + direct and reciprocal lattice vectors, respectively. + + USAGE: + + lattice = LatticeVectors(lparms, ) + + INPUTS: + + 1) lparms (1 x n float list) is the array of lattice parameters, + where n depends on the symmetry group (see below). + + 2) tag (string) is a case-insensitive string representing the + symmetry type of the implied Laue group. The 11 available choices + are shown below. The default value is 'cubic'. Note that each + group expects a lattice parameter array of the indicated length + and order. + + latticeType lparms + ----------- ------------ + 'cubic' a + 'hexagonal' a, c + 'trigonal' a, c + 'rhombohedral' a, alpha (in degrees) + 'tetragonal' a, c + 'orthorhombic' a, b, c + 'monoclinic' a, b, c, beta (in degrees) + 'triclinic' a, b, c, alpha, beta, gamma (in degrees) + + The following optional arguments are recognized: + + 3) radians= is a boolean flag indicating usage of radians rather + than degrees, defaults to false. + + OUTPUTS: + + 1) lattice is a dictionary containing the following keys/items: + + F (3, 3) double array transformation matrix taking + componenents in the direct + lattice (i.e. {uvw}) to the + reference, X + + B (3, 3) double array transformation matrix taking + componenents in the reciprocal + lattice (i.e. 
{hkl}) to X + + BR (3, 3) double array transformation matrix taking + componenents in the reciprocal + lattice to the Fable reference + frame (see notes) + + U0 (3, 3) double array transformation matrix + (orthogonal) taking + componenents in the + Fable reference frame to X + + vol double the unit cell volume + + + dparms (6, ) double list the direct lattice parameters: + [a b c alpha beta gamma] + + rparms (6, ) double list the reciprocal lattice + parameters: + [a* b* c* alpha* beta* gamma*] + + NOTES: + + *) The conventions used for assigning a RHON basis, + X -> {x1, x2, x3}, to each point group are consistent with + those published in Appendix B of [1]. Namely: a || x1 and + c* || x3. This differs from the convention chosen by the Fable + group, where a* || x1 and c || x3 [2]. + + *) The unit cell angles are defined as follows: + alpha=acos(b'*c/|b||c|), beta=acos(c'*a/|c||a|), and + gamma=acos(a'*b/|a||b|). + + *) The reciprocal lattice vectors are calculated using the + crystallographic convention, where the prefactor of 2*pi is + omitted. In this convention, the reciprocal lattice volume is + 1/V. + + *) Several relations from [3] were employed in the component + calculations. + + REFERENCES: + + [1] J. F. Nye, ``Physical Properties of Crystals: Their + Representation by Tensors and Matrices''. Oxford University + Press, 1985. ISBN 0198511655 + + [2] E. M. Lauridsen, S. Schmidt, R. M. Suter, and H. F. Poulsen, + ``Tracking: a method for structural characterization of grains + in powders or polycrystals''. J. Appl. Cryst. (2001). 34, + 744--750 + + [3] R. J. Neustadt, F. W. Cagle, Jr., and J. Waser, ``Vector + algebra and the relations between direct and reciprocal + lattice quantities''. Acta Cryst. (1968), A24, 247--248 + + + """ + + # build index for sorting out lattice parameters + lattStrings = [ + 'cubic', + 'hexagonal', + 'trigonal', + 'rhombohedral', + 'tetragonal', + 'orthorhombic', + 'monoclinic', + 'triclinic', + ] + + if radians: + aconv = 1.0 + else: + aconv = pi / 180.0 # degToRad + deg90 = pi / 2.0 + deg120 = 2.0 * pi / 3.0 + # + if tag == lattStrings[0]: + # cubic + cellparms = np.r_[np.tile(lparms[0], (3,)), deg90 * np.ones((3,))] + elif tag == lattStrings[1] or tag == lattStrings[2]: + # hexagonal | trigonal (hex indices) + cellparms = np.r_[ + lparms[0], lparms[0], lparms[1], deg90, deg90, deg120 + ] + elif tag == lattStrings[3]: + # rhombohedral + cellparms = np.r_[ + np.tile(lparms[0], (3,)), np.tile(aconv * lparms[1], (3,)) + ] + elif tag == lattStrings[4]: + # tetragonal + cellparms = np.r_[lparms[0], lparms[0], lparms[1], deg90, deg90, deg90] + elif tag == lattStrings[5]: + # orthorhombic + cellparms = np.r_[lparms[0], lparms[1], lparms[2], deg90, deg90, deg90] + elif tag == lattStrings[6]: + # monoclinic + cellparms = np.r_[ + lparms[0], lparms[1], lparms[2], deg90, aconv * lparms[3], deg90 + ] + elif tag == lattStrings[7]: + # triclinic + cellparms = np.r_[ + lparms[0], + lparms[1], + lparms[2], + aconv * lparms[3], + aconv * lparms[4], + aconv * lparms[5], + ] + else: + raise RuntimeError(f'lattice tag "{tag}" is not recognized') + + alpha, beta, gamma = cellparms[3:6] + cosalfar, sinalfar = cosineXform(alpha, beta, gamma) + + a = cellparms[0] * np.r_[1, 0, 0] + b = cellparms[1] * np.r_[np.cos(gamma), np.sin(gamma), 0] + c = ( + cellparms[2] + * np.r_[ + np.cos(beta), -cosalfar * np.sin(beta), sinalfar * np.sin(beta) + ] + ) + + ad = np.sqrt(np.sum(a**2)) + bd = np.sqrt(np.sum(b**2)) + cd = np.sqrt(np.sum(c**2)) + + # Cell volume + V = np.dot(a, 
np.cross(b, c)) + + # F takes components in the direct lattice to X + F = np.c_[a, b, c] + + # Reciprocal lattice vectors + astar = np.cross(b, c) / V + bstar = np.cross(c, a) / V + cstar = np.cross(a, b) / V + + # and parameters + ar = np.sqrt(np.sum(astar**2)) + br = np.sqrt(np.sum(bstar**2)) + cr = np.sqrt(np.sum(cstar**2)) + + alfar = np.arccos(np.dot(bstar, cstar) / br / cr) + betar = np.arccos(np.dot(cstar, astar) / cr / ar) + gamar = np.arccos(np.dot(astar, bstar) / ar / br) + + # B takes components in the reciprocal lattice to X + B = np.c_[astar, bstar, cstar] + + cosalfar2, sinalfar2 = cosineXform(alfar, betar, gamar) + + afable = ar * np.r_[1, 0, 0] + bfable = br * np.r_[np.cos(gamar), np.sin(gamar), 0] + cfable = ( + cr + * np.r_[ + np.cos(betar), + -cosalfar2 * np.sin(betar), + sinalfar2 * np.sin(betar), + ] + ) + + BR = np.c_[afable, bfable, cfable] + U0 = np.dot(B, np.linalg.inv(BR)) + if outputDegrees: + dparms = np.r_[ad, bd, cd, r2d * np.r_[alpha, beta, gamma]] + rparms = np.r_[ar, br, cr, r2d * np.r_[alfar, betar, gamar]] + else: + dparms = np.r_[ad, bd, cd, np.r_[alpha, beta, gamma]] + rparms = np.r_[ar, br, cr, np.r_[alfar, betar, gamar]] + + return { + 'F': F, + 'B': B, + 'BR': BR, + 'U0': U0, + 'vol': V, + 'dparms': dparms, + 'rparms': rparms, + } + +def hexagonalIndicesFromRhombohedral(hkl): + """ + converts rhombohedral hkl to hexagonal indices + """ + HKL = np.zeros((3, hkl.shape[1]), dtype='int') + + HKL[0, :] = hkl[0, :] - hkl[1, :] + HKL[1, :] = hkl[1, :] - hkl[2, :] + HKL[2, :] = hkl[0, :] + hkl[1, :] + hkl[2, :] + + return HKL + + +def rhombohedralIndicesFromHexagonal(HKL): + """ + converts hexagonal hkl to rhombohedral indices + """ + hkl = np.zeros((3, HKL.shape[1]), dtype='int') + + hkl[0, :] = 2 * HKL[0, :] + HKL[1, :] + HKL[2, :] + hkl[1, :] = -HKL[0, :] + HKL[1, :] + HKL[2, :] + hkl[2, :] = -HKL[0, :] - 2 * HKL[1, :] + HKL[2, :] + + hkl = hkl / 3.0 + return hkl + + +def rhombohedralParametersFromHexagonal(a_h, c_h): + """ + converts hexagonal lattice parameters (a, c) to rhombohedral + lattice parameters (a, alpha) + """ + a_r = np.sqrt(3 * a_h**2 + c_h**2) / 3.0 + alfa_r = 2 * np.arcsin(3.0 / (2 * np.sqrt(3 + (c_h / a_h) ** 2))) + if outputDegrees: + alfa_r = r2d * alfa_r + return a_r, alfa_r + + +def convert_Miller_direction_to_cartesian(uvw, a=1.0, c=1.0, normalize=False): + """ + Converts 3-index hexagonal Miller direction indices to components in the + crystal reference frame. + Parameters + ---------- + uvw : array_like + The (n, 3) array of 3-index hexagonal indices to convert. + a : scalar, optional + The `a` lattice parameter. The default value is 1. + c : scalar, optional + The `c` lattice parameter. The default value is 1. + normalize : bool, optional + Flag for whether or not to normalize output vectors + Returns + ------- + numpy.ndarray + The (n, 3) array of cartesian components associated with the input + direction indices. + Notes + ----- + 1) The [uv.w] the Miller-Bravais convention is in the hexagonal basis + {a1, a2, a3, c}. The basis for the output, {o1, o2, o3}, is + chosen such that + o1 || a1 + o3 || c + o2 = o3 ^ o1 + """ + u, v, w = np.atleast_2d(uvw).T + retval = np.vstack([1.5 * u * a, sqrt3by2 * a * (2 * v + u), w * c]) + if normalize: + return unitVector(retval).T + else: + return retval.T + + +def convert_Miller_direction_to_MillerBravias(uvw, suppress_redundant=True): + """ + Converts 3-index hexagonal Miller direction indices to 4-index + Miller-Bravais direction indices. 
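+    The mapping used in the body is U = (2u - v)/3, V = (2v - u)/3, W = w,
+    followed by an attempt to rescale the result to a smallest convenient
+    form (see the body).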
+ Parameters + ---------- + uvw : array_like + The (n, 3) array of 3-index hexagonal Miller indices to convert. + suppress_redundant : bool, optional + Flag to suppress the redundant 3rd index. The default is True. + Returns + ------- + numpy.ndarray + The (n, 3) or (n, 4) array -- depending on kwarg -- of Miller-Bravis + components associated with the input Miller direction indices. + Notes + ----- + * NOT for plane normals!!! + """ + u, v, w = np.atleast_2d(uvw).T + retval = np.vstack([(2 * u - v) / 3, (2 * v - u) / 3, w]).T + rem = np.vstack([np.mod(np.tile(i[0], 2), i[1:]) for i in retval]) + rem[abs(rem) < epsf] = np.nan + lcm = np.nanmin(rem, axis=1) + lcm[np.isnan(lcm)] = 1 + retval = retval / np.tile(lcm, (3, 1)).T + if suppress_redundant: + return retval + else: + t = np.atleast_2d(1 - np.sum(retval[:2], axis=1)).T + return np.hstack([retval[:, :2], t, np.atleast_2d(retval[:, 2]).T]) + + +def convert_MillerBravias_direction_to_Miller(UVW): + """ + Converts 4-index hexagonal Miller-Bravais direction indices to + 3-index Miller direction indices. + Parameters + ---------- + UVW : array_like + The (n, 3) array of **non-redundant** Miller-Bravais direction indices + to convert. + Returns + ------- + numpy.ndarray + The (n, 3) array of Miller direction indices associated with the + input Miller-Bravais indices. + Notes + ----- + * NOT for plane normals!!! + """ + U, V, W = np.atleast_2d(UVW).T + return np.vstack([2 * U + V, 2 * V + U, W]) + + +class PlaneData(object): + """ + Careful with ordering: Outputs are ordered by the 2-theta for the + hkl unless you get self._hkls directly, and this order can change + with changes in lattice parameters (lparms); setting and getting + exclusions works on the current hkl ordering, not the original + ordering (in self._hkls), but exclusions are stored in the + original ordering in case the hkl ordering does change with + lattice parameters + + if not None, tThWidth takes priority over strainMag in setting + two-theta ranges; changing strainMag automatically turns off + tThWidth + """ + + def __init__(self, hkls: Optional[np.ndarray], *args, **kwargs) -> None: + """ + Constructor for PlaneData + + Parameters + ---------- + hkls : np.ndarray + Miller indices to be used in the plane data. Can be None if + args is another PlaneData object + + *args + Unnamed arguments. Could be in the format of `lparms, laueGroup, + wavelength, strainMag`, or just a `PlaneData` object. + + **kwargs + Valid keyword arguments include: + - doTThSort + - exclusions + - tThMax + - tThWidth + """ + self._doTThSort = True + self._exclusions = None + self._tThMax = None + + if len(args) == 4: + lparms, laueGroup, wavelength, strainMag = args + tThWidth = None + self._wavelength = processWavelength(wavelength) + self._lparms = self._parseLParms(lparms) + elif len(args) == 1 and isinstance(args[0], PlaneData): + other = args[0] + lparms, laueGroup, wavelength, strainMag, tThWidth = ( + other.getParams() + ) + self._wavelength = wavelength + self._lparms = lparms + self._doTThSort = other._doTThSort + self._exclusions = other._exclusions + self._tThMax = other._tThMax + if hkls is None: + hkls = other._hkls + else: + raise NotImplementedError(f'args : {args}') + + self._laueGroup = laueGroup + self._hkls = copy.deepcopy(hkls) + self._strainMag = strainMag + self._structFact = np.ones(self._hkls.shape[1]) + self.tThWidth = tThWidth + + # ... 
need to implement tThMin too
+        if 'doTThSort' in kwargs:
+            self._doTThSort = kwargs.pop('doTThSort')
+        if 'exclusions' in kwargs:
+            self._exclusions = kwargs.pop('exclusions')
+        if 'tThMax' in kwargs:
+            self._tThMax = toFloat(kwargs.pop('tThMax'), 'radians')
+        if 'tThWidth' in kwargs:
+            self.tThWidth = kwargs.pop('tThWidth')
+        if len(kwargs) > 0:
+            raise RuntimeError(
+                f'have unparsed keyword arguments with keys: {kwargs.keys()}'
+            )
+
+        # This is only used to calculate the structure factor if invalidated
+        self._unitcell: unitcell = None
+
+        self._calc()
+
+    def _calc(self):
+        symmGroup = ltypeOfLaueGroup(self._laueGroup)
+        self._q_sym = quatOfLaueGroup(self._laueGroup)
+        _, latVecOps, hklDataList = PlaneData.makePlaneData(
+            self._hkls,
+            self._lparms,
+            self._q_sym,
+            symmGroup,
+            self._strainMag,
+            self.wavelength,
+        )
+        # sort by tTheta
+        tThs = np.array(
+            [hklDataList[iHKL]['tTheta'] for iHKL in range(len(hklDataList))]
+        )
+        if self._doTThSort:
+            # sorted hkl -> _hkl
+            # _hkl -> sorted hkl
+            self.tThSort = np.argsort(tThs)
+            self.tThSortInv = np.empty(len(hklDataList), dtype=int)
+            self.tThSortInv[self.tThSort] = np.arange(len(hklDataList))
+            self.hklDataList = [hklDataList[iHKL] for iHKL in self.tThSort]
+        else:
+            self.tThSort = np.arange(len(hklDataList))
+            self.tThSortInv = np.arange(len(hklDataList))
+            self.hklDataList = hklDataList
+        self._latVecOps = latVecOps
+        self.nHKLs = len(self.getHKLs())
+
+    def __str__(self):
+        s = '========== plane data ==========\n'
+        s += 'lattice parameters:\n ' + str(self.lparms) + '\n'
+        s += f'two theta width: ({str(self.tThWidth)})\n'
+        s += f'strain magnitude: ({str(self.strainMag)})\n'
+        s += f'wavelength: ({str(self.wavelength)})\n'
+        s += 'hkls: (%d)\n' % self.nHKLs
+        s += str(self.getHKLs())
+        return s
+
+    def getParams(self):
+        """
+        Getter for the parameters of the plane data.
+
+        Returns
+        -------
+        tuple
+            The parameters of the plane data, in the order
+            _lparms, _laueGroup, _wavelength, _strainMag, tThWidth
+
+        """
+        return (
+            self._lparms,
+            self._laueGroup,
+            self._wavelength,
+            self._strainMag,
+            self.tThWidth,
+        )
+
+    def getNhklRef(self) -> int:
+        """
+        Get the total number of hkl's in the plane data, not ignoring
+        ones that are excluded in exclusions.
+
+        Returns
+        -------
+        int
+            The total number of hkl's in the plane data.
+        """
+        return len(self.hklDataList)
+
+    @property
+    def hkls(self) -> np.ndarray:
+        """
+        Horizontally stacked hkls of the plane data (Miller indices).
+        """
+        return self.getHKLs().T
+
+    @hkls.setter
+    def hkls(self, hkls):
+        raise NotImplementedError('for now, not allowing hkls to be reset')
+
+    @property
+    def tThMax(self) -> Optional[float]:
+        """
+        Maximum 2-theta value of the plane data.
+
+        float or None
+        """
+        return self._tThMax
+
+    @tThMax.setter
+    def tThMax(self, t_th_max: Union[float, valunits.valWUnit]) -> None:
+        self._tThMax = toFloat(t_th_max, 'radians')
+
+    @property
+    def exclusions(self) -> np.ndarray:
+        """
+        Excluded HKL's of the plane data.
+
+        Set as type np.ndarray, as a mask of length getNhklRef(), a list of
+        indices to be excluded, or a list of ranges of indices.
+
+        Read as a mask of length getNhklRef().
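+
+        For example (a sketch, with pd a PlaneData instance), the setter
+        accepts any of:
+
+            pd.exclusions = np.zeros(pd.getNhklRef(), dtype=bool)  # mask
+            pd.exclusions = [0, 3]    # indices to exclude
+            pd.exclusions = [[0, 3]]  # ranges of indices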
+ """ + retval = np.zeros(self.getNhklRef(), dtype=bool) + if self._exclusions is not None: + # report in current hkl ordering + retval[:] = self._exclusions[self.tThSortInv] + if self._tThMax is not None: + for iHKLr, hklData in enumerate(self.hklDataList): + if hklData['tTheta'] > self._tThMax: + retval[iHKLr] = True + return retval + + @exclusions.setter + def exclusions(self, new_exclusions: Optional[np.ndarray]) -> None: + excl = np.zeros(len(self.hklDataList), dtype=bool) + if new_exclusions is not None: + exclusions = np.atleast_1d(new_exclusions) + if len(exclusions) == len(self.hklDataList): + assert ( + exclusions.dtype == 'bool' + ), 'Exclusions should be bool if full length' + # convert from current hkl ordering to _hkl ordering + excl[:] = exclusions[self.tThSort] + else: + if len(exclusions.shape) == 1: + # treat exclusions as indices + excl[self.tThSort[exclusions]] = True + elif len(exclusions.shape) == 2: + # treat exclusions as ranges of indices + for r in exclusions: + excl[self.tThSort[r[0]:r[1]]] = True + else: + raise RuntimeError( + f'Unclear behavior for shape {exclusions.shape}' + ) + self._exclusions = excl + self.nHKLs = np.sum(np.logical_not(self._exclusions)) + + def exclude( + self, + dmin: Optional[float] = None, + dmax: Optional[float] = None, + tthmin: Optional[float] = None, + tthmax: Optional[float] = None, + sfacmin: Optional[float] = None, + sfacmax: Optional[float] = None, + pintmin: Optional[float] = None, + pintmax: Optional[float] = None, + ) -> None: + """ + Set exclusions according to various parameters + + Any hkl with a value below any min or above any max will be excluded. So + to be included, an hkl needs to have values between the min and max + for all of the conditions given. + + Note that method resets the tThMax attribute to None. 
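+
+        A minimal usage sketch (hypothetical thresholds, with pd a
+        PlaneData instance):
+
+            pd.exclude(dmin=1.0, tthmax=np.radians(30.0))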
+ + PARAMETERS + ---------- + dmin: float > 0 + minimum lattice spacing (angstroms) + dmax: float > 0 + maximum lattice spacing (angstroms) + tthmin: float > 0 + minimum two theta (radians) + tthmax: float > 0 + maximum two theta (radians) + sfacmin: float > 0 + minimum structure factor as a proportion of maximum + sfacmax: float > 0 + maximum structure factor as a proportion of maximum + pintmin: float > 0 + minimum powder intensity as a proportion of maximum + pintmax: float > 0 + maximum powder intensity as a proportion of maximum + """ + excl = np.zeros(self.getNhklRef(), dtype=bool) + self.exclusions = None + self.tThMax = None + + if (dmin is not None) or (dmax is not None): + d = np.array(self.getPlaneSpacings()) + if dmin is not None: + excl[d < dmin] = True + if dmax is not None: + excl[d > dmax] = True + + if (tthmin is not None) or (tthmax is not None): + tth = self.getTTh() + if tthmin is not None: + excl[tth < tthmin] = True + if tthmax is not None: + excl[tth > tthmax] = True + + if (sfacmin is not None) or (sfacmax is not None): + sfac = self.structFact + sfac = sfac / sfac.max() + if sfacmin is not None: + excl[sfac < sfacmin] = True + if sfacmax is not None: + excl[sfac > sfacmax] = True + + if (pintmin is not None) or (pintmax is not None): + pint = self.powder_intensity + pint = pint / pint.max() + if pintmin is not None: + excl[pint < pintmin] = True + if pintmax is not None: + excl[pint > pintmax] = True + + self.exclusions = excl + + def _parseLParms( + self, lparms: List[Union[valunits.valWUnit, float]] + ) -> List[float]: + lparmsDUnit = [] + for lparmThis in lparms: + if isinstance(lparmThis, valunits.valWUnit): + if lparmThis.isLength(): + lparmsDUnit.append(lparmThis.getVal(dUnit)) + elif lparmThis.isAngle(): + # plumbing set up to default to degrees + # for lattice parameters + lparmsDUnit.append(lparmThis.getVal('degrees')) + else: + raise RuntimeError( + f'Do not know what to do with {lparmThis}' + ) + else: + lparmsDUnit.append(lparmThis) + return lparmsDUnit + + @property + def lparms(self) -> List[float]: + """ + Lattice parameters of the plane data. + + Can be set as a List[float | valWUnit], but will be converted to + List[float]. + """ + return self._lparms + + @lparms.setter + def lparms(self, lparms: List[Union[valunits.valWUnit, float]]) -> None: + self._lparms = self._parseLParms(lparms) + self._calc() + + @property + def strainMag(self) -> Optional[float]: + """ + Strain magnitude of the plane data. + + float or None + """ + return self._strainMag + + @strainMag.setter + def strainMag(self, strain_mag: float) -> None: + self._strainMag = strain_mag + self.tThWidth = None + self._calc() + + @property + def wavelength(self) -> float: + """ + Wavelength of the plane data. + + Set as float or valWUnit. + + Read as float + """ + return self._wavelength + + @wavelength.setter + def wavelength(self, wavelength: Union[float, valunits.valWUnit]) -> None: + wavelength = processWavelength(wavelength) + # Do not re-compute if it is almost the same + if np.isclose(self._wavelength, wavelength): + return + + self._wavelength = wavelength + self._calc() + + def invalidate_structure_factor(self, ucell: unitcell) -> None: + """ + It can be expensive to compute the structure factor + This method just invalidates it, providing a unit cell, + so that it can be lazily computed from the unit cell. 
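+
+        For example (a sketch): after changing the atom basis one would
+        call pd.invalidate_structure_factor(ucell); the next read of
+        pd.structFact then computes it via ucell.CalcXRSF.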
+ + Parameters: + ----------- + unitcell : unitcell + The unit cell to be used to compute the structure factor + """ + self._structFact = None + self._hedm_intensity = None + self._powder_intensity = None + self._unitcell = ucell + + def _compute_sf_if_needed(self): + any_invalid = ( + self._structFact is None + or self._hedm_intensity is None + or self._powder_intensity is None + ) + if any_invalid and self._unitcell is not None: + # Compute the structure factor first. + # This can be expensive to do, so we lazily compute it when needed. + hkls = self.getHKLs(allHKLs=True) + self.structFact = self._unitcell.CalcXRSF(hkls) + + @property + def structFact(self) -> np.ndarray: + """ + Structure factors for each hkl. + + np.ndarray + """ + self._compute_sf_if_needed() + return self._structFact[~self.exclusions] + + @structFact.setter + def structFact(self, structFact: np.ndarray) -> None: + self._structFact = structFact + multiplicity = self.getMultiplicity(allHKLs=True) + tth = self.getTTh(allHKLs=True) + + hedm_intensity = ( + structFact * lorentz_factor(tth) * polarization_factor(tth) + ) + + powderI = hedm_intensity * multiplicity + + # Now scale them + hedm_intensity = 100.0 * hedm_intensity / np.nanmax(hedm_intensity) + powderI = 100.0 * powderI / np.nanmax(powderI) + + self._hedm_intensity = hedm_intensity + self._powder_intensity = powderI + + @property + def powder_intensity(self) -> np.ndarray: + """ + Powder intensity for each hkl. + """ + self._compute_sf_if_needed() + return self._powder_intensity[~self.exclusions] + + @property + def hedm_intensity(self) -> np.ndarray: + """ + HEDM (high energy x-ray diffraction microscopy) intensity for each hkl. + """ + self._compute_sf_if_needed() + return self._hedm_intensity[~self.exclusions] + + @staticmethod + def makePlaneData( + hkls: np.ndarray, + lparms: np.ndarray, + qsym: np.ndarray, + symmGroup, + strainMag, + wavelength, + ) -> Tuple[ + Dict[str, np.ndarray], Dict[str, Union[np.ndarray, float]], List[Dict] + ]: + """ + Generate lattice plane data from inputs. + + Parameters: + ----------- + hkls: np.ndarray + Miller indices, as in crystallography.latticePlanes + lparms: np.ndarray + Lattice parameters, as in crystallography.latticePlanes + qsym: np.ndarray + (4, n) containing quaternions of symmetry + symmGroup: str + Tag for the symmetry (Laue) group of the lattice. Can generate from + ltypeOfLaueGroup + strainMag: float + Swag of strain magnitudes + wavelength: float + Wavelength + + Returns: + ------- + dict: + Dictionary containing lattice plane data + dict: + Dictionary containing lattice vector operators + list: + List of dictionaries, each containing the data for one hkl + """ + + tempSetOutputDegrees(False) + latPlaneData = latticePlanes( + hkls, + lparms, + ltype=symmGroup, + strainMag=strainMag, + wavelength=wavelength, + ) + + latVecOps = latticeVectors(lparms, symmGroup) + + hklDataList = [] + for iHKL in range(len(hkls.T)): + # need transpose because of convention for hkls ordering + + """ + latVec = latPlaneData['normals'][:,iHKL] + # ... 
if not spots, may be able to work with a subset of these + latPlnNrmlList = applySym( + np.c_[latVec], qsym, csFlag=True, cullPM=False + ) + """ + # returns UN-NORMALIZED lattice plane normals + latPlnNrmls = applySym( + np.dot(latVecOps['B'], hkls[:, iHKL].reshape(3, 1)), + qsym, + csFlag=True, + cullPM=False, + ) + + # check for +/- in symmetry group + latPlnNrmlsM = applySym( + np.dot(latVecOps['B'], hkls[:, iHKL].reshape(3, 1)), + qsym, + csFlag=False, + cullPM=False, + ) + + csRefl = latPlnNrmls.shape[1] == latPlnNrmlsM.shape[1] + + # added this so that I retain the actual symmetric + # integer hkls as well + symHKLs = np.array( + np.round(np.dot(latVecOps['F'].T, latPlnNrmls)), dtype='int' + ) + + hklDataList.append( + dict( + hklID=iHKL, + hkl=hkls[:, iHKL], + tTheta=latPlaneData['tThetas'][iHKL], + dSpacings=latPlaneData['dspacings'][iHKL], + tThetaLo=latPlaneData['tThetasLo'][iHKL], + tThetaHi=latPlaneData['tThetasHi'][iHKL], + latPlnNrmls=unitVector(latPlnNrmls), + symHKLs=symHKLs, + centrosym=csRefl, + ) + ) + + revertOutputDegrees() + return latPlaneData, latVecOps, hklDataList + + @property + def laueGroup(self) -> str: + """ + This is the Schoenflies tag, describing symmetry group of the lattice. + Note that setting this with incompatible lattice parameters will + cause an error. If changing both, use set_laue_and_lparms. + + str + """ + return self._laueGroup + + @laueGroup.setter + def laueGroup(self, laueGroup: str) -> None: + self._laueGroup = laueGroup + self._calc() + + def set_laue_and_lparms( + self, laueGroup: str, lparms: List[Union[valunits.valWUnit, float]] + ) -> None: + """ + Set the Laue group and lattice parameters simultaneously + + When the Laue group changes, the lattice parameters may be + incompatible, and cause an error in self._calc(). This function + allows us to update both the Laue group and lattice parameters + simultaneously to avoid this issue. + + Parameters: + ----------- + laueGroup : str + The symmetry (Laue) group to be set + lparms : List[valunits.valWUnit | float] + Lattice parameters to be set + """ + self._laueGroup = laueGroup + self._lparms = self._parseLParms(lparms) + self._calc() + + @property + def q_sym(self) -> np.ndarray: + """ + Quaternions of symmetry for each hkl, generated from the Laue group + + np.ndarray((4, n)) + """ + return self._q_sym # rotations.quatOfLaueGroup(self._laueGroup) + + def getPlaneSpacings(self) -> List[float]: + """ + Plane spacings for each hkl. 
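+
+        These are the d-spacings d = 1 / ||B @ hkl|| computed when the
+        plane data was generated (see latticePlanes), in the units of the
+        lattice parameters.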
+ + Returns: + ------- + List[float] + List of plane spacings for each hkl + """ + dspacings = [] + for iHKLr, hklData in enumerate(self.hklDataList): + if not self._thisHKL(iHKLr): + continue + dspacings.append(hklData['dSpacings']) + return dspacings + + @property + def latVecOps(self) -> Dict[str, Union[np.ndarray, float]]: + """ + gets lattice vector operators as a new (deepcopy) + + Returns: + ------- + Dict[str, np.ndarray | float] + Dictionary containing lattice vector operators + """ + return copy.deepcopy(self._latVecOps) + + def _thisHKL(self, iHKLr: int) -> bool: + hklData = self.hklDataList[iHKLr] + if self._exclusions is not None: + if self._exclusions[self.tThSortInv[iHKLr]]: + return False + if self._tThMax is not None: + if hklData['tTheta'] > self._tThMax or np.isnan(hklData['tTheta']): + return False + return True + + def _getTThRange(self, iHKLr: int) -> Tuple[float, float]: + hklData = self.hklDataList[iHKLr] + if self.tThWidth is not None: # tThHi-tThLo < self.tThWidth + tTh = hklData['tTheta'] + tThHi = tTh + self.tThWidth * 0.5 + tThLo = tTh - self.tThWidth * 0.5 + else: + tThHi = hklData['tThetaHi'] + tThLo = hklData['tThetaLo'] + return (tThLo, tThHi) + + def getTThRanges(self, strainMag: Optional[float] = None) -> np.ndarray: + """ + Get the 2-theta ranges for included hkls + + Parameters: + ----------- + strainMag : Optional[float] + Optional swag of strain magnitude + + Returns: + ------- + np.ndarray: + hstacked array of hstacked tThLo and tThHi for each hkl (n x 2) + """ + tThRanges = [] + for iHKLr, hklData in enumerate(self.hklDataList): + if not self._thisHKL(iHKLr): + continue + if strainMag is None: + tThRanges.append(self._getTThRange(iHKLr)) + else: + hklData = self.hklDataList[iHKLr] + d = hklData['dSpacings'] + tThLo = 2.0 * np.arcsin( + self._wavelength / 2.0 / (d * (1.0 + strainMag)) + ) + tThHi = 2.0 * np.arcsin( + self._wavelength / 2.0 / (d * (1.0 - strainMag)) + ) + tThRanges.append((tThLo, tThHi)) + return np.array(tThRanges) + + def getMergedRanges( + self, cullDupl: Optional[bool] = False + ) -> Tuple[List[List[int]], List[List[float]]]: + """ + Return indices and ranges for specified planeData, merging where + there is overlap based on the tThWidth and line positions + + Parameters: + ----------- + cullDupl : (optional) bool + If True, cull duplicate 2-theta values (within sqrt_epsf). Defaults + to False. + + Returns: + -------- + List[List[int]] + List of indices for each merged range + + List[List[float]] + List of merged ranges, (n x 2) + """ + tThs = self.getTTh() + tThRanges = self.getTThRanges() + + # if you end exlcusions in a doublet (or multiple close rings) + # then this will 'fail'. May need to revisit... + nonoverlapNexts = np.hstack( + (tThRanges[:-1, 1] < tThRanges[1:, 0], True) + ) + iHKLLists = [] + mergedRanges = [] + hklsCur = [] + tThLoIdx = 0 + tThHiCur = 0.0 + for iHKL, nonoverlapNext in enumerate(nonoverlapNexts): + tThHi = tThRanges[iHKL, -1] + if not nonoverlapNext: + if cullDupl and abs(tThs[iHKL] - tThs[iHKL + 1]) < sqrt_epsf: + continue + else: + hklsCur.append(iHKL) + tThHiCur = tThHi + else: + hklsCur.append(iHKL) + tThHiCur = tThHi + iHKLLists.append(hklsCur) + mergedRanges.append([tThRanges[tThLoIdx, 0], tThHiCur]) + tThLoIdx = iHKL + 1 + hklsCur = [] + return iHKLLists, mergedRanges + + def getTTh(self, allHKLs: Optional[bool] = False) -> np.ndarray: + """ + Get the 2-theta values for each hkl. 
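+
+        Each value satisfies the Bragg condition
+        sin(tTheta / 2) = wavelength / (2 * d) and is stored in radians.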
+
+        Parameters:
+        -----------
+        allHKLs : (optional) bool
+            If True, return all 2-theta values, even if they are excluded in
+            the current planeData. Default is False.
+
+        Returns:
+        -------
+        np.ndarray
+            Array of 2-theta values for each hkl
+        """
+        tTh = []
+        for iHKLr, hklData in enumerate(self.hklDataList):
+            if not allHKLs and not self._thisHKL(iHKLr):
+                continue
+            tTh.append(hklData['tTheta'])
+        return np.array(tTh)
+
+    def getMultiplicity(self, allHKLs: Optional[bool] = False) -> np.ndarray:
+        """
+        Get the multiplicity for each hkl (number of symHKLs).
+
+        Parameters:
+        ----------
+        allHKLs : (optional) bool
+            If True, return all multiplicities, even if they are excluded in
+            the current planeData. Defaults to False.
+
+        Returns
+        -------
+        np.ndarray
+            Array of multiplicities for each hkl
+        """
+        # ... JVB: is this incorrect?
+        multip = []
+        for iHKLr, hklData in enumerate(self.hklDataList):
+            if allHKLs or self._thisHKL(iHKLr):
+                multip.append(hklData['symHKLs'].shape[1])
+        return np.array(multip)
+
+    def getHKLID(
+        self,
+        hkl: Union[int, Tuple[int, int, int], np.ndarray],
+        master: Optional[bool] = False,
+    ) -> Union[List[int], int]:
+        """
+        Return the unique ID of a list of hkls.
+
+        Parameters
+        ----------
+        hkl : int | tuple | list | numpy.ndarray
+            The input hkl. If an int, or a list of ints, it just passes
+            through (FIXME).
+            If a tuple, treated as a single (h, k, l).
+            If a list of lists/tuples, each is treated as an (h, k, l).
+            If a numpy.ndarray, it is assumed to have shape (3, N) with the
+            N (h, k, l) vectors stacked column-wise
+
+        master : bool, optional
+            If True, return the master hklID, else return the index from the
+            external (sorted and reduced) list.
+
+        Returns
+        -------
+        hkl_ids : list
+            The list of requested hklID values associated with the input.
+
+        Notes
+        -----
+        TODO: revisit this weird API???
+
+        Changes:
+        -------
+        2020-05-21 (JVB) -- modified to handle all symmetric equivalent reprs.
+        """
+        if hasattr(hkl, '__setitem__'):  # tuple does not have __setitem__
+            if isinstance(hkl, np.ndarray):
+                # if is ndarray, assume is 3xN
+                return [self._getHKLID(x, master=master) for x in hkl.T]
+            else:
+                return [self._getHKLID(x, master=master) for x in hkl]
+        else:
+            return self._getHKLID(hkl, master=master)
+
+    def _getHKLID(
+        self,
+        hkl: Union[int, Tuple[int, int, int], np.ndarray],
+        master: Optional[bool] = False,
+    ) -> int:
+        """
+        for hkl that is a tuple, return externally visible hkl index
+        """
+        if isinstance(hkl, int):
+            return hkl
+        else:
+            hklList = self.getSymHKLs()  # !!! list, reduced by exclusions
+            intl_hklIDs = np.asarray([i['hklID'] for i in self.hklDataList])
+            intl_hklIDs_sorted = intl_hklIDs[~self.exclusions[self.tThSortInv]]
+            dHKLInv = {}
+            for iHKL, symHKLs in enumerate(hklList):
+                idx = intl_hklIDs_sorted[iHKL] if master else iHKL
+                for thisHKL in symHKLs.T:
+                    dHKLInv[tuple(thisHKL)] = idx
+            try:
+                return dHKLInv[tuple(hkl)]
+            except KeyError:
+                raise RuntimeError(
+                    f"hkl '{tuple(hkl)}' is not present in this material!"
+                )
+
+    def getHKLs(self, *hkl_ids: int, **kwargs) -> Union[List[str], np.ndarray]:
+        """
+        Returns the powder HKLs subject to specified options.
+
+        Parameters
+        ----------
+        *hkl_ids : int
+            Optional list of specific master hklIDs.
+        **kwargs : dict
+            One or more of the following keyword arguments:
+            asStr : bool
+                If True, return a list of strings. The default is False.
+            thisTTh : scalar | None
+                If not None, only return hkls overlapping the specified
+                2-theta (in radians). The default is None.
+            allHKLs : bool
+                If True, then ignore exclusions. The default is False.
+
+        Raises
+        ------
+        TypeError
+            If an unknown kwarg is passed.
+        RuntimeError
+            If an invalid hklID is passed.
+
+        Returns
+        -------
+        hkls : list | numpy.ndarray
+            Either a list of hkls as strings (if asStr=True) or a vstacked
+            array of hkls.
+
+        Notes
+        -----
+        !!! the shape of the return value when asStr=False is the _transpose_
+            of the typical return value for self.get_hkls() and self.hkls!
+            This _may_ change to avoid confusion, but going to leave it for
+            now so as not to break anything.
+
+        2022/08/05 JVB:
+            - Added functionality to handle optional hklID args
+            - Updated docstring
+        """
+        # kwarg parsing
+        opts = dict(asStr=False, thisTTh=None, allHKLs=False)
+        if len(kwargs) > 0:
+            # check keys
+            for k, v in kwargs.items():
+                if k not in opts:
+                    raise TypeError(
+                        f"getHKLs() got an unexpected keyword argument '{k}'"
+                    )
+            opts.update(kwargs)
+
+        hkls = []
+        if len(hkl_ids) == 0:
+            for iHKLr, hklData in enumerate(self.hklDataList):
+                if not opts['allHKLs']:
+                    if not self._thisHKL(iHKLr):
+                        continue
+                if opts['thisTTh'] is not None:
+                    tThLo, tThHi = self._getTThRange(iHKLr)
+                    if opts['thisTTh'] < tThHi and opts['thisTTh'] > tThLo:
+                        hkls.append(hklData['hkl'])
+                else:
+                    hkls.append(hklData['hkl'])
+        else:
+            # !!! changing behavior here; if the hkl_id is invalid, raises
+            #     RuntimeError, and if allHKLs=True and the hkl_id is
+            #     excluded, it also raises a RuntimeError
+            all_hkl_ids = np.asarray([i['hklID'] for i in self.hklDataList])
+            sorted_excl = self.exclusions[self.tThSortInv]
+            idx = np.zeros(len(self.hklDataList), dtype=int)
+            for i, hkl_id in enumerate(hkl_ids):
+                # find ordinal index of current hklID
+                try:
+                    idx[i] = int(np.where(all_hkl_ids == hkl_id)[0])
+                except TypeError:
+                    raise RuntimeError(
+                        f"Requested hklID '{hkl_id}' is invalid!"
+                    )
+                if sorted_excl[idx[i]] and not opts['allHKLs']:
+                    raise RuntimeError(
+                        f"Requested hklID '{hkl_id}' is excluded!"
+                    )
+                hkls.append(self.hklDataList[idx[i]]['hkl'])
+
+        # handle output kwarg
+        if opts['asStr']:
+            return list(map(hklToStr, np.array(hkls)))
+        else:
+            return np.array(hkls)
+
+    def getSymHKLs(
+        self,
+        asStr: Optional[bool] = False,
+        withID: Optional[bool] = False,
+        indices: Optional[List[int]] = None,
+    ) -> Union[List[List[str]], List[np.ndarray]]:
+        """
+        Return all symmetry HKLs.
+
+        Parameters
+        ----------
+        asStr : bool, optional
+            If True, return the symmetry HKLs as strings. The default is False.
+        withID : bool, optional
+            If True, return the symmetry HKLs with the hklID. The default is
+            False. Does nothing if asStr is True.
+        indices : list[int], optional
+            Optional list of indices of hkls to include.
+
+        Returns
+        -------
+        sym_hkls : list of lists of strings, or list of numpy.ndarray
+            List of symmetry HKLs for each HKL, either as strings or as a
+            vstacked array.
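+
+            For example (a sketch, with pd a PlaneData instance),
+            pd.getSymHKLs(asStr=True)[0] is the list of symmetrically
+            equivalent hkl strings for the first included reflection.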
+ """ + sym_hkls = [] + hkl_index = 0 + if indices is not None: + indB = np.zeros(self.nHKLs, dtype=bool) + indB[np.array(indices)] = True + else: + indB = np.ones(self.nHKLs, dtype=bool) + for iHKLr, hklData in enumerate(self.hklDataList): + if not self._thisHKL(iHKLr): + continue + if indB[hkl_index]: + hkls = hklData['symHKLs'] + if asStr: + sym_hkls.append(list(map(hklToStr, np.array(hkls).T))) + elif withID: + sym_hkls.append( + np.vstack( + [ + np.tile(hklData['hklID'], (1, hkls.shape[1])), + hkls, + ] + ) + ) + else: + sym_hkls.append(np.array(hkls)) + hkl_index += 1 + return sym_hkls + + @staticmethod + def makeScatteringVectors( + hkls: np.ndarray, + rMat_c: np.ndarray, + bMat: np.ndarray, + wavelength: float, + chiTilt: Optional[float] = None, + ) -> Tuple[np.ndarray, np.ndarray, np.ndarray]: + """ + Static method for calculating g-vectors and scattering vector angles + for specified hkls, subject to the bragg conditions specified by + lattice vectors, orientation matrix, and wavelength + + Parameters + ---------- + hkls : np.ndarray + (3, n) array of hkls. + rMat_c : np.ndarray + (3, 3) rotation matrix from the crystal to the sample frame. + bMat : np.ndarray, optional + (3, 3) COB from reciprocal lattice frame to the crystal frame. + wavelength : float + xray wavelength in Angstroms. + chiTilt : float, optional + 0 <= chiTilt <= 90 degrees, defaults to 0 + + Returns + ------- + gVec_s : np.ndarray + (3, n) array of g-vectors (reciprocal lattice) in the sample frame. + oangs0 : np.ndarray + (3, n) array containing the feasible (2-theta, eta, ome) triplets + for each input hkl (first solution) + oangs1 : np.ndarray + (3, n) array containing the feasible (2-theta, eta, ome) triplets + for each input hkl (second solution) + + FIXME: must do testing on strained bMat + """ + # arg munging + chi = float(chiTilt) if chiTilt is not None else 0.0 + rMat_c = rMat_c.squeeze() + + # these are the reciprocal lattice vectors in the SAMPLE FRAME + # ** NOTE ** + # if strained, assumes that you handed it a bMat calculated from + # strained [a, b, c] in the CRYSTAL FRAME + gVec_s = np.dot(rMat_c, np.dot(bMat, hkls)) + + dim0 = gVec_s.shape[0] + if dim0 != 3: + raise ValueError(f'Number of lattice plane normal dims is {dim0}') + + # call model from transforms now + oangs0, oangs1 = xfcapi.oscill_angles_of_hkls( + hkls.T, chi, rMat_c, bMat, wavelength + ) + + return gVec_s, oangs0.T, oangs1.T + + def _makeScatteringVectors( + self, + rMat: np.ndarray, + bMat: Optional[np.ndarray] = None, + chiTilt: Optional[float] = None, + ) -> Tuple[List[np.ndarray], List[np.ndarray], List[np.ndarray]]: + """ + modeled after QFromU.m + """ + + if bMat is None: + bMat = self._latVecOps['B'] + + Qs_vec = [] + Qs_ang0 = [] + Qs_ang1 = [] + for iHKLr, hklData in enumerate(self.hklDataList): + if not self._thisHKL(iHKLr): + continue + thisQs, thisAng0, thisAng1 = PlaneData.makeScatteringVectors( + hklData['symHKLs'], + rMat, + bMat, + self._wavelength, + chiTilt=chiTilt, + ) + Qs_vec.append(thisQs) + Qs_ang0.append(thisAng0) + Qs_ang1.append(thisAng1) + + return Qs_vec, Qs_ang0, Qs_ang1 + + def calcStructFactor(self, atominfo): + """ + Calculates unit cell structure factors as a function of hkl + USAGE: + FSquared = calcStructFactor(atominfo,hkls,B) + INPUTS: + 1) atominfo (m x 1 float ndarray) the first threee columns of the + matrix contain fractional atom positions [uvw] of atoms in the unit + cell. 
The last column contains the number of electrons for a given atom + 2) hkls (3 x n float ndarray) is the array of Miller indices for + the planes of interest. The vectors are assumed to be + concatenated along the 1-axis (horizontal) + 3) B (3 x 3 float ndarray) is a matrix of reciprocal lattice basis + vectors,where each column contains a reciprocal lattice basis vector + ({g}=[B]*{hkl}) + OUTPUTS: + 1) FSquared (n x 1 float ndarray) array of structure factors, + one for each hkl passed into the function + """ + r = atominfo[:, 0:3] + elecNum = atominfo[:, 3] + hkls = self.hkls + B = self.latVecOps['B'] + sinThOverLamdaList, ffDataList = LoadFormFactorData() + FSquared = np.zeros(hkls.shape[1]) + + for jj in np.arange(0, hkls.shape[1]): + # ???: probably have other functions for this + # Calculate G for each hkl + # Calculate magnitude of G for each hkl + G = ( + hkls[0, jj] * B[:, 0] + + hkls[1, jj] * B[:, 1] + + hkls[2, jj] * B[:, 2] + ) + magG = np.sqrt(G[0] ** 2 + G[1] ** 2 + G[2] ** 2) + + # Begin calculating form factor + F = 0 + for ii in np.arange(0, r.shape[0]): + ff = RetrieveAtomicFormFactor( + elecNum[ii], magG, sinThOverLamdaList, ffDataList + ) + exparg = complex( + 0.0, + 2.0 + * np.pi + * ( + hkls[0, jj] * r[ii, 0] + + hkls[1, jj] * r[ii, 1] + + hkls[2, jj] * r[ii, 2] + ), + ) + F += ff * np.exp(exparg) + + """ + F = sum_atoms(ff(Q)*e^(2*pi*i(hu+kv+lw))) + """ + FSquared[jj] = np.real(F * np.conj(F)) + + return FSquared + + # OLD DEPRECATED PLANE_DATA STUFF ==================================== + @deprecated(new_func="len(self.hkls.T)", removal_date="2025-08-01") + def getNHKLs(self): + return len(self.getHKLs()) + + @deprecated(new_func="self.exclusions", removal_date="2025-08-01") + def get_exclusions(self): + return self.exclusions + + @deprecated(new_func="self.exclusions=...", removal_date="2025-08-01") + def set_exclusions(self, exclusions): + self.exclusions = exclusions + + @deprecated(new_func="rotations.ltypeOfLaueGroup(self.laueGroup)", + removal_date="2025-08-01") + def getLatticeType(self): + return ltypeOfLaueGroup(self.laueGroup) + + @deprecated(new_func="self.q_sym", removal_date="2025-08-01") + def getQSym(self): + return self.q_sym + + +@deprecated(removal_date='2025-01-01') +def getFriedelPair(tth0, eta0, *ome0, **kwargs): + """ + Get the diffractometer angular coordinates in degrees for + the Friedel pair of a given reflection (min angular distance). + + AUTHORS: + + J. V. Bernier -- 10 Nov 2009 + + USAGE: + + ome1, eta1 = getFriedelPair(tth0, eta0, *ome0, + display=False, + units='degrees', + convention='hexrd') + + INPUTS: + + 1) tth0 is a list (or ndarray) of 1 or n the bragg angles (2theta) for + the n reflections (tiled to match eta0 if only 1 is given). + + 2) eta0 is a list (or ndarray) of 1 or n azimuthal coordinates for the n + reflections (tiled to match tth0 if only 1 is given). + + 3) ome0 is a list (or ndarray) of 1 or n reference oscillation + angles for the n reflections (denoted omega in [1]). This argument + is optional. 
+
+    4) Keyword arguments may be one of the following:
+
+    Keyword         Values|{default}        Action
+    --------------  --------------          --------------
+    'display'       True|{False}            toggles display to cmd line
+    'units'         'radians'|{'degrees'}   sets units for input angles
+    'convention'    'fable'|{'hexrd'}       sets conventions defining
+                                            the angles (see below)
+    'chiTilt'       None                    the inclination (about Xlab) of
+                                            the oscillation axis
+
+    OUTPUTS:
+
+    1) ome1 contains the oscillation angle coordinates of the
+       Friedel pairs associated with the n input reflections, relative to
+       ome0 (i.e. the computed offsets are added to ome0). Output is in
+       DEGREES!
+
+    2) eta1 contains the azimuthal coordinates of the Friedel
+       pairs associated with the n input reflections. Output units are
+       controlled via the module variable 'outputDegrees'
+
+    NOTES:
+
+    !!!: The outputs ome1, eta1 are written using the selected convention,
+         but the units are always degrees. May change this to work with
+         Nathan's global...
+
+    !!!: In the 'fable' convention [1], {XYZ} form a RHON basis where X is
+         downstream, Z is vertical, and eta is CCW with +Z defining eta = 0.
+
+    !!!: In the 'hexrd' convention [2], {XYZ} form a RHON basis where Z is
+         upstream, Y is vertical, and eta is CCW with +X defining eta = 0.
+
+    REFERENCES:
+
+    [1] E. M. Lauridsen, S. Schmidt, R. M. Suter, and H. F. Poulsen,
+        ``Tracking: a method for structural characterization of grains in
+        powders or polycrystals''. J. Appl. Cryst. (2001). 34, 744--750
+
+    [2] J. V. Bernier, M. P. Miller, J. -S. Park, and U. Lienert,
+        ``Quantitative Stress Analysis of Recrystallized OFHC Cu Subject
+        to Deformed In Situ'', J. Eng. Mater. Technol. (2008). 130.
+        DOI:10.1115/1.2870234
+    """
+
+    dispFlag = False
+    fableFlag = False
+    chi = None
+    c1 = 1.0
+    c2 = pi / 180.0
+
+    eta0 = np.atleast_1d(eta0)
+    tth0 = np.atleast_1d(tth0)
+    ome0 = np.atleast_1d(ome0)
+
+    if eta0.ndim != 1:
+        raise RuntimeError('azimuthal input must be 1-D')
+
+    npts = len(eta0)
+
+    if tth0.ndim != 1:
+        raise RuntimeError('Bragg angle input must be 1-D')
+    else:
+        if len(tth0) != npts:
+            if len(tth0) == 1:
+                tth0 *= np.ones(npts)
+            elif npts == 1:
+                npts = len(tth0)
+                eta0 *= np.ones(npts)
+            else:
+                raise RuntimeError(
+                    'the azimuthal and Bragg angle inputs are inconsistent'
+                )
+
+    if len(ome0) == 0:
+        ome0 = np.zeros(npts)  # dummy ome0
+    elif len(ome0) == 1 and npts > 1:
+        ome0 *= np.ones(npts)
+    else:
+        if len(ome0) != npts:
+            raise RuntimeError(
+                'your oscillation angle input is inconsistent; '
+                + f'it has length {len(ome0)} while it should be {npts}'
+            )
+
+    # keyword args processing
+    kwarglen = len(kwargs)
+    if kwarglen > 0:
+        argkeys = list(kwargs.keys())
+        for i in range(kwarglen):
+            if argkeys[i] == 'display':
+                dispFlag = kwargs[argkeys[i]]
+            elif argkeys[i] == 'convention':
+                if kwargs[argkeys[i]].lower() == 'fable':
+                    fableFlag = True
+            elif argkeys[i] == 'units':
+                if kwargs[argkeys[i]] == 'radians':
+                    c1 = 180.0 / pi
+                    c2 = 1.0
+            elif argkeys[i] == 'chiTilt':
+                if kwargs[argkeys[i]] is not None:
+                    chi = kwargs[argkeys[i]]
+
+    # a little talkback...
+ if dispFlag: + if fableFlag: + print('\nUsing Fable angle convention\n') + else: + print('\nUsing image-based angle convention\n') + + # mapped eta input + # - in DEGREES, thanks to c1 + eta0 = mapAngle(c1 * eta0, [-180, 180], units='degrees') + if fableFlag: + eta0 = 90 - eta0 + + # must put args into RADIANS + # - eta0 is in DEGREES, + # - the others are in whatever was entered, hence c2 + eta0 = d2r * eta0 + tht0 = c2 * tth0 / 2 + if chi is not None: + chi = c2 * chi + else: + chi = 0 + + """ + SYSTEM SOLVE + + + cos(chi)cos(eta)cos(theta)sin(x) - cos(chi)sin(theta)cos(x) \ + = sin(theta) - sin(chi)sin(eta)cos(theta) + + + Identity: a sin x + b cos x = sqrt(a**2 + b**2) sin (x + alpha) + + / + | atan(b/a) for a > 0 + alpha < + | pi + atan(b/a) for a < 0 + \ + + => sin (x + alpha) = c / sqrt(a**2 + b**2) + + must use both branches for sin(x) = n: + x = u (+ 2k*pi) | x = pi - u (+ 2k*pi) + """ + cchi = np.cos(chi) + schi = np.sin(chi) + ceta = np.cos(eta0) + seta = np.sin(eta0) + ctht = np.cos(tht0) + stht = np.sin(tht0) + + nchi = np.c_[0.0, cchi, schi].T + + gHat0_l = -np.vstack([ceta * ctht, seta * ctht, stht]) + + a = cchi * ceta * ctht + b = -cchi * stht + c = stht + schi * seta * ctht + + # form solution + abMag = np.sqrt(a * a + b * b) + assert np.all(abMag > 0), "Beam vector specification is infeasible!" + phaseAng = np.arctan2(b, a) + rhs = c / abMag + rhs[abs(rhs) > 1.0] = np.nan + rhsAng = np.arcsin(rhs) + + # write ome angle output arrays (NaNs persist here) + ome1 = rhsAng - phaseAng + ome2 = np.pi - rhsAng - phaseAng + + ome1 = mapAngle(ome1, [-np.pi, np.pi], units='radians') + ome2 = mapAngle(ome2, [-np.pi, np.pi], units='radians') + + ome_stack = np.vstack([ome1, ome2]) + + min_idx = np.argmin(abs(ome_stack), axis=0) + + ome_min = ome_stack[min_idx, list(range(len(ome1)))] + eta_min = np.nan * np.ones_like(ome_min) + + # mark feasible reflections + goodOnes = ~np.isnan(ome_min) + + numGood = np.sum(goodOnes) + tmp_eta = np.empty(numGood) + tmp_gvec = gHat0_l[:, goodOnes] + for i in range(numGood): + rchi = rotMatOfExpMap(np.tile(ome_min[goodOnes][i], (3, 1)) * nchi) + gHat_l = np.dot(rchi, tmp_gvec[:, i].reshape(3, 1)) + tmp_eta[i] = np.arctan2(gHat_l[1], gHat_l[0]) + eta_min[goodOnes] = tmp_eta + + # everybody back to DEGREES! + # - ome1 is in RADIANS here + # - convert and put into [-180, 180] + ome1 = mapAngle( + mapAngle(r2d * ome_min, [-180, 180], units='degrees') + c1 * ome0, + [-180, 180], + units='degrees', + ) + + # put eta1 in [-180, 180] + eta1 = mapAngle(r2d * eta_min, [-180, 180], units='degrees') + + if not outputDegrees: + ome1 *= d2r + eta1 *= d2r + + return ome1, eta1 + + +def getDparms( + lp: np.ndarray, lpTag: str, radians: Optional[bool] = True +) -> np.ndarray: + """ + Utility routine for getting dparms, that is the lattice parameters + without symmetry -- 'triclinic' + + Parameters + ---------- + lp : np.ndarray + Parsed lattice parameters + lpTag : str + Tag for the symmetry group of the lattice (from Laue group) + radians : bool, optional + Whether or not to use radians for angles, default is True + + Returns + ------- + np.ndarray + The lattice parameters without symmetry. + """ + latVecOps = latticeVectors(lp, tag=lpTag, radians=radians) + return latVecOps['dparms'] + + +def LoadFormFactorData(): + """ + Script to read in a csv file containing information relating the + magnitude of Q (sin(th)/lambda) to atomic form factor + Notes: + Atomic form factor data gathered from the International Tables of + Crystallography: + P. J. Brown, A. G. Fox, E. 
N. Maslen, M. A. O'Keefec and B. T. M. Willis, + "Chapter 6.1. Intensity of diffracted intensities", International Tables + for Crystallography (2006). Vol. C, ch. 6.1, pp. 554-595 + """ + + dir1 = os.path.split(valunits.__file__) + dataloc = os.path.join(dir1[0], 'data', 'FormFactorVsQ.csv') + + data = np.zeros((62, 99), float) + + # FIXME: marked broken by DP + jj = 0 + with open(dataloc, 'rU') as csvfile: + datareader = csv.reader(csvfile, dialect=csv.excel) + for row in datareader: + ii = 0 + for val in row: + data[jj, ii] = float(val) + ii += 1 + jj += 1 + + sinThOverLamdaList = data[:, 0] + ffDataList = data[:, 1:] + + return sinThOverLamdaList, ffDataList + + +def RetrieveAtomicFormFactor(elecNum, magG, sinThOverLamdaList, ffDataList): + """Interpolates between tabulated data to find the atomic form factor + for an atom with elecNum electrons for a given magnitude of Q + USAGE: + ff = RetrieveAtomicFormFactor(elecNum,magG,sinThOverLamdaList,ffDataList) + INPUTS: + 1) elecNum, (1 x 1 float) number of electrons for atom of interest + 2) magG (1 x 1 float) magnitude of G + 3) sinThOverLamdaList (n x 1 float ndarray) form factor data is tabulated + in terms of sin(theta)/lambda (A^-1). + 3) ffDataList (n x m float ndarray) form factor data is tabulated in terms + of sin(theta)/lambda (A^-1). Each column corresponds to a different + number of electrons + OUTPUTS: + 1) ff (n x 1 float) atomic form factor for atom and hkl of interest + NOTES: + Data should be calculated in terms of G at some point + """ + sinThOverLambda = 0.5 * magG + # lambda=2*d*sin(th) + # lambda=2*sin(th)/G + # 1/2*G=sin(th)/lambda + + ff = np.interp( + sinThOverLambda, sinThOverLamdaList, ffDataList[:, (elecNum - 1)] + ) + + return ff + + +def lorentz_factor(tth: np.ndarray) -> np.ndarray: + """ + 05/26/2022 SS adding lorentz factor computation + to the detector so that it can be compenstated for in the + intensity correction + + Parameters + ---------- + tth: np.ndarray + 2-theta of every pixel in radians + + Returns + ------- + np.ndarray + Lorentz factor for each pixel + """ + + theta = 0.5 * tth + + cth = np.cos(theta) + sth2 = np.sin(theta) ** 2 + + return 1.0 / (4.0 * cth * sth2) + + +def polarization_factor( + tth: np.ndarray, + unpolarized: Optional[bool] = True, + eta: Optional[np.ndarray] = None, + f_hor: Optional[float] = None, + f_vert: Optional[float] = None, +) -> np.ndarray: + """ + 06/14/2021 SS adding lorentz polarization factor computation + to the detector so that it can be compenstated for in the + intensity correction + + 05/26/2022 decoupling lorentz factor from polarization factor + + parameters: tth two theta of every pixel in radians + if unpolarized is True, all subsequent arguments are optional + eta azimuthal angle of every pixel + f_hor fraction of horizontal polarization + (~1 for XFELs) + f_vert fraction of vertical polarization + (~0 for XFELs) + notice f_hor + f_vert = 1 + + FIXME, called without parameters like eta, f_hor, f_vert, but they default + to none in the current implementation, which will throw an error. + """ + + ctth2 = np.cos(tth) ** 2 + + if unpolarized: + return (1 + ctth2) / 2 + + seta2 = np.sin(eta) ** 2 + ceta2 = np.cos(eta) ** 2 + return f_hor * (seta2 + ceta2 * ctth2) + f_vert * (ceta2 + seta2 * ctth2) diff --git a/hexrd/laue/xrdutil/utils.py b/hexrd/laue/xrdutil/utils.py new file mode 100644 index 000000000..2cbae2b6f --- /dev/null +++ b/hexrd/laue/xrdutil/utils.py @@ -0,0 +1,1516 @@ +#! 
/usr/bin/env python3
+# ============================================================
+# Copyright (c) 2012, Lawrence Livermore National Security, LLC.
+# Produced at the Lawrence Livermore National Laboratory.
+# Written by Joel Bernier and others.
+# LLNL-CODE-529294.
+# All rights reserved.
+#
+# This file is part of HEXRD. For details on downloading the source,
+# see the file COPYING.
+#
+# Please also see the file LICENSE.
+#
+# This program is free software; you can redistribute it and/or modify it under
+# the terms of the GNU Lesser General Public License (as published by the Free
+# Software Foundation) version 2.1 dated February 1999.
+#
+# This program is distributed in the hope that it will be useful, but
+# WITHOUT ANY WARRANTY; without even the IMPLIED WARRANTY OF MERCHANTABILITY
+# or FITNESS FOR A PARTICULAR PURPOSE. See the terms and conditions of the
+# GNU General Public License for more details.
+#
+# You should have received a copy of the GNU Lesser General Public
+# License along with this program (see file LICENSE); if not, write to
+# the Free Software Foundation, Inc., 59 Temple Place, Suite 330,
+# Boston, MA 02111-1307 USA or visit <http://www.gnu.org/licenses/>.
+# ============================================================
+
+
+from typing import Optional, Union, Any, Generator
+from hexrd.material.crystallography import PlaneData
+from hexrd.distortion.distortionabc import DistortionABC
+
+import numba
+import numpy as np
+
+from hexrd import constants
+from hexrd import matrixutil as mutil
+from hexrd import rotations as rot
+from hexrd import gridutil as gutil
+
+from hexrd.material.crystallography import processWavelength
+
+from hexrd.transforms import xfcapi
+from hexrd.valunits import valWUnit
+
+from hexrd import distortion as distortion_pkg
+
+from hexrd.deprecation import deprecated
+
+
+simlp = 'hexrd.instrument.hedm_instrument.HEDMInstrument.simulate_laue_pattern'
+
+# =============================================================================
+# PARAMETERS
+# =============================================================================
+
+distortion_key = 'distortion'
+
+d2r = piby180 = constants.d2r
+r2d = constants.r2d
+
+epsf = constants.epsf  # ~2.2e-16
+ten_epsf = 10 * epsf  # ~2.2e-15
+sqrt_epsf = constants.sqrt_epsf  # ~1.5e-8
+
+bHat_l_DFLT = constants.beam_vec.flatten()
+eHat_l_DFLT = constants.eta_vec.flatten()
+
+nans_1x2 = np.nan * np.ones((1, 2))
+
+# =============================================================================
+# CLASSES
+# =============================================================================
+
+
+class EtaOmeMaps(object):
+    """
+    find-orientations loads pickled eta-ome data, but CollapseOmeEta is not
+    pickleable, because it holds a list of ReadGE, each of which holds a
+    reference to an open file object, which is not pickleable.
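+
+    A minimal usage sketch (hypothetical file names):
+
+        eta_ome = EtaOmeMaps('analysis_eta-ome_maps.npz')
+        eta_ome.save_eta_ome_maps('copy_eta-ome_maps')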
+    """
+
+    def __init__(self, ome_eta_archive: str):
+        ome_eta: np.ndarray = np.load(ome_eta_archive, allow_pickle=True)
+
+        planeData_args = ome_eta['planeData_args']
+        planeData_hkls = ome_eta['planeData_hkls']
+        self.planeData = PlaneData(planeData_hkls, *planeData_args)
+        self.planeData.exclusions = ome_eta['planeData_excl']
+        self.dataStore = ome_eta['dataStore']
+        self.iHKLList = ome_eta['iHKLList']
+        self.etaEdges = ome_eta['etaEdges']
+        self.omeEdges = ome_eta['omeEdges']
+        self.etas = ome_eta['etas']
+        self.omegas = ome_eta['omegas']
+
+    def save_eta_ome_maps(self, filename: str) -> None:
+        """
+        Save the maps to a compressed .npz archive storing:
+
+        eta_ome.dataStore
+        eta_ome.planeData
+        eta_ome.iHKLList
+        eta_ome.etaEdges
+        eta_ome.omeEdges
+        eta_ome.etas
+        eta_ome.omegas
+        """
+        args = np.array(self.planeData.getParams(), dtype=object)[:4]
+        args[2] = valWUnit('wavelength', 'length', args[2], 'angstrom')
+        hkls = np.vstack([i['hkl'] for i in self.planeData.hklDataList]).T
+        save_dict = {
+            'dataStore': self.dataStore,
+            'etas': self.etas,
+            'etaEdges': self.etaEdges,
+            'iHKLList': self.iHKLList,
+            'omegas': self.omegas,
+            'omeEdges': self.omeEdges,
+            'planeData_args': args,
+            'planeData_hkls': hkls,
+            'planeData_excl': self.planeData.exclusions,
+        }
+        np.savez_compressed(filename, **save_dict)
+
+
+# =============================================================================
+# FUNCTIONS
+# =============================================================================
+
+
+def _zproject(x: np.ndarray, y: np.ndarray):
+    return np.cos(x) * np.sin(y) - np.sin(x) * np.cos(y)
+
+
+def zproject_sph_angles(
+    invecs: np.ndarray,
+    chi: float = 0.0,
+    method: str = 'stereographic',
+    source: str = 'd',
+    use_mask: bool = False,
+    invert_z: bool = False,
+    rmat: Optional[np.ndarray] = None,
+) -> Union[np.ndarray, tuple[np.ndarray, np.ndarray]]:
+    """
+    Projects spherical angles to 2-d mapping.
+
+    Parameters
+    ----------
+    invecs : array_like
+        The (n, 3) array of input points, interpreted via the 'source' kwarg.
+    chi : scalar, optional
+        The inclination angle of the sample frame. The default is 0.0.
+    method : str, optional
+        Mapping type spec, either 'stereographic' or 'equal-area'.
+        The default is 'stereographic'.
+    source : str, optional
+        The type specifier of the input vectors, either 'd', 'q', or 'g'.
+        'd' signifies unit diffraction vectors as (2theta, eta, omega),
+        'q' specifies unit scattering vectors as (2theta, eta, omega),
+        'g' specifies unit vectors in the sample frame as (x, y, z).
+        The default is 'd'.
+    use_mask : bool, optional
+        If True, trim points not on the +z hemisphere (polar angles > 90).
+        The default is False.
+    invert_z : bool, optional
+        If True, invert the Z-coordinates of the unit vectors calculated from
+        the input angles. The default is False.
+    rmat : numpy.ndarray, shape=(3, 3), optional
+        Array representing a change of basis (rotation) to apply to the
+        calculated unit vectors. The default is None.
+
+    Raises
+    ------
+    RuntimeError
+        If method not in ('stereographic', 'equal-area').
+
+    Returns
+    -------
+    numpy.ndarray or tuple
+        If use_mask = False, then the array of n mapped input points with
+        shape (n, 2). If use_mask = True, then the first element is the
+        ndarray of mapped points with shape (<=n, 2), and the second is a
+        bool array with shape (n,) marking the points that fell on the upper
+        hemisphere.
+
+    Notes
+    -----
+    CAVEAT: +Z axis projections only!!!
+    TODO: check mask application.
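+
+    A minimal usage sketch (hypothetical angles, in radians):
+
+        angs = np.array([[0.2, 0.0, 0.0]])  # (2theta, eta, omega)
+        ppts = zproject_sph_angles(angs, method='stereographic', source='d')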
+ """ + assert isinstance(source, str), "source kwarg must be a string" + + invecs = np.atleast_2d(invecs) + if source.lower() == 'd': + spts_s = xfcapi.angles_to_dvec(invecs, chi=chi) + elif source.lower() == 'q': + spts_s = xfcapi.angles_to_gvec(invecs, chi=chi) + elif source.lower() == 'g': + spts_s = invecs + + if rmat is not None: + spts_s = np.dot(spts_s, rmat.T) + + if invert_z: + spts_s[:, 2] = -spts_s[:, 2] + + # filter based on hemisphere + if use_mask: + pzi = spts_s[:, 2] <= 0 + spts_s = spts_s[pzi, :] + + if method.lower() == 'stereographic': + ppts = np.vstack( + [ + spts_s[:, 0] / (1.0 - spts_s[:, 2]), + spts_s[:, 1] / (1.0 - spts_s[:, 2]), + ] + ).T + elif method.lower() == 'equal-area': + chords = spts_s + np.tile([0, 0, 1], (len(spts_s), 1)) + scl = np.tile(mutil.rowNorm(chords), (2, 1)).T + ucrd = mutil.unitVector( + np.hstack([chords[:, :2], np.zeros((len(spts_s), 1))]).T + ) + + ppts = ucrd[:2, :].T * scl + else: + raise RuntimeError(f"method '{method}' not recognized") + + if use_mask: + return ppts, pzi + else: + return ppts + + +def make_polar_net( + ndiv: int = 24, projection: str = 'stereographic', max_angle: float = 120.0 +) -> np.ndarray: + """ + TODO: options for generating net boundaries; fixed to Z proj. + """ + ndiv_tth = int(np.floor(0.5 * ndiv)) + 1 + wtths = np.radians( + np.linspace(0, 1, num=ndiv_tth, endpoint=True) * max_angle + ) + wetas = np.radians(np.linspace(-1, 1, num=ndiv + 1, endpoint=True) * 180.0) + weta_gen = np.radians(np.linspace(-1, 1, num=181, endpoint=True) * 180.0) + pts = [] + for eta in wetas: + net_ang = np.vstack( + [[wtths[0], wtths[-1]], np.tile(eta, 2), np.zeros(2)] + ).T + pts.append(zproject_sph_angles(net_ang, method=projection, source='d')) + pts.append(np.nan * np.ones((1, 2))) + for tth in wtths[1:]: + net_ang = np.vstack( + [tth * np.ones_like(weta_gen), weta_gen, np.zeros_like(weta_gen)] + ).T + pts.append(zproject_sph_angles(net_ang, method=projection, source='d')) + pts.append(nans_1x2) + + return np.vstack(pts) + + +validateAngleRanges = xfcapi.validate_angle_ranges + + +@deprecated(removal_date='2025-01-01') +def simulateOmeEtaMaps( + omeEdges, + etaEdges, + planeData, + expMaps, + chi=0.0, + etaTol=None, + omeTol=None, + etaRanges=None, + omeRanges=None, + bVec=constants.beam_vec, + eVec=constants.eta_vec, + vInv=constants.identity_6x1, +): + """ + Simulate spherical maps. + + Parameters + ---------- + omeEdges : TYPE + DESCRIPTION. + etaEdges : TYPE + DESCRIPTION. + planeData : TYPE + DESCRIPTION. + expMaps : (3, n) ndarray + DESCRIPTION. + chi : TYPE, optional + DESCRIPTION. The default is 0.. + etaTol : TYPE, optional + DESCRIPTION. The default is None. + omeTol : TYPE, optional + DESCRIPTION. The default is None. + etaRanges : TYPE, optional + DESCRIPTION. The default is None. + omeRanges : TYPE, optional + DESCRIPTION. The default is None. + bVec : TYPE, optional + DESCRIPTION. The default is [0, 0, -1]. + eVec : TYPE, optional + DESCRIPTION. The default is [1, 0, 0]. + vInv : TYPE, optional + DESCRIPTION. The default is [1, 1, 1, 0, 0, 0]. + + Returns + ------- + eta_ome : TYPE + DESCRIPTION. + + Notes + ----- + all angular info is entered in degrees + + ??? might want to creat module-level angluar unit flag + ??? 
+    # convert to radians
+    etaEdges = np.radians(np.sort(etaEdges))
+    omeEdges = np.radians(np.sort(omeEdges))
+
+    omeIndices = list(range(len(omeEdges)))
+    etaIndices = list(range(len(etaEdges)))
+
+    i_max = omeIndices[-1]
+    j_max = etaIndices[-1]
+
+    etaMin = etaEdges[0]
+    etaMax = etaEdges[-1]
+    omeMin = omeEdges[0]
+    omeMax = omeEdges[-1]
+    if omeRanges is None:
+        omeRanges = [
+            [omeMin, omeMax],
+        ]
+
+    if etaRanges is None:
+        etaRanges = [
+            [etaMin, etaMax],
+        ]
+
+    # signed deltas IN RADIANS
+    del_ome = omeEdges[1] - omeEdges[0]
+    del_eta = etaEdges[1] - etaEdges[0]
+
+    delOmeSign = np.sign(del_ome)
+
+    # tolerances are in degrees (easier)
+    if omeTol is None:
+        omeTol = abs(del_ome)
+    else:
+        omeTol = np.radians(omeTol)
+    if etaTol is None:
+        etaTol = abs(del_eta)
+    else:
+        etaTol = np.radians(etaTol)
+
+    # pixel dilations
+    dpix_ome = round(omeTol / abs(del_ome))
+    dpix_eta = round(etaTol / abs(del_eta))
+
+    i_dil, j_dil = np.meshgrid(
+        np.arange(-dpix_ome, dpix_ome + 1), np.arange(-dpix_eta, dpix_eta + 1)
+    )
+
+    # get symmetrically expanded hkls from planeData
+    sym_hkls = planeData.getSymHKLs()
+    nhkls = len(sym_hkls)
+
+    # make things C-contiguous for use in xfcapi functions
+    expMaps = np.array(expMaps.T, order='C')
+    nOrs = len(expMaps)
+
+    bMat = np.array(planeData.latVecOps['B'], order='C')
+    wlen = planeData.wavelength
+
+    bVec = np.array(bVec.flatten(), order='C')
+    eVec = np.array(eVec.flatten(), order='C')
+    vInv = np.array(vInv.flatten(), order='C')
+
+    eta_ome = np.zeros((nhkls, max(omeIndices), max(etaIndices)), order='C')
+    for iHKL in range(nhkls):
+        these_hkls = np.ascontiguousarray(sym_hkls[iHKL].T, dtype=float)
+        for iOr in range(nOrs):
+            rMat_c = xfcapi.make_rmat_of_expmap(expMaps[iOr, :])
+            angList = np.vstack(
+                xfcapi.oscill_angles_of_hkls(
+                    these_hkls,
+                    chi,
+                    rMat_c,
+                    bMat,
+                    wlen,
+                    beam_vec=bVec,
+                    eta_vec=eVec,
+                    v_inv=vInv,
+                )
+            )
+            if not np.all(np.isnan(angList)):
+                angList[:, 1] = rot.mapAngle(
+                    angList[:, 1], [etaEdges[0], etaEdges[0] + 2 * np.pi]
+                )
+                angList[:, 2] = rot.mapAngle(
+                    angList[:, 2], [omeEdges[0], omeEdges[0] + 2 * np.pi]
+                )
+
+                # do eta ranges
+                angMask_eta = np.zeros(len(angList), dtype=bool)
+                for etas in etaRanges:
+                    angMask_eta = np.logical_or(
+                        angMask_eta,
+                        xfcapi.validate_angle_ranges(
+                            angList[:, 1], etas[0], etas[1]
+                        ),
+                    )
+
+                # do omega ranges
+                ccw = True
+                angMask_ome = np.zeros(len(angList), dtype=bool)
+                for omes in omeRanges:
+                    if omes[1] - omes[0] < 0:
+                        ccw = False
+                    angMask_ome = np.logical_or(
+                        angMask_ome,
+                        xfcapi.validate_angle_ranges(
+                            angList[:, 2], omes[0], omes[1], ccw=ccw
+                        ),
+                    )
+
+                # mask angles list, hkls
+                angMask = np.logical_and(angMask_eta, angMask_ome)
+
+                culledTTh = angList[angMask, 0]
+                culledEta = angList[angMask, 1]
+                culledOme = angList[angMask, 2]
+
+                for iTTh in range(len(culledTTh)):
+                    culledEtaIdx = np.where(etaEdges - culledEta[iTTh] > 0)[0]
+                    if len(culledEtaIdx) > 0:
+                        culledEtaIdx = culledEtaIdx[0] - 1
+                        if culledEtaIdx < 0:
+                            culledEtaIdx = None
+                    else:
+                        culledEtaIdx = None
+                    culledOmeIdx = np.where(omeEdges - culledOme[iTTh] > 0)[0]
+                    if len(culledOmeIdx) > 0:
+                        if delOmeSign > 0:
+                            culledOmeIdx = culledOmeIdx[0] - 1
+                        else:
+                            culledOmeIdx = culledOmeIdx[-1]
+                        if culledOmeIdx < 0:
+                            culledOmeIdx = None
+                    else:
+                        culledOmeIdx = None
+
+                    if culledEtaIdx is not None and culledOmeIdx is not None:
+                        if dpix_ome > 0 or dpix_eta > 0:
+                            i_sup = omeIndices[culledOmeIdx] + np.array(
+                                [i_dil.flatten()], dtype=int
+                            )
+                            j_sup = etaIndices[culledEtaIdx] + np.array(
+                                [j_dil.flatten()], dtype=int
+                            )
+
+                            # catch points that fall off the detector...
+                            # maybe make this fancy enough to wrap at 2pi?
+                            idx_mask = np.logical_and(
+                                np.logical_and(i_sup >= 0, i_sup < i_max),
+                                np.logical_and(j_sup >= 0, j_sup < j_max),
+                            )
+                            eta_ome[iHKL, i_sup[idx_mask], j_sup[idx_mask]] = 1.0
+                        else:
+                            eta_ome[
+                                iHKL,
+                                omeIndices[culledOmeIdx],
+                                etaIndices[culledEtaIdx],
+                            ] = 1.0
+    return eta_ome
+
+
+def _fetch_hkls_from_planedata(pd: PlaneData):
+    return np.hstack(pd.getSymHKLs(withID=True)).T
+
+
+def _filter_hkls_eta_ome(
+    hkls: np.ndarray,
+    angles: np.ndarray,
+    eta_range: list[tuple[float]],
+    ome_range: list[tuple[float]],
+    return_mask: bool = False,
+) -> Union[
+    tuple[np.ndarray, np.ndarray], tuple[np.ndarray, np.ndarray, np.ndarray]
+]:
+    """
+    given a set of hkls and angles, filter them by the
+    eta and omega ranges
+    """
+    angMask_eta = np.zeros(len(angles), dtype=bool)
+    for etas in eta_range:
+        angMask_eta = np.logical_or(
+            angMask_eta,
+            xfcapi.validate_angle_ranges(angles[:, 1], etas[0], etas[1]),
+        )
+
+    ccw = True
+    angMask_ome = np.zeros(len(angles), dtype=bool)
+    for omes in ome_range:
+        if omes[1] - omes[0] < 0:
+            ccw = False
+        angMask_ome = np.logical_or(
+            angMask_ome,
+            xfcapi.validate_angle_ranges(
+                angles[:, 2], omes[0], omes[1], ccw=ccw
+            ),
+        )
+
+    angMask = np.logical_and(angMask_eta, angMask_ome)
+
+    allAngs = angles[angMask, :]
+    allHKLs = np.vstack([hkls, hkls])[angMask, :]
+
+    if return_mask:
+        return allAngs, allHKLs, angMask
+    else:
+        return allAngs, allHKLs
+
+
+def _project_on_detector_plane(
+    allAngs: np.ndarray,
+    rMat_d: np.ndarray,
+    rMat_c: np.ndarray,
+    chi: float,
+    tVec_d: np.ndarray,
+    tVec_c: np.ndarray,
+    tVec_s: np.ndarray,
+    distortion: DistortionABC,
+    beamVec: np.ndarray = constants.beam_vec,
+) -> tuple[np.ndarray, np.ndarray, np.ndarray]:
+    """
+    utility routine for projecting a list of (tth, eta, ome) onto the
+    detector plane parameterized by the args
+    """
+    gVec_cs = xfcapi.angles_to_gvec(
+        allAngs, chi=chi, rmat_c=rMat_c, beam_vec=beamVec
+    )
+
+    rMat_ss = xfcapi.make_sample_rmat(chi, allAngs[:, 2])
+
+    tmp_xys = xfcapi.gvec_to_xy(
+        gVec_cs,
+        rMat_d,
+        rMat_ss,
+        rMat_c,
+        tVec_d,
+        tVec_s,
+        tVec_c,
+        beam_vec=beamVec,
+    )
+
+    valid_mask = ~(np.isnan(tmp_xys[:, 0]) | np.isnan(tmp_xys[:, 1]))
+
+    det_xy = np.atleast_2d(tmp_xys[valid_mask, :])
+
+    # apply distortion if specified
+    if distortion is not None:
+        det_xy = distortion.apply_inverse(det_xy)
+
+    return det_xy, rMat_ss, valid_mask
+
+
+def _project_on_detector_cylinder(
+    allAngs: np.ndarray,
+    chi: float,
+    tVec_d: np.ndarray,
+    caxis: np.ndarray,
+    paxis: np.ndarray,
+    radius: float,
+    physical_size: np.ndarray,
+    angle_extent: float,
+    distortion: DistortionABC = None,
+    beamVec: np.ndarray = constants.beam_vec,
+    etaVec: np.ndarray = constants.eta_vec,
+    tVec_s: np.ndarray = constants.zeros_3x1,
+    rmat_s: np.ndarray = constants.identity_3x3,
+    tVec_c: np.ndarray = constants.zeros_3x1,
+) -> tuple[np.ndarray, np.ndarray, np.ndarray]:
+    """
+    utility routine for projecting a list of (tth, eta, ome) onto the
+    detector plane parameterized by the args.
this function does the + computation for a cylindrical detector + """ + dVec_cs = xfcapi.angles_to_dvec( + allAngs, chi=chi, rmat_c=np.eye(3), beam_vec=beamVec, eta_vec=etaVec + ) + + rMat_ss = np.tile(rmat_s, [allAngs.shape[0], 1, 1]) + + tmp_xys, valid_mask = _dvecToDetectorXYcylinder( + dVec_cs, + tVec_d, + caxis, + paxis, + radius, + physical_size, + angle_extent, + tVec_s=tVec_s, + rmat_s=rmat_s, + tVec_c=tVec_c, + ) + + det_xy = np.atleast_2d(tmp_xys[valid_mask, :]) + + # apply distortion if specified + if distortion is not None: + det_xy = distortion.apply_inverse(det_xy) + + return det_xy, rMat_ss, valid_mask + + +def _dvecToDetectorXYcylinder( + dVec_cs: np.ndarray, + tVec_d: np.ndarray, + caxis: np.ndarray, + paxis: np.ndarray, + radius: float, + physical_size: np.ndarray, + angle_extent: float, + tVec_s: np.ndarray = constants.zeros_3x1, + tVec_c: np.ndarray = constants.zeros_3x1, + rmat_s: np.ndarray = constants.identity_3x3, +) -> tuple[np.ndarray, np.ndarray]: + + cvec = _unitvec_to_cylinder( + dVec_cs, + caxis, + paxis, + radius, + tVec_d, + tVec_s=tVec_s, + tVec_c=tVec_c, + rmat_s=rmat_s, + ) + + cvec_det, valid_mask = _clip_to_cylindrical_detector( + cvec, + tVec_d, + caxis, + paxis, + radius, + physical_size, + angle_extent, + tVec_s=tVec_s, + tVec_c=tVec_c, + rmat_s=rmat_s, + ) + + xy_det = _dewarp_from_cylinder( + cvec_det, + tVec_d, + caxis, + paxis, + radius, + tVec_s=tVec_s, + tVec_c=tVec_c, + rmat_s=rmat_s, + ) + + return xy_det, valid_mask + + +def _unitvec_to_cylinder( + uvw: np.ndarray, + caxis: np.ndarray, + paxis: np.ndarray, + radius: float, + tvec: np.ndarray, + tVec_s: np.ndarray = constants.zeros_3x1, + tVec_c: np.ndarray = constants.zeros_3x1, + rmat_s: np.ndarray = constants.identity_3x3, +) -> np.ndarray: + """ + get point where unitvector uvw + intersect the cylindrical detector. + this will give points which are + outside the actual panel. 
the points + will be clipped to the panel later + + Parameters + ---------- + uvw : numpy.ndarray + unit vectors stacked row wise (nx3) shape + + Returns + ------- + numpy.ndarray + (x,y,z) vectors point which intersect with + the cylinder with (nx3) shape + """ + naxis = np.cross(caxis, paxis) + naxis = naxis / np.linalg.norm(naxis) + + tvec_c_l = np.dot(rmat_s, tVec_c) + + delta = tvec - (radius * naxis + np.squeeze(tVec_s) + np.squeeze(tvec_c_l)) + num = uvw.shape[0] + cx = np.atleast_2d(caxis).T + + delta_t = np.tile(delta, [num, 1]) + + t1 = np.dot(uvw, delta.T) + t2 = np.squeeze(np.dot(uvw, cx)) + t3 = np.squeeze(np.dot(delta, cx)) + t4 = np.dot(uvw, cx) + + A = np.squeeze(1 - t4**2) + B = t1 - t2 * t3 + C = radius**2 - np.linalg.norm(delta) ** 2 + t3**2 + + mask = np.abs(A) < 1e-10 + beta = np.zeros( + [ + num, + ] + ) + + beta[~mask] = (B[~mask] + np.sqrt(B[~mask] ** 2 + A[~mask] * C)) / A[~mask] + + beta[mask] = np.nan + return np.tile(beta, [3, 1]).T * uvw + + +def _clip_to_cylindrical_detector( + uvw: np.ndarray, + tVec_d: np.ndarray, + caxis: np.ndarray, + paxis: np.ndarray, + radius: float, + physical_size: np.ndarray, + angle_extent: float, + tVec_s: np.ndarray = constants.zeros_3x1, + tVec_c: np.ndarray = constants.zeros_3x1, + rmat_s: np.ndarray = constants.identity_3x3, +) -> tuple[np.ndarray, np.ndarray]: + """ + takes in the intersection points uvw + with the cylindrical detector and + prunes out points which don't actually + hit the actual panel + + Parameters + ---------- + uvw : numpy.ndarray + unit vectors stacked row wise (nx3) shape + + Returns + ------- + numpy.ndarray + (x,y,z) vectors point which fall on panel + with (mx3) shape + """ + # first get rid of points which are above + # or below the detector + naxis = np.cross(caxis, paxis) + num = uvw.shape[0] + + cx = np.atleast_2d(caxis).T + nx = np.atleast_2d(naxis).T + + tvec_c_l = np.dot(rmat_s, tVec_c) + + delta = tVec_d - ( + radius * naxis + np.squeeze(tVec_s) + np.squeeze(tvec_c_l) + ) + + delta_t = np.tile(delta, [num, 1]) + + uvwp = uvw - delta_t + dp = np.dot(uvwp, cx) + + uvwpxy = uvwp - np.tile(dp, [1, 3]) * np.tile(cx, [1, num]).T + + size = physical_size + tvec = np.atleast_2d(tVec_d).T + + # ycomp = uvwp - np.tile(tVec_d,[num, 1]) + mask1 = np.squeeze(np.abs(dp) > size[0] * 0.5) + uvwp[mask1, :] = np.nan + + # next get rid of points that fall outside + # the polar angle range + + ang = np.dot(uvwpxy, nx) / radius + ang[np.abs(ang) > 1.0] = np.sign(ang[np.abs(ang) > 1.0]) + + ang = np.arccos(ang) + mask2 = np.squeeze(ang >= angle_extent) + mask = np.logical_or(mask1, mask2) + res = uvw.copy() + res[mask, :] = np.nan + + return res, ~mask + + +def _dewarp_from_cylinder( + uvw: np.ndarray, + tVec_d: np.ndarray, + caxis: np.ndarray, + paxis: np.ndarray, + radius: float, + tVec_s: np.ndarray = constants.zeros_3x1, + tVec_c: np.ndarray = constants.zeros_3x1, + rmat_s: np.ndarray = constants.identity_3x3, +): + """ + routine to convert cylindrical coordinates + to cartesian coordinates in image frame + """ + naxis = np.cross(caxis, paxis) + naxis = naxis / np.linalg.norm(naxis) + + cx = np.atleast_2d(caxis).T + px = np.atleast_2d(paxis).T + nx = np.atleast_2d(naxis).T + num = uvw.shape[0] + + tvec_c_l = np.dot(rmat_s, tVec_c) + + delta = tVec_d - ( + radius * naxis + np.squeeze(tVec_s) + np.squeeze(tvec_c_l) + ) + + delta_t = np.tile(delta, [num, 1]) + + uvwp = uvw - delta_t + + uvwpxy = uvwp - np.tile(np.dot(uvwp, cx), [1, 3]) * np.tile(cx, [1, num]).T + + sgn = np.sign(np.dot(uvwpxy, px)) + sgn[sgn == 
0.0] = 1.0
+    ang = np.dot(uvwpxy, nx) / radius
+    ang[np.abs(ang) > 1.0] = np.sign(ang[np.abs(ang) > 1.0])
+    ang = np.arccos(ang)
+    xcrd = np.squeeze(radius * ang * sgn)
+    ycrd = np.squeeze(np.dot(uvwp, cx))
+    return np.vstack((xcrd, ycrd)).T
+
+
+def _warp_to_cylinder(
+    cart: np.ndarray,
+    tVec_d: np.ndarray,
+    radius: float,
+    caxis: np.ndarray,
+    paxis: np.ndarray,
+    tVec_s: np.ndarray = constants.zeros_3x1,
+    rmat_s: np.ndarray = constants.identity_3x3,
+    tVec_c: np.ndarray = constants.zeros_3x1,
+    normalize: bool = True,
+) -> np.ndarray:
+    """
+    routine to convert cartesian coordinates
+    in image frame to cylindrical coordinates
+    """
+    tvec = np.atleast_2d(tVec_d).T
+    if tVec_s.ndim == 1:
+        tVec_s = np.atleast_2d(tVec_s).T
+    if tVec_c.ndim == 1:
+        tVec_c = np.atleast_2d(tVec_c).T
+    num = cart.shape[0]
+    naxis = np.cross(paxis, caxis)
+    x = cart[:, 0]
+    y = cart[:, 1]
+    th = x / radius
+    xp = radius * np.sin(th)
+    xn = radius * (1 - np.cos(th))
+
+    ccomp = np.tile(y, [3, 1]).T * np.tile(caxis, [num, 1])
+    pcomp = np.tile(xp, [3, 1]).T * np.tile(paxis, [num, 1])
+    ncomp = np.tile(xn, [3, 1]).T * np.tile(naxis, [num, 1])
+    cart3d = pcomp + ccomp + ncomp
+
+    tVec_c_l = np.dot(rmat_s, tVec_c)
+
+    res = cart3d + np.tile(tvec - tVec_s - tVec_c_l, [1, num]).T
+
+    if normalize:
+        return res / np.tile(np.linalg.norm(res, axis=1), [3, 1]).T
+    else:
+        return res
+
+
+def _dvec_to_angs(
+    dvecs: np.ndarray, bvec: np.ndarray, evec: np.ndarray
+) -> tuple[np.ndarray, np.ndarray]:
+    """
+    convert diffraction vectors to (tth, eta)
+    angles in the 'eta' frame
+    dvecs is assumed to have (nx3) shape
+    """
+    num = dvecs.shape[0]
+    exb = np.cross(evec, bvec)
+    exb = exb / np.linalg.norm(exb)
+    bxexb = np.cross(bvec, exb)
+    bxexb = bxexb / np.linalg.norm(bxexb)
+
+    dp = np.dot(bvec, dvecs.T)
+    dp[np.abs(dp) > 1.0] = np.sign(dp[np.abs(dp) > 1.0])
+    tth = np.arccos(dp)
+
+    dvecs_p = dvecs - np.tile(dp, [3, 1]).T * np.tile(bvec, [num, 1])
+
+    dpx = np.dot(bxexb, dvecs_p.T)
+    dpy = np.dot(exb, dvecs_p.T)
+    eta = np.arctan2(dpy, dpx)
+
+    return tth, eta
+
+
+def simulateGVecs(
+    pd: PlaneData,
+    detector_params: np.ndarray,
+    grain_params: np.ndarray,
+    ome_range: list[tuple[float]] = [
+        (-np.pi, np.pi),
+    ],
+    ome_period: tuple[float] = (-np.pi, np.pi),
+    eta_range: list[tuple[float]] = [
+        (-np.pi, np.pi),
+    ],
+    panel_dims: list[tuple[float]] = [(-204.8, -204.8), (204.8, 204.8)],
+    pixel_pitch: tuple[float] = (0.2, 0.2),
+    distortion: DistortionABC = None,
+    beam_vector: np.ndarray = constants.beam_vec,
+) -> tuple[np.ndarray, np.ndarray, np.ndarray, np.ndarray, np.ndarray]:
+    """
+    returns valid_ids, valid_hkl, valid_ang, valid_xy, ang_ps
+
+    panel_dims are [(xmin, ymin), (xmax, ymax)] in mm
+
+    pixel_pitch is [row_size, column_size] in mm
+
+    simulate the monochromatic scattering for a specified
+
+    - space group
+    - wavelength
+    - orientation
+    - strain
+    - position
+    - detector parameters
+    - oscillation axis tilt (chi)
+
+    subject to
+
+    - omega (oscillation) ranges (list of (min, max) tuples)
+    - eta (azimuth) ranges
+
+    pd................a hexrd.crystallography.PlaneData instance
+    detector_params...a (10,) ndarray containing the tilt angles (3),
+                      translation (3), chi (1), and sample frame translation
+                      (3) parameters
+    grain_params......a (12,) ndarray containing the exponential map (3),
+                      translation (3), and inverse stretch tensor components
+                      in Mandel-Voigt notation (6).
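+
+    For example, a hedged sketch of how these vectors might be assembled
+    (names here are illustrative)::
+
+        detector_params = np.hstack([tilt, tvec_d, [chi], tvec_s])  # (10,)
+        grain_params = np.hstack([expmap, tvec_c, vinv_s])          # (12,)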
+ + * currently only one panel is supported, but this will likely change soon + """ + bMat = pd.latVecOps['B'] + wlen = pd.wavelength + full_hkls = _fetch_hkls_from_planedata(pd) + + # extract variables for convenience + rMat_d = xfcapi.make_detector_rmat(detector_params[:3]) + tVec_d = np.ascontiguousarray(detector_params[3:6]) + chi = detector_params[6] + tVec_s = np.ascontiguousarray(detector_params[7:10]) + rMat_c = xfcapi.make_rmat_of_expmap(grain_params[:3]) + tVec_c = np.ascontiguousarray(grain_params[3:6]) + vInv_s = np.ascontiguousarray(grain_params[6:12]) + beam_vector = np.ascontiguousarray(beam_vector) + + # first find valid G-vectors + angList = np.vstack( + xfcapi.oscill_angles_of_hkls( + full_hkls[:, 1:], chi, rMat_c, bMat, wlen, v_inv=vInv_s, + beam_vec=beam_vector + ) + ) + allAngs, allHKLs = _filter_hkls_eta_ome( + full_hkls, angList, eta_range, ome_range + ) + + if len(allAngs) == 0: + valid_ids = [] + valid_hkl = [] + valid_ang = [] + valid_xy = [] + ang_ps = [] + else: + # ??? preallocate for speed? + det_xy, rMat_ss, _ = _project_on_detector_plane( + allAngs, rMat_d, rMat_c, chi, tVec_d, tVec_c, tVec_s, distortion, + beamVec=beam_vector + ) + + on_panel = np.logical_and( + np.logical_and( + det_xy[:, 0] >= panel_dims[0][0], + det_xy[:, 0] <= panel_dims[1][0], + ), + np.logical_and( + det_xy[:, 1] >= panel_dims[0][1], + det_xy[:, 1] <= panel_dims[1][1], + ), + ) + + op_idx = np.where(on_panel)[0] + + valid_ang = allAngs[op_idx, :] + valid_ang[:, 2] = xfcapi.mapAngle(valid_ang[:, 2], ome_period) + valid_ids = allHKLs[op_idx, 0] + valid_hkl = allHKLs[op_idx, 1:] + valid_xy = det_xy[op_idx, :] + ang_ps = angularPixelSize( + valid_xy, + pixel_pitch, + rMat_d, + # Provide only the first sample rotation matrix to angularPixelSize + # Perhaps this is something that can be improved in the future? + rMat_ss[0], + tVec_d, + tVec_s, + tVec_c, + distortion=distortion, + beamVec=beam_vector, + ) + + return valid_ids, valid_hkl, valid_ang, valid_xy, ang_ps + + +@deprecated(new_func=simlp, removal_date='2025-01-01') +def simulateLauePattern( + hkls, + bMat, + rmat_d, + tvec_d, + panel_dims, + panel_buffer=5, + minEnergy=8, + maxEnergy=24, + rmat_s=np.eye(3), + grain_params=None, + distortion=None, + beamVec=None, +): + + if beamVec is None: + beamVec = constants.beam_vec + + # parse energy ranges + multipleEnergyRanges = False + if hasattr(maxEnergy, '__len__'): + assert len(maxEnergy) == len( + minEnergy + ), 'energy cutoff ranges must have the same length' + multipleEnergyRanges = True + lmin = [processWavelength(e) for e in maxEnergy] + lmax = [processWavelength(e) for e in minEnergy] + else: + lmin = processWavelength(maxEnergy) + lmax = processWavelength(minEnergy) + + # process crystal rmats and inverse stretches + if grain_params is None: + grain_params = np.atleast_2d( + [0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 1.0, 1.0, 1.0, 0.0, 0.0, 0.0] + ) + + n_grains = len(grain_params) + + # dummy translation vector... 
make input + tvec_s = np.zeros((3, 1)) + + # number of hkls + nhkls_tot = hkls.shape[1] + + # unit G-vectors in crystal frame + ghat_c = mutil.unitVector(np.dot(bMat, hkls)) + + # pre-allocate output arrays + xy_det = np.nan * np.ones((n_grains, nhkls_tot, 2)) + hkls_in = np.nan * np.ones((n_grains, 3, nhkls_tot)) + angles = np.nan * np.ones((n_grains, nhkls_tot, 2)) + dspacing = np.nan * np.ones((n_grains, nhkls_tot)) + energy = np.nan * np.ones((n_grains, nhkls_tot)) + + """ + LOOP OVER GRAINS + """ + + for iG, gp in enumerate(grain_params): + rmat_c = xfcapi.make_rmat_of_expmap(gp[:3]) + tvec_c = gp[3:6].reshape(3, 1) + vInv_s = mutil.vecMVToSymm(gp[6:].reshape(6, 1)) + + # stretch them: V^(-1) * R * Gc + ghat_s_str = mutil.unitVector(np.dot(vInv_s, np.dot(rmat_c, ghat_c))) + ghat_c_str = np.dot(rmat_c.T, ghat_s_str) + + # project + dpts = xfcapi.gvec_to_xy( + ghat_c_str.T, + rmat_d, + rmat_s, + rmat_c, + tvec_d, + tvec_s, + tvec_c, + beam_vec=beamVec, + ).T + + # check intersections with detector plane + canIntersect = ~np.isnan(dpts[0, :]) + npts_in = sum(canIntersect) + + if np.any(canIntersect): + dpts = dpts[:, canIntersect].reshape(2, npts_in) + dhkl = hkls[:, canIntersect].reshape(3, npts_in) + + rmat_b = xfcapi.make_beam_rmat(beamVec, constants.eta_vec) + + # back to angles + tth_eta, gvec_l = xfcapi.xy_to_gvec( + dpts.T, rmat_d, rmat_s, tvec_d, tvec_s, tvec_c, rmat_b=rmat_b + ) + tth_eta = np.vstack(tth_eta).T + + # warp measured points + if distortion is not None: + dpts = distortion.apply_inverse(dpts) + + # plane spacings and energies + dsp = 1.0 / mutil.columnNorm(np.dot(bMat, dhkl)) + wlen = 2 * dsp * np.sin(0.5 * tth_eta[:, 0]) + + # find on spatial extent of detector + xTest = np.logical_and( + dpts[0, :] >= -0.5 * panel_dims[1] + panel_buffer, + dpts[0, :] <= 0.5 * panel_dims[1] - panel_buffer, + ) + yTest = np.logical_and( + dpts[1, :] >= -0.5 * panel_dims[0] + panel_buffer, + dpts[1, :] <= 0.5 * panel_dims[0] - panel_buffer, + ) + + onDetector = np.logical_and(xTest, yTest) + if multipleEnergyRanges: + validEnergy = np.zeros(len(wlen), dtype=bool) + for i in range(len(lmin)): + validEnergy = validEnergy | np.logical_and( + wlen >= lmin[i], wlen <= lmax[i] + ) + else: + validEnergy = np.logical_and(wlen >= lmin, wlen <= lmax) + + # index for valid reflections + keepers = np.where(np.logical_and(onDetector, validEnergy))[0] + + # assign output arrays + xy_det[iG][keepers, :] = dpts[:, keepers].T + hkls_in[iG][:, keepers] = dhkl[:, keepers] + angles[iG][keepers, :] = tth_eta[keepers, :] + dspacing[iG, keepers] = dsp[keepers] + energy[iG, keepers] = processWavelength(wlen[keepers]) + return xy_det, hkls_in, angles, dspacing, energy + + +@numba.njit(nogil=True, cache=True) +def _expand_pixels( + original: np.ndarray, w: float, h: float, result: np.ndarray +) -> np.ndarray: + hw = 0.5 * w + hh = 0.5 * h + for el in range(len(original)): + x, y = original[el, 0], original[el, 1] + result[el * 4 + 0, 0] = x - hw + result[el * 4 + 0, 1] = y - hh + result[el * 4 + 1, 0] = x + hw + result[el * 4 + 1, 1] = y - hh + result[el * 4 + 2, 0] = x + hw + result[el * 4 + 2, 1] = y + hh + result[el * 4 + 3, 0] = x - hw + result[el * 4 + 3, 1] = y + hh + + return result + + +@numba.njit(nogil=True, cache=True) +def _compute_max( + tth: np.ndarray, eta: np.ndarray, result: np.ndarray +) -> np.ndarray: + period = 2.0 * np.pi + hperiod = np.pi + for el in range(0, len(tth), 4): + max_tth = np.abs(tth[el + 0] - tth[el + 3]) + eta_diff = eta[el + 0] - eta[el + 3] + max_eta = 
np.abs(np.remainder(eta_diff + hperiod, period) - hperiod)
+        for i in range(3):
+            curr_tth = np.abs(tth[el + i] - tth[el + i + 1])
+            eta_diff = eta[el + i] - eta[el + i + 1]
+            curr_eta = np.abs(
+                np.remainder(eta_diff + hperiod, period) - hperiod
+            )
+            max_tth = np.maximum(curr_tth, max_tth)
+            max_eta = np.maximum(curr_eta, max_eta)
+        result[el // 4, 0] = max_tth
+        result[el // 4, 1] = max_eta
+
+    return result
+
+
+def angularPixelSize(
+    xy_det: np.ndarray,
+    xy_pixelPitch: tuple[float],
+    rMat_d: np.ndarray,
+    rMat_s: np.ndarray,
+    tVec_d: np.ndarray,
+    tVec_s: np.ndarray,
+    tVec_c: np.ndarray,
+    distortion: DistortionABC = None,
+    beamVec: np.ndarray = None,
+    etaVec: np.ndarray = None,
+) -> np.ndarray:
+    """
+    Calculate angular pixel sizes on a detector.
+
+    * choices of beam vector and eta vector specs have been suppressed
+    * assumes xy_det in UNWARPED configuration
+    """
+    xy_det = np.atleast_2d(xy_det)
+    if distortion is not None:  # !!! check this logic
+        xy_det = distortion.apply(xy_det)
+    if beamVec is None:
+        beamVec = constants.beam_vec
+    if etaVec is None:
+        etaVec = constants.eta_vec
+
+    # Verify that rMat_s is only 2D (a single matrix).
+    # Arrays of matrices were previously provided, which `xy_to_gvec`
+    # cannot currently handle.
+    if rMat_s.ndim != 2:
+        msg = (
+            f'rMat_s should have 2 dimensions, but has {rMat_s.ndim} '
+            'dimensions instead'
+        )
+        raise ValueError(msg)
+
+    xy_expanded = np.empty((len(xy_det) * 4, 2), dtype=xy_det.dtype)
+    xy_expanded = _expand_pixels(
+        xy_det, xy_pixelPitch[0], xy_pixelPitch[1], xy_expanded
+    )
+
+    rmat_b = xfcapi.make_beam_rmat(beamVec, etaVec)
+
+    gvec_space, _ = xfcapi.xy_to_gvec(
+        xy_expanded,
+        rMat_d,
+        rMat_s,
+        tVec_d,
+        tVec_s,
+        tVec_c,
+        rmat_b=rmat_b,
+    )
+    result = np.empty_like(xy_det)
+    return _compute_max(gvec_space[0], gvec_space[1], result)
+
+
+def make_reflection_patches(
+    instr_cfg: dict[str, Any],
+    tth_eta: np.ndarray,
+    ang_pixel_size: np.ndarray,
+    omega: Optional[np.ndarray] = None,
+    tth_tol: float = 0.2,
+    eta_tol: float = 1.0,
+    rmat_c: np.ndarray = np.eye(3),
+    tvec_c: np.ndarray = np.zeros((3, 1)),
+    npdiv: int = 1,
+    quiet: bool = False,  # TODO: Remove this parameter - it isn't used
+    compute_areas_func: Callable = gutil.compute_areas,
+) -> Generator[
+    tuple[
+        np.ndarray, np.ndarray, np.ndarray, np.ndarray, np.ndarray, np.ndarray
+    ],
+    None,
+    None,
+]:
+    """Make angular patches on a detector.
+
+    panel_dims are [(xmin, ymin), (xmax, ymax)] in mm
+
+    pixel_pitch is [row_size, column_size] in mm
+
+    FIXME: DISTORTION HANDLING IS STILL A KLUDGE!!!
+
+    patches are:
+
+                 delta tth
+    d  ------------- ... -------------
+    e  | x | x | x | ... | x | x | x |
+    l  ------------- ... -------------
+    t                 .
+    a                 .
+                      .
+    e  ------------- ... -------------
+    t  | x | x | x | ... | x | x | x |
+    a  ------------- ... -------------
+
+    outputs are:
+        (tth_vtx, eta_vtx),
+        (x_vtx, y_vtx),
+        connectivity,
+        subpixel_areas,
+        (x_center, y_center),
+        (i_row, j_col)
+    """
+
+    # detector quantities
+    rmat_d = xfcapi.make_rmat_of_expmap(
+        np.r_[instr_cfg['detector']['transform']['tilt']]
+    )
+    tvec_d = np.r_[instr_cfg['detector']['transform']['translation']]
+    pixel_size = instr_cfg['detector']['pixels']['size']
+
+    frame_nrows = instr_cfg['detector']['pixels']['rows']
+    frame_ncols = instr_cfg['detector']['pixels']['columns']
+
+    panel_dims = (
+        -0.5 * np.r_[frame_ncols * pixel_size[1], frame_nrows * pixel_size[0]],
+        0.5 * np.r_[frame_ncols * pixel_size[1], frame_nrows * pixel_size[0]],
+    )
+    row_edges = (
+        np.arange(frame_nrows + 1)[::-1] * pixel_size[1] + panel_dims[0][1]
+    )
+    col_edges = np.arange(frame_ncols + 1) * pixel_size[0] + panel_dims[0][0]
+
+    # handle distortion
+    distortion = None
+    if distortion_key in instr_cfg['detector']:
+        distortion_cfg = instr_cfg['detector'][distortion_key]
+        if distortion_cfg is not None:
+            try:
+                func_name = distortion_cfg['function_name']
+                dparams = distortion_cfg['parameters']
+                distortion = distortion_pkg.get_mapping(func_name, dparams)
+            except KeyError:
+                raise RuntimeError("problem with distortion specification")
+
+    # sample frame
+    chi = instr_cfg['oscillation_stage']['chi']
+    tvec_s = np.r_[instr_cfg['oscillation_stage']['translation']]
+    bvec = np.r_[instr_cfg['beam']['vector']]
+
+    # data to loop
+    # ??? WOULD IT BE CHEAPER TO CARRY ZEROS OR USE CONDITIONAL?
+    if omega is None:
+        full_angs = np.hstack([tth_eta, np.zeros((len(tth_eta), 1))])
+    else:
+        full_angs = np.hstack([tth_eta, omega.reshape(len(tth_eta), 1)])
+
+    for angs, pix in zip(full_angs, ang_pixel_size):
+        # calculate bin edges for patch based on local angular pixel size
+        # tth
+        ntths, tth_edges = gutil.make_tolerance_grid(
+            bin_width=np.degrees(pix[0]),
+            window_width=tth_tol,
+            num_subdivisions=npdiv,
+        )
+
+        # eta
+        netas, eta_edges = gutil.make_tolerance_grid(
+            bin_width=np.degrees(pix[1]),
+            window_width=eta_tol,
+            num_subdivisions=npdiv,
+        )
+
+        # FOR ANGULAR MESH
+        conn = gutil.cellConnectivity(netas, ntths, origin='ll')
+
+        # meshgrid args are (cols, rows), a.k.a (fast, slow)
+        m_tth, m_eta = np.meshgrid(tth_edges, eta_edges)
+        npts_patch = m_tth.size
+
+        # calculate the patch XY coords from the (tth, eta) angles
+        # !!! will CHEAT and ignore the small perturbation the different
+        #     omega angle values cause and simply use the central value
+        gVec_angs_vtx = np.tile(angs, (npts_patch, 1)) + np.radians(
+            np.vstack(
+                [m_tth.flatten(), m_eta.flatten(), np.zeros(npts_patch)]
+            ).T
+        )
+
+        xy_eval_vtx, _, _ = _project_on_detector_plane(
+            gVec_angs_vtx,
+            rmat_d,
+            rmat_c,
+            chi,
+            tvec_d,
+            tvec_c,
+            tvec_s,
+            distortion,
+            beamVec=bvec,
+        )
+
+        areas = compute_areas_func(xy_eval_vtx, conn)
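+        # (editorial note) xy_eval_vtx holds the patch vertex grid mapped
+        # onto the panel; compute_areas_func reduces each quad cell listed
+        # in `conn` to its planar area, used below for normalization.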
+        # EVALUATION POINTS
+        # !!! for lack of a better option will use centroids
+        tth_eta_cen = gutil.cellCentroids(
+            np.atleast_2d(gVec_angs_vtx[:, :2]), conn
+        )
+
+        gVec_angs = np.hstack(
+            [tth_eta_cen, np.tile(angs[2], (len(tth_eta_cen), 1))]
+        )
+
+        xy_eval, _, _ = _project_on_detector_plane(
+            gVec_angs,
+            rmat_d,
+            rmat_c,
+            chi,
+            tvec_d,
+            tvec_c,
+            tvec_s,
+            distortion,
+            beamVec=bvec,
+        )
+
+        row_indices = gutil.cellIndices(row_edges, xy_eval[:, 1])
+        col_indices = gutil.cellIndices(col_edges, xy_eval[:, 0])
+
+        yield (
+            (
+                gVec_angs_vtx[:, 0].reshape(m_tth.shape),
+                gVec_angs_vtx[:, 1].reshape(m_tth.shape),
+            ),
+            (
+                xy_eval_vtx[:, 0].reshape(m_tth.shape),
+                xy_eval_vtx[:, 1].reshape(m_tth.shape),
+            ),
+            conn,
+            areas.reshape(netas, ntths),
+            (
+                xy_eval[:, 0].reshape(netas, ntths),
+                xy_eval[:, 1].reshape(netas, ntths),
+            ),
+            (
+                row_indices.reshape(netas, ntths),
+                col_indices.reshape(netas, ntths),
+            ),
+        )
+
+
+def extract_detector_transformation(
+    detector_params: Union[dict[str, Any], np.ndarray]
+) -> tuple[np.ndarray, np.ndarray, float, np.ndarray]:
+    """
+    Construct arrays from detector parameters.
+
+    goes from 10 vector of detector params OR instrument config dictionary
+    (from YAML spec) to affine transformation arrays
+
+    Parameters
+    ----------
+    detector_params : TYPE
+        DESCRIPTION.
+
+    Returns
+    -------
+    rMat_d : TYPE
+        DESCRIPTION.
+    tVec_d : TYPE
+        DESCRIPTION.
+    chi : TYPE
+        DESCRIPTION.
+    tVec_s : TYPE
+        DESCRIPTION.
+
+    """
+    # extract variables for convenience
+    if isinstance(detector_params, dict):
+        rMat_d = xfcapi.make_rmat_of_expmap(
+            np.array(detector_params['detector']['transform']['tilt'])
+        )
+        tVec_d = np.r_[detector_params['detector']['transform']['translation']]
+        chi = detector_params['oscillation_stage']['chi']
+        tVec_s = np.r_[detector_params['oscillation_stage']['translation']]
+    else:
+        assert (
+            len(detector_params) >= 10
+        ), "list of detector parameters must have length >= 10"
+        rMat_d = xfcapi.make_rmat_of_expmap(detector_params[:3])
+        tVec_d = np.ascontiguousarray(detector_params[3:6])
+        chi = detector_params[6]
+        tVec_s = np.ascontiguousarray(detector_params[7:10])
+    return rMat_d, tVec_d, chi, tVec_s
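+
+
+# Usage sketch for extract_detector_transformation (editorial, illustrative):
+# it accepts either an instrument config dict from a YAML spec or a flat
+# (10,) vector, e.g.
+#
+#     rmat_d, tvec_d, chi, tvec_s = extract_detector_transformation(
+#         instr_cfg  # or np.hstack([tilt(3), tvec_d(3), [chi], tvec_s(3)])
+#     )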
diff --git a/hexrd/fitting/calibration/calibrator.py b/hexrd/powder/fitting/calibration/calibrator.py
similarity index 100%
rename from hexrd/fitting/calibration/calibrator.py
rename to hexrd/powder/fitting/calibration/calibrator.py
diff --git a/hexrd/fitting/calibration/instrument.py b/hexrd/powder/fitting/calibration/instrument.py
similarity index 100%
rename from hexrd/fitting/calibration/instrument.py
rename to hexrd/powder/fitting/calibration/instrument.py
diff --git a/hexrd/fitting/calibration/lmfit_param_handling.py b/hexrd/powder/fitting/calibration/lmfit_param_handling.py
similarity index 100%
rename from hexrd/fitting/calibration/lmfit_param_handling.py
rename to hexrd/powder/fitting/calibration/lmfit_param_handling.py
diff --git a/hexrd/fitting/calibration/powder.py b/hexrd/powder/fitting/calibration/powder.py
similarity index 100%
rename from hexrd/fitting/calibration/powder.py
rename to hexrd/powder/fitting/calibration/powder.py
diff --git a/hexrd/fitting/calibration/structureless.py b/hexrd/powder/fitting/calibration/structureless.py
similarity index 100%
rename from hexrd/fitting/calibration/structureless.py
rename to hexrd/powder/fitting/calibration/structureless.py
diff --git a/hexrd/powder/instrument/detector.py b/hexrd/powder/instrument/detector.py
new file mode 100644
index 000000000..db4f95d1a
--- /dev/null
+++ b/hexrd/powder/instrument/detector.py
@@ -0,0 +1,2086 @@
+from abc import abstractmethod
+import copy
+import os
+from typing import Optional
+
+from hexrd.instrument.constants import (
+    COATING_DEFAULT, FILTER_DEFAULTS, PHOSPHOR_DEFAULT
+)
+from hexrd.instrument.physics_package import AbstractPhysicsPackage
+import numpy as np
+import numba
+
+from hexrd import constants as ct
+from hexrd import distortion as distortion_pkg
+from hexrd import matrixutil as mutil
+from hexrd import xrdutil
+from hexrd.rotations import mapAngle
+
+from hexrd.material import crystallography
+from hexrd.material.crystallography import PlaneData
+
+from hexrd.transforms.xfcapi import (
+    xy_to_gvec,
+    gvec_to_xy,
+    make_beam_rmat,
+    make_rmat_of_expmap,
+    oscill_angles_of_hkls,
+    angles_to_dvec,
+)
+
+from hexrd.utils.decorators import memoize
+from hexrd.gridutil import cellIndices
+from hexrd.instrument import detector_coatings
+from hexrd.material.utils import (
+    calculate_linear_absorption_length,
+    calculate_incoherent_scattering)
+
+distortion_registry = distortion_pkg.Registry()
+
+max_workers_DFLT = max(1, os.cpu_count() - 1)
+
+beam_energy_DFLT = 65.351
+
+# Memoize these, so each detector can avoid re-computing if nothing
+# has changed.
+_lorentz_factor = memoize(crystallography.lorentz_factor)
+_polarization_factor = memoize(crystallography.polarization_factor)
+
+
+class Detector:
+    """
+    Base class for 2D detectors with functions and properties
+    common to planar and cylindrical detectors. This class
+    will be inherited by both those classes.
+    """
+
+    __pixelPitchUnit = 'mm'
+
+    # Abstract methods that must be redefined in derived classes
+    @property
+    @abstractmethod
+    def detector_type(self):
+        raise NotImplementedError
+
+    @abstractmethod
+    def cart_to_angles(
+        self,
+        xy_data,
+        rmat_s=None,
+        tvec_s=None,
+        tvec_c=None,
+        apply_distortion=False,
+    ):
+        """
+        Transform cartesian coordinates to angular.
+
+        Parameters
+        ----------
+        xy_data : TYPE
+            The (n, 2) array of n (x, y) coordinates to be transformed in
+            either the raw or ideal cartesian plane (see `apply_distortion`
+            kwarg below).
+        rmat_s : array_like, optional
+            The (3, 3) COB matrix for the sample frame. The default is None.
+        tvec_s : array_like, optional
+            The (3, ) translation vector for the sample frame.
+            The default is None.
+        tvec_c : array_like, optional
+            The (3, ) translation vector for the crystal frame.
+            The default is None.
+        apply_distortion : bool, optional
+            If True, apply distortion to the input cartesian coordinates.
+            The default is False.
+
+        Returns
+        -------
+        tth_eta : TYPE
+            DESCRIPTION.
+        g_vec : TYPE
+            DESCRIPTION.
+
+        """
+        raise NotImplementedError
+
+    @abstractmethod
+    def angles_to_cart(
+        self,
+        tth_eta,
+        rmat_s=None,
+        tvec_s=None,
+        rmat_c=None,
+        tvec_c=None,
+        apply_distortion=False,
+    ):
+        """
+        Transform angular coordinates to cartesian.
+
+        Parameters
+        ----------
+        tth_eta : array_like
+            The (n, 2) array of n (tth, eta) coordinates to be transformed.
+        rmat_s : array_like, optional
+            The (3, 3) COB matrix for the sample frame. The default is None.
+        tvec_s : array_like, optional
+            The (3, ) translation vector for the sample frame.
+            The default is None.
+        rmat_c : array_like, optional
+            (3, 3) COB matrix for the crystal frame.
+            The default is None.
+        tvec_c : array_like, optional
+            The (3, ) translation vector for the crystal frame.
+            The default is None.
+        apply_distortion : bool, optional
+            If True, apply distortion to take cartesian coordinates to the
+            "warped" configuration.
+            The default is False.
+
+        Returns
+        -------
+        xy_det : array_like
+            The (n, 2) array of the n input coordinates in the cartesian
+            detector frame.
+
+        """
+        raise NotImplementedError
+
+    @abstractmethod
+    def cart_to_dvecs(self, xy_data):
+        """Convert cartesian coordinates to dvectors"""
+        raise NotImplementedError
+
+    @abstractmethod
+    def pixel_angles(self, origin=ct.zeros_3):
+        raise NotImplementedError
+
+    @abstractmethod
+    def pixel_tth_gradient(self, origin=ct.zeros_3):
+        raise NotImplementedError
+
+    @abstractmethod
+    def pixel_eta_gradient(self, origin=ct.zeros_3):
+        raise NotImplementedError
+
+    @abstractmethod
+    def calc_filter_coating_transmission(self, energy):
+        pass
+
+    @property
+    @abstractmethod
+    def beam_position(self):
+        """
+        returns the coordinates of the beam in the cartesian detector
+        frame {Xd, Yd, Zd}. NaNs if no intersection.
+        """
+        raise NotImplementedError
+
+    @property
+    def extra_config_kwargs(self):
+        return {}
+
+    # End of abstract methods
+
+    def __init__(
+        self,
+        rows=2048,
+        cols=2048,
+        pixel_size=(0.2, 0.2),
+        tvec=np.r_[0.0, 0.0, -1000.0],
+        tilt=ct.zeros_3,
+        name='default',
+        bvec=ct.beam_vec,
+        xrs_dist=None,
+        evec=ct.eta_vec,
+        saturation_level=None,
+        panel_buffer=None,
+        tth_distortion=None,
+        roi=None,
+        group=None,
+        distortion=None,
+        max_workers=max_workers_DFLT,
+        detector_filter: Optional[detector_coatings.Filter] = None,
+        detector_coating: Optional[detector_coatings.Coating] = None,
+        phosphor: Optional[detector_coatings.Phosphor] = None,
+    ):
+        """
+        Instantiate a detector object (the base class shared by the planar
+        and cylindrical detector types).
+
+        Parameters
+        ----------
+        rows : TYPE, optional
+            DESCRIPTION. The default is 2048.
+        cols : TYPE, optional
+            DESCRIPTION. The default is 2048.
+        pixel_size : TYPE, optional
+            DESCRIPTION. The default is (0.2, 0.2).
+        tvec : TYPE, optional
+            DESCRIPTION. The default is np.r_[0., 0., -1000.].
+        tilt : TYPE, optional
+            DESCRIPTION. The default is ct.zeros_3.
+        name : TYPE, optional
+            DESCRIPTION. The default is 'default'.
+        bvec : TYPE, optional
+            DESCRIPTION. The default is ct.beam_vec.
+        evec : TYPE, optional
+            DESCRIPTION. The default is ct.eta_vec.
+        saturation_level : TYPE, optional
+            DESCRIPTION. The default is None.
+        panel_buffer : TYPE, optional
+            If a scalar or len(2) array_like, the interpretation is a border
+            in mm. If an array with shape (nrows, ncols), interpretation is a
+            boolean with True marking valid pixels. The default is None.
+        roi : TYPE, optional
+            DESCRIPTION. The default is None.
+        group : TYPE, optional
+            DESCRIPTION. The default is None.
+        distortion : TYPE, optional
+            DESCRIPTION. The default is None.
+        detector_filter : detector_coatings.Filter, optional
+            filter specifications including material type,
+            density and thickness. Used for absorption correction
+            calculations.
+        detector_coating : detector_coatings.Coating, optional
+            coating specifications including material type,
+            density and thickness. Used for absorption correction
+            calculations.
+        phosphor : detector_coatings.Phosphor, optional
+            phosphor specifications including material type,
+            density and thickness. Used for absorption correction
+            calculations.
+
+        Returns
+        -------
+        None.
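+
+        Examples
+        --------
+        A rough sketch for a concrete subclass (values illustrative)::
+
+            panel = PlanarDetector(
+                rows=2048, cols=2048, pixel_size=(0.2, 0.2), name='ge1'
+            )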
+ + """ + self._name = name + + self._rows = rows + self._cols = cols + + self._pixel_size_row = pixel_size[0] + self._pixel_size_col = pixel_size[1] + + self._saturation_level = saturation_level + + self._panel_buffer = panel_buffer + + self._tth_distortion = tth_distortion + + if roi is None: + self._roi = roi + else: + assert len(roi) == 2, "roi is set via (start_row, start_col)" + self._roi = ( + (roi[0], roi[0] + self._rows), + (roi[1], roi[1] + self._cols), + ) + + self._tvec = np.array(tvec).flatten() + self._tilt = np.array(tilt).flatten() + + self._bvec = np.array(bvec).flatten() + self._xrs_dist = xrs_dist + + self._evec = np.array(evec).flatten() + + self._distortion = distortion + + self.max_workers = max_workers + + self.group = group + + if detector_filter is None: + detector_filter = detector_coatings.Filter( + **FILTER_DEFAULTS.TARDIS) + self.filter = detector_filter + + if detector_coating is None: + detector_coating = detector_coatings.Coating(**COATING_DEFAULT) + self.coating = detector_coating + + if phosphor is None: + phosphor = detector_coatings.Phosphor(**PHOSPHOR_DEFAULT) + self.phosphor = phosphor + + # detector ID + @property + def name(self): + return self._name + + @name.setter + def name(self, s): + assert isinstance(s, str), "requires string input" + self._name = s + + @property + def lmfit_name(self): + # lmfit requires underscores instead of dashes + return self.name.replace('-', '_') + + # properties for physical size of rectangular detector + @property + def rows(self): + return self._rows + + @rows.setter + def rows(self, x): + assert isinstance(x, int) + self._rows = x + + @property + def cols(self): + return self._cols + + @cols.setter + def cols(self, x): + assert isinstance(x, int) + self._cols = x + + @property + def pixel_size_row(self): + return self._pixel_size_row + + @pixel_size_row.setter + def pixel_size_row(self, x): + self._pixel_size_row = float(x) + + @property + def pixel_size_col(self): + return self._pixel_size_col + + @pixel_size_col.setter + def pixel_size_col(self, x): + self._pixel_size_col = float(x) + + @property + def pixel_area(self): + return self.pixel_size_row * self.pixel_size_col + + @property + def saturation_level(self): + return self._saturation_level + + @saturation_level.setter + def saturation_level(self, x): + if x is not None: + assert np.isreal(x) + self._saturation_level = x + + @property + def panel_buffer(self): + return self._panel_buffer + + @panel_buffer.setter + def panel_buffer(self, x): + """if not None, a buffer in mm (x, y)""" + if x is not None: + assert len(x) == 2 or x.ndim == 2 + self._panel_buffer = x + + @property + def tth_distortion(self): + return self._tth_distortion + + @tth_distortion.setter + def tth_distortion(self, x): + """if not None, a buffer in mm (x, y)""" + if x is not None: + assert x.ndim == 2 and x.shape == self.shape + self._tth_distortion = x + + @property + def roi(self): + return self._roi + + @roi.setter + def roi(self, vertex_array): + """ + !!! 
vertex array must be (r0, c0) + """ + if vertex_array is not None: + assert ( + len(vertex_array) == 2 + ), "roi is set via (start_row, start_col)" + self._roi = ( + (vertex_array[0], vertex_array[0] + self.rows), + (vertex_array[1], vertex_array[1] + self.cols), + ) + + @property + def row_dim(self): + return self.rows * self.pixel_size_row + + @property + def col_dim(self): + return self.cols * self.pixel_size_col + + @property + def row_pixel_vec(self): + return self.pixel_size_row * ( + 0.5 * (self.rows - 1) - np.arange(self.rows) + ) + + @property + def row_edge_vec(self): + return _row_edge_vec(self.rows, self.pixel_size_row) + + @property + def col_pixel_vec(self): + return self.pixel_size_col * ( + np.arange(self.cols) - 0.5 * (self.cols - 1) + ) + + @property + def col_edge_vec(self): + return _col_edge_vec(self.cols, self.pixel_size_col) + + @property + def corner_ul(self): + return np.r_[-0.5 * self.col_dim, 0.5 * self.row_dim] + + @property + def corner_ll(self): + return np.r_[-0.5 * self.col_dim, -0.5 * self.row_dim] + + @property + def corner_lr(self): + return np.r_[0.5 * self.col_dim, -0.5 * self.row_dim] + + @property + def corner_ur(self): + return np.r_[0.5 * self.col_dim, 0.5 * self.row_dim] + + @property + def shape(self): + return (self.rows, self.cols) + + @property + def tvec(self): + return self._tvec + + @tvec.setter + def tvec(self, x): + x = np.array(x).flatten() + assert len(x) == 3, 'input must have length = 3' + self._tvec = x + + @property + def tilt(self): + return self._tilt + + @tilt.setter + def tilt(self, x): + assert len(x) == 3, 'input must have length = 3' + self._tilt = np.array(x).squeeze() + + @property + def bvec(self): + return self._bvec + + @bvec.setter + def bvec(self, x): + x = np.array(x).flatten() + assert ( + len(x) == 3 and sum(x * x) > 1 - ct.sqrt_epsf + ), 'input must have length = 3 and have unit magnitude' + self._bvec = x + + @property + def xrs_dist(self): + return self._xrs_dist + + @xrs_dist.setter + def xrs_dist(self, x): + assert x is None or np.isscalar( + x + ), f"'source_distance' must be None or scalar; you input '{x}'" + self._xrs_dist = x + + @property + def evec(self): + return self._evec + + @evec.setter + def evec(self, x): + x = np.array(x).flatten() + assert ( + len(x) == 3 and sum(x * x) > 1 - ct.sqrt_epsf + ), 'input must have length = 3 and have unit magnitude' + self._evec = x + + @property + def distortion(self): + return self._distortion + + @distortion.setter + def distortion(self, x): + if x is not None: + registry = distortion_registry.distortion_registry + check_arg = np.zeros(len(registry), dtype=bool) + for i, dcls in enumerate(registry.values()): + check_arg[i] = isinstance(x, dcls) + assert np.any(check_arg), 'input distortion is not in registry!' + self._distortion = x + + @property + def rmat(self): + return make_rmat_of_expmap(self.tilt) + + @property + def normal(self): + return self.rmat[:, 2] + + # ...memoize??? + @property + def pixel_coords(self): + pix_i, pix_j = np.meshgrid( + self.row_pixel_vec, self.col_pixel_vec, indexing='ij' + ) + return pix_i, pix_j + + # ========================================================================= + # METHODS + # ========================================================================= + + def pixel_Q(self, energy: np.floating, + origin: np.ndarray = ct.zeros_3) -> np.ndarray: + '''get the equivalent momentum transfer + for the angles. 
+
+        Parameters
+        ----------
+        energy: float
+            incident photon energy in keV
+        origin: np.ndarray
+            origin of diffraction volume
+
+        Returns
+        -------
+        np.ndarray
+            pixel wise Q in A^-1
+
+        '''
+        lam = ct.keVToAngstrom(energy)
+        tth, _ = self.pixel_angles(origin=origin)
+        return 4.*np.pi*np.sin(tth*0.5)/lam
+
+    def pixel_compton_energy_loss(
+        self,
+        energy: np.floating,
+        origin: np.ndarray = ct.zeros_3,
+    ) -> np.ndarray:
+        '''inelastic compton scattering leads
+        to energy loss of the incident photons.
+        compute the final energy of the photons
+        for each pixel.
+
+        Parameters
+        ----------
+        energy: float
+            incident photon energy in keV
+        origin: np.ndarray
+            origin of diffraction volume
+
+        Returns
+        -------
+        np.ndarray
+            pixel wise energy of inelastically
+            scattered photons in keV
+        '''
+        energy = np.asarray(energy)
+        tth, _ = self.pixel_angles()
+        ang_fact = (1 - np.cos(tth))
+        beta = energy/ct.cRestmasskeV
+        return energy/(1 + beta*ang_fact)
+
+    def pixel_compton_attenuation_length(
+        self,
+        energy: np.floating,
+        density: np.floating,
+        formula: str,
+        origin: np.ndarray = ct.zeros_3,
+    ) -> np.ndarray:
+        '''each pixel intercepts inelastically
+        scattered photons of different energy.
+        the attenuation length and the transmission
+        for these photons are different. this function
+        calculates the attenuation length for each pixel
+        on the detector.
+
+        Parameters
+        ----------
+        energy: float
+            incident photon energy in keV
+        density: float
+            density of material in g/cc
+        formula: str
+            formula of the material scattering
+        origin: np.ndarray
+            origin of diffraction volume
+
+        Returns
+        -------
+        np.ndarray
+            pixel wise attenuation length of compton
+            scattered photons
+        '''
+        pixel_energy = self.pixel_compton_energy_loss(energy)
+
+        pixel_attenuation_length = calculate_linear_absorption_length(
+            density,
+            formula,
+            pixel_energy.flatten(),
+        )
+        return pixel_attenuation_length.reshape(self.shape)
+
+    def compute_compton_scattering_intensity(
+        self,
+        energy: np.floating,
+        rMat_s: np.ndarray,
+        physics_package: AbstractPhysicsPackage,
+        origin: np.ndarray = ct.zeros_3,
+    ) -> tuple[np.ndarray, np.ndarray, np.ndarray]:
+        '''compute the theoretical compton scattering
+        signal on the detector. this value is corrected
+        for the transmission of compton scattered photons
+        and normalized before being subtracted from the
+        raw intensity
+
+        Parameters
+        ----------
+        energy: float
+            energy of incident photon
+        rMat_s: np.ndarray
+            rotation matrix of sample orientation
+        physics_package: AbstractPhysicsPackage
+            physics package information
+
+        Returns
+        -------
+        compton_intensity: np.ndarray
+            transmission corrected compton scattering
+            intensity
+        '''
+
+        q = self.pixel_Q(energy)
+        inc_s = calculate_incoherent_scattering(
+            physics_package.sample_material,
+            q.flatten()).reshape(self.shape)
+
+        inc_w = calculate_incoherent_scattering(
+            physics_package.window_material,
+            q.flatten()).reshape(self.shape)
+
+        t_s = self.calc_compton_physics_package_transmission(
+            energy, rMat_s, physics_package)
+
+        t_w = self.calc_compton_window_transmission(
+            energy, rMat_s, physics_package)
+
+        return inc_s * t_s + inc_w * t_w, t_s, t_w
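+    # Pipeline sketch (editorial, illustrative; 'LiF' is a placeholder
+    # formula): for an incident energy E in keV,
+    #
+    #     q = panel.pixel_Q(E)                        # A^-1, per pixel
+    #     e_out = panel.pixel_compton_energy_loss(E)  # keV after Compton shift
+    #     mu = panel.pixel_compton_attenuation_length(E, rho, 'LiF')
+    #
+    # compute_compton_scattering_intensity() folds the per-pixel transmissions
+    # into the incoherent-scattering signal to build a subtractable background.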
+    def polarization_factor(self, f_hor, f_vert, unpolarized=False):
+        """
+        Calculate the polarization factor for every pixel.
+
+        Parameters
+        ----------
+        f_hor : float
+            the fraction of horizontal polarization. for XFELs
+            this is close to 1.
+        f_vert : TYPE
+            the fraction of vertical polarization, which is ~0 for XFELs.
+
+        Raises
+        ------
+        RuntimeError
+            DESCRIPTION.
+
+        Returns
+        -------
+        TYPE
+            DESCRIPTION.
+
+        """
+        s = f_hor + f_vert
+        if np.abs(s - 1) > ct.sqrt_epsf:
+            msg = (
+                "sum of fraction of "
+                "horizontal and vertical polarizations "
+                "must be equal to 1."
+            )
+            raise RuntimeError(msg)
+
+        if f_hor < 0 or f_vert < 0:
+            msg = (
+                "fraction of polarization in horizontal "
+                "or vertical directions can't be negative."
+            )
+            raise RuntimeError(msg)
+
+        tth, eta = self.pixel_angles()
+        kwargs = {
+            'tth': tth,
+            'eta': eta,
+            'f_hor': f_hor,
+            'f_vert': f_vert,
+            'unpolarized': unpolarized,
+        }
+
+        return _polarization_factor(**kwargs)
+
+    def lorentz_factor(self):
+        """
+        calculate the lorentz factor for every pixel
+
+        Parameters
+        ----------
+        None
+
+        Raises
+        ------
+        None
+
+        Returns
+        -------
+        numpy.ndarray
+            returns an array the same size as the detector panel
+            with each element containing the lorentz factor of the
+            corresponding pixel
+        """
+        tth, eta = self.pixel_angles()
+        return _lorentz_factor(tth)
+
+    def config_dict(
+        self,
+        chi=0,
+        tvec=ct.zeros_3,
+        beam_energy=beam_energy_DFLT,
+        beam_vector=ct.beam_vec,
+        sat_level=None,
+        panel_buffer=None,
+        style='yaml',
+    ):
+        """
+        Return a dictionary of detector parameters.
+
+        Optional instrument level parameters. This is a convenience function
+        to work with the APIs in several functions in xrdutil.
+
+        Parameters
+        ----------
+        chi : float, optional
+            DESCRIPTION. The default is 0.
+        tvec : array_like (3,), optional
+            DESCRIPTION. The default is ct.zeros_3.
+        beam_energy : float, optional
+            DESCRIPTION. The default is beam_energy_DFLT.
+        beam_vector : array_like (3,), optional
+            DESCRIPTION. The default is ct.beam_vec.
+        sat_level : scalar, optional
+            DESCRIPTION. The default is None.
+        panel_buffer : scalar, array_like (2,), optional
+            DESCRIPTION. The default is None.
+
+        Returns
+        -------
+        config_dict : dict
+            DESCRIPTION.
+
+        """
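+        # Shape of the returned dict (editorial sketch, abridged):
+        #
+        #     {'detector': {'transform': {...}, 'pixels': {...}, ...},
+        #      'oscillation_stage': {'chi': ..., 'translation': ...},
+        #      'beam': {'energy': ..., 'vector': ...}}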
+ + """ + assert style.lower() in ['yaml', 'hdf5'], ( + "style must be either 'yaml', or 'hdf5'; you gave '%s'" % style + ) + + config_dict = {} + + # ===================================================================== + # DETECTOR PARAMETERS + # ===================================================================== + # transform and pixels + # + # assign local vars; listify if necessary + tilt = self.tilt + translation = self.tvec + roi = ( + None + if self.roi is None + else np.array([self.roi[0][0], self.roi[1][0]]).flatten() + ) + if style.lower() == 'yaml': + tilt = tilt.tolist() + translation = translation.tolist() + tvec = tvec.tolist() + roi = None if roi is None else roi.tolist() + + det_dict = dict( + detector_type=self.detector_type, + transform=dict( + tilt=tilt, + translation=translation, + ), + pixels=dict( + rows=int(self.rows), + columns=int(self.cols), + size=[float(self.pixel_size_row), float(self.pixel_size_col)], + ), + ) + + if roi is not None: + # Only add roi if it is not None + det_dict['pixels']['roi'] = roi + + if self.group is not None: + # Only add group if it is not None + det_dict['group'] = self.group + + # distortion + if self.distortion is not None: + dparams = self.distortion.params + if style.lower() == 'yaml': + dparams = dparams.tolist() + dist_d = dict( + function_name=self.distortion.maptype, parameters=dparams + ) + det_dict['distortion'] = dist_d + + # saturation level + if sat_level is None: + sat_level = self.saturation_level + det_dict['saturation_level'] = float(sat_level) + + # panel buffer + if panel_buffer is None: + # could be none, a 2-element list, or a 2-d array (rows, cols) + panel_buffer = copy.deepcopy(self.panel_buffer) + # !!! now we have to do some style-dependent munging of panel_buffer + if isinstance(panel_buffer, np.ndarray): + if panel_buffer.ndim == 1: + assert len(panel_buffer) == 2, "length of 1-d buffer must be 2" + # if here is a 2-element array + if style.lower() == 'yaml': + panel_buffer = panel_buffer.tolist() + elif panel_buffer.ndim == 2: + if style.lower() == 'yaml': + # !!! can't practically write array-like buffers to YAML + # so forced to clobber + print("clobbering panel buffer array in yaml-ready output") + panel_buffer = [0.0, 0.0] + else: + raise RuntimeError( + "panel buffer ndim must be 1 or 2; you specified %d" + % panel_buffer.ndmin + ) + elif panel_buffer is None: + # still None on self + # !!! this gets handled by unwrap_dict_to_h5 now + + # if style.lower() == 'hdf5': + # # !!! can't write None to hdf5; substitute with zeros + # panel_buffer = np.r_[0., 0.] + pass + det_dict['buffer'] = panel_buffer + + det_dict.update(self.extra_config_kwargs) + + # ===================================================================== + # SAMPLE STAGE PARAMETERS + # ===================================================================== + stage_dict = dict(chi=chi, translation=tvec) + + # ===================================================================== + # BEAM PARAMETERS + # ===================================================================== + # !!! 
+        # !!! make_reflection_patches is still using the vector
+        # azim, pola = calc_angles_from_beam_vec(beam_vector)
+        # beam_dict = dict(
+        #     energy=beam_energy,
+        #     vector=dict(
+        #         azimuth=azim,
+        #         polar_angle=pola
+        #     )
+        # )
+        beam_dict = dict(energy=beam_energy, vector=beam_vector)
+
+        config_dict['detector'] = det_dict
+        config_dict['oscillation_stage'] = stage_dict
+        config_dict['beam'] = beam_dict
+
+        return config_dict
+
+    def cartToPixel(self, xy_det, pixels=False, apply_distortion=False):
+        """
+        Converts cartesian coordinates to pixel coordinates
+
+        Parameters
+        ----------
+        xy_det : array_like
+            The (n, 2) vstacked array of (x, y) pairs in the reference
+            cartesian frame (possibly subject to distortion).
+        pixels : bool, optional
+            If True, return discrete pixel indices; otherwise fractional pixel
+            coordinates are returned. The default is False.
+        apply_distortion : bool, optional
+            If True, apply self.distortion to the input (if applicable).
+            The default is False.
+
+        Returns
+        -------
+        ij_det : array_like
+            The (n, 2) array of vstacked (i, j) coordinates in the pixel
+            reference frame where i is the (slow) row dimension and j is the
+            (fast) column dimension.
+
+        """
+        xy_det = np.atleast_2d(xy_det)
+        if apply_distortion and self.distortion is not None:
+            xy_det = self.distortion.apply(xy_det)
+
+        npts = len(xy_det)
+
+        tmp_ji = xy_det - np.tile(self.corner_ul, (npts, 1))
+        i_pix = -tmp_ji[:, 1] / self.pixel_size_row - 0.5
+        j_pix = tmp_ji[:, 0] / self.pixel_size_col - 0.5
+
+        ij_det = np.vstack([i_pix, j_pix]).T
+        if pixels:
+            # Hide any runtime warnings in this conversion. Their output values
+            # will certainly be off the detector, which is fine.
+            with np.errstate(invalid='ignore'):
+                ij_det = np.array(np.round(ij_det), dtype=int)
+
+        return ij_det
+
+    def pixelToCart(self, ij_det):
+        """
+        Convert a vstacked array or list of [i,j] pixel indices
+        (or UL corner-based points) to (x,y) in the
+        cartesian frame {Xd, Yd, Zd}
+        """
+        ij_det = np.atleast_2d(ij_det)
+
+        x = (ij_det[:, 1] + 0.5) * self.pixel_size_col + self.corner_ll[0]
+        y = (
+            self.rows - ij_det[:, 0] - 0.5
+        ) * self.pixel_size_row + self.corner_ll[1]
+        return np.vstack([x, y]).T
+
+    def angularPixelSize(self, xy, rMat_s=None, tVec_s=None, tVec_c=None):
+        """
+        Notes
+        -----
+        !!! assumes xy are in raw (distorted) frame, if applicable
+        """
+        # munge kwargs
+        if rMat_s is None:
+            rMat_s = ct.identity_3x3
+        if tVec_s is None:
+            tVec_s = ct.zeros_3x1
+        if tVec_c is None:
+            tVec_c = ct.zeros_3x1
+
+        # FIXME: perhaps not necessary, but safe...
+        xy = np.atleast_2d(xy)
+
+        '''
+        # ---------------------------------------------------------------------
+        # TODO: needs testing and memoized gradient arrays!
+
+    def pixelToCart(self, ij_det):
+        """
+        Convert a vstacked array or list of [i, j] pixel indices
+        (or UL corner-based points) to (x, y) in the cartesian
+        frame {Xd, Yd, Zd}.
+        """
+        ij_det = np.atleast_2d(ij_det)
+
+        x = (ij_det[:, 1] + 0.5) * self.pixel_size_col + self.corner_ll[0]
+        y = (
+            self.rows - ij_det[:, 0] - 0.5
+        ) * self.pixel_size_row + self.corner_ll[1]
+        return np.vstack([x, y]).T
+
+    def angularPixelSize(self, xy, rMat_s=None, tVec_s=None, tVec_c=None):
+        """
+        Notes
+        -----
+        !!! assumes xy are in raw (distorted) frame, if applicable
+        """
+        # munge kwargs
+        if rMat_s is None:
+            rMat_s = ct.identity_3x3
+        if tVec_s is None:
+            tVec_s = ct.zeros_3x1
+        if tVec_c is None:
+            tVec_c = ct.zeros_3x1
+
+        # FIXME: perhaps not necessary, but safe...
+        xy = np.atleast_2d(xy)
+
+        '''
+        # ---------------------------------------------------------------------
+        # TODO: needs testing and memoized gradient arrays!
+        # ---------------------------------------------------------------------
+        # need origin arg
+        origin = np.dot(rMat_s, tVec_c).flatten() + tVec_s.flatten()
+
+        # get pixel indices
+        i_crds = cellIndices(self.row_edge_vec, xy[:, 1])
+        j_crds = cellIndices(self.col_edge_vec, xy[:, 0])
+
+        ptth_grad = self.pixel_tth_gradient(origin=origin)[i_crds, j_crds]
+        peta_grad = self.pixel_eta_gradient(origin=origin)[i_crds, j_crds]
+
+        return np.vstack([ptth_grad, peta_grad]).T
+        '''
+        # call xrdutil function
+        ang_ps = xrdutil.angularPixelSize(
+            xy,
+            (self.pixel_size_row, self.pixel_size_col),
+            self.rmat,
+            rMat_s,
+            self.tvec,
+            tVec_s,
+            tVec_c,
+            distortion=self.distortion,
+            beamVec=self.bvec,
+            etaVec=self.evec,
+        )
+        return ang_ps
+
+    def clip_to_panel(self, xy, buffer_edges=True):
+        """
+        If self.roi is not None, it is used by default.
+
+        TODO: check if need shape kwarg
+        TODO: optimize ROI search better than list comprehension below
+        TODO: panel_buffer can be a 2-d boolean mask, but needs testing
+
+        """
+        xy = np.atleast_2d(xy)
+
+        '''
+        # !!! THIS LOGIC IS OBSOLETE
+        if self.roi is not None:
+            ij_crds = self.cartToPixel(xy, pixels=True)
+            ii, jj = polygon(self.roi[:, 0], self.roi[:, 1],
+                             shape=(self.rows, self.cols))
+            on_panel_rows = [i in ii for i in ij_crds[:, 0]]
+            on_panel_cols = [j in jj for j in ij_crds[:, 1]]
+            on_panel = np.logical_and(on_panel_rows, on_panel_cols)
+        else:
+        '''
+        xlim = 0.5 * self.col_dim
+        ylim = 0.5 * self.row_dim
+        if buffer_edges and self.panel_buffer is not None:
+            if self.panel_buffer.ndim == 2:
+                pix = self.cartToPixel(xy, pixels=True)
+
+                roff = np.logical_or(pix[:, 0] < 0, pix[:, 0] >= self.rows)
+                coff = np.logical_or(pix[:, 1] < 0, pix[:, 1] >= self.cols)
+
+                idx = np.logical_or(roff, coff)
+
+                on_panel = np.full(pix.shape[0], False)
+                valid_pix = pix[~idx, :]
+                on_panel[~idx] = self.panel_buffer[
+                    valid_pix[:, 0], valid_pix[:, 1]
+                ]
+            else:
+                xlim -= self.panel_buffer[0]
+                ylim -= self.panel_buffer[1]
+                on_panel_x = np.logical_and(
+                    xy[:, 0] >= -xlim, xy[:, 0] <= xlim
+                )
+                on_panel_y = np.logical_and(
+                    xy[:, 1] >= -ylim, xy[:, 1] <= ylim
+                )
+                on_panel = np.logical_and(on_panel_x, on_panel_y)
+        elif not buffer_edges or self.panel_buffer is None:
+            on_panel_x = np.logical_and(xy[:, 0] >= -xlim, xy[:, 0] <= xlim)
+            on_panel_y = np.logical_and(xy[:, 1] >= -ylim, xy[:, 1] <= ylim)
+            on_panel = np.logical_and(on_panel_x, on_panel_y)
+        return xy[on_panel, :], on_panel
+
+    def interpolate_nearest(self, xy, img, pad_with_nans=True):
+        """
+        TODO: revisit normalization in here?
+
+        """
+        is_2d = img.ndim == 2
+        right_shape = img.shape[0] == self.rows and img.shape[1] == self.cols
+        assert (
+            is_2d and right_shape
+        ), "input image must be 2-d with shape (%d, %d)" % (
+            self.rows,
+            self.cols,
+        )
+
+        # initialize output with nans
+        if pad_with_nans:
+            int_xy = np.nan * np.ones(len(xy))
+        else:
+            int_xy = np.zeros(len(xy))
+
+        # clip away points too close to or off the edges of the detector
+        xy_clip, on_panel = self.clip_to_panel(xy, buffer_edges=True)
+
+        # get pixel indices of clipped points
+        i_src = cellIndices(self.row_pixel_vec, xy_clip[:, 1])
+        j_src = cellIndices(self.col_pixel_vec, xy_clip[:, 0])
+
+        # assign the nearest-pixel values to the on-panel points
+        int_vals = img[i_src, j_src]
+        int_xy[on_panel] = int_vals
+        return int_xy
+
+    def interpolate_bilinear(self, xy, img, pad_with_nans=True,
+                             clip_to_panel=True,
+                             on_panel: Optional[np.ndarray] = None):
+        """
+        Interpolate an image array at the specified cartesian points.
+
+        Parameters
+        ----------
+        xy : array_like, (n, 2)
+            Array of cartesian coordinates in the image plane at which
+            to evaluate intensity.
+        img : array_like
+            2-dimensional image array.
+        pad_with_nans : bool, optional
+            Toggle for assigning NaN to points that fall off the detector.
+            The default is True.
+        on_panel : np.ndarray, optional
+            An optional boolean mask indicating which points are on the
+            panel.  If provided, the clip_to_panel() call is skipped,
+            which can be useful for performance.
+
+        Returns
+        -------
+        int_xy : array_like, (n,)
+            The array of interpolated intensities at each of the n input
+            coordinates.
+
+        Notes
+        -----
+        TODO: revisit normalization in here?
+        """
+
+        is_2d = img.ndim == 2
+        right_shape = img.shape[0] == self.rows and img.shape[1] == self.cols
+        assert (
+            is_2d and right_shape
+        ), "input image must be 2-d with shape (%d, %d)" % (
+            self.rows,
+            self.cols,
+        )
+
+        # initialize output with nans
+        if pad_with_nans:
+            int_xy = np.nan * np.ones(len(xy))
+        else:
+            int_xy = np.zeros(len(xy))
+
+        if on_panel is None:
+            # clip away points too close to or off the edges of the detector
+            xy_clip, on_panel = self.clip_to_panel(xy, buffer_edges=True)
+        else:
+            xy_clip = xy[on_panel]
+
+        # grab fractional pixel indices of clipped points
+        ij_frac = self.cartToPixel(xy_clip)
+
+        # get floors/ceils from array of pixel _centers_
+        # and fix indices running off the pixel centers
+        # !!! notice we already clipped points to the panel!
+        i_floor = cellIndices(self.row_pixel_vec, xy_clip[:, 1])
+        i_floor_img = _fix_indices(i_floor, 0, self.rows - 1)
+
+        j_floor = cellIndices(self.col_pixel_vec, xy_clip[:, 0])
+        j_floor_img = _fix_indices(j_floor, 0, self.cols - 1)
+
+        # ceilings from floors
+        i_ceil = i_floor + 1
+        i_ceil_img = _fix_indices(i_ceil, 0, self.rows - 1)
+
+        j_ceil = j_floor + 1
+        j_ceil_img = _fix_indices(j_ceil, 0, self.cols - 1)
+
+        # first interpolate at top/bottom rows
+        row_floor_int = (j_ceil - ij_frac[:, 1]) * img[
+            i_floor_img, j_floor_img
+        ] + (ij_frac[:, 1] - j_floor) * img[i_floor_img, j_ceil_img]
+        row_ceil_int = (j_ceil - ij_frac[:, 1]) * img[
+            i_ceil_img, j_floor_img
+        ] + (ij_frac[:, 1] - j_floor) * img[i_ceil_img, j_ceil_img]
+
+        # next interpolate across cols
+        int_vals = (i_ceil - ij_frac[:, 0]) * row_floor_int + (
+            ij_frac[:, 0] - i_floor
+        ) * row_ceil_int
+        int_xy[on_panel] = int_vals
+        return int_xy
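+
+    # Editorial note: the interpolation above is the standard separable
+    # bilinear form.  For fractional pixel coordinates (i, j) bracketed
+    # by rows (i0, i1) and columns (j0, j1):
+    #
+    #     top    = (j1 - j) * img[i0, j0] + (j - j0) * img[i0, j1]
+    #     bottom = (j1 - j) * img[i1, j0] + (j - j0) * img[i1, j1]
+    #     value  = (i1 - i) * top + (i - i0) * bottom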
+
+    def make_powder_rings(
+        self,
+        pd,
+        merge_hkls=False,
+        delta_tth=None,
+        delta_eta=10.0,
+        eta_period=None,
+        eta_list=None,
+        rmat_s=ct.identity_3x3,
+        tvec_s=ct.zeros_3,
+        tvec_c=ct.zeros_3,
+        full_output=False,
+        tth_distortion=None,
+    ):
+        """
+        Generate points on Debye-Scherrer rings over the detector.
+
+        !!! it is assumed that rmat_s is built from (chi, ome), as is the
+            case for HEDM!
+
+        Parameters
+        ----------
+        pd : PlaneData or array_like
+            Object determining the 2theta positions of the rings; if
+            array_like, interpreted as a list of 2theta angles IN DEGREES.
+        merge_hkls : bool, optional
+            Flag for merging overlapping 2theta ranges.  The default is
+            False.
+        delta_tth : scalar, optional
+            The radial (i.e. 2theta) width of the rings IN DEGREES;
+            required if pd is array_like.  The default is None.
+        delta_eta : scalar, optional
+            The azimuthal (i.e. eta) width of each sector IN DEGREES.
+            The default is 10.0.
+        eta_period : array_like (2,), optional
+            The eta period IN RADIANS.  The default is None, which is
+            equivalent to (-pi, pi).
+        eta_list : array_like, optional
+            Explicit eta sector centers IN DEGREES.  The default is None.
+        rmat_s : array_like (3, 3), optional
+            The sample stage rotation matrix.  The default is
+            ct.identity_3x3.
+        tvec_s : array_like (3,), optional
+            The sample stage translation.  The default is ct.zeros_3.
+        tvec_c : array_like (3,), optional
+            The crystal translation.  The default is ct.zeros_3.
+        full_output : bool, optional
+            If True, also return the map indices and eta bin edges.
+            The default is False.
+        tth_distortion : special class, optional
+            Special distortion class.  The default is None.
+
+        Raises
+        ------
+        RuntimeError
+            If pd is array_like and delta_tth is not given.
+
+        Returns
+        -------
+        valid_ang : list
+            The (tth, eta) angles of the valid patch centers, per ring.
+        valid_xy : list
+            The cartesian coordinates of the valid patch centers, per ring.
+        tth_ranges : array_like
+            The (min, max) 2theta range of each ring.
+        map_indices : list, optional
+            Boolean arrays flagging the on-panel sectors; only returned
+            if full_output is True.
+        eta_edges : array_like, optional
+            The eta bin edges; only returned if full_output is True.
+
+        """
+        if tth_distortion is not None:
+            tnorms = mutil.rowNorm(np.vstack([tvec_s, tvec_c]))
+            assert np.all(
+                tnorms < ct.sqrt_epsf
+            ), "If using a distortion function, translations must be zero"
+
+        # in case you want to give it tth angles directly
+        if isinstance(pd, PlaneData):
+            pd = PlaneData(None, pd)
+            if delta_tth is not None:
+                pd.tThWidth = np.radians(delta_tth)
+            else:
+                delta_tth = np.degrees(pd.tThWidth)
+
+            # !!! conversions, meh...
+            del_eta = np.radians(delta_eta)
+
+            # do merging if asked
+            if merge_hkls:
+                _, tth_ranges = pd.getMergedRanges(cullDupl=True)
+                tth = np.average(tth_ranges, axis=1)
+            else:
+                tth_ranges = pd.getTThRanges()
+                tth = pd.getTTh()
+            tth_pm = tth_ranges - np.tile(tth, (2, 1)).T
+            sector_vertices = np.vstack(
+                [[i[0], -del_eta,
+                  i[0], del_eta,
+                  i[1], del_eta,
+                  i[1], -del_eta,
+                  0.0, 0.0]
+                 for i in tth_pm]
+            )
+        else:
+            # Okay, we have an array-like tth specification
+            tth = np.array(pd).flatten()
+            if delta_tth is None:
+                raise RuntimeError(
+                    "If supplying a 2theta list as first arg, "
+                    + "must supply a delta_tth"
+                )
+            tth_pm = 0.5 * delta_tth * np.r_[-1.0, 1.0]
+            tth_ranges = np.radians([i + tth_pm for i in tth])  # !!! units
+            sector_vertices = np.tile(
+                0.5
+                * np.radians([-delta_tth, -delta_eta,
+                              -delta_tth, delta_eta,
+                              delta_tth, delta_eta,
+                              delta_tth, -delta_eta,
+                              0.0, 0.0]),
+                (len(tth), 1),
+            )
+            # !!! conversions, meh...
+            tth = np.radians(tth)
+            del_eta = np.radians(delta_eta)
+
+        # for generating rings, make eta vector in correct period
+        if eta_period is None:
+            eta_period = (-np.pi, np.pi)
+
+        if eta_list is None:
+            neta = int(360.0 / float(delta_eta))
+            # this is the vector of ETA EDGES
+            eta_edges = mapAngle(
+                np.radians(delta_eta * np.linspace(0.0, neta, num=neta + 1))
+                + eta_period[0],
+                eta_period,
+            )
+
+            # get eta bin centers from edges
+            """
+            # !!! this way is probably overkill, since we have delta eta
+            eta_centers = np.average(
+                np.vstack([eta[:-1], eta[1:]]),
+                axis=0)
+            """
+            # !!! should be safe as eta_edges are monotonic
+            eta_centers = eta_edges[:-1] + 0.5 * del_eta
+        else:
+            eta_centers = np.radians(eta_list).flatten()
+            neta = len(eta_centers)
+            eta_edges = (
+                np.tile(eta_centers, (2, 1))
+                + np.tile(0.5 * del_eta * np.r_[-1, 1], (neta, 1)).T
+            ).T.flatten()
+
+        # get chi and ome from rmat_s
+        # !!! API ambiguity
+        # !!! this assumes rmat_s was made from the composition
+        # !!! rmat_s = R(Xl, chi) * R(Yl, ome)
+        ome = np.arctan2(rmat_s[0, 2], rmat_s[0, 0])
+
+        # make list of angle tuples
+        angs = [
+            np.vstack([i * np.ones(neta), eta_centers, ome * np.ones(neta)])
+            for i in tth
+        ]
+
+        # need xy coords and pixel sizes
+        valid_ang = []
+        valid_xy = []
+        map_indices = []
+        npp = 5  # [ll, ul, ur, lr, center]
+        for i_ring in range(len(angs)):
+            # expand angles to patch vertices
+            these_angs = angs[i_ring].T
+
+            # push to vertices to see who falls off
+            # FIXME: clipping is not checking if masked regions are on the
+            #        patch interior
+            patch_vertices = (
+                np.tile(these_angs[:, :2], (1, npp))
+                + np.tile(sector_vertices[i_ring], (neta, 1))
+            ).reshape(npp * neta, 2)
+
+            # find vertices that all fall on the panel
+            # !!! note API ambiguity regarding rmat_s above
+            all_xy = self.angles_to_cart(
+                patch_vertices,
+                rmat_s=rmat_s,
+                tvec_s=tvec_s,
+                rmat_c=None,
+                tvec_c=tvec_c,
+                apply_distortion=True,
+            )
+
+            _, on_panel = self.clip_to_panel(all_xy)
+
+            # all vertices must be on...
+            patch_is_on = np.all(on_panel.reshape(neta, npp), axis=1)
+            patch_xys = all_xy.reshape(neta, 5, 2)[patch_is_on]
+
+            # !!! Have to apply after clipping; distortion can get wonky near
+            #     the edge of the panel, and it is assumed to be <~1 deg
+            # !!! The tth_ranges are NOT correct!
+            if tth_distortion is not None:
+                patch_valid_angs = tth_distortion.apply(
+                    self.angles_to_cart(these_angs[patch_is_on, :2]),
+                    return_nominal=True,
+                )
+                patch_valid_xys = self.angles_to_cart(
+                    patch_valid_angs, apply_distortion=True
+                )
+            else:
+                patch_valid_angs = these_angs[patch_is_on, :2]
+                patch_valid_xys = patch_xys[:, -1, :].squeeze()
+
+            # form output arrays
+            valid_ang.append(patch_valid_angs)
+            valid_xy.append(patch_valid_xys)
+            map_indices.append(patch_is_on)
+        # ??? is this option necessary?
+        if full_output:
+            return valid_ang, valid_xy, tth_ranges, map_indices, eta_edges
+        else:
+            return valid_ang, valid_xy, tth_ranges
+
+    def map_to_plane(self, pts, rmat, tvec):
+        """
+        Map detector points to a specified plane.
+
+        Parameters
+        ----------
+        pts : array_like (n, 2)
+            The detector-plane points to map.
+        rmat : array_like (3, 3)
+            Rotation matrix of the target plane; its normal is the third
+            column.
+        tvec : array_like (3,)
+            Translation vector locating the target plane in the lab frame.
+
+        Returns
+        -------
+        array_like (n, 2)
+            The mapped points in the frame of the target plane.
+
+        Notes
+        -----
+        by convention:
+
+        n * (u*pts_l - tvec) = 0
+
+        [pts]_l = rmat*[pts]_m + tvec
+
+        """
+        # arg munging
+        pts = np.atleast_2d(pts)
+        npts = len(pts)
+
+        # map plane normal & translation vector, LAB FRAME
+        nvec_map_lab = rmat[:, 2].reshape(3, 1)
+        tvec_map_lab = np.atleast_2d(tvec).reshape(3, 1)
+        tvec_d_lab = np.atleast_2d(self.tvec).reshape(3, 1)
+
+        # put pts as 3-d in panel CS and transform to 3-d lab coords
+        pts_det = np.hstack([pts, np.zeros((npts, 1))])
+        pts_lab = np.dot(self.rmat, pts_det.T) + tvec_d_lab
+
+        # scaling along pts vectors to hit map plane
+        u = np.dot(nvec_map_lab.T, tvec_map_lab) / np.dot(
+            nvec_map_lab.T, pts_lab
+        )
+
+        # pts on map plane, in LAB FRAME
+        pts_map_lab = np.tile(u, (3, 1)) * pts_lab
+
+        return np.dot(rmat.T, pts_map_lab - tvec_map_lab)[:2, :].T
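+
+    # Editorial derivation sketch for the scale factor `u` in
+    # map_to_plane: a lab-frame ray through point p intersects the map
+    # plane with normal n and origin t where n . (u*p - t) = 0, so
+    #
+    #     u = (n . t) / (n . p)
+    #
+    # which is the ratio formed above from nvec_map_lab, tvec_map_lab,
+    # and pts_lab.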
+ + """ + # grab B-matrix from plane data + bMat = plane_data.latVecOps['B'] + + # reconcile wavelength + # * added sanity check on exclusions here; possible to + # * make some reflections invalid (NaN) + if wavelength is None: + wavelength = plane_data.wavelength + else: + if plane_data.wavelength != wavelength: + plane_data.wavelength = ct.keVToAngstrom(wavelength) + assert not np.any( + np.isnan(plane_data.getTTh()) + ), "plane data exclusions incompatible with wavelength" + + # vstacked G-vector id, h, k, l + full_hkls = xrdutil._fetch_hkls_from_planedata(plane_data) + + """ LOOP OVER GRAINS """ + valid_ids = [] + valid_hkls = [] + valid_angs = [] + valid_xys = [] + ang_pixel_size = [] + for gparm in grain_param_list: + + # make useful parameters + rMat_c = make_rmat_of_expmap(gparm[:3]) + tVec_c = gparm[3:6] + vInv_s = gparm[6:] + + # All possible bragg conditions as vstacked [tth, eta, ome] + # for each omega solution + angList = np.vstack( + oscill_angles_of_hkls( + full_hkls[:, 1:], + chi, + rMat_c, + bMat, + wavelength, + v_inv=vInv_s, + beam_vec=self.bvec, + ) + ) + + # filter by eta and omega ranges + # ??? get eta range from detector? + allAngs, allHKLs = xrdutil._filter_hkls_eta_ome( + full_hkls, angList, eta_ranges, ome_ranges + ) + allAngs[:, 2] = mapAngle(allAngs[:, 2], ome_period) + + # find points that fall on the panel + det_xy, rMat_s, on_plane = xrdutil._project_on_detector_plane( + allAngs, + self.rmat, + rMat_c, + chi, + self.tvec, + tVec_c, + tVec_s, + self.distortion, + self.bvec, + ) + xys_p, on_panel = self.clip_to_panel(det_xy) + valid_xys.append(xys_p) + + # filter angs and hkls that are on the detector plane + # !!! check this -- seems unnecessary but the results of + # _project_on_detector_plane() can have len < the input? + # the output of _project_on_detector_plane has been modified to + # hand back the index array to remedy this JVB 2020-05-27 + if np.any(~on_plane): + allAngs = np.atleast_2d(allAngs[on_plane, :]) + allHKLs = np.atleast_2d(allHKLs[on_plane, :]) + + # grab hkls and gvec ids for this panel + valid_hkls.append(allHKLs[on_panel, 1:]) + valid_ids.append(allHKLs[on_panel, 0]) + + # reflection angles (voxel centers) and pixel size in (tth, eta) + valid_angs.append(allAngs[on_panel, :]) + ang_pixel_size.append(self.angularPixelSize(xys_p)) + return valid_ids, valid_hkls, valid_angs, valid_xys, ang_pixel_size + + def simulate_laue_pattern( + self, + crystal_data, + minEnergy=5.0, + maxEnergy=35.0, + rmat_s=None, + tvec_s=None, + grain_params=None, + beam_vec=None, + ): + """ """ + if isinstance(crystal_data, PlaneData): + + plane_data = crystal_data + + # grab the expanded list of hkls from plane_data + hkls = np.hstack(plane_data.getSymHKLs()) + + # and the unit plane normals (G-vectors) in CRYSTAL FRAME + gvec_c = np.dot(plane_data.latVecOps['B'], hkls) + + # Filter out g-vectors going in the wrong direction. `gvec_to_xy()` used + # to do this, but not anymore. + to_keep = np.dot(gvec_c.T, self.bvec) <= 0 + + hkls = hkls[:, to_keep] + gvec_c = gvec_c[:, to_keep] + elif len(crystal_data) == 2: + # !!! 
+            hkls = np.array(crystal_data[0])
+            bmat = crystal_data[1]
+            gvec_c = np.dot(bmat, hkls)
+        else:
+            raise RuntimeError(
+                f'argument list not understood: {crystal_data=}'
+            )
+        nhkls_tot = hkls.shape[1]
+
+        # parse energy ranges
+        # TODO: allow for spectrum parsing
+        multipleEnergyRanges = False
+        if hasattr(maxEnergy, '__len__'):
+            assert len(maxEnergy) == len(
+                minEnergy
+            ), 'energy cutoff ranges must have the same length'
+            multipleEnergyRanges = True
+            lmin = []
+            lmax = []
+            for i in range(len(maxEnergy)):
+                lmin.append(ct.keVToAngstrom(maxEnergy[i]))
+                lmax.append(ct.keVToAngstrom(minEnergy[i]))
+        else:
+            lmin = ct.keVToAngstrom(maxEnergy)
+            lmax = ct.keVToAngstrom(minEnergy)
+
+        # parse grain parameters kwarg
+        if grain_params is None:
+            grain_params = np.atleast_2d(
+                np.hstack([np.zeros(6), ct.identity_6x1])
+            )
+        n_grains = len(grain_params)
+
+        # sample rotation
+        if rmat_s is None:
+            rmat_s = ct.identity_3x3
+
+        # dummy translation vector... make input
+        if tvec_s is None:
+            tvec_s = ct.zeros_3
+
+        # beam vector
+        if beam_vec is None:
+            beam_vec = ct.beam_vec
+
+        # =====================================================================
+        # LOOP OVER GRAINS
+        # =====================================================================
+
+        # pre-allocate output arrays
+        xy_det = np.nan * np.ones((n_grains, nhkls_tot, 2))
+        hkls_in = np.nan * np.ones((n_grains, 3, nhkls_tot))
+        angles = np.nan * np.ones((n_grains, nhkls_tot, 2))
+        dspacing = np.nan * np.ones((n_grains, nhkls_tot))
+        energy = np.nan * np.ones((n_grains, nhkls_tot))
+        for iG, gp in enumerate(grain_params):
+            rmat_c = make_rmat_of_expmap(gp[:3])
+            tvec_c = gp[3:6].reshape(3, 1)
+            vInv_s = mutil.vecMVToSymm(gp[6:].reshape(6, 1))
+
+            # stretch them: V^(-1) * R * Gc
+            gvec_s_str = np.dot(vInv_s, np.dot(rmat_c, gvec_c))
+            ghat_c_str = mutil.unitVector(np.dot(rmat_c.T, gvec_s_str))
+
+            # project
+            dpts = gvec_to_xy(
+                ghat_c_str.T,
+                self.rmat,
+                rmat_s,
+                rmat_c,
+                self.tvec,
+                tvec_s,
+                tvec_c,
+                beam_vec=beam_vec,
+            )
+
+            # check intersections with detector plane
+            canIntersect = ~np.isnan(dpts[:, 0])
+            npts_in = sum(canIntersect)
+
+            if np.any(canIntersect):
+                dpts = dpts[canIntersect, :].reshape(npts_in, 2)
+                dhkl = hkls[:, canIntersect].reshape(3, npts_in)
+
+                rmat_b = make_beam_rmat(beam_vec, ct.eta_vec)
+                # back to angles
+                tth_eta, gvec_l = xy_to_gvec(
+                    dpts,
+                    self.rmat,
+                    rmat_s,
+                    self.tvec,
+                    tvec_s,
+                    tvec_c,
+                    rmat_b=rmat_b,
+                )
+                tth_eta = np.vstack(tth_eta).T
+
+                # warp measured points
+                if self.distortion is not None:
+                    dpts = self.distortion.apply_inverse(dpts)
+
+                # plane spacings and energies
+                dsp = 1.0 / mutil.rowNorm(gvec_s_str[:, canIntersect].T)
+                wlen = 2 * dsp * np.sin(0.5 * tth_eta[:, 0])
+
+                # clip to detector panel
+                _, on_panel = self.clip_to_panel(dpts, buffer_edges=True)
+
+                if multipleEnergyRanges:
+                    validEnergy = np.zeros(len(wlen), dtype=bool)
+                    for i in range(len(lmin)):
+                        in_energy_range = np.logical_and(
+                            wlen >= lmin[i], wlen <= lmax[i]
+                        )
+                        validEnergy = validEnergy | in_energy_range
+                else:
+                    validEnergy = np.logical_and(wlen >= lmin, wlen <= lmax)
+
+                # index for valid reflections
+                keepers = np.where(np.logical_and(on_panel, validEnergy))[0]
+
+                # assign output arrays
+                xy_det[iG][keepers, :] = dpts[keepers, :]
+                hkls_in[iG][:, keepers] = dhkl[:, keepers]
+                angles[iG][keepers, :] = tth_eta[keepers, :]
+                dspacing[iG, keepers] = dsp[keepers]
+                energy[iG, keepers] = ct.keVToAngstrom(wlen[keepers])
+        return xy_det, hkls_in, angles, dspacing, energy
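+
+    # A hedged usage sketch for simulate_laue_pattern (editorial; the
+    # variable names are hypothetical):
+    #
+    #     >>> xy, hkls, angs, dsp, nrg = panel.simulate_laue_pattern(
+    #     ...     plane_data, minEnergy=8.0, maxEnergy=24.0)
+    #
+    # Each output is pre-allocated per grain, with NaN entries for
+    # reflections that miss the panel or fall outside the energy band.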
+
+    @staticmethod
+    def update_memoization_sizes(all_panels):
+        funcs = [
+            _polarization_factor,
+            _lorentz_factor,
+        ]
+
+        min_size = len(all_panels)
+        return Detector.increase_memoization_sizes(funcs, min_size)
+
+    @staticmethod
+    def increase_memoization_sizes(funcs, min_size):
+        for f in funcs:
+            cache_info = f.cache_info()
+            if cache_info['maxsize'] < min_size:
+                f.set_cache_maxsize(min_size)
+
+    def calc_physics_package_transmission(
+        self,
+        energy: np.floating,
+        rMat_s: np.ndarray,
+        physics_package: AbstractPhysicsPackage,
+    ) -> np.ndarray:
+        """Get the transmission through the physics package.
+
+        HED and HEDM samples need to be considered separately.
+        """
+        bvec = self.bvec
+        sample_normal = np.dot(rMat_s, [0., 0., np.sign(bvec[2])])
+        seca = 1./np.dot(bvec, sample_normal)
+
+        tth, eta = self.pixel_angles()
+        angs = np.vstack((tth.flatten(), eta.flatten(),
+                          np.zeros(tth.flatten().shape))).T
+
+        dvecs = angles_to_dvec(angs, beam_vec=bvec)
+
+        cosb = np.dot(dvecs, sample_normal)
+        '''angles for which secb <= 0 or close are diffracted beams
+        almost parallel to the sample surface or backscattered; we
+        can mask out these values by setting secb to NaN
+        '''
+        mask = np.logical_or(
+            cosb < 0,
+            np.isclose(
+                cosb,
+                0.,
+                atol=5E-2,
+            )
+        )
+        cosb[mask] = np.nan
+        secb = 1./cosb.reshape(self.shape)
+
+        T_sample = self.calc_transmission_sample(
+            seca, secb, energy, physics_package)
+        T_window = self.calc_transmission_window(
+            secb, energy, physics_package)
+
+        transmission_physics_package = T_sample * T_window
+        return transmission_physics_package
+
+    def calc_compton_physics_package_transmission(
+        self,
+        energy: np.floating,
+        rMat_s: np.ndarray,
+        physics_package: AbstractPhysicsPackage,
+    ) -> np.ndarray:
+        '''calculate the attenuation of inelastically
+        scattered photons.  Since these photons lose energy,
+        the attenuation length is angle dependent, hence a separate
+        routine from the elastically scattered absorption.
+        '''
+        bvec = self.bvec
+        sample_normal = np.dot(rMat_s, [0., 0., np.sign(bvec[2])])
+        seca = 1./np.dot(bvec, sample_normal)
+
+        tth, eta = self.pixel_angles()
+        angs = np.vstack((tth.flatten(), eta.flatten(),
+                          np.zeros(tth.flatten().shape))).T
+
+        dvecs = angles_to_dvec(angs, beam_vec=bvec)
+
+        cosb = np.dot(dvecs, sample_normal)
+        '''angles for which secb <= 0 or close are diffracted beams
+        almost parallel to the sample surface or backscattered; we
+        can mask out these values by setting secb to NaN
+        '''
+        mask = np.logical_or(
+            cosb < 0,
+            np.isclose(
+                cosb,
+                0.,
+                atol=5E-2,
+            )
+        )
+        cosb[mask] = np.nan
+        secb = 1./cosb.reshape(self.shape)
+
+        T_sample = self.calc_compton_transmission(
+            seca, secb, energy,
+            physics_package, 'sample')
+        T_window = self.calc_compton_transmission_window(
+            secb, energy, physics_package)
+
+        return T_sample * T_window
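+
+    # Editorial note on the geometry factors used above: with `a` the
+    # angle between the incident beam and the sample normal, and `b`
+    # the angle between a diffracted ray and that normal, sec(a) and
+    # sec(b) scale the path lengths through each layer; e.g. the
+    # window term reduces to a simple Beer-Lambert factor,
+    #
+    #     T_window = exp(-mu_w * t_w * sec(b))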
+
+    def calc_compton_window_transmission(
+        self,
+        energy: np.floating,
+        rMat_s: np.ndarray,
+        physics_package: AbstractPhysicsPackage,
+    ) -> np.ndarray:
+        '''calculate the attenuation of inelastically
+        scattered photons just from the window.
+        Since these photons lose energy, the attenuation length
+        is angle dependent, hence a separate routine from the
+        elastically scattered absorption.
+        '''
+        bvec = self.bvec
+        sample_normal = np.dot(rMat_s, [0., 0., np.sign(bvec[2])])
+        seca = 1./np.dot(bvec, sample_normal)
+
+        tth, eta = self.pixel_angles()
+        angs = np.vstack((tth.flatten(), eta.flatten(),
+                          np.zeros(tth.flatten().shape))).T
+
+        dvecs = angles_to_dvec(angs, beam_vec=bvec)
+
+        cosb = np.dot(dvecs, sample_normal)
+        '''angles for which secb <= 0 or close are diffracted beams
+        almost parallel to the sample surface or backscattered; we
+        can mask out these values by setting secb to NaN
+        '''
+        mask = np.logical_or(
+            cosb < 0,
+            np.isclose(
+                cosb,
+                0.,
+                atol=5E-2,
+            )
+        )
+        cosb[mask] = np.nan
+        secb = 1./cosb.reshape(self.shape)
+
+        T_window = self.calc_compton_transmission(
+            seca, secb, energy,
+            physics_package, 'window')
+        T_sample = self.calc_compton_transmission_sample(
+            seca, energy, physics_package)
+
+        return T_sample * T_window
+
+    def calc_transmission_sample(
+        self, seca: np.ndarray,
+        secb: np.ndarray, energy: np.floating,
+        physics_package: AbstractPhysicsPackage,
+    ) -> np.ndarray:
+        thickness_s = physics_package.sample_thickness  # in microns
+        if np.isclose(thickness_s, 0):
+            return np.ones(self.shape)
+
+        # in microns^-1
+        mu_s = 1./physics_package.sample_absorption_length(energy)
+        x = (mu_s*thickness_s)
+        pre = 1./x/(secb - seca)
+        num = np.exp(-x*seca) - np.exp(-x*secb)
+        return pre * num
+
+    def calc_transmission_window(
+        self, secb: np.ndarray, energy: np.floating,
+        physics_package: AbstractPhysicsPackage,
+    ) -> np.ndarray:
+        material_w = physics_package.window_material
+        thickness_w = physics_package.window_thickness  # in microns
+        if material_w is None or np.isclose(thickness_w, 0):
+            return np.ones(self.shape)
+
+        # in microns^-1
+        mu_w = 1./physics_package.window_absorption_length(energy)
+        return np.exp(-thickness_w*mu_w*secb)
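+
+    # Editorial sketch of calc_transmission_sample above: averaging
+    # Beer-Lambert attenuation over the scattering depth z in a sample
+    # of thickness t gives
+    #
+    #     T = (1/t) * integral_0^t exp(-mu*z*sec(a))
+    #                            * exp(-mu*(t - z)*sec(b)) dz
+    #       = (exp(-mu*t*sec(a)) - exp(-mu*t*sec(b)))
+    #         / (mu*t*(sec(b) - sec(a)))
+    #
+    # which is exactly the `pre * num` expression returned there.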
+
+    def calc_compton_transmission(
+        self,
+        seca: np.ndarray,
+        secb: np.ndarray,
+        energy: np.floating,
+        physics_package: AbstractPhysicsPackage,
+        pp_layer: str,
+    ) -> np.ndarray:
+
+        if pp_layer == 'sample':
+            formula = physics_package.sample_material
+            density = physics_package.sample_density
+            thickness = physics_package.sample_thickness
+            mu = 1./physics_package.sample_absorption_length(energy)
+            mu_prime = 1./self.pixel_compton_attenuation_length(
+                energy, density, formula,
+            )
+        elif pp_layer == 'window':
+            formula = physics_package.window_material
+            if formula is None:
+                return np.ones(self.shape)
+
+            density = physics_package.window_density
+            thickness = physics_package.window_thickness
+            mu = 1./physics_package.window_absorption_length(energy)
+            mu_prime = 1./self.pixel_compton_attenuation_length(
+                energy, density, formula)
+
+        if thickness <= 0:
+            return np.ones(self.shape)
+
+        x1 = mu*thickness*seca
+        x2 = mu_prime*thickness*secb
+        num = (np.exp(-x1) - np.exp(-x2))
+        return -num/(x1 - x2)
+
+    def calc_compton_transmission_sample(
+        self,
+        seca: np.ndarray,
+        energy: np.floating,
+        physics_package: AbstractPhysicsPackage,
+    ) -> np.ndarray:
+        thickness_s = physics_package.sample_thickness  # in microns
+
+        mu_s = 1./physics_package.sample_absorption_length(
+            energy)
+        return np.exp(-mu_s*thickness_s*seca)
+
+    def calc_compton_transmission_window(
+        self,
+        secb: np.ndarray,
+        energy: np.floating,
+        physics_package: AbstractPhysicsPackage,
+    ) -> np.ndarray:
+        formula = physics_package.window_material
+        if formula is None:
+            return np.ones(self.shape)
+
+        density = physics_package.window_density  # in g/cc
+        thickness_w = physics_package.window_thickness  # in microns
+
+        mu_w_prime = 1./self.pixel_compton_attenuation_length(
+            energy, density, formula)
+        return np.exp(-mu_w_prime*thickness_w*secb)
+
+    def calc_effective_pinhole_area(
+        self, physics_package: AbstractPhysicsPackage
+    ) -> np.ndarray:
+        """get the effective pinhole area correction
+        """
+        if (np.isclose(physics_package.pinhole_diameter, 0)
+                or np.isclose(physics_package.pinhole_thickness, 0)):
+            return np.ones(self.shape)
+
+        hod = (physics_package.pinhole_thickness /
+               physics_package.pinhole_diameter)
+        bvec = self.bvec
+
+        tth, eta = self.pixel_angles()
+        angs = np.vstack((tth.flatten(), eta.flatten(),
+                          np.zeros(tth.flatten().shape))).T
+        dvecs = angles_to_dvec(angs, beam_vec=bvec)
+
+        cth = -dvecs[:, 2].reshape(self.shape)
+        tanth = np.tan(np.arccos(cth))
+        f = hod*tanth
+        f[np.abs(f) > 1.] = np.nan
+        asinf = np.arcsin(f)
+        return 2 / np.pi * cth * (np.pi / 2 - asinf - f * np.cos(asinf))
+
+    def calc_transmission_generic(
+        self,
+        secb: np.ndarray,
+        thickness: np.floating,
+        absorption_length: np.floating,
+    ) -> np.ndarray:
+        if np.isclose(thickness, 0):
+            return np.ones(self.shape)
+
+        mu = 1./absorption_length  # in microns^-1
+        return np.exp(-thickness*mu*secb)
+
+    def calc_transmission_phosphor(
+        self,
+        secb: np.ndarray,
+        thickness: np.floating,
+        readout_length: np.floating,
+        absorption_length: np.floating,
+        energy: np.floating,
+        pre_U0: np.floating,
+    ) -> np.ndarray:
+        if np.isclose(thickness, 0):
+            return np.ones(self.shape)
+
+        f1 = absorption_length*thickness
+        f2 = absorption_length*readout_length
+        arg = (secb + 1/f2)
+        return pre_U0 * energy*((1.0 - np.exp(-f1*arg))/arg)
+
+# =============================================================================
+# UTILITY METHODS
+# =============================================================================
+
+
+def _fix_indices(idx, lo, hi):
+    nidx = np.array(idx)
+    off_lo = nidx < lo
+    off_hi = nidx > hi
+    nidx[off_lo] = lo
+    nidx[off_hi] = hi
+    return nidx
+
+
+def _row_edge_vec(rows, pixel_size_row):
+    return pixel_size_row * (0.5 * rows - np.arange(rows + 1))
+
+
+def _col_edge_vec(cols, pixel_size_col):
+    return pixel_size_col * (np.arange(cols + 1) - 0.5 * cols)
+
+
+# FIXME find a better place for this, and maybe include loop over pixels
+@numba.njit(nogil=True, cache=True)
+def _solid_angle_of_triangle(vtx_list):
+    norms = np.sqrt(np.sum(vtx_list * vtx_list, axis=1))
+    norms_prod = norms[0] * norms[1] * norms[2]
+    scalar_triple_product = np.dot(
+        vtx_list[0], np.cross(vtx_list[2], vtx_list[1])
+    )
+    denominator = (
+        norms_prod
+        + norms[0] * np.dot(vtx_list[1], vtx_list[2])
+        + norms[1] * np.dot(vtx_list[2], vtx_list[0])
+        + norms[2] * np.dot(vtx_list[0], vtx_list[1])
+    )
+
+    return 2.0 * np.arctan2(scalar_triple_product, denominator)
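+
+
+# Editorial note: the expression above is the Van Oosterom & Strackee
+# formula for the solid angle subtended at the origin by a plane
+# triangle.  As a quick (hypothetical) check, the triangle spanned by
+# the three unit basis vectors subtends one octant of the sphere:
+#
+#     >>> abs(_solid_angle_of_triangle(np.eye(3)))  # doctest: +SKIP
+#     1.5707963...  # i.e. 4*pi/8; the sign depends on vertex winding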
diff --git a/hexrd/powder/instrument/hedm_instrument.py b/hexrd/powder/instrument/hedm_instrument.py
new file mode 100644
index 000000000..1d768b47c
--- /dev/null
+++ b/hexrd/powder/instrument/hedm_instrument.py
@@ -0,0 +1,2747 @@
+# -*- coding: utf-8 -*-
+# =============================================================================
+# Copyright (c) 2012, Lawrence Livermore National Security, LLC.
+# Produced at the Lawrence Livermore National Laboratory.
+# Written by Joel Bernier and others.
+# LLNL-CODE-529294.
+# All rights reserved.
+#
+# This file is part of HEXRD. For details on downloading the source,
+# see the file COPYING.
+#
+# Please also see the file LICENSE.
+#
+# This program is free software; you can redistribute it and/or modify it under
+# the terms of the GNU Lesser General Public License (as published by the Free
+# Software Foundation) version 2.1 dated February 1999.
+#
+# This program is distributed in the hope that it will be useful, but
+# WITHOUT ANY WARRANTY; without even the IMPLIED WARRANTY OF MERCHANTABILITY
+# or FITNESS FOR A PARTICULAR PURPOSE. See the terms and conditions of the
+# GNU General Public License for more details.
+#
+# You should have received a copy of the GNU Lesser General Public
+# License along with this program (see file LICENSE); if not, write to
+# the Free Software Foundation, Inc., 59 Temple Place, Suite 330,
+# Boston, MA 02111-1307 USA or visit <http://www.gnu.org/licenses/>.
+# =============================================================================
+"""
+Created on Fri Dec 9 13:05:27 2016
+
+@author: bernier2
+"""
+from contextlib import contextmanager
+import copy
+import logging
+import os
+from concurrent.futures import ProcessPoolExecutor, ThreadPoolExecutor
+from functools import partial
+from typing import Optional
+
+from tqdm import tqdm
+
+import yaml
+
+import h5py
+
+import numpy as np
+
+from io import IOBase
+
+from scipy import ndimage
+from scipy.linalg import logm
+from skimage.measure import regionprops
+
+from hexrd import constants
+from hexrd.imageseries import ImageSeries
+from hexrd.imageseries.process import ProcessedImageSeries
+from hexrd.imageseries.omega import OmegaImageSeries
+from hexrd.fitting.utils import fit_ring
+from hexrd.gridutil import make_tolerance_grid
+from hexrd import matrixutil as mutil
+from hexrd.transforms.xfcapi import (
+    angles_to_gvec,
+    gvec_to_xy,
+    make_sample_rmat,
+    make_rmat_of_expmap,
+    unit_vector,
+)
+from hexrd import xrdutil
+from hexrd.material.crystallography import PlaneData
+from hexrd import constants as ct
+from hexrd.rotations import mapAngle
+from hexrd import distortion as distortion_pkg
+from hexrd.utils.concurrent import distribute_tasks
+from hexrd.utils.hdf5 import unwrap_dict_to_h5, unwrap_h5_to_dict
+from hexrd.utils.yaml import NumpyToNativeDumper
+from hexrd.valunits import valWUnit
+from hexrd.wppf import LeBail
+
+from .cylindrical_detector import CylindricalDetector
+from .detector import (
+    beam_energy_DFLT,
+    Detector,
+    max_workers_DFLT,
+)
+from .planar_detector import PlanarDetector
+
+from skimage.draw import polygon
+from skimage.util import random_noise
+from hexrd.wppf import wppfsupport
+
+try:
+    from fast_histogram import histogram1d
+    fast_histogram = True
+except ImportError:
+    from numpy import histogram as histogram1d
+    fast_histogram = False
+
+logger = logging.getLogger()
+logger.setLevel('INFO')
+
+# =============================================================================
+# PARAMETERS
+# =============================================================================
+
+instrument_name_DFLT = 'instrument'
+
+beam_vec_DFLT = ct.beam_vec
+source_distance_DFLT = np.inf
+
+eta_vec_DFLT = ct.eta_vec
+
+panel_id_DFLT = 'generic'
+nrows_DFLT = 2048
+ncols_DFLT = 2048
+pixel_size_DFLT = (0.2, 0.2)
+
+tilt_params_DFLT = np.zeros(3)
+t_vec_d_DFLT = np.r_[0., 0., -1000.]
+
+chi_DFLT = 0.
+t_vec_s_DFLT = np.zeros(3)
+
+multi_ims_key = ct.shared_ims_key
+ims_classes = (ImageSeries, ProcessedImageSeries, OmegaImageSeries)
+
+buffer_key = 'buffer'
+distortion_key = 'distortion'
+
+# =============================================================================
+# UTILITY METHODS
+# =============================================================================
+
+
+def generate_chunks(nrows, ncols, base_nrows, base_ncols,
+                    row_gap=0, col_gap=0):
+    """
+    Generate chunking data for regularly tiled composite detectors.
+
+    Parameters
+    ----------
+    nrows : int
+        Number of tiles along the row direction.
+    ncols : int
+        Number of tiles along the column direction.
+    base_nrows : int
+        Number of pixel rows in each base tile.
+    base_ncols : int
+        Number of pixel columns in each base tile.
+    row_gap : int, optional
+        Gap between tile rows, in pixels.  The default is 0.
+    col_gap : int, optional
+        Gap between tile columns, in pixels.  The default is 0.
+
+    Returns
+    -------
+    rects : array_like
+        The (nrows*ncols, ) list of ROI specs (see Notes).
+    labels : array_like
+        The (nrows*ncols, ) list of ROI (i, j) matrix indexing labels 'i_j'.
+
+    Notes
+    -----
+    ProcessedImageSeries needs a (2, 2) array for the 'rect' kwarg:
+        [[row_start, row_stop],
+         [col_start, col_stop]]
+    """
+    row_starts = np.array([i*(base_nrows + row_gap) for i in range(nrows)])
+    col_starts = np.array([i*(base_ncols + col_gap) for i in range(ncols)])
+    rr = np.vstack([row_starts, row_starts + base_nrows])
+    cc = np.vstack([col_starts, col_starts + base_ncols])
+    rects = []
+    labels = []
+    for i in range(nrows):
+        for j in range(ncols):
+            this_rect = np.array(
+                [[rr[0, i], rr[1, i]],
+                 [cc[0, j], cc[1, j]]]
+            )
+            rects.append(this_rect)
+            labels.append('%d_%d' % (i, j))
+    return rects, labels
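+
+
+# A hedged usage sketch for generate_chunks (editorial; the numbers
+# are hypothetical): a 2x2 composite of 1024x1024 base tiles yields
+# four ROI rects and matching labels:
+#
+#     >>> rects, labels = generate_chunks(2, 2, 1024, 1024)
+#     >>> labels
+#     ['0_0', '0_1', '1_0', '1_1']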
+
+
+def chunk_instrument(instr, rects, labels, use_roi=False):
+    """
+    Generate chunked config for regularly tiled composite detectors.
+
+    Parameters
+    ----------
+    instr : HEDMInstrument
+        The instrument to chunk.
+    rects : array_like
+        The list of ROI specs from generate_chunks().
+    labels : array_like
+        The list of 'i_j' labels from generate_chunks().
+    use_roi : bool, optional
+        If True, add the chunk ROI to each sub-panel config.
+        The default is False.
+
+    Returns
+    -------
+    new_icfg_dict : dict
+        The chunked instrument configuration dictionary.
+
+    """
+    icfg_dict = instr.write_config()
+    new_icfg_dict = dict(beam=icfg_dict['beam'],
+                         oscillation_stage=icfg_dict['oscillation_stage'],
+                         detectors={})
+    for panel_id, panel in instr.detectors.items():
+        pcfg_dict = panel.config_dict(instr.chi, instr.tvec)['detector']
+
+        for pnum, pdata in enumerate(zip(rects, labels)):
+            rect, label = pdata
+            panel_name = f'{panel_id}_{label}'
+
+            row_col_dim = np.diff(rect)  # (2, 1)
+            shape = tuple(row_col_dim.flatten())
+            center = (rect[:, 0].reshape(2, 1) + 0.5*row_col_dim)
+
+            sp_tvec = np.concatenate(
+                [panel.pixelToCart(center.T).flatten(), np.zeros(1)]
+            )
+
+            tvec = np.dot(panel.rmat, sp_tvec) + panel.tvec
+
+            # new config dict
+            tmp_cfg = copy.deepcopy(pcfg_dict)
+
+            # fix sizes
+            tmp_cfg['pixels']['rows'] = shape[0]
+            tmp_cfg['pixels']['columns'] = shape[1]
+            if use_roi:
+                tmp_cfg['pixels']['roi'] = (rect[0][0], rect[1][0])
+
+            # update tvec
+            tmp_cfg['transform']['translation'] = tvec.tolist()
+
+            new_icfg_dict['detectors'][panel_name] = copy.deepcopy(tmp_cfg)
+
+            if panel.panel_buffer is not None:
+                if panel.panel_buffer.ndim == 2:  # have a mask array!
+                    submask = panel.panel_buffer[
+                        rect[0, 0]:rect[0, 1], rect[1, 0]:rect[1, 1]
+                    ]
+                    new_icfg_dict['detectors'][panel_name]['buffer'] = submask
+    return new_icfg_dict
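+
+
+# Editorial sketch: chunk_instrument pairs naturally with the output
+# of generate_chunks, e.g. (with `instr` an existing HEDMInstrument,
+# hypothetical here):
+#
+#     >>> rects, labels = generate_chunks(2, 2, 1024, 1024)
+#     >>> new_cfg = chunk_instrument(instr, rects, labels, use_roi=True)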
+
+
+def _parse_imgser_dict(imgser_dict, det_key, roi=None):
+    """
+    Associate a dict of imageseries with the target panel(s).
+
+    Parameters
+    ----------
+    imgser_dict : dict
+        The input dict of imageseries.  Either `det_key` is in imgser_dict,
+        or the shared key is.  Entries can be an ImageSeries object or a
+        2- or 3-d ndarray of images.
+    det_key : str
+        The target detector key.
+    roi : tuple or None, optional
+        The roi of the target images.  Format is
+            ((row_start, row_stop), (col_start, col_stop))
+        The stops are used in the normal sense of a slice.  The default
+        is None.
+
+    Raises
+    ------
+    RuntimeError
+        If neither `det_key` nor the shared key is in the input imgser_dict;
+        also, if the shared key is specified but the roi is None.
+
+    Returns
+    -------
+    ims : hexrd.imageseries
+        The desired imageseries object.
+
+    """
+    # grab imageseries for this detector
+    try:
+        ims = imgser_dict[det_key]
+    except KeyError:
+        matched_det_keys = [det_key in k for k in imgser_dict]
+        if multi_ims_key in imgser_dict:
+            images_in = imgser_dict[multi_ims_key]
+        elif np.any(matched_det_keys):
+            if sum(matched_det_keys) != 1:
+                raise RuntimeError(
+                    f"multiple entries found for '{det_key}'"
+                )
+            # use boolean array to index the proper key
+            # !!! these should be in the same order
+            img_keys = np.asarray(list(imgser_dict.keys()))
+            matched_det_key = img_keys[matched_det_keys][0]  # !!! only one
+            images_in = imgser_dict[matched_det_key]
+        else:
+            raise RuntimeError(
+                f"neither '{det_key}' nor '{multi_ims_key}' found "
+                + "in imageseries input"
+            )
+
+        # have images now
+        if roi is None:
+            raise RuntimeError(
+                "roi must be specified to use shared imageseries"
+            )
+
+        if isinstance(images_in, ims_classes):
+            # input is an imageseries of some kind
+            ims = ProcessedImageSeries(images_in, [('rectangle', roi), ])
+            if isinstance(images_in, OmegaImageSeries):
+                # if it was an OmegaImageSeries, must re-cast
+                ims = OmegaImageSeries(ims)
+        elif isinstance(images_in, np.ndarray):
+            # 2- or 3-d array of images
+            ndim = images_in.ndim
+            if ndim == 2:
+                ims = images_in[roi[0][0]:roi[0][1], roi[1][0]:roi[1][1]]
+            elif ndim == 3:
+                nrows = roi[0][1] - roi[0][0]
+                ncols = roi[1][1] - roi[1][0]
+                n_images = len(images_in)
+                ims = np.empty((n_images, nrows, ncols),
+                               dtype=images_in.dtype)
+                for i, image in enumerate(images_in):
+                    ims[i, :, :] = \
+                        image[roi[0][0]:roi[0][1], roi[1][0]:roi[1][1]]
+            else:
+                raise RuntimeError(
+                    f"image input dim must be 2 or 3; you gave {ndim}"
+                )
+    return ims
+
+
+def calc_beam_vec(azim, pola):
+    """
+    Calculate unit beam propagation vector from
+    spherical coordinate spec in DEGREES.
+
+    ...MAY CHANGE; THIS IS ALSO LOCATED IN XRDUTIL!
+    """
+    tht = np.radians(azim)
+    phi = np.radians(pola)
+    bv = np.r_[
+        np.sin(phi)*np.cos(tht),
+        np.cos(phi),
+        np.sin(phi)*np.sin(tht)]
+    return -bv
+
+
+def calc_angles_from_beam_vec(bvec):
+    """
+    Return the azimuth and polar angle from a beam
+    vector
+    """
+    bvec = np.atleast_1d(bvec).flatten()
+    nvec = unit_vector(-bvec)
+    azim = float(
+        np.degrees(np.arctan2(nvec[2], nvec[0]))
+    )
+    pola = float(np.degrees(np.arccos(nvec[1])))
+    return azim, pola
+
+
+def migrate_instrument_config(instrument_config):
+    """utility function to generate old instrument config dictionary"""
+    cfg_list = []
+    for detector_id in instrument_config['detectors']:
+        cfg_list.append(
+            dict(
+                detector=instrument_config['detectors'][detector_id],
+                oscillation_stage=instrument_config['oscillation_stage'],
+            )
+        )
+    return cfg_list
+
+
+def angle_in_range(angle, ranges, ccw=True, units='degrees'):
+    """
+    Return the index of the first wedge the angle is found in
+
+    WARNING: always clockwise; assumes wedges are not overlapping
+    """
+    tau = 360.
+    if units.lower() == 'radians':
+        tau = 2*np.pi
+    w = np.nan
+    for i, wedge in enumerate(ranges):
+        amin = wedge[0]
+        amax = wedge[1]
+        check = amin + np.mod(angle - amin, tau)
+        if check < amax:
+            w = i
+            break
+    return w
+
+
+# ???: move to gridutil?
+def centers_of_edge_vec(edges):
+    assert np.asarray(edges).ndim == 1, "edges must be 1-d"
+    return np.average(np.vstack([edges[:-1], edges[1:]]), axis=0)
+
+
+def max_tth(instr):
+    """
+    Return the maximum Bragg angle (in radians) subtended by the instrument.
+
+    Parameters
+    ----------
+    instr : hexrd.instrument.HEDMInstrument instance
+        the instrument class to evaluate.
+
+    Returns
+    -------
+    tth_max : float
+        The maximum observable Bragg angle by the instrument in radians.
+    """
+    tth_max = 0.
+    for det in instr.detectors.values():
+        ptth, peta = det.pixel_angles()
+        tth_max = max(np.max(ptth), tth_max)
+    return tth_max
+
+
+def pixel_resolution(instr):
+    """
+    Return the minimum, median, and maximum angular
+    resolution of the instrument.
+
+    Parameters
+    ----------
+    instr : HEDMInstrument instance
+        An instrument.
+
+    Returns
+    -------
+    tth_stats : tuple of float
+        min/median/max tth resolution in radians.
+    eta_stats : tuple of float
+        min/median/max eta resolution in radians.
+
+    """
+    max_tth = np.inf
+    max_eta = np.inf
+    min_tth = -np.inf
+    min_eta = -np.inf
+    ang_ps_full = []
+    for panel in instr.detectors.values():
+        angps = panel.angularPixelSize(
+            np.stack(
+                panel.pixel_coords,
+                axis=0
+            ).reshape(2, np.cumprod(panel.shape)[-1]).T
+        )
+        ang_ps_full.append(angps)
+        max_tth = min(max_tth, np.min(angps[:, 0]))
+        max_eta = min(max_eta, np.min(angps[:, 1]))
+        min_tth = max(min_tth, np.max(angps[:, 0]))
+        min_eta = max(min_eta, np.max(angps[:, 1]))
+    med_tth, med_eta = np.median(np.vstack(ang_ps_full), axis=0).flatten()
+    return (min_tth, med_tth, max_tth), (min_eta, med_eta, max_eta)
+
+
+def max_resolution(instr):
+    """
+    Return the maximum angular resolution of the instrument.
+
+    Parameters
+    ----------
+    instr : HEDMInstrument instance
+        An instrument.
+
+    Returns
+    -------
+    max_tth : float
+        Maximum tth resolution in radians.
+    max_eta : float
+        Maximum eta resolution in radians.
+
+    """
+    max_tth = np.inf
+    max_eta = np.inf
+    for panel in instr.detectors.values():
+        angps = panel.angularPixelSize(
+            np.stack(
+                panel.pixel_coords,
+                axis=0
+            ).reshape(2, np.cumprod(panel.shape)[-1]).T
+        )
+        max_tth = min(max_tth, np.min(angps[:, 0]))
+        max_eta = min(max_eta, np.min(angps[:, 1]))
+    return max_tth, max_eta
+
+
+def _gaussian_dist(x, cen, fwhm):
+    sigm = fwhm/(2*np.sqrt(2*np.log(2)))
+    return np.exp(-0.5*(x - cen)**2/sigm**2)
+
+
+def _sigma_to_fwhm(sigm):
+    return sigm*ct.sigma_to_fwhm
+
+
+def _fwhm_to_sigma(fwhm):
+    return fwhm/ct.sigma_to_fwhm
+
+
+# =============================================================================
+# CLASSES
+# =============================================================================
+
+
+class HEDMInstrument(object):
+    """
+    Abstraction of XRD instrument.
+
+    * Distortion needs to be moved to a class with registry; tuple unworkable
+    * where should reference eta be defined?  currently set to default config
+    """
+
+    def __init__(self, instrument_config=None,
+                 image_series=None, eta_vector=None,
+                 instrument_name=None, tilt_calibration_mapping=None,
+                 max_workers=max_workers_DFLT,
+                 physics_package=None,
+                 active_beam_name: Optional[str] = None):
+        self._id = instrument_name_DFLT
+
+        self._active_beam_name = active_beam_name
+        self._beam_dict = {}
+
+        if eta_vector is None:
+            self._eta_vector = eta_vec_DFLT
+        else:
+            self._eta_vector = eta_vector
+
+        self.max_workers = max_workers
+
+        self.physics_package = physics_package
+
+        if instrument_config is None:
+            # Default instrument
+            if instrument_name is not None:
+                self._id = instrument_name
+            self._num_panels = 1
+            self._create_default_beam()
+
+            # FIXME: must add cylindrical
+            self._detectors = {
+                panel_id_DFLT: PlanarDetector(
+                    rows=nrows_DFLT, cols=ncols_DFLT,
+                    pixel_size=pixel_size_DFLT,
+                    tvec=t_vec_d_DFLT,
+                    tilt=tilt_params_DFLT,
+                    bvec=self.beam_vector,
+                    xrs_dist=self.source_distance,
+                    evec=self._eta_vector,
+                    distortion=None,
+                    roi=None, group=None,
+                    max_workers=self.max_workers),
+            }
+
+            self._tvec = t_vec_s_DFLT
+            self._chi = chi_DFLT
+        else:
+            if isinstance(instrument_config, h5py.File):
+                tmp = {}
+                unwrap_h5_to_dict(instrument_config, tmp)
+                instrument_config = tmp['instrument']
+            elif not isinstance(instrument_config, dict):
+                raise RuntimeError(
+                    "instrument_config must be either an HDF5 file object "
+                    + "or a dictionary.  You gave a %s"
+                    % type(instrument_config)
+                )
+            if instrument_name is None:
+                if 'id' in instrument_config:
+                    self._id = instrument_config['id']
+            else:
+                self._id = instrument_name
+
+            self._num_panels = len(instrument_config['detectors'])
+
+            if instrument_config.get('physics_package', None) is not None:
+                self.physics_package = instrument_config['physics_package']
+
+            xrs_config = instrument_config['beam']
+            is_single_beam = (
+                'energy' in xrs_config and
+                'vector' in xrs_config
+            )
+            if is_single_beam:
+                # Assume single beam; load it the same way as multi-beam
+                self._create_default_beam()
+                xrs_config = {self.active_beam_name: xrs_config}
+
+            # Multi beam load
+            for beam_name, beam in xrs_config.items():
+                self._beam_dict[beam_name] = {
+                    'energy': beam['energy'],
+                    'vector': calc_beam_vec(
+                        beam['vector']['azimuth'],
+                        beam['vector']['polar_angle'],
+                    ),
+                    'distance': beam.get('source_distance', np.inf),
+                }
+
+            # Set the active beam name if not set already
+            if self._active_beam_name is None:
+                self._active_beam_name = next(iter(self._beam_dict))
+
+            # now build detector dict
+            detectors_config = instrument_config['detectors']
+            det_dict = dict.fromkeys(detectors_config)
+            for det_id, det_info in detectors_config.items():
+                det_group = det_info.get('group')  # optional detector group
+                pixel_info = det_info['pixels']
+                affine_info = det_info['transform']
+                detector_type = det_info.get('detector_type', 'planar')
+                filter = det_info.get('filter', None)
+                coating = det_info.get('coating', None)
+                phosphor = det_info.get('phosphor', None)
+                try:
+                    saturation_level = det_info['saturation_level']
+                except KeyError:
+                    saturation_level = 2**16
+                shape = (pixel_info['rows'], pixel_info['columns'])
+
+                panel_buffer = None
+                if buffer_key in det_info:
+                    det_buffer = det_info[buffer_key]
+                    if det_buffer is not None:
+                        if isinstance(det_buffer, np.ndarray):
+                            if det_buffer.ndim == 2:
+                                if det_buffer.shape != shape:
+                                    msg = (
+                                        f'Buffer shape for {det_id} '
+                                        f'({det_buffer.shape}) does not match '
+                                        f'detector shape ({shape})'
+                                    )
+                                    raise BufferShapeMismatchError(msg)
+                            else:
+                                assert len(det_buffer) == 2
+                            panel_buffer = det_buffer
+                        elif isinstance(det_buffer, list):
+                            panel_buffer = np.asarray(det_buffer)
+                        elif np.isscalar(det_buffer):
+                            panel_buffer = det_buffer*np.ones(2)
+                        else:
+                            raise RuntimeError(
+                                "panel buffer spec invalid for %s" % det_id
+                            )
+
+                # optional roi
+                roi = pixel_info.get('roi')
+
+                # handle distortion
+                distortion = None
+                if distortion_key in det_info:
+                    distortion_cfg = det_info[distortion_key]
+                    if distortion_cfg is not None:
+                        try:
+                            func_name = distortion_cfg['function_name']
+                            dparams = distortion_cfg['parameters']
+                            distortion = distortion_pkg.get_mapping(
+                                func_name, dparams
+                            )
+                        except KeyError:
+                            raise RuntimeError(
+                                "problem with distortion specification"
+                            )
+                if detector_type.lower() not in DETECTOR_TYPES:
+                    msg = f'Unknown detector type: {detector_type}'
+                    raise NotImplementedError(msg)
+
+                DetectorClass = DETECTOR_TYPES[detector_type.lower()]
+                kwargs = dict(
+                    name=det_id,
+                    rows=pixel_info['rows'],
+                    cols=pixel_info['columns'],
+                    pixel_size=pixel_info['size'],
+                    panel_buffer=panel_buffer,
+                    saturation_level=saturation_level,
+                    tvec=affine_info['translation'],
+                    tilt=affine_info['tilt'],
+                    bvec=self.beam_vector,
+                    xrs_dist=self.source_distance,
+                    evec=self._eta_vector,
+                    distortion=distortion,
+                    roi=roi,
+                    group=det_group,
+                    max_workers=self.max_workers,
+                    detector_filter=filter,
+                    detector_coating=coating,
+                    phosphor=phosphor,
+                )
+
+                if DetectorClass is CylindricalDetector:
+                    # Add cylindrical detector kwargs
+                    kwargs['radius'] = det_info.get('radius', 49.51)
+
+                det_dict[det_id] = DetectorClass(**kwargs)
+
+            self._detectors = det_dict
+
+            self._tvec = np.r_[
+                instrument_config['oscillation_stage']['translation']
+            ]
+            self._chi = instrument_config['oscillation_stage']['chi']
+
+        # grab angles from beam vec
+        # !!! these are in DEGREES!
+        azim, pola = calc_angles_from_beam_vec(self.beam_vector)
+
+        self.update_memoization_sizes()
+
+    @property
+    def mean_detector_center(self) -> np.ndarray:
+        """Return the mean center for all detectors"""
+        centers = np.array([panel.tvec for panel in self.detectors.values()])
+        return centers.sum(axis=0) / len(centers)
+
+    def mean_group_center(self, group: str) -> np.ndarray:
+        """Return the mean center for detectors belonging to a group"""
+        centers = np.array([
+            x.tvec for x in self.detectors_in_group(group).values()
+        ])
+        return centers.sum(axis=0) / len(centers)
+
+    @property
+    def detector_groups(self) -> list[str]:
+        groups = []
+        for panel in self.detectors.values():
+            group = panel.group
+            if group is not None and group not in groups:
+                groups.append(group)
+
+        return groups
+
+    def detectors_in_group(self, group: str) -> dict[str, Detector]:
+        return {k: v for k, v in self.detectors.items() if v.group == group}
+
+    # properties for physical size of rectangular detector
+    @property
+    def id(self):
+        return self._id
+
+    @property
+    def num_panels(self):
+        return self._num_panels
+
+    @property
+    def detectors(self):
+        return self._detectors
+
+    @property
+    def detector_parameters(self):
+        pdict = {}
+        for key, panel in self.detectors.items():
+            pdict[key] = panel.config_dict(
+                self.chi, self.tvec,
+                beam_energy=self.beam_energy,
+                beam_vector=self.beam_vector,
+                style='hdf5'
+            )
+        return pdict
+
+    @property
+    def tvec(self):
+        return self._tvec
+
+    @tvec.setter
+    def tvec(self, x):
+        x = np.array(x).flatten()
+        assert len(x) == 3, 'input must have length = 3'
+        self._tvec = x
+
+    @property
+    def chi(self):
+        return self._chi
+
+    @chi.setter
+    def chi(self, x):
+        self._chi = float(x)
+
+    @property
+    def beam_energy(self) -> float:
+        return self.active_beam['energy']
+
+    @beam_energy.setter
+    def beam_energy(self, x: float):
+        self.active_beam['energy'] = float(x)
+        self.beam_dict_modified()
+
+    @property
+    def beam_wavelength(self):
+        return ct.keVToAngstrom(self.beam_energy)
+
+    @property
+    def has_multi_beam(self) -> bool:
+        return len(self.beam_dict) > 1
+
+    @property
+    def beam_dict(self) -> dict:
+        return self._beam_dict
+
+    def _create_default_beam(self):
+        name = 'XRS1'
+        self._beam_dict[name] = {
+            'energy': beam_energy_DFLT,
+            'vector': beam_vec_DFLT.copy(),
+            'distance': np.inf,
+        }
+
+        if self._active_beam_name is None:
+            self._active_beam_name = name
+
+    @property
+    def beam_names(self) -> list[str]:
+        return list(self.beam_dict)
+
+    def xrs_beam_energy(self, beam_name: Optional[str]) -> float:
+        if beam_name is None:
+            beam_name = self.active_beam_name
+
+        return self.beam_dict[beam_name]['energy']
+
+    @property
+    def active_beam_name(self) -> str:
+        return self._active_beam_name
+
+    @active_beam_name.setter
+    def active_beam_name(self, name: str):
+        if name not in self.beam_dict:
+            raise RuntimeError(
+                f'"{name}" is not present in "{self.beam_names}"'
+            )
+
+        self._active_beam_name = name
+
+        # Update anything beam related where we need to
+        self._update_panel_beams()
+
+    def beam_dict_modified(self):
+        # A function to call to indicate that the beam dict was modified.
+        # Update anything beam related where we need to
+        self._update_panel_beams()
+
+    @property
+    def active_beam(self) -> dict:
+        return self.beam_dict[self.active_beam_name]
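+
+    # A hedged usage sketch for the multi-beam API above (editorial;
+    # the beam names are hypothetical):
+    #
+    #     >>> instr.beam_names
+    #     ['XRS1', 'XRS2']
+    #     >>> instr.active_beam_name = 'XRS2'  # re-syncs panel bvecs
+    #     >>> instr.beam_energy                # energy of 'XRS2'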
+
+    def _update_panel_beams(self):
+        # FIXME: maybe we shouldn't store these on the panels?
+        # Might be hard to fix, though...
+        for panel in self.detectors.values():
+            panel.bvec = self.beam_vector
+            panel.xrs_dist = self.source_distance
+
+    @property
+    def beam_vector(self) -> np.ndarray:
+        return self.active_beam['vector']
+
+    @beam_vector.setter
+    def beam_vector(self, x: np.ndarray):
+        x = np.array(x).flatten()
+        if len(x) == 3:
+            assert sum(x*x) > 1-ct.sqrt_epsf, \
+                'input must have length = 3 and have unit magnitude'
+            bvec = x
+        elif len(x) == 2:
+            bvec = calc_beam_vec(*x)
+        else:
+            raise RuntimeError("input must be a unit vector or angle pair")
+
+        # Modify the beam vector for the active beam dict
+        self.active_beam['vector'] = bvec
+        self.beam_dict_modified()
+
+    @property
+    def source_distance(self):
+        return self.active_beam['distance']
+
+    @source_distance.setter
+    def source_distance(self, x):
+        assert np.isscalar(x), \
+            f"'source_distance' must be a scalar; you input '{x}'"
+        self.active_beam['distance'] = x
+        self.beam_dict_modified()
+
+    @property
+    def eta_vector(self):
+        return self._eta_vector
+
+    @eta_vector.setter
+    def eta_vector(self, x):
+        x = np.array(x).flatten()
+        assert len(x) == 3 and sum(x*x) > 1-ct.sqrt_epsf, \
+            'input must have length = 3 and have unit magnitude'
+        self._eta_vector = x
+        # ...maybe change dictionary item behavior for 3.x compatibility?
+        for detector_id in self.detectors:
+            panel = self.detectors[detector_id]
+            panel.evec = self._eta_vector
+
+    # =========================================================================
+    # METHODS
+    # =========================================================================
+
+    def write_config(self, file=None, style='yaml', calibration_dict={}):
+        """Write out the instrument configuration (YAML or HDF5)."""
+        # initialize output dictionary
+        assert style.lower() in ['yaml', 'hdf5'], \
+            "style must be either 'yaml' or 'hdf5'; you gave '%s'" % style
+
+        par_dict = {}
+
+        par_dict['id'] = self.id
+
+        # Multi beam writer
+        beam_dict = {}
+        for beam_name, beam in self.beam_dict.items():
+            azim, polar = calc_angles_from_beam_vec(beam['vector'])
+            beam_dict[beam_name] = {
+                'energy': beam['energy'],
+                'vector': {
+                    'azimuth': azim,
+                    'polar_angle': polar,
+                },
+            }
+            if beam['distance'] != np.inf:
+                beam_dict[beam_name]['source_distance'] = beam['distance']
+
+        if len(beam_dict) == 1:
+            # Just write out a single beam (the classic layout)
+            beam_dict = next(iter(beam_dict.values()))
+
+        par_dict['beam'] = beam_dict
+
+        if calibration_dict:
+            par_dict['calibration_crystal'] = calibration_dict
+
+        ostage = dict(
+            chi=self.chi,
+            translation=self.tvec.tolist()
+        )
+        par_dict['oscillation_stage'] = ostage
+
+        det_dict = dict.fromkeys(self.detectors)
+        for det_name, detector in self.detectors.items():
+            # grab panel config
+            # !!! don't need beam or tvec
+            # !!! style has already been vetted above
+            pdict = detector.config_dict(chi=self.chi, tvec=self.tvec,
+                                         beam_energy=self.beam_energy,
+                                         beam_vector=self.beam_vector,
+                                         style=style)
+            det_dict[det_name] = pdict['detector']
+        par_dict['detectors'] = det_dict
+
+        # handle output file if requested
+        if file is not None:
+            if style.lower() == 'yaml':
+                with open(file, 'w') as f:
+                    yaml.dump(par_dict, stream=f, Dumper=NumpyToNativeDumper)
+            else:
+                def _write_group(file):
+                    instr_grp = file.create_group('instrument')
+                    unwrap_dict_to_h5(instr_grp, par_dict, asattr=False)
+
+                # hdf5
+                if isinstance(file, str):
+                    with h5py.File(file, 'w') as f:
+                        _write_group(f)
+                elif isinstance(file, h5py.File):
+                    _write_group(file)
+                else:
+                    raise TypeError("Unexpected file type.")
+
+        return par_dict
+
+    def extract_polar_maps(self, plane_data, imgser_dict,
+                           active_hkls=None, threshold=None,
+                           tth_tol=None, eta_tol=0.25):
+        """
+        Extract eta-omega maps from an imageseries.
+
+        Quick and dirty way to histogram angular patch data for making
+        pole figures suitable for fiber generation.
+
+        TODO: streamline projection code
+        TODO: normalization
+        !!!: images must be non-negative!
+        !!!: plane_data is NOT a copy!
+        """
+        if tth_tol is not None:
+            plane_data.tThWidth = np.radians(tth_tol)
+        else:
+            tth_tol = np.degrees(plane_data.tThWidth)
+
+        # make rings clipped to panel
+        # !!! eta_idx has the same length as plane_data.exclusions;
+        #     each entry are the integer indices into the bins
+        # !!! eta_edges is the list of eta bin EDGES; same for all
+        #     detectors, so calculate it once
+        # !!! grab first panel
+        panel = next(iter(self.detectors.values()))
+        pow_angs, pow_xys, tth_ranges, eta_idx, eta_edges = \
+            panel.make_powder_rings(
+                plane_data, merge_hkls=False,
+                delta_eta=eta_tol, full_output=True
+            )
+
+        if active_hkls is not None:
+            assert hasattr(active_hkls, '__len__'), \
+                "active_hkls must be an iterable with __len__"
+
+            # need to re-cast for element-wise operations
+            active_hkls = np.array(active_hkls)
+
+            # these are all active reflection unique hklIDs
+            active_hklIDs = plane_data.getHKLID(
+                plane_data.hkls, master=True
+            )
+
+            # find indices
+            idx = np.zeros_like(active_hkls, dtype=int)
+            for i, input_hklID in enumerate(active_hkls):
+                try:
+                    idx[i] = np.where(active_hklIDs == input_hklID)[0]
+                except ValueError:
+                    raise RuntimeError(f"hklID '{input_hklID}' is invalid")
+            tth_ranges = tth_ranges[idx]
+
+        delta_eta = eta_edges[1] - eta_edges[0]
+        ncols_eta = len(eta_edges) - 1
+
+        ring_maps_panel = dict.fromkeys(self.detectors)
% det_key) + + # grab panel + panel = self.detectors[det_key] + # native_area = panel.pixel_area # pixel ref area + + # pixel angular coords for the detector panel + ptth, peta = panel.pixel_angles() + + # grab imageseries for this detector + ims = _parse_imgser_dict(imgser_dict, det_key, roi=panel.roi) + + # grab omegas from imageseries and squawk if missing + try: + omegas = ims.metadata['omega'] + except KeyError: + raise RuntimeError( + f"imageseries for '{det_key}' has no omega info" + ) + + # initialize maps and assing by row (omega/frame) + nrows_ome = len(omegas) + + # init map with NaNs + shape = (len(tth_ranges), nrows_ome, ncols_eta) + ring_maps = np.full(shape, np.nan) + + # Generate ring parameters once, and re-use them for each image + ring_params = [] + for tthr in tth_ranges: + kwargs = { + 'tthr': tthr, + 'ptth': ptth, + 'peta': peta, + 'eta_edges': eta_edges, + 'delta_eta': delta_eta, + } + ring_params.append(_generate_ring_params(**kwargs)) + + # Divide up the images among processes + tasks = distribute_tasks(len(ims), self.max_workers) + func = partial(_run_histograms, ims=ims, tth_ranges=tth_ranges, + ring_maps=ring_maps, ring_params=ring_params, + threshold=threshold) + + max_workers = self.max_workers + if max_workers == 1 or len(tasks) == 1: + # Just execute it serially. + for task in tasks: + func(task) + else: + with ThreadPoolExecutor(max_workers=max_workers) as executor: + # Evaluate the results via `list()`, so that if an + # exception is raised in a thread, it will be re-raised + # and visible to the user. + list(executor.map(func, tasks)) + + ring_maps_panel[det_key] = ring_maps + + return ring_maps_panel, eta_edges + + def extract_line_positions(self, plane_data, imgser_dict, + tth_tol=None, eta_tol=1., npdiv=2, + eta_centers=None, + collapse_eta=True, collapse_tth=False, + do_interpolation=True, do_fitting=False, + tth_distortion=None, fitting_kwargs=None): + """ + Perform annular interpolation on diffraction images. + + Provides data for extracting the line positions from powder diffraction + images, pole figure patches from imageseries, or Bragg peaks from + Laue diffraction images. + + Parameters + ---------- + plane_data : hexrd.crystallography.PlaneData object or array_like + Object determining the 2theta positions for the integration + sectors. If PlaneData, this will be all non-excluded reflections, + subject to merging within PlaneData.tThWidth. If array_like, + interpreted as a list of 2theta angles IN DEGREES. + imgser_dict : dict + Dictionary of powder diffraction images, one for each detector. + tth_tol : scalar, optional + The radial (i.e. 2theta) width of the integration sectors + IN DEGREES. This arg is required if plane_data is array_like. + The default is None. + eta_tol : scalar, optional + The azimuthal (i.e. eta) width of the integration sectors + IN DEGREES. The default is 1. + npdiv : int, optional + The number of oversampling pixel subdivision (see notes). + The default is 2. + eta_centers : array_like, optional + The desired azimuthal sector centers. The default is None. If + None, then bins are distrubted sequentially from (-180, 180). + collapse_eta : bool, optional + Flag for summing sectors in eta. The default is True. + collapse_tth : bool, optional + Flag for summing sectors in 2theta. The default is False. + do_interpolation : bool, optional + If True, perform bilinear interpolation. The default is True. + do_fitting : bool, optional + If True, then perform spectrum fitting, and append the results + to the returned data. 
collapse_eta must also be True for this
+            to have any effect. The default is False.
+        tth_distortion : special class, optional
+            for special case of pinhole camera distortions. See
+            hexrd.xrdutil.phutil.SampleLayerDistortion (only type supported)
+        fitting_kwargs : dict, optional
+            kwargs passed to hexrd.fitting.utils.fit_ring if do_fitting is True
+
+        Raises
+        ------
+        RuntimeError
+            DESCRIPTION.
+
+        Returns
+        -------
+        panel_data : dict
+            Dictionary over the detectors with the following structure:
+                [list over (merged) 2theta ranges]
+                  [list over valid eta sectors]
+                    [angle data,
+                     bin intensities,
+                     fitting results]
+
+        Notes
+        -----
+        TODO: May change the array_like input units to degrees.
+        TODO: rename function.
+
+        """
+
+        if fitting_kwargs is None:
+            fitting_kwargs = {}
+
+        # =====================================================================
+        # LOOP OVER DETECTORS
+        # =====================================================================
+        logger.info("Interpolating ring data")
+        pbar_dets = partial(tqdm, total=self.num_panels, desc="Detector",
+                            position=self.num_panels)
+
+        # Split up the workers among the detectors
+        max_workers_per_detector = max(1, self.max_workers // self.num_panels)
+
+        kwargs = {
+            'plane_data': plane_data,
+            'tth_tol': tth_tol,
+            'eta_tol': eta_tol,
+            'eta_centers': eta_centers,
+            'npdiv': npdiv,
+            'collapse_tth': collapse_tth,
+            'collapse_eta': collapse_eta,
+            'do_interpolation': do_interpolation,
+            'do_fitting': do_fitting,
+            'fitting_kwargs': fitting_kwargs,
+            'tth_distortion': tth_distortion,
+            'max_workers': max_workers_per_detector,
+        }
+        func = partial(_extract_detector_line_positions, **kwargs)
+
+        def make_instr_cfg(panel):
+            return panel.config_dict(
+                chi=self.chi, tvec=self.tvec,
+                beam_energy=self.beam_energy,
+                beam_vector=self.beam_vector,
+                style='hdf5'
+            )
+
+        images = []
+        for detector_id, panel in self.detectors.items():
+            images.append(_parse_imgser_dict(imgser_dict, detector_id,
+                                             roi=panel.roi))
+
+        panels = [self.detectors[k] for k in self.detectors]
+        instr_cfgs = [make_instr_cfg(x) for x in panels]
+        pbp_array = np.arange(self.num_panels)
+        iter_args = zip(panels, instr_cfgs, images, pbp_array)
+        with ProcessPoolExecutor(mp_context=constants.mp_context,
+                                 max_workers=self.num_panels) as executor:
+            results = list(pbar_dets(executor.map(func, iter_args)))
+
+        panel_data = {}
+        for det, res in zip(self.detectors, results):
+            panel_data[det] = res
+
+        return panel_data
+
+    def simulate_powder_pattern(self,
+                                mat_list,
+                                params=None,
+                                bkgmethod=None,
+                                origin=None,
+                                noise=None):
+        """
+        Generate powder diffraction images from specified materials.
+
+        Parameters
+        ----------
+        mat_list : array_like (n, )
+            List of Material classes.
+        params : dict, optional
+            Dictionary of LeBail parameters (see Notes). The default is None.
+        bkgmethod : dict, optional
+            Background function specification. The default is None.
+        origin : array_like (3,), optional
+            Vector describing the origin of the diffraction volume.
+            The default is None, which is equivalent to [0, 0, 0].
+        noise : str, optional
+            Flag describing type of noise to be applied. The default is None.
+
+        Returns
+        -------
+        img_dict : dict
+            Dictionary of diffraction images over the detectors.
+
+        Notes
+        -----
+        TODO: add more controls for noise function.
+        TODO: modify hooks to LeBail parameters.
+        TODO: add optional volume fraction weights for phases in mat_list
+        """
+        """
+        >> @AUTHOR: Saransh Singh, Lawrence Livermore National Lab,
+                    saransh1@llnl.gov
+        >> @DATE: 01/22/2021 SS 1.0 original
+        >> @DETAILS: adding hook to WPPF class. this changes the input list
+                     significantly
+        """
+        if origin is None:
+            origin = self.tvec
+        origin = np.asarray(origin).squeeze()
+        assert len(origin) == 3, \
+            "origin must be a 3-element sequence"
+
+        if bkgmethod is None:
+            bkgmethod = {'chebyshev': 3}
+
+        '''
+        if params is None, fill in some sane default values
+        only the first value is used. the rest of the values are
+        the upper, lower bounds and vary flag for refinement which
+        are not used but required for interfacing with WPPF
+
+        zero_error : zero shift error
+        U, V, W : Cagliotti parameters
+        P, X, Y : Lorentzian parameters
+        eta1, eta2, eta3 : Mixing parameters
+        '''
+        if params is None:
+            # params = {'zero_error': [0.0, -1., 1., True],
+            #           'U': [2e-1, -1., 1., True],
+            #           'V': [2e-2, -1., 1., True],
+            #           'W': [2e-2, -1., 1., True],
+            #           'X': [2e-1, -1., 1., True],
+            #           'Y': [2e-1, -1., 1., True]
+            #           }
+            params = wppfsupport._generate_default_parameters_LeBail(
+                mat_list,
+                1,
+                bkgmethod,
+            )
+        '''
+        use the material list to obtain the dictionary of initial intensities
+        we need to make sure that the intensities are properly scaled by the
+        lorentz polarization factor. since the calculation is done in the
+        LeBail class, all that means is the initial intensity needs that
+        factor in there
+        '''
+        img_dict = dict.fromkeys(self.detectors)
+
+        # find min and max tth over all panels
+        tth_mi = np.inf
+        tth_ma = 0.
+        ptth_dict = dict.fromkeys(self.detectors)
+        for det_key, panel in self.detectors.items():
+            ptth, peta = panel.pixel_angles(origin=origin)
+            tth_mi = min(tth_mi, ptth.min())
+            tth_ma = max(tth_ma, ptth.max())
+            ptth_dict[det_key] = ptth
+
+        '''
+        now make a list of two theta and dummy ones for the experimental
+        spectrum; this is never really used so any values should be okay. We
+        could also pass the integrated detector image if we would like to
+        simulate some realistic background. But that's for another day.
+        '''
+        # convert angles to degrees because that's what the WPPF expects
+        tth_mi = np.degrees(tth_mi)
+        tth_ma = np.degrees(tth_ma)
+
+        # get tth angular resolution for instrument
+        ang_res = max_resolution(self)
+
+        # !!! calc nsteps by oversampling
+        nsteps = int(np.ceil(2*(tth_ma - tth_mi)/np.degrees(ang_res[0])))
+
+        # evaluation vector for LeBail
+        tth = np.linspace(tth_mi, tth_ma, nsteps)
+
+        expt = np.vstack([tth, np.ones_like(tth)]).T
+
+        wavelength = [
+            valWUnit('lp', 'length', self.beam_wavelength, 'angstrom'),
+            1.
+ ] + + ''' + now go through the material list and get the intensity dictionary + ''' + intensity = {} + for mat in mat_list: + + multiplicity = mat.planeData.getMultiplicity() + + tth = mat.planeData.getTTh() + + LP = (1 + np.cos(tth)**2) / \ + np.cos(0.5*tth)/np.sin(0.5*tth)**2 + + intensity[mat.name] = {} + intensity[mat.name]['synchrotron'] = \ + mat.planeData.structFact * LP * multiplicity + + kwargs = { + 'expt_spectrum': expt, + 'params': params, + 'phases': mat_list, + 'wavelength': { + 'synchrotron': wavelength + }, + 'bkgmethod': bkgmethod, + 'intensity_init': intensity, + 'peakshape': 'pvtch' + } + + self.WPPFclass = LeBail(**kwargs) + + self.simulated_spectrum = self.WPPFclass.spectrum_sim + self.background = self.WPPFclass.background + + ''' + now that we have the simulated intensities, its time to get the + two theta for the detector pixels and interpolate what the intensity + for each pixel should be + ''' + + img_dict = dict.fromkeys(self.detectors) + for det_key, panel in self.detectors.items(): + ptth = ptth_dict[det_key] + + img = np.interp(np.degrees(ptth), + self.simulated_spectrum.x, + self.simulated_spectrum.y + self.background.y) + + if noise is None: + img_dict[det_key] = img + + else: + # Rescale to be between 0 and 1 so random_noise() will work + prev_max = img.max() + img /= prev_max + + if noise.lower() == 'poisson': + im_noise = random_noise(img, + mode='poisson', + clip=True) + mi = im_noise.min() + ma = im_noise.max() + if ma > mi: + im_noise = (im_noise - mi)/(ma - mi) + + elif noise.lower() == 'gaussian': + im_noise = random_noise(img, mode='gaussian', clip=True) + + elif noise.lower() == 'salt': + im_noise = random_noise(img, mode='salt') + + elif noise.lower() == 'pepper': + im_noise = random_noise(img, mode='pepper') + + elif noise.lower() == 's&p': + im_noise = random_noise(img, mode='s&p') + + elif noise.lower() == 'speckle': + im_noise = random_noise(img, mode='speckle', clip=True) + + # Now scale back up + img_dict[det_key] = im_noise * prev_max + + return img_dict + + def simulate_laue_pattern(self, crystal_data, + minEnergy=5., maxEnergy=35., + rmat_s=None, grain_params=None): + """ + Simulate Laue diffraction over the instrument. + + Parameters + ---------- + crystal_data : TYPE + DESCRIPTION. + minEnergy : TYPE, optional + DESCRIPTION. The default is 5.. + maxEnergy : TYPE, optional + DESCRIPTION. The default is 35.. + rmat_s : TYPE, optional + DESCRIPTION. The default is None. + grain_params : TYPE, optional + DESCRIPTION. The default is None. + + Returns + ------- + results : TYPE + DESCRIPTION. + + xy_det, hkls_in, angles, dspacing, energy + + TODO: revisit output; dict, or concatenated list? + """ + results = dict.fromkeys(self.detectors) + for det_key, panel in self.detectors.items(): + results[det_key] = panel.simulate_laue_pattern( + crystal_data, + minEnergy=minEnergy, maxEnergy=maxEnergy, + rmat_s=rmat_s, tvec_s=self.tvec, + grain_params=grain_params, + beam_vec=self.beam_vector) + return results + + def simulate_rotation_series(self, plane_data, grain_param_list, + eta_ranges=[(-np.pi, np.pi), ], + ome_ranges=[(-np.pi, np.pi), ], + ome_period=(-np.pi, np.pi), + wavelength=None): + """ + Simulate a monochromatic rotation series over the instrument. + + Parameters + ---------- + plane_data : TYPE + DESCRIPTION. + grain_param_list : TYPE + DESCRIPTION. + eta_ranges : TYPE, optional + DESCRIPTION. The default is [(-np.pi, np.pi), ]. + ome_ranges : TYPE, optional + DESCRIPTION. The default is [(-np.pi, np.pi), ]. 
+        ome_period : TYPE, optional
+            DESCRIPTION. The default is (-np.pi, np.pi).
+        wavelength : TYPE, optional
+            DESCRIPTION. The default is None.
+
+        Returns
+        -------
+        results : TYPE
+            DESCRIPTION.
+
+        TODO: revisit output; dict, or concatenated list?
+        """
+        results = dict.fromkeys(self.detectors)
+        for det_key, panel in self.detectors.items():
+            results[det_key] = panel.simulate_rotation_series(
+                plane_data, grain_param_list,
+                eta_ranges=eta_ranges,
+                ome_ranges=ome_ranges,
+                ome_period=ome_period,
+                chi=self.chi, tVec_s=self.tvec,
+                wavelength=wavelength)
+        return results
+
+    def pull_spots(self, plane_data, grain_params,
+                   imgser_dict,
+                   tth_tol=0.25, eta_tol=1., ome_tol=1.,
+                   npdiv=2, threshold=10,
+                   eta_ranges=[(-np.pi, np.pi), ],
+                   ome_period=None,
+                   dirname='results', filename=None, output_format='text',
+                   return_spot_list=False,
+                   quiet=True, check_only=False,
+                   interp='nearest'):
+        """
+        Extract reflection info from a rotation series.
+
+        Input must be encoded as an OmegaImageSeries object.
+
+        Parameters
+        ----------
+        plane_data : TYPE
+            DESCRIPTION.
+        grain_params : TYPE
+            DESCRIPTION.
+        imgser_dict : TYPE
+            DESCRIPTION.
+        tth_tol : TYPE, optional
+            DESCRIPTION. The default is 0.25.
+        eta_tol : TYPE, optional
+            DESCRIPTION. The default is 1..
+        ome_tol : TYPE, optional
+            DESCRIPTION. The default is 1..
+        npdiv : TYPE, optional
+            DESCRIPTION. The default is 2.
+        threshold : TYPE, optional
+            DESCRIPTION. The default is 10.
+        eta_ranges : TYPE, optional
+            DESCRIPTION. The default is [(-np.pi, np.pi), ].
+        ome_period : TYPE, optional
+            DESCRIPTION. The default is None.
+        dirname : TYPE, optional
+            DESCRIPTION. The default is 'results'.
+        filename : TYPE, optional
+            DESCRIPTION. The default is None.
+        output_format : TYPE, optional
+            DESCRIPTION. The default is 'text'.
+        return_spot_list : TYPE, optional
+            DESCRIPTION. The default is False.
+        quiet : TYPE, optional
+            DESCRIPTION. The default is True.
+        check_only : TYPE, optional
+            DESCRIPTION. The default is False.
+        interp : TYPE, optional
+            DESCRIPTION. The default is 'nearest'.
+
+        Returns
+        -------
+        compl : TYPE
+            DESCRIPTION.
+        output : TYPE
+            DESCRIPTION.
+
+        """
+        # grain parameters
+        rMat_c = make_rmat_of_expmap(grain_params[:3])
+        tVec_c = grain_params[3:6]
+
+        # grab omega ranges from first imageseries
+        #
+        # WARNING: all imageseries AND all wedges within are assumed to have
+        # the same omega values; put in a check that they are all the same???
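+        # A minimal sketch of such a check (commented out, not active code;
+        # it assumes every entry in imgser_dict carries 'omega' metadata,
+        # just as the first entry is assumed to below):
+        #
+        #     ref_omegas = next(iter(imgser_dict.values())).metadata['omega']
+        #     for _det_key, _oims in imgser_dict.items():
+        #         if not np.allclose(_oims.metadata['omega'], ref_omegas):
+        #             raise RuntimeError(
+        #                 f"omega mismatch for detector '{_det_key}'"
+        #             )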
+ oims0 = next(iter(imgser_dict.values())) + ome_ranges = [np.radians([i['ostart'], i['ostop']]) + for i in oims0.omegawedges.wedges] + if ome_period is None: + ims = next(iter(imgser_dict.values())) + ostart = ims.omega[0, 0] + ome_period = np.radians(ostart + np.r_[0., 360.]) + + # delta omega in DEGREES grabbed from first imageseries in the dict + delta_ome = oims0.omega[0, 1] - oims0.omega[0, 0] + + # make omega grid for frame expansion around reference frame + # in DEGREES + ndiv_ome, ome_del = make_tolerance_grid( + delta_ome, ome_tol, 1, adjust_window=True, + ) + + # generate structuring element for connected component labeling + if ndiv_ome == 1: + label_struct = ndimage.generate_binary_structure(2, 2) + else: + label_struct = ndimage.generate_binary_structure(3, 3) + + # simulate rotation series + sim_results = self.simulate_rotation_series( + plane_data, [grain_params, ], + eta_ranges=eta_ranges, + ome_ranges=ome_ranges, + ome_period=ome_period) + + # patch vertex generator (global for instrument) + tol_vec = 0.5*np.radians( + [-tth_tol, -eta_tol, + -tth_tol, eta_tol, + tth_tol, eta_tol, + tth_tol, -eta_tol]) + + # prepare output if requested + if filename is not None and output_format.lower() == 'hdf5': + this_filename = os.path.join(dirname, filename) + writer = GrainDataWriter_h5( + os.path.join(dirname, filename), + self.write_config(), grain_params) + + # ===================================================================== + # LOOP OVER PANELS + # ===================================================================== + iRefl = 0 + next_invalid_peak_id = -100 + compl = [] + output = dict.fromkeys(self.detectors) + for detector_id, panel in self.detectors.items(): + # initialize text-based output writer + if filename is not None and output_format.lower() == 'text': + output_dir = os.path.join( + dirname, detector_id + ) + os.makedirs(output_dir, exist_ok=True) + this_filename = os.path.join( + output_dir, filename + ) + writer = PatchDataWriter(this_filename) + + # grab panel + instr_cfg = panel.config_dict( + self.chi, self.tvec, + beam_energy=self.beam_energy, + beam_vector=self.beam_vector, + style='hdf5' + ) + native_area = panel.pixel_area # pixel ref area + + # pull out the OmegaImageSeries for this panel from input dict + ome_imgser = _parse_imgser_dict(imgser_dict, + detector_id, + roi=panel.roi) + + # extract simulation results + sim_results_p = sim_results[detector_id] + hkl_ids = sim_results_p[0][0] + hkls_p = sim_results_p[1][0] + ang_centers = sim_results_p[2][0] + xy_centers = sim_results_p[3][0] + ang_pixel_size = sim_results_p[4][0] + + # now verify that full patch falls on detector... + # ???: strictly necessary? + # + # patch vertex array from sim + nangs = len(ang_centers) + patch_vertices = ( + np.tile(ang_centers[:, :2], (1, 4)) + + np.tile(tol_vec, (nangs, 1)) + ).reshape(4*nangs, 2) + ome_dupl = np.tile( + ang_centers[:, 2], (4, 1) + ).T.reshape(len(patch_vertices), 1) + + # find vertices that all fall on the panel + det_xy, rmats_s, on_plane = xrdutil._project_on_detector_plane( + np.hstack([patch_vertices, ome_dupl]), + panel.rmat, rMat_c, self.chi, + panel.tvec, tVec_c, self.tvec, + panel.distortion) + _, on_panel = panel.clip_to_panel(det_xy, buffer_edges=True) + + # all vertices must be on... + patch_is_on = np.all(on_panel.reshape(nangs, 4), axis=1) + patch_xys = det_xy.reshape(nangs, 4, 2)[patch_is_on] + + # re-filter... 
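+            # (only reflections whose four patch vertices all fell on the
+            #  panel survive; every per-reflection array below must be
+            #  culled with the same patch_is_on mask to stay aligned)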
+ hkl_ids = hkl_ids[patch_is_on] + hkls_p = hkls_p[patch_is_on, :] + ang_centers = ang_centers[patch_is_on, :] + xy_centers = xy_centers[patch_is_on, :] + ang_pixel_size = ang_pixel_size[patch_is_on, :] + + # TODO: add polygon testing right here! + # done + if check_only: + patch_output = [] + for i_pt, angs in enumerate(ang_centers): + # the evaluation omegas; + # expand about the central value using tol vector + ome_eval = np.degrees(angs[2]) + ome_del + + # ...vectorize the omega_to_frame function to avoid loop? + frame_indices = [ + ome_imgser.omega_to_frame(ome)[0] for ome in ome_eval + ] + if -1 in frame_indices: + if not quiet: + msg = """ + window for (%d %d %d) falls outside omega range + """ % tuple(hkls_p[i_pt, :]) + print(msg) + continue + else: + these_vertices = patch_xys[i_pt] + ijs = panel.cartToPixel(these_vertices) + ii, jj = polygon(ijs[:, 0], ijs[:, 1]) + contains_signal = False + for i_frame in frame_indices: + contains_signal = contains_signal or np.any( + ome_imgser[i_frame][ii, jj] > threshold + ) + compl.append(contains_signal) + patch_output.append((ii, jj, frame_indices)) + else: + # make the tth,eta patches for interpolation + patches = xrdutil.make_reflection_patches( + instr_cfg, + ang_centers[:, :2], ang_pixel_size, + omega=ang_centers[:, 2], + tth_tol=tth_tol, eta_tol=eta_tol, + rmat_c=rMat_c, tvec_c=tVec_c, + npdiv=npdiv, quiet=True) + + # GRAND LOOP over reflections for this panel + patch_output = [] + for i_pt, patch in enumerate(patches): + + # strip relevant objects out of current patch + vtx_angs, vtx_xy, conn, areas, xy_eval, ijs = patch + + prows, pcols = areas.shape + nrm_fac = areas/float(native_area) + nrm_fac = nrm_fac / np.min(nrm_fac) + + # grab hkl info + hkl = hkls_p[i_pt, :] + hkl_id = hkl_ids[i_pt] + + # edge arrays + tth_edges = vtx_angs[0][0, :] + delta_tth = tth_edges[1] - tth_edges[0] + eta_edges = vtx_angs[1][:, 0] + delta_eta = eta_edges[1] - eta_edges[0] + + # need to reshape eval pts for interpolation + xy_eval = np.vstack([xy_eval[0].flatten(), + xy_eval[1].flatten()]).T + + # the evaluation omegas; + # expand about the central value using tol vector + ome_eval = np.degrees(ang_centers[i_pt, 2]) + ome_del + + # ???: vectorize the omega_to_frame function to avoid loop? + frame_indices = [ + ome_imgser.omega_to_frame(ome)[0] for ome in ome_eval + ] + + if -1 in frame_indices: + if not quiet: + msg = """ + window for (%d%d%d) falls outside omega range + """ % tuple(hkl) + print(msg) + continue + else: + # initialize spot data parameters + # !!! 
maybe change these to nan to not fuck up writer + peak_id = next_invalid_peak_id + sum_int = np.nan + max_int = np.nan + meas_angs = np.nan*np.ones(3) + meas_xy = np.nan*np.ones(2) + + # quick check for intensity + contains_signal = False + patch_data_raw = [] + for i_frame in frame_indices: + tmp = ome_imgser[i_frame][ijs[0], ijs[1]] + contains_signal = contains_signal or np.any( + tmp > threshold + ) + patch_data_raw.append(tmp) + patch_data_raw = np.stack(patch_data_raw, axis=0) + compl.append(contains_signal) + + if contains_signal: + # initialize patch data array for intensities + if interp.lower() == 'bilinear': + patch_data = np.zeros( + (len(frame_indices), prows, pcols)) + for i, i_frame in enumerate(frame_indices): + patch_data[i] = \ + panel.interpolate_bilinear( + xy_eval, + ome_imgser[i_frame], + pad_with_nans=False + ).reshape(prows, pcols) # * nrm_fac + elif interp.lower() == 'nearest': + patch_data = patch_data_raw # * nrm_fac + else: + msg = "interpolation option " + \ + "'%s' not understood" + raise RuntimeError(msg % interp) + + # now have interpolated patch data... + labels, num_peaks = ndimage.label( + patch_data > threshold, structure=label_struct + ) + slabels = np.arange(1, num_peaks + 1) + + if num_peaks > 0: + peak_id = iRefl + props = regionprops(labels, patch_data) + coms = np.vstack( + [x.weighted_centroid for x in props]) + if num_peaks > 1: + center = np.r_[patch_data.shape]*0.5 + center_t = np.tile(center, (num_peaks, 1)) + com_diff = coms - center_t + closest_peak_idx = np.argmin( + np.sum(com_diff**2, axis=1) + ) + else: + closest_peak_idx = 0 + coms = coms[closest_peak_idx] + # meas_omes = \ + # ome_edges[0] + (0.5 + coms[0])*delta_ome + meas_omes = \ + ome_eval[0] + coms[0]*delta_ome + meas_angs = np.hstack( + [tth_edges[0] + (0.5 + coms[2])*delta_tth, + eta_edges[0] + (0.5 + coms[1])*delta_eta, + mapAngle( + np.radians(meas_omes), ome_period + ) + ] + ) + + # intensities + # - summed is 'integrated' over interpolated + # data + # - max is max of raw input data + sum_int = np.sum( + patch_data[ + labels == slabels[closest_peak_idx] + ] + ) + max_int = np.max( + patch_data_raw[ + labels == slabels[closest_peak_idx] + ] + ) + # ???: Should this only use labeled pixels? + # Those are segmented from interpolated data, + # not raw; likely ok in most cases. + + # need MEASURED xy coords + # FIXME: overload angles_to_cart? + gvec_c = angles_to_gvec( + meas_angs, + chi=self.chi, + rmat_c=rMat_c, + beam_vec=self.beam_vector) + rMat_s = make_sample_rmat( + self.chi, meas_angs[2] + ) + meas_xy = gvec_to_xy( + gvec_c, + panel.rmat, rMat_s, rMat_c, + panel.tvec, self.tvec, tVec_c, + beam_vec=self.beam_vector) + if panel.distortion is not None: + meas_xy = panel.distortion.apply_inverse( + np.atleast_2d(meas_xy) + ).flatten() + # FIXME: why is this suddenly necessary??? + meas_xy = meas_xy.squeeze() + else: + patch_data = patch_data_raw + + if peak_id < 0: + # The peak is invalid. + # Decrement the next invalid peak ID. 
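+                        # (IDs count down from -100, so each unindexed
+                        #  reflection keeps a unique negative peak_id in
+                        #  the output tables)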
+ next_invalid_peak_id -= 1 + + # write output + if filename is not None: + if output_format.lower() == 'text': + writer.dump_patch( + peak_id, hkl_id, hkl, sum_int, max_int, + ang_centers[i_pt], meas_angs, + xy_centers[i_pt], meas_xy) + elif output_format.lower() == 'hdf5': + xyc_arr = xy_eval.reshape( + prows, pcols, 2 + ).transpose(2, 0, 1) + writer.dump_patch( + detector_id, iRefl, peak_id, hkl_id, hkl, + tth_edges, eta_edges, np.radians(ome_eval), + xyc_arr, ijs, frame_indices, patch_data, + ang_centers[i_pt], xy_centers[i_pt], + meas_angs, meas_xy) + + if return_spot_list: + # Full output + xyc_arr = xy_eval.reshape( + prows, pcols, 2 + ).transpose(2, 0, 1) + _patch_output = [ + detector_id, iRefl, peak_id, hkl_id, hkl, + tth_edges, eta_edges, np.radians(ome_eval), + xyc_arr, ijs, frame_indices, patch_data, + ang_centers[i_pt], xy_centers[i_pt], + meas_angs, meas_xy + ] + else: + # Trimmed output + _patch_output = [ + peak_id, hkl_id, hkl, sum_int, max_int, + ang_centers[i_pt], meas_angs, meas_xy + ] + patch_output.append(_patch_output) + iRefl += 1 + output[detector_id] = patch_output + if filename is not None and output_format.lower() == 'text': + writer.close() + if filename is not None and output_format.lower() == 'hdf5': + writer.close() + return compl, output + + def update_memoization_sizes(self): + # Resize all known memoization functions to have a cache at least + # the size of the number of detectors. + all_panels = list(self.detectors.values()) + PlanarDetector.update_memoization_sizes(all_panels) + CylindricalDetector.update_memoization_sizes(all_panels) + + def calc_transmission(self, rMat_s: np.ndarray = None) -> dict[str, np.ndarray]: + """calculate the transmission from the + filter and polymer coating. the inverse of this + number is the intensity correction that needs + to be applied. 
actual computation is done inside + the detector class + """ + if rMat_s is None: + rMat_s = ct.identity_3x3 + + energy = self.beam_energy + transmissions = {} + for det_name, det in self.detectors.items(): + transmission_filter, transmission_phosphor = ( + det.calc_filter_coating_transmission(energy)) + + transmission = transmission_filter * transmission_phosphor + + if self.physics_package is not None: + transmission_physics_package = ( + det.calc_physics_package_transmission( + energy, rMat_s, self.physics_package)) + effective_pinhole_area = det.calc_effective_pinhole_area( + self.physics_package) + + transmission = ( + transmission * + transmission_physics_package * + effective_pinhole_area + ) + + transmissions[det_name] = transmission + return transmissions + +# ============================================================================= +# UTILITIES +# ============================================================================= + + +class PatchDataWriter(object): + """Class for dumping Bragg reflection data.""" + + def __init__(self, filename): + self._delim = ' ' + header_items = ( + '# ID', 'PID', + 'H', 'K', 'L', + 'sum(int)', 'max(int)', + 'pred tth', 'pred eta', 'pred ome', + 'meas tth', 'meas eta', 'meas ome', + 'pred X', 'pred Y', + 'meas X', 'meas Y' + ) + self._header = self._delim.join([ + self._delim.join(np.tile('{:<6}', 5)).format(*header_items[:5]), + self._delim.join(np.tile('{:<12}', 2)).format(*header_items[5:7]), + self._delim.join(np.tile('{:<23}', 10)).format(*header_items[7:17]) + ]) + if isinstance(filename, IOBase): + self.fid = filename + else: + self.fid = open(filename, 'w') + print(self._header, file=self.fid) + + def __del__(self): + self.close() + + def close(self): + self.fid.close() + + def dump_patch(self, peak_id, hkl_id, + hkl, spot_int, max_int, + pangs, mangs, pxy, mxy): + """ + !!! maybe need to check that last four inputs are arrays + """ + if mangs is None: + spot_int = np.nan + max_int = np.nan + mangs = np.nan*np.ones(3) + mxy = np.nan*np.ones(2) + + res = [int(peak_id), int(hkl_id)] \ + + np.array(hkl, dtype=int).tolist() \ + + [spot_int, max_int] \ + + pangs.tolist() \ + + mangs.tolist() \ + + pxy.tolist() \ + + mxy.tolist() + + output_str = self._delim.join( + [self._delim.join(np.tile('{:<6d}', 5)).format(*res[:5]), + self._delim.join(np.tile('{:<12e}', 2)).format(*res[5:7]), + self._delim.join(np.tile('{:<23.16e}', 10)).format(*res[7:])] + ) + print(output_str, file=self.fid) + return output_str + + +class GrainDataWriter(object): + """Class for dumping grain data.""" + + def __init__(self, filename=None, array=None): + """Writes to either file or np array + + Array must be initialized with number of rows to be written. 
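+
+        A hypothetical array-mode use (names illustrative), preallocating
+        one row per grain to be dumped:
+
+            gtable = np.empty((num_grains, 21))
+            gw = GrainDataWriter(array=gtable)
+            gw.dump_grain(0, completeness, chisq, grain_params)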
+ """ + if filename is None and array is None: + raise RuntimeError( + 'GrainDataWriter must be specified with filename or array') + + self.array = None + self.fid = None + + # array supersedes filename + if array is not None: + assert array.shape[1] == 21, \ + f'grain data table must have 21 columns not {array.shape[21]}' + self.array = array + self._array_row = 0 + return + + self._delim = ' ' + header_items = ( + '# grain ID', 'completeness', 'chi^2', + 'exp_map_c[0]', 'exp_map_c[1]', 'exp_map_c[2]', + 't_vec_c[0]', 't_vec_c[1]', 't_vec_c[2]', + 'inv(V_s)[0,0]', 'inv(V_s)[1,1]', 'inv(V_s)[2,2]', + 'inv(V_s)[1,2]*sqrt(2)', + 'inv(V_s)[0,2]*sqrt(2)', + 'inv(V_s)[0,1]*sqrt(2)', + 'ln(V_s)[0,0]', 'ln(V_s)[1,1]', 'ln(V_s)[2,2]', + 'ln(V_s)[1,2]', 'ln(V_s)[0,2]', 'ln(V_s)[0,1]' + ) + self._header = self._delim.join( + [self._delim.join( + np.tile('{:<12}', 3) + ).format(*header_items[:3]), + self._delim.join( + np.tile('{:<23}', len(header_items) - 3) + ).format(*header_items[3:])] + ) + if isinstance(filename, IOBase): + self.fid = filename + else: + self.fid = open(filename, 'w') + print(self._header, file=self.fid) + + def __del__(self): + self.close() + + def close(self): + if self.fid is not None: + self.fid.close() + + def dump_grain(self, grain_id, completeness, chisq, + grain_params): + assert len(grain_params) == 12, \ + "len(grain_params) must be 12, not %d" % len(grain_params) + + # extract strain + emat = logm(np.linalg.inv(mutil.vecMVToSymm(grain_params[6:]))) + evec = mutil.symmToVecMV(emat, scale=False) + + res = [int(grain_id), completeness, chisq] \ + + grain_params.tolist() \ + + evec.tolist() + + if self.array is not None: + row = self._array_row + assert row < self.array.shape[0], \ + f'invalid row {row} in array table' + self.array[row] = res + self._array_row += 1 + return res + + # (else) format and write to file + output_str = self._delim.join( + [self._delim.join( + ['{:<12d}', '{:<12f}', '{:<12e}'] + ).format(*res[:3]), + self._delim.join( + np.tile('{:<23.16e}', len(res) - 3) + ).format(*res[3:])] + ) + print(output_str, file=self.fid) + return output_str + + +class GrainDataWriter_h5(object): + """Class for dumping grain results to an HDF5 archive. 
+ + TODO: add material spec + """ + + def __init__(self, filename, instr_cfg, grain_params, use_attr=False): + if isinstance(filename, h5py.File): + self.fid = filename + else: + self.fid = h5py.File(filename + ".hdf5", "w") + icfg = dict(instr_cfg) + + # add instrument groups and attributes + self.instr_grp = self.fid.create_group('instrument') + unwrap_dict_to_h5(self.instr_grp, icfg, asattr=use_attr) + + # add grain group + self.grain_grp = self.fid.create_group('grain') + rmat_c = make_rmat_of_expmap(grain_params[:3]) + tvec_c = np.array(grain_params[3:6]).flatten() + vinv_s = np.array(grain_params[6:]).flatten() + vmat_s = np.linalg.inv(mutil.vecMVToSymm(vinv_s)) + + if use_attr: # attribute version + self.grain_grp.attrs.create('rmat_c', rmat_c) + self.grain_grp.attrs.create('tvec_c', tvec_c) + self.grain_grp.attrs.create('inv(V)_s', vinv_s) + self.grain_grp.attrs.create('vmat_s', vmat_s) + else: # dataset version + self.grain_grp.create_dataset('rmat_c', data=rmat_c) + self.grain_grp.create_dataset('tvec_c', data=tvec_c) + self.grain_grp.create_dataset('inv(V)_s', data=vinv_s) + self.grain_grp.create_dataset('vmat_s', data=vmat_s) + + data_key = 'reflection_data' + self.data_grp = self.fid.create_group(data_key) + + for det_key in self.instr_grp['detectors'].keys(): + self.data_grp.create_group(det_key) + + # FIXME: throws exception when called after close method + # def __del__(self): + # self.close() + + def close(self): + self.fid.close() + + def dump_patch(self, panel_id, + i_refl, peak_id, hkl_id, hkl, + tth_edges, eta_edges, ome_centers, + xy_centers, ijs, frame_indices, + spot_data, pangs, pxy, mangs, mxy, gzip=1): + """ + to be called inside loop over patches + + default GZIP level for data arrays is 1 + """ + fi = np.array(frame_indices, dtype=int) + + panel_grp = self.data_grp[panel_id] + spot_grp = panel_grp.create_group("spot_%05d" % i_refl) + spot_grp.attrs.create('peak_id', int(peak_id)) + spot_grp.attrs.create('hkl_id', int(hkl_id)) + spot_grp.attrs.create('hkl', np.array(hkl, dtype=int)) + spot_grp.attrs.create('predicted_angles', pangs) + spot_grp.attrs.create('predicted_xy', pxy) + if mangs is None: + mangs = np.nan*np.ones(3) + spot_grp.attrs.create('measured_angles', mangs) + if mxy is None: + mxy = np.nan*np.ones(3) + spot_grp.attrs.create('measured_xy', mxy) + + # get centers crds from edge arrays + # FIXME: export full coordinate arrays, or just center vectors??? + # + # ome_crd, eta_crd, tth_crd = np.meshgrid( + # ome_centers, + # centers_of_edge_vec(eta_edges), + # centers_of_edge_vec(tth_edges), + # indexing='ij') + # + # ome_dim, eta_dim, tth_dim = spot_data.shape + + # !!! 
for now just exporting center vectors for spot_data + tth_crd = centers_of_edge_vec(tth_edges) + eta_crd = centers_of_edge_vec(eta_edges) + + shuffle_data = True # reduces size by 20% + spot_grp.create_dataset('tth_crd', data=tth_crd, + compression="gzip", compression_opts=gzip, + shuffle=shuffle_data) + spot_grp.create_dataset('eta_crd', data=eta_crd, + compression="gzip", compression_opts=gzip, + shuffle=shuffle_data) + spot_grp.create_dataset('ome_crd', data=ome_centers, + compression="gzip", compression_opts=gzip, + shuffle=shuffle_data) + spot_grp.create_dataset('xy_centers', data=xy_centers, + compression="gzip", compression_opts=gzip, + shuffle=shuffle_data) + spot_grp.create_dataset('ij_centers', data=ijs, + compression="gzip", compression_opts=gzip, + shuffle=shuffle_data) + spot_grp.create_dataset('frame_indices', data=fi, + compression="gzip", compression_opts=gzip, + shuffle=shuffle_data) + spot_grp.create_dataset('intensities', data=spot_data, + compression="gzip", compression_opts=gzip, + shuffle=shuffle_data) + return + + +class GenerateEtaOmeMaps(object): + """ + eta-ome map class derived from new image_series and YAML config + + ...for now... + + must provide: + + self.dataStore + self.planeData + self.iHKLList + self.etaEdges # IN RADIANS + self.omeEdges # IN RADIANS + self.etas # IN RADIANS + self.omegas # IN RADIANS + + """ + + def __init__(self, image_series_dict, instrument, plane_data, + active_hkls=None, eta_step=0.25, threshold=None, + ome_period=(0, 360)): + """ + image_series must be OmegaImageSeries class + instrument_params must be a dict (loaded from yaml spec) + active_hkls must be a list (required for now) + + FIXME: get rid of omega period; should get it from imageseries + """ + + self._planeData = plane_data + + # ???: change name of iHKLList? + # ???: can we change the behavior of iHKLList? + if active_hkls is None: + self._iHKLList = plane_data.getHKLID( + plane_data.hkls, master=True + ) + n_rings = len(self._iHKLList) + else: + assert hasattr(active_hkls, '__len__'), \ + "active_hkls must be an iterable with __len__" + self._iHKLList = active_hkls + n_rings = len(active_hkls) + + # grab a det key and corresponding imageseries (first will do) + # !!! assuming that the imageseries for all panels + # have the same length and omegas + det_key, this_det_ims = next(iter(image_series_dict.items())) + + # handle omegas + # !!! for multi wedge, enforncing monotonicity + # !!! wedges also cannot overlap or span more than 360 + omegas_array = this_det_ims.metadata['omega'] # !!! DEGREES + delta_ome = omegas_array[0][-1] - omegas_array[0][0] + frame_mask = None + ome_period = omegas_array[0, 0] + np.r_[0., 360.] # !!! be careful + if this_det_ims.omegawedges.nwedges > 1: + delta_omes = [(i['ostop'] - i['ostart'])/i['nsteps'] + for i in this_det_ims.omegawedges.wedges] + check_wedges = mutil.uniqueVectors(np.atleast_2d(delta_omes), + tol=1e-6).squeeze() + assert check_wedges.size == 1, \ + "all wedges must have the same delta omega to 1e-6" + # grab representative delta ome + # !!! assuming positive delta consistent with OmegaImageSeries + delta_ome = delta_omes[0] + + # grab full-range start/stop + # !!! be sure to map to the same period to enable arithmatic + # ??? safer to do this way rather than just pulling from + # the omegas attribute? + owedges = this_det_ims.omegawedges.wedges + ostart = owedges[0]['ostart'] # !!! 
DEGREES + ostop = float( + mapAngle(owedges[-1]['ostop'], ome_period, units='degrees') + ) + # compute total nsteps + # FIXME: need check for roundoff badness + nsteps = int((ostop - ostart)/delta_ome) + ome_edges_full = np.linspace( + ostart, ostop, num=nsteps+1, endpoint=True + ) + omegas_array = np.vstack( + [ome_edges_full[:-1], ome_edges_full[1:]] + ).T + ome_centers = np.average(omegas_array, axis=1) + + # use OmegaImageSeries method to determine which bins have data + # !!! this array has -1 outside a wedge + # !!! again assuming the valid frame order increases monotonically + frame_mask = np.array( + [this_det_ims.omega_to_frame(ome)[0] != -1 + for ome in ome_centers] + ) + + # ???: need to pass a threshold? + eta_mapping, etas = instrument.extract_polar_maps( + plane_data, image_series_dict, + active_hkls=active_hkls, threshold=threshold, + tth_tol=None, eta_tol=eta_step) + + # for convenience grab map shape from first + map_shape = next(iter(eta_mapping.values())).shape[1:] + + # pack all detectors with masking + # FIXME: add omega masking + data_store = [] + for i_ring in range(n_rings): + # first handle etas + full_map = np.zeros(map_shape, dtype=float) + nan_mask_full = np.zeros( + (len(eta_mapping), map_shape[0], map_shape[1]) + ) + i_p = 0 + for det_key, eta_map in eta_mapping.items(): + nan_mask = ~np.isnan(eta_map[i_ring]) + nan_mask_full[i_p] = nan_mask + full_map[nan_mask] += eta_map[i_ring][nan_mask] + i_p += 1 + re_nan_these = np.sum(nan_mask_full, axis=0) == 0 + full_map[re_nan_these] = np.nan + + # now omegas + if frame_mask is not None: + # !!! must expand row dimension to include + # skipped omegas + tmp = np.ones((len(frame_mask), map_shape[1]))*np.nan + tmp[frame_mask, :] = full_map + full_map = tmp + data_store.append(full_map) + self._dataStore = data_store + + # set required attributes + self._omegas = mapAngle( + np.radians(np.average(omegas_array, axis=1)), + np.radians(ome_period) + ) + self._omeEdges = mapAngle( + np.radians(np.r_[omegas_array[:, 0], omegas_array[-1, 1]]), + np.radians(ome_period) + ) + + # !!! must avoid the case where omeEdges[0] = omeEdges[-1] for the + # indexer to work properly + if abs(self._omeEdges[0] - self._omeEdges[-1]) <= ct.sqrt_epsf: + # !!! 
SIGNED delta ome
+            del_ome = np.radians(omegas_array[0, 1] - omegas_array[0, 0])
+            self._omeEdges[-1] = self._omeEdges[-2] + del_ome
+
+        # handle etas
+        # WARNING: unlike the omegas in imageseries metadata,
+        # these are in RADIANS and represent bin centers
+        self._etaEdges = etas
+        self._etas = self._etaEdges[:-1] + 0.5*np.radians(eta_step)
+
+    @property
+    def dataStore(self):
+        return self._dataStore
+
+    @property
+    def planeData(self):
+        return self._planeData
+
+    @property
+    def iHKLList(self):
+        return np.atleast_1d(self._iHKLList).flatten()
+
+    @property
+    def etaEdges(self):
+        return self._etaEdges
+
+    @property
+    def omeEdges(self):
+        return self._omeEdges
+
+    @property
+    def etas(self):
+        return self._etas
+
+    @property
+    def omegas(self):
+        return self._omegas
+
+    def save(self, filename):
+        xrdutil.EtaOmeMaps.save_eta_ome_maps(self, filename)
+
+
+def _generate_ring_params(tthr, ptth, peta, eta_edges, delta_eta):
+    # mark pixels in the spec'd tth range
+    pixels_in_tthr = np.logical_and(
+        ptth >= tthr[0], ptth <= tthr[1]
+    )
+
+    # catch case where ring isn't on detector
+    if not np.any(pixels_in_tthr):
+        return None
+
+    pixel_ids = np.where(pixels_in_tthr)
+
+    # grab relevant eta coords using histogram
+    pixel_etas = peta[pixel_ids]
+    reta_hist = histogram(pixel_etas, eta_edges)
+    bins_on_detector = np.where(reta_hist)[0]
+
+    return pixel_etas, eta_edges, pixel_ids, bins_on_detector
+
+
+def run_fast_histogram(x, bins, weights=None):
+    return histogram1d(x, len(bins) - 1, (bins[0], bins[-1]),
+                       weights=weights)
+
+
+def run_numpy_histogram(x, bins, weights=None):
+    return np.histogram(x, bins=bins, weights=weights)[0]
+
+
+histogram = run_fast_histogram if fast_histogram else run_numpy_histogram
+
+
+def _run_histograms(rows, ims, tth_ranges, ring_maps, ring_params, threshold):
+    for i_row in range(*rows):
+        image = ims[i_row]
+
+        # handle threshold if specified
+        if threshold is not None:
+            # !!! NaNs get preserved
+            image = np.array(image)
+            image[image < threshold] = 0.
+
+        for i_r, tthr in enumerate(tth_ranges):
+            this_map = ring_maps[i_r]
+            params = ring_params[i_r]
+            if not params:
+                # We are supposed to skip this ring...
+                continue
+
+            # Unpack the params
+            pixel_etas, eta_edges, pixel_ids, bins_on_detector = params
+            result = histogram(pixel_etas, eta_edges, weights=image[pixel_ids])
+
+            # Note that this preserves nan values for bins not on the detector.
+            this_map[i_row, bins_on_detector] = result[bins_on_detector]
+
+
+def _extract_detector_line_positions(iter_args, plane_data, tth_tol,
+                                     eta_tol, eta_centers, npdiv,
+                                     collapse_tth, collapse_eta,
+                                     do_interpolation, do_fitting,
+                                     fitting_kwargs, tth_distortion,
+                                     max_workers):
+    panel, instr_cfg, images, pbp = iter_args
+
+    if images.ndim == 2:
+        images = np.tile(images, (1, 1, 1))
+    elif images.ndim != 3:
+        raise RuntimeError("images must be 2- or 3-d")
+
+    # make rings
+    # !!! adding tth_distortion pass-through; comes in as dict over panels
+    tth_distr_cls = None
+    if tth_distortion is not None:
+        tth_distr_cls = tth_distortion[panel.name]
+
+    pow_angs, pow_xys, tth_ranges = panel.make_powder_rings(
+        plane_data, merge_hkls=True,
+        delta_tth=tth_tol, delta_eta=eta_tol,
+        eta_list=eta_centers, tth_distortion=tth_distr_cls)
+
+    tth_tols = np.degrees(np.hstack([i[1] - i[0] for i in tth_ranges]))
+
+    # !!!
this is only needed if doing fitting + if isinstance(plane_data, PlaneData): + tth_idx, tth_ranges = plane_data.getMergedRanges(cullDupl=True) + tth_ref = plane_data.getTTh() + tth0 = [np.degrees(tth_ref[i]) for i in tth_idx] + else: + tth0 = plane_data + + # ================================================================= + # LOOP OVER RING SETS + # ================================================================= + pbar_rings = partial(tqdm, total=len(pow_angs), desc="Ringset", + position=pbp) + + kwargs = { + 'instr_cfg': instr_cfg, + 'panel': panel, + 'eta_tol': eta_tol, + 'npdiv': npdiv, + 'collapse_tth': collapse_tth, + 'collapse_eta': collapse_eta, + 'images': images, + 'do_interpolation': do_interpolation, + 'do_fitting': do_fitting, + 'fitting_kwargs': fitting_kwargs, + 'tth_distortion': tth_distr_cls, + } + func = partial(_extract_ring_line_positions, **kwargs) + iter_arg = zip(pow_angs, pow_xys, tth_tols, tth0) + with ProcessPoolExecutor(mp_context=constants.mp_context, + max_workers=max_workers) as executor: + return list(pbar_rings(executor.map(func, iter_arg))) + + +def _extract_ring_line_positions(iter_args, instr_cfg, panel, eta_tol, npdiv, + collapse_tth, collapse_eta, images, + do_interpolation, do_fitting, fitting_kwargs, + tth_distortion): + """ + Extracts data for a single Debye-Scherrer ring . + + Parameters + ---------- + iter_args : tuple + (angs [radians], + xys [mm], + tth_tol [deg], + this_tth0 [deg]) + instr_cfg : TYPE + DESCRIPTION. + panel : TYPE + DESCRIPTION. + eta_tol : TYPE + DESCRIPTION. + npdiv : TYPE + DESCRIPTION. + collapse_tth : TYPE + DESCRIPTION. + collapse_eta : TYPE + DESCRIPTION. + images : TYPE + DESCRIPTION. + do_interpolation : TYPE + DESCRIPTION. + do_fitting : TYPE + DESCRIPTION. + fitting_kwargs : TYPE + DESCRIPTION. + tth_distortion : TYPE + DESCRIPTION. + + Yields + ------ + patch_data : TYPE + DESCRIPTION. + + """ + # points are already checked to fall on detector + angs, xys, tth_tol, this_tth0 = iter_args + + # SS 01/31/25 noticed some nans in xys even after clipping + # going to do another round of masking to get rid of those + nan_mask = ~np.logical_or(np.isnan(xys), np.isnan(angs)) + nan_mask = np.logical_or.reduce(nan_mask, 1) + if angs.ndim > 1 and xys.ndim > 1: + angs = angs[nan_mask,:] + xys = xys[nan_mask, :] + + n_images = len(images) + native_area = panel.pixel_area + + # make the tth,eta patches for interpolation + patches = xrdutil.make_reflection_patches( + instr_cfg, angs, panel.angularPixelSize(xys), + tth_tol=tth_tol, eta_tol=eta_tol, npdiv=npdiv, quiet=True) + + # loop over patches + # FIXME: fix initialization + if collapse_tth: + patch_data = np.zeros((len(angs), n_images)) + else: + patch_data = [] + for i_p, patch in enumerate(patches): + # strip relevant objects out of current patch + vtx_angs, vtx_xys, conn, areas, xys_eval, ijs = patch + + # need to reshape eval pts for interpolation + xy_eval = np.vstack([ + xys_eval[0].flatten(), + xys_eval[1].flatten()]).T + + _, on_panel = panel.clip_to_panel(xy_eval) + + if np.any(~on_panel): + continue + + if collapse_tth: + ang_data = (vtx_angs[0][0, [0, -1]], + vtx_angs[1][[0, -1], 0]) + elif collapse_eta: + # !!! 
yield the tth bin centers + tth_centers = np.average( + np.vstack( + [vtx_angs[0][0, :-1], vtx_angs[0][0, 1:]] + ), + axis=0 + ) + ang_data = (tth_centers, + angs[i_p][-1]) + if do_fitting: + fit_data = [] + else: + ang_data = vtx_angs + + prows, pcols = areas.shape + area_fac = areas/float(native_area) + + # interpolate + if not collapse_tth: + ims_data = [] + for j_p in np.arange(len(images)): + # catch interpolation type + image = images[j_p] + if do_interpolation: + p_img = panel.interpolate_bilinear( + xy_eval, + image, + ).reshape(prows, pcols)*area_fac + else: + p_img = image[ijs[0], ijs[1]]*area_fac + + # catch flat spectrum data, which will cause + # fitting to fail. + # ???: best here, or make fitting handle it? + mxval = np.max(p_img) + mnval = np.min(p_img) + if mxval == 0 or (1. - mnval/mxval) < 0.01: + continue + + # catch collapsing options + if collapse_tth: + patch_data[i_p, j_p] = np.average(p_img) + # ims_data.append(np.sum(p_img)) + else: + if collapse_eta: + lineout = np.average(p_img, axis=0) + ims_data.append(lineout) + if do_fitting: + if tth_distortion is not None: + # must correct tth0 + tmp = tth_distortion.apply( + panel.angles_to_cart( + np.vstack( + [np.radians(this_tth0), + np.tile(ang_data[-1], len(this_tth0))] + ).T + ), + return_nominal=True) + pk_centers = np.degrees(tmp[:, 0]) + else: + pk_centers = this_tth0 + kwargs = { + 'tth_centers': np.degrees(tth_centers), + 'lineout': lineout, + 'tth_pred': pk_centers, + **fitting_kwargs, + } + result = fit_ring(**kwargs) + fit_data.append(result) + else: + ims_data.append(p_img) + if not collapse_tth: + output = [ang_data, ims_data] + if do_fitting: + output.append(fit_data) + patch_data.append(output) + + return patch_data + + +DETECTOR_TYPES = { + 'planar': PlanarDetector, + 'cylindrical': CylindricalDetector, +} + + +class BufferShapeMismatchError(RuntimeError): + # This is raised when the buffer shape does not match the detector shape + pass + + +@contextmanager +def switch_xray_source(instr: HEDMInstrument, xray_source: Optional[str]): + if xray_source is None: + # If the x-ray source is None, leave it as the current active one + yield + return + + prev_beam_name = instr.active_beam_name + instr.active_beam_name = xray_source + try: + yield + finally: + instr.active_beam_name = prev_beam_name diff --git a/hexrd/powder/material/crystallography.py b/hexrd/powder/material/crystallography.py new file mode 100644 index 000000000..574225e67 --- /dev/null +++ b/hexrd/powder/material/crystallography.py @@ -0,0 +1,2255 @@ +# -*- coding: utf-8 -*- +# ============================================================================= +# Copyright (c) 2012, Lawrence Livermore National Security, LLC. +# Produced at the Lawrence Livermore National Laboratory. +# Written by Joel Bernier and others. +# LLNL-CODE-529294. +# All rights reserved. +# +# This file is part of HEXRD. For details on dowloading the source, +# see the file COPYING. +# +# Please also see the file LICENSE. +# +# This program is free software; you can redistribute it and/or modify it under +# the terms of the GNU Lesser General Public License (as published by the Free +# Software Foundation) version 2.1 dated February 1999. +# +# This program is distributed in the hope that it will be useful, but +# WITHOUT ANY WARRANTY; without even the IMPLIED WARRANTY OF MERCHANTABILITY +# or FITNESS FOR A PARTICULAR PURPOSE. See the terms and conditions of the +# GNU General Public License for more details. 
+# +# You should have received a copy of the GNU Lesser General Public +# License along with this program (see file LICENSE); if not, write to +# the Free Software Foundation, Inc., 59 Temple Place, Suite 330, +# Boston, MA 02111-1307 USA or visit . +# ============================================================================= +import re +import copy +import csv +import os +from math import pi +from typing import Optional, Union, Dict, List, Tuple + +import numpy as np + +from hexrd.material.unitcell import unitcell +from hexrd.deprecation import deprecated +from hexrd import constants +from hexrd.matrixutil import unitVector +from hexrd.rotations import ( + rotMatOfExpMap, + mapAngle, + applySym, + ltypeOfLaueGroup, + quatOfLaueGroup, +) +from hexrd.transforms import xfcapi +from hexrd import valunits +from hexrd.valunits import toFloat +from hexrd.constants import d2r, r2d, sqrt3by2, epsf, sqrt_epsf + +"""module vars""" + +# units +dUnit = 'angstrom' +outputDegrees = False +outputDegrees_bak = outputDegrees + + +def hklToStr(hkl: np.ndarray) -> str: + """ + Converts hkl representation to a string. + + Parameters + ---------- + hkl : np.ndarray + 3 element list of h, k, and l values (Miller indices). + + Returns + ------- + str + Space-separated string representation of h, k, and l values. + + """ + return re.sub(r'[\[\]\(\)\{\},]', '', str(hkl)) + + +def tempSetOutputDegrees(val: bool) -> None: + """ + Set the global outputDegrees flag temporarily. Can be reverted with + revertOutputDegrees(). + + Parameters + ---------- + val : bool + True to output angles in degrees, False to output angles in radians. + + Returns + ------- + None + + """ + global outputDegrees, outputDegrees_bak + outputDegrees_bak = outputDegrees + outputDegrees = val + + +def revertOutputDegrees() -> None: + """ + Revert the effect of tempSetOutputDegrees(), resetting the outputDegrees + flag to its previous value (True to output in degrees, False for radians). + + Returns + ------- + None + """ + global outputDegrees, outputDegrees_bak + outputDegrees = outputDegrees_bak + + +def cosineXform( + a: np.ndarray, b: np.ndarray, c: np.ndarray +) -> tuple[np.ndarray, np.ndarray]: + """ + Spherical trig transform to take alpha, beta, gamma to expressions + for cos(alpha*). See ref below. + + [1] R. J. Neustadt, F. W. Cagle, Jr., and J. Waser, ``Vector algebra and + the relations between direct and reciprocal lattice quantities''. Acta + Cryst. (1968), A24, 247--248 + + Parameters + ---------- + a : np.ndarray + List of alpha angle values (radians). + b : np.ndarray + List of beta angle values (radians). + c : np.ndarray + List of gamma angle values (radians). + + Returns + ------- + np.ndarray + List of cos(alpha*) values. + np.ndarray + List of sin(alpha*) values. + + """ + cosar = (np.cos(b) * np.cos(c) - np.cos(a)) / (np.sin(b) * np.sin(c)) + sinar = np.sqrt(1 - cosar**2) + return cosar, sinar + + +def processWavelength(arg: Union[valunits.valWUnit, float]) -> float: + """ + Convert an energy value to a wavelength. If argument has units of length + or energy, will convert to globally specified unit type for wavelength + (dUnit). If argument is a scalar, assumed input units are keV. 
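+
+    For example (illustrative, with the default dUnit of angstrom):
+
+        >>> processWavelength(80.0)   # scalar input assumed keV
+        0.15498...                    # ~12.398/80.0 angstrom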
+ """ + if isinstance(arg, valunits.valWUnit): + # arg is a valunits.valWUnit object + if arg.isLength(): + return arg.getVal(dUnit) + elif arg.isEnergy(): + e = arg.getVal('keV') + return valunits.valWUnit( + 'wavelength', 'length', constants.keVToAngstrom(e), 'angstrom' + ).getVal(dUnit) + else: + raise RuntimeError('do not know what to do with ' + str(arg)) + else: + # !!! assuming arg is in keV + return valunits.valWUnit( + 'wavelength', 'length', constants.keVToAngstrom(arg), 'angstrom' + ).getVal(dUnit) + +def latticeParameters(lvec): + """ + Generates direct and reciprocal lattice vector components in a + crystal-relative RHON basis, X. The convention for fixing X to the + lattice is such that a || x1 and c* || x3, where a and c* are + direct and reciprocal lattice vectors, respectively. + """ + lnorm = np.sqrt(np.sum(lvec**2, 0)) + + a = lnorm[0] + b = lnorm[1] + c = lnorm[2] + + ahat = lvec[:, 0] / a + bhat = lvec[:, 1] / b + chat = lvec[:, 2] / c + + gama = np.arccos(np.dot(ahat, bhat)) + beta = np.arccos(np.dot(ahat, chat)) + alfa = np.arccos(np.dot(bhat, chat)) + if outputDegrees: + gama = r2d * gama + beta = r2d * beta + alfa = r2d * alfa + + return [a, b, c, alfa, beta, gama] + +def latticePlanes( + hkls: np.ndarray, + lparms: np.ndarray, + ltype: Optional[str] = 'cubic', + wavelength: Optional[float] = 1.54059292, + strainMag: Optional[float] = None, +) -> Dict[str, np.ndarray]: + """ + Generates lattice plane data in the direct lattice for a given set + of Miller indices. Vector components are written in the + crystal-relative RHON basis, X. The convention for fixing X to the + lattice is such that a || x1 and c* || x3, where a and c* are + direct and reciprocal lattice vectors, respectively. + + USAGE: + + planeInfo = latticePlanes(hkls, lparms, **kwargs) + + INPUTS: + + 1) hkls (3 x n float ndarray) is the array of Miller indices for + the planes of interest. The vectors are assumed to be + concatenated along the 1-axis (horizontal). + + 2) lparms (1 x m float list) is the array of lattice parameters, + where m depends on the symmetry group (see below). + + The following optional arguments are recognized: + + 3) ltype=(string) is a string representing the symmetry type of + the implied Laue group. The 11 available choices are shown + below. The default value is 'cubic'. Note that each group + expects a lattice parameter array of the indicated length + and order. + + latticeType lparms + ----------- ------------ + 'cubic' a + 'hexagonal' a, c + 'trigonal' a, c + 'rhombohedral' a, alpha (in degrees) + 'tetragonal' a, c + 'orthorhombic' a, b, c + 'monoclinic' a, b, c, beta (in degrees) + 'triclinic' a, b, c, alpha, beta, gamma (in degrees) + + 4) wavelength= is a value represented the wavelength in + Angstroms to calculate bragg angles for. The default value + is for Cu K-alpha radiation (1.54059292 Angstrom) + + 5) strainMag=None + + OUTPUTS: + + 1) planeInfo is a dictionary containing the following keys/items: + + normals (3, n) double array array of the components to the + unit normals for each {hkl} in + X (horizontally concatenated) + + dspacings (n, ) double array array of the d-spacings for + each {hkl} + + tThetas (n, ) double array array of the Bragg angles for + each {hkl} relative to the + specified wavelength + + NOTES: + + *) This function is effectively a wrapper to 'latticeVectors'. + See 'help(latticeVectors)' for additional info. 
+
+    *) Lattice plane d-spacings are calculated from the reciprocal
+       lattice vectors specified by {hkl} as shown in Appendix 1 of
+       [1].
+
+    REFERENCES:
+
+    [1] B. D. Cullity, ``Elements of X-Ray Diffraction, 2
+        ed.''. Addison-Wesley Publishing Company, Inc., 1978. ISBN
+        0-201-01174-3
+
+    """
+    location = 'latticePlanes'
+
+    assert (
+        hkls.shape[0] == 3
+    ), f"hkls aren't column vectors in call to '{location}'!"
+
+    tag = ltype
+    wlen = wavelength
+
+    # get B
+    L = latticeVectors(lparms, tag)
+
+    # get G-vectors -- reciprocal vectors in crystal frame
+    G = np.dot(L['B'], hkls)
+
+    # magnitudes
+    d = 1 / np.sqrt(np.sum(G**2, 0))
+
+    aconv = 1.0
+    if outputDegrees:
+        aconv = r2d
+
+    # two thetas
+    sth = wlen / 2.0 / d
+    mask = np.abs(sth) < 1.0
+    tth = np.zeros(sth.shape)
+
+    tth[~mask] = np.nan
+    tth[mask] = aconv * 2.0 * np.arcsin(sth[mask])
+
+    p = dict(normals=unitVector(G), dspacings=d, tThetas=tth)
+
+    if strainMag is not None:
+        p['tThetasLo'] = np.zeros(sth.shape)
+        p['tThetasHi'] = np.zeros(sth.shape)
+
+        mask = (np.abs(wlen / 2.0 / (d * (1.0 + strainMag))) < 1.0) & (
+            np.abs(wlen / 2.0 / (d * (1.0 - strainMag))) < 1.0
+        )
+
+        p['tThetasLo'][~mask] = np.nan
+        p['tThetasHi'][~mask] = np.nan
+
+        p['tThetasLo'][mask] = (
+            aconv * 2 * np.arcsin(wlen / 2.0 / (d[mask] * (1.0 + strainMag)))
+        )
+        p['tThetasHi'][mask] = (
+            aconv * 2 * np.arcsin(wlen / 2.0 / (d[mask] * (1.0 - strainMag)))
+        )
+
+    return p
+
+
+def latticeVectors(
+    lparms: np.ndarray,
+    tag: Optional[str] = 'cubic',
+    radians: Optional[bool] = False,
+) -> Dict[str, Union[np.ndarray, float]]:
+    """
+    Generates direct and reciprocal lattice vector components in a
+    crystal-relative RHON basis, X. The convention for fixing X to the
+    lattice is such that a || x1 and c* || x3, where a and c* are
+    direct and reciprocal lattice vectors, respectively.
+
+    USAGE:
+
+    lattice = latticeVectors(lparms, tag)
+
+    INPUTS:
+
+    1) lparms (1 x n float list) is the array of lattice parameters,
+       where n depends on the symmetry group (see below).
+
+    2) tag (string) is a string representing the symmetry type of the
+       implied Laue group; the comparison is case-sensitive, so use the
+       lowercase tags shown below.  The default value is 'cubic'.  Note
+       that each group expects a lattice parameter array of the
+       indicated length and order.
+
+       latticeType      lparms
+       -----------      ------------
+       'cubic'          a
+       'hexagonal'      a, c
+       'trigonal'       a, c
+       'rhombohedral'   a, alpha (in degrees)
+       'tetragonal'     a, c
+       'orthorhombic'   a, b, c
+       'monoclinic'     a, b, c, beta (in degrees)
+       'triclinic'      a, b, c, alpha, beta, gamma (in degrees)
+
+    The following optional arguments are recognized:
+
+    3) radians= is a boolean flag indicating usage of radians rather
+       than degrees; defaults to False.
+
+    OUTPUTS:
+
+    1) lattice is a dictionary containing the following keys/items:
+
+       F       (3, 3) double array   transformation matrix taking
+                                     components in the direct
+                                     lattice (i.e. {uvw}) to the
+                                     reference, X
+
+       B       (3, 3) double array   transformation matrix taking
+                                     components in the reciprocal
+                                     lattice (i.e. {hkl}) to X
+
+       BR      (3, 3) double array   transformation matrix taking
+                                     components in the reciprocal
+                                     lattice to the Fable reference
+                                     frame (see notes)
+
+       U0      (3, 3) double array   transformation matrix
+                                     (orthogonal) taking
+                                     components in the
+                                     Fable reference frame to X
+
+       vol     double                the unit cell volume
+
+       dparms  (6, ) double list     the direct lattice parameters:
+                                     [a b c alpha beta gamma]
+
+       rparms  (6, ) double list     the reciprocal lattice
+                                     parameters:
+                                     [a* b* c* alpha* beta* gamma*]
+
+    NOTES:
+
+    *) The conventions used for assigning a RHON basis,
+       X -> {x1, x2, x3}, to each point group are consistent with
+       those published in Appendix B of [1]. Namely: a || x1 and
+       c* || x3. This differs from the convention chosen by the Fable
+       group, where a* || x1 and c || x3 [2].
+
+    *) The unit cell angles are defined as follows:
+       alpha=acos(b'*c/|b||c|), beta=acos(c'*a/|c||a|), and
+       gamma=acos(a'*b/|a||b|).
+
+    *) The reciprocal lattice vectors are calculated using the
+       crystallographic convention, where the prefactor of 2*pi is
+       omitted. In this convention, the reciprocal lattice volume is
+       1/V.
+
+    *) Several relations from [3] were employed in the component
+       calculations.
+
+    REFERENCES:
+
+    [1] J. F. Nye, ``Physical Properties of Crystals: Their
+        Representation by Tensors and Matrices''. Oxford University
+        Press, 1985. ISBN 0198511655
+
+    [2] E. M. Lauridsen, S. Schmidt, R. M. Suter, and H. F. Poulsen,
+        ``Tracking: a method for structural characterization of grains
+        in powders or polycrystals''. J. Appl. Cryst. (2001). 34,
+        744--750
+
+    [3] R. J. Neustadt, F. W. Cagle, Jr., and J. Waser, ``Vector
+        algebra and the relations between direct and reciprocal
+        lattice quantities''. Acta Cryst. (1968), A24, 247--248
+
+    """
+
+    # build index for sorting out lattice parameters
+    lattStrings = [
+        'cubic',
+        'hexagonal',
+        'trigonal',
+        'rhombohedral',
+        'tetragonal',
+        'orthorhombic',
+        'monoclinic',
+        'triclinic',
+    ]
+
+    if radians:
+        aconv = 1.0
+    else:
+        aconv = pi / 180.0  # degToRad
+    deg90 = pi / 2.0
+    deg120 = 2.0 * pi / 3.0
+
+    if tag == lattStrings[0]:
+        # cubic
+        cellparms = np.r_[np.tile(lparms[0], (3,)), deg90 * np.ones((3,))]
+    elif tag == lattStrings[1] or tag == lattStrings[2]:
+        # hexagonal | trigonal (hex indices)
+        cellparms = np.r_[
+            lparms[0], lparms[0], lparms[1], deg90, deg90, deg120
+        ]
+    elif tag == lattStrings[3]:
+        # rhombohedral
+        cellparms = np.r_[
+            np.tile(lparms[0], (3,)), np.tile(aconv * lparms[1], (3,))
+        ]
+    elif tag == lattStrings[4]:
+        # tetragonal
+        cellparms = np.r_[lparms[0], lparms[0], lparms[1], deg90, deg90, deg90]
+    elif tag == lattStrings[5]:
+        # orthorhombic
+        cellparms = np.r_[lparms[0], lparms[1], lparms[2], deg90, deg90, deg90]
+    elif tag == lattStrings[6]:
+        # monoclinic
+        cellparms = np.r_[
+            lparms[0], lparms[1], lparms[2], deg90, aconv * lparms[3], deg90
+        ]
+    elif tag == lattStrings[7]:
+        # triclinic
+        cellparms = np.r_[
+            lparms[0],
+            lparms[1],
+            lparms[2],
+            aconv * lparms[3],
+            aconv * lparms[4],
+            aconv * lparms[5],
+        ]
+    else:
+        raise RuntimeError(f'lattice tag "{tag}" is not recognized')
+
+    alpha, beta, gamma = cellparms[3:6]
+    cosalfar, sinalfar = cosineXform(alpha, beta, gamma)
+
+    a = cellparms[0] * np.r_[1, 0, 0]
+    b = cellparms[1] * np.r_[np.cos(gamma), np.sin(gamma), 0]
+    c = (
+        cellparms[2]
+        * np.r_[
+            np.cos(beta), -cosalfar * np.sin(beta), sinalfar * np.sin(beta)
+        ]
+    )
+
+    ad = np.sqrt(np.sum(a**2))
+    bd = np.sqrt(np.sum(b**2))
+    cd = np.sqrt(np.sum(c**2))
+
+    # Cell volume
+    V = np.dot(a,
np.cross(b, c)) + + # F takes components in the direct lattice to X + F = np.c_[a, b, c] + + # Reciprocal lattice vectors + astar = np.cross(b, c) / V + bstar = np.cross(c, a) / V + cstar = np.cross(a, b) / V + + # and parameters + ar = np.sqrt(np.sum(astar**2)) + br = np.sqrt(np.sum(bstar**2)) + cr = np.sqrt(np.sum(cstar**2)) + + alfar = np.arccos(np.dot(bstar, cstar) / br / cr) + betar = np.arccos(np.dot(cstar, astar) / cr / ar) + gamar = np.arccos(np.dot(astar, bstar) / ar / br) + + # B takes components in the reciprocal lattice to X + B = np.c_[astar, bstar, cstar] + + cosalfar2, sinalfar2 = cosineXform(alfar, betar, gamar) + + afable = ar * np.r_[1, 0, 0] + bfable = br * np.r_[np.cos(gamar), np.sin(gamar), 0] + cfable = ( + cr + * np.r_[ + np.cos(betar), + -cosalfar2 * np.sin(betar), + sinalfar2 * np.sin(betar), + ] + ) + + BR = np.c_[afable, bfable, cfable] + U0 = np.dot(B, np.linalg.inv(BR)) + if outputDegrees: + dparms = np.r_[ad, bd, cd, r2d * np.r_[alpha, beta, gamma]] + rparms = np.r_[ar, br, cr, r2d * np.r_[alfar, betar, gamar]] + else: + dparms = np.r_[ad, bd, cd, np.r_[alpha, beta, gamma]] + rparms = np.r_[ar, br, cr, np.r_[alfar, betar, gamar]] + + return { + 'F': F, + 'B': B, + 'BR': BR, + 'U0': U0, + 'vol': V, + 'dparms': dparms, + 'rparms': rparms, + } + +def hexagonalIndicesFromRhombohedral(hkl): + """ + converts rhombohedral hkl to hexagonal indices + """ + HKL = np.zeros((3, hkl.shape[1]), dtype='int') + + HKL[0, :] = hkl[0, :] - hkl[1, :] + HKL[1, :] = hkl[1, :] - hkl[2, :] + HKL[2, :] = hkl[0, :] + hkl[1, :] + hkl[2, :] + + return HKL + + +def rhombohedralIndicesFromHexagonal(HKL): + """ + converts hexagonal hkl to rhombohedral indices + """ + hkl = np.zeros((3, HKL.shape[1]), dtype='int') + + hkl[0, :] = 2 * HKL[0, :] + HKL[1, :] + HKL[2, :] + hkl[1, :] = -HKL[0, :] + HKL[1, :] + HKL[2, :] + hkl[2, :] = -HKL[0, :] - 2 * HKL[1, :] + HKL[2, :] + + hkl = hkl / 3.0 + return hkl + + +def rhombohedralParametersFromHexagonal(a_h, c_h): + """ + converts hexagonal lattice parameters (a, c) to rhombohedral + lattice parameters (a, alpha) + """ + a_r = np.sqrt(3 * a_h**2 + c_h**2) / 3.0 + alfa_r = 2 * np.arcsin(3.0 / (2 * np.sqrt(3 + (c_h / a_h) ** 2))) + if outputDegrees: + alfa_r = r2d * alfa_r + return a_r, alfa_r + + +def convert_Miller_direction_to_cartesian(uvw, a=1.0, c=1.0, normalize=False): + """ + Converts 3-index hexagonal Miller direction indices to components in the + crystal reference frame. + Parameters + ---------- + uvw : array_like + The (n, 3) array of 3-index hexagonal indices to convert. + a : scalar, optional + The `a` lattice parameter. The default value is 1. + c : scalar, optional + The `c` lattice parameter. The default value is 1. + normalize : bool, optional + Flag for whether or not to normalize output vectors + Returns + ------- + numpy.ndarray + The (n, 3) array of cartesian components associated with the input + direction indices. + Notes + ----- + 1) The [uv.w] the Miller-Bravais convention is in the hexagonal basis + {a1, a2, a3, c}. The basis for the output, {o1, o2, o3}, is + chosen such that + o1 || a1 + o3 || c + o2 = o3 ^ o1 + """ + u, v, w = np.atleast_2d(uvw).T + retval = np.vstack([1.5 * u * a, sqrt3by2 * a * (2 * v + u), w * c]) + if normalize: + return unitVector(retval).T + else: + return retval.T + + +def convert_Miller_direction_to_MillerBravias(uvw, suppress_redundant=True): + """ + Converts 3-index hexagonal Miller direction indices to 4-index + Miller-Bravais direction indices. 
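+    The map implemented below is U = (2u - v)/3, V = (2v - u)/3, W = w,
+    followed by a reduction of common factors (as attempted by the code
+    below); when the redundant index is kept, it satisfies the standard
+    Miller-Bravais constraint T = -(U + V).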
+ Parameters + ---------- + uvw : array_like + The (n, 3) array of 3-index hexagonal Miller indices to convert. + suppress_redundant : bool, optional + Flag to suppress the redundant 3rd index. The default is True. + Returns + ------- + numpy.ndarray + The (n, 3) or (n, 4) array -- depending on kwarg -- of Miller-Bravis + components associated with the input Miller direction indices. + Notes + ----- + * NOT for plane normals!!! + """ + u, v, w = np.atleast_2d(uvw).T + retval = np.vstack([(2 * u - v) / 3, (2 * v - u) / 3, w]).T + rem = np.vstack([np.mod(np.tile(i[0], 2), i[1:]) for i in retval]) + rem[abs(rem) < epsf] = np.nan + lcm = np.nanmin(rem, axis=1) + lcm[np.isnan(lcm)] = 1 + retval = retval / np.tile(lcm, (3, 1)).T + if suppress_redundant: + return retval + else: + t = np.atleast_2d(1 - np.sum(retval[:2], axis=1)).T + return np.hstack([retval[:, :2], t, np.atleast_2d(retval[:, 2]).T]) + + +def convert_MillerBravias_direction_to_Miller(UVW): + """ + Converts 4-index hexagonal Miller-Bravais direction indices to + 3-index Miller direction indices. + Parameters + ---------- + UVW : array_like + The (n, 3) array of **non-redundant** Miller-Bravais direction indices + to convert. + Returns + ------- + numpy.ndarray + The (n, 3) array of Miller direction indices associated with the + input Miller-Bravais indices. + Notes + ----- + * NOT for plane normals!!! + """ + U, V, W = np.atleast_2d(UVW).T + return np.vstack([2 * U + V, 2 * V + U, W]) + + +class PlaneData(object): + """ + Careful with ordering: Outputs are ordered by the 2-theta for the + hkl unless you get self._hkls directly, and this order can change + with changes in lattice parameters (lparms); setting and getting + exclusions works on the current hkl ordering, not the original + ordering (in self._hkls), but exclusions are stored in the + original ordering in case the hkl ordering does change with + lattice parameters + + if not None, tThWidth takes priority over strainMag in setting + two-theta ranges; changing strainMag automatically turns off + tThWidth + """ + + def __init__(self, hkls: Optional[np.ndarray], *args, **kwargs) -> None: + """ + Constructor for PlaneData + + Parameters + ---------- + hkls : np.ndarray + Miller indices to be used in the plane data. Can be None if + args is another PlaneData object + + *args + Unnamed arguments. Could be in the format of `lparms, laueGroup, + wavelength, strainMag`, or just a `PlaneData` object. + + **kwargs + Valid keyword arguments include: + - doTThSort + - exclusions + - tThMax + - tThWidth + """ + self._doTThSort = True + self._exclusions = None + self._tThMax = None + + if len(args) == 4: + lparms, laueGroup, wavelength, strainMag = args + tThWidth = None + self._wavelength = processWavelength(wavelength) + self._lparms = self._parseLParms(lparms) + elif len(args) == 1 and isinstance(args[0], PlaneData): + other = args[0] + lparms, laueGroup, wavelength, strainMag, tThWidth = ( + other.getParams() + ) + self._wavelength = wavelength + self._lparms = lparms + self._doTThSort = other._doTThSort + self._exclusions = other._exclusions + self._tThMax = other._tThMax + if hkls is None: + hkls = other._hkls + else: + raise NotImplementedError(f'args : {args}') + + self._laueGroup = laueGroup + self._hkls = copy.deepcopy(hkls) + self._strainMag = strainMag + self._structFact = np.ones(self._hkls.shape[1]) + self.tThWidth = tThWidth + + # ... 
need to implement tThMin too + if 'doTThSort' in kwargs: + self._doTThSort = kwargs.pop('doTThSort') + if 'exclusions' in kwargs: + self._exclusions = kwargs.pop('exclusions') + if 'tThMax' in kwargs: + self._tThMax = toFloat(kwargs.pop('tThMax'), 'radians') + if 'tThWidth' in kwargs: + self.tThWidth = kwargs.pop('tThWidth') + if len(kwargs) > 0: + raise RuntimeError( + f'have unparsed keyword arguments with keys: {kwargs.keys()}' + ) + + # This is only used to calculate the structure factor if invalidated + self._unitcell: unitcell = None + + self._calc() + + def _calc(self): + symmGroup = ltypeOfLaueGroup(self._laueGroup) + self._q_sym = quatOfLaueGroup(self._laueGroup) + _, latVecOps, hklDataList = PlaneData.makePlaneData( + self._hkls, + self._lparms, + self._q_sym, + symmGroup, + self._strainMag, + self.wavelength, + ) + 'sort by tTheta' + tThs = np.array( + [hklDataList[iHKL]['tTheta'] for iHKL in range(len(hklDataList))] + ) + if self._doTThSort: + # sorted hkl -> _hkl + # _hkl -> sorted hkl + self.tThSort = np.argsort(tThs) + self.tThSortInv = np.empty(len(hklDataList), dtype=int) + self.tThSortInv[self.tThSort] = np.arange(len(hklDataList)) + self.hklDataList = [hklDataList[iHKL] for iHKL in self.tThSort] + else: + self.tThSort = np.arange(len(hklDataList)) + self.tThSortInv = np.arange(len(hklDataList)) + self.hklDataList = hklDataList + self._latVecOps = latVecOps + self.nHKLs = len(self.getHKLs()) + + def __str__(self): + s = '========== plane data ==========\n' + s += 'lattice parameters:\n ' + str(self.lparms) + '\n' + s += f'two theta width: ({str(self.tThWidth)})\n' + s += f'strain magnitude: ({str(self.strainMag)})\n' + s += f'beam energy ({str(self.wavelength)})\n' + s += 'hkls: (%d)\n' % self.nHKLs + s += str(self.getHKLs()) + return s + + def getParams(self): + """ + Getter for the parameters of the plane data. + + Returns + ------- + tuple + The parameters of the plane data. In the order of + _lparams, _laueGroup, _wavelength, _strainMag, tThWidth + + """ + return ( + self._lparms, + self._laueGroup, + self._wavelength, + self._strainMag, + self.tThWidth, + ) + + def getNhklRef(self) -> int: + """ + Get the total number of hkl's in the plane data, not ignoring + ones that are excluded in exclusions. + + Returns + ------- + int + The total number of hkl's in the plane data. + """ + return len(self.hklDataList) + + @property + def hkls(self) -> np.ndarray: + """ + hStacked Hkls of the plane data (Miller indices). + """ + return self.getHKLs().T + + @hkls.setter + def hkls(self, hkls): + raise NotImplementedError('for now, not allowing hkls to be reset') + + @property + def tThMax(self) -> Optional[float]: + """ + Maximum 2-theta value of the plane data. + + float or None + """ + return self._tThMax + + @tThMax.setter + def tThMax(self, t_th_max: Union[float, valunits.valWUnit]) -> None: + self._tThMax = toFloat(t_th_max, 'radians') + + @property + def exclusions(self) -> np.ndarray: + """ + Excluded HKL's the plane data. + + Set as type np.ndarray, as a mask of length getNhklRef(), a list of + indices to be excluded, or a list of ranges of indices. + + Read as a mask of length getNhklRef(). 
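+
+        A short sketch of the accepted setter forms (for a PlaneData
+        instance `pd`; the index values are hypothetical):
+
+            pd.exclusions = np.zeros(pd.getNhklRef(), dtype=bool)  # mask
+            pd.exclusions = [1, 4]     # individual indices to exclude
+            pd.exclusions = [[0, 3]]   # ranges of indices, hi exclusive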
+ """ + retval = np.zeros(self.getNhklRef(), dtype=bool) + if self._exclusions is not None: + # report in current hkl ordering + retval[:] = self._exclusions[self.tThSortInv] + if self._tThMax is not None: + for iHKLr, hklData in enumerate(self.hklDataList): + if hklData['tTheta'] > self._tThMax: + retval[iHKLr] = True + return retval + + @exclusions.setter + def exclusions(self, new_exclusions: Optional[np.ndarray]) -> None: + excl = np.zeros(len(self.hklDataList), dtype=bool) + if new_exclusions is not None: + exclusions = np.atleast_1d(new_exclusions) + if len(exclusions) == len(self.hklDataList): + assert ( + exclusions.dtype == 'bool' + ), 'Exclusions should be bool if full length' + # convert from current hkl ordering to _hkl ordering + excl[:] = exclusions[self.tThSort] + else: + if len(exclusions.shape) == 1: + # treat exclusions as indices + excl[self.tThSort[exclusions]] = True + elif len(exclusions.shape) == 2: + # treat exclusions as ranges of indices + for r in exclusions: + excl[self.tThSort[r[0]:r[1]]] = True + else: + raise RuntimeError( + f'Unclear behavior for shape {exclusions.shape}' + ) + self._exclusions = excl + self.nHKLs = np.sum(np.logical_not(self._exclusions)) + + def exclude( + self, + dmin: Optional[float] = None, + dmax: Optional[float] = None, + tthmin: Optional[float] = None, + tthmax: Optional[float] = None, + sfacmin: Optional[float] = None, + sfacmax: Optional[float] = None, + pintmin: Optional[float] = None, + pintmax: Optional[float] = None, + ) -> None: + """ + Set exclusions according to various parameters + + Any hkl with a value below any min or above any max will be excluded. So + to be included, an hkl needs to have values between the min and max + for all of the conditions given. + + Note that method resets the tThMax attribute to None. 
+ + PARAMETERS + ---------- + dmin: float > 0 + minimum lattice spacing (angstroms) + dmax: float > 0 + maximum lattice spacing (angstroms) + tthmin: float > 0 + minimum two theta (radians) + tthmax: float > 0 + maximum two theta (radians) + sfacmin: float > 0 + minimum structure factor as a proportion of maximum + sfacmax: float > 0 + maximum structure factor as a proportion of maximum + pintmin: float > 0 + minimum powder intensity as a proportion of maximum + pintmax: float > 0 + maximum powder intensity as a proportion of maximum + """ + excl = np.zeros(self.getNhklRef(), dtype=bool) + self.exclusions = None + self.tThMax = None + + if (dmin is not None) or (dmax is not None): + d = np.array(self.getPlaneSpacings()) + if dmin is not None: + excl[d < dmin] = True + if dmax is not None: + excl[d > dmax] = True + + if (tthmin is not None) or (tthmax is not None): + tth = self.getTTh() + if tthmin is not None: + excl[tth < tthmin] = True + if tthmax is not None: + excl[tth > tthmax] = True + + if (sfacmin is not None) or (sfacmax is not None): + sfac = self.structFact + sfac = sfac / sfac.max() + if sfacmin is not None: + excl[sfac < sfacmin] = True + if sfacmax is not None: + excl[sfac > sfacmax] = True + + if (pintmin is not None) or (pintmax is not None): + pint = self.powder_intensity + pint = pint / pint.max() + if pintmin is not None: + excl[pint < pintmin] = True + if pintmax is not None: + excl[pint > pintmax] = True + + self.exclusions = excl + + def _parseLParms( + self, lparms: List[Union[valunits.valWUnit, float]] + ) -> List[float]: + lparmsDUnit = [] + for lparmThis in lparms: + if isinstance(lparmThis, valunits.valWUnit): + if lparmThis.isLength(): + lparmsDUnit.append(lparmThis.getVal(dUnit)) + elif lparmThis.isAngle(): + # plumbing set up to default to degrees + # for lattice parameters + lparmsDUnit.append(lparmThis.getVal('degrees')) + else: + raise RuntimeError( + f'Do not know what to do with {lparmThis}' + ) + else: + lparmsDUnit.append(lparmThis) + return lparmsDUnit + + @property + def lparms(self) -> List[float]: + """ + Lattice parameters of the plane data. + + Can be set as a List[float | valWUnit], but will be converted to + List[float]. + """ + return self._lparms + + @lparms.setter + def lparms(self, lparms: List[Union[valunits.valWUnit, float]]) -> None: + self._lparms = self._parseLParms(lparms) + self._calc() + + @property + def strainMag(self) -> Optional[float]: + """ + Strain magnitude of the plane data. + + float or None + """ + return self._strainMag + + @strainMag.setter + def strainMag(self, strain_mag: float) -> None: + self._strainMag = strain_mag + self.tThWidth = None + self._calc() + + @property + def wavelength(self) -> float: + """ + Wavelength of the plane data. + + Set as float or valWUnit. + + Read as float + """ + return self._wavelength + + @wavelength.setter + def wavelength(self, wavelength: Union[float, valunits.valWUnit]) -> None: + wavelength = processWavelength(wavelength) + # Do not re-compute if it is almost the same + if np.isclose(self._wavelength, wavelength): + return + + self._wavelength = wavelength + self._calc() + + def invalidate_structure_factor(self, ucell: unitcell) -> None: + """ + It can be expensive to compute the structure factor + This method just invalidates it, providing a unit cell, + so that it can be lazily computed from the unit cell. 
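+
+        A typical call (assuming `mat` is a hexrd Material instance whose
+        atomic data just changed) would be
+        pd.invalidate_structure_factor(mat.unitcell); the structure factor
+        is then recomputed lazily on the next access of structFact.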
+ + Parameters: + ----------- + unitcell : unitcell + The unit cell to be used to compute the structure factor + """ + self._structFact = None + self._hedm_intensity = None + self._powder_intensity = None + self._unitcell = ucell + + def _compute_sf_if_needed(self): + any_invalid = ( + self._structFact is None + or self._hedm_intensity is None + or self._powder_intensity is None + ) + if any_invalid and self._unitcell is not None: + # Compute the structure factor first. + # This can be expensive to do, so we lazily compute it when needed. + hkls = self.getHKLs(allHKLs=True) + self.structFact = self._unitcell.CalcXRSF(hkls) + + @property + def structFact(self) -> np.ndarray: + """ + Structure factors for each hkl. + + np.ndarray + """ + self._compute_sf_if_needed() + return self._structFact[~self.exclusions] + + @structFact.setter + def structFact(self, structFact: np.ndarray) -> None: + self._structFact = structFact + multiplicity = self.getMultiplicity(allHKLs=True) + tth = self.getTTh(allHKLs=True) + + hedm_intensity = ( + structFact * lorentz_factor(tth) * polarization_factor(tth) + ) + + powderI = hedm_intensity * multiplicity + + # Now scale them + hedm_intensity = 100.0 * hedm_intensity / np.nanmax(hedm_intensity) + powderI = 100.0 * powderI / np.nanmax(powderI) + + self._hedm_intensity = hedm_intensity + self._powder_intensity = powderI + + @property + def powder_intensity(self) -> np.ndarray: + """ + Powder intensity for each hkl. + """ + self._compute_sf_if_needed() + return self._powder_intensity[~self.exclusions] + + @property + def hedm_intensity(self) -> np.ndarray: + """ + HEDM (high energy x-ray diffraction microscopy) intensity for each hkl. + """ + self._compute_sf_if_needed() + return self._hedm_intensity[~self.exclusions] + + @staticmethod + def makePlaneData( + hkls: np.ndarray, + lparms: np.ndarray, + qsym: np.ndarray, + symmGroup, + strainMag, + wavelength, + ) -> Tuple[ + Dict[str, np.ndarray], Dict[str, Union[np.ndarray, float]], List[Dict] + ]: + """ + Generate lattice plane data from inputs. + + Parameters: + ----------- + hkls: np.ndarray + Miller indices, as in crystallography.latticePlanes + lparms: np.ndarray + Lattice parameters, as in crystallography.latticePlanes + qsym: np.ndarray + (4, n) containing quaternions of symmetry + symmGroup: str + Tag for the symmetry (Laue) group of the lattice. Can generate from + ltypeOfLaueGroup + strainMag: float + Swag of strain magnitudes + wavelength: float + Wavelength + + Returns: + ------- + dict: + Dictionary containing lattice plane data + dict: + Dictionary containing lattice vector operators + list: + List of dictionaries, each containing the data for one hkl + """ + + tempSetOutputDegrees(False) + latPlaneData = latticePlanes( + hkls, + lparms, + ltype=symmGroup, + strainMag=strainMag, + wavelength=wavelength, + ) + + latVecOps = latticeVectors(lparms, symmGroup) + + hklDataList = [] + for iHKL in range(len(hkls.T)): + # need transpose because of convention for hkls ordering + + """ + latVec = latPlaneData['normals'][:,iHKL] + # ... 
if not spots, may be able to work with a subset of these + latPlnNrmlList = applySym( + np.c_[latVec], qsym, csFlag=True, cullPM=False + ) + """ + # returns UN-NORMALIZED lattice plane normals + latPlnNrmls = applySym( + np.dot(latVecOps['B'], hkls[:, iHKL].reshape(3, 1)), + qsym, + csFlag=True, + cullPM=False, + ) + + # check for +/- in symmetry group + latPlnNrmlsM = applySym( + np.dot(latVecOps['B'], hkls[:, iHKL].reshape(3, 1)), + qsym, + csFlag=False, + cullPM=False, + ) + + csRefl = latPlnNrmls.shape[1] == latPlnNrmlsM.shape[1] + + # added this so that I retain the actual symmetric + # integer hkls as well + symHKLs = np.array( + np.round(np.dot(latVecOps['F'].T, latPlnNrmls)), dtype='int' + ) + + hklDataList.append( + dict( + hklID=iHKL, + hkl=hkls[:, iHKL], + tTheta=latPlaneData['tThetas'][iHKL], + dSpacings=latPlaneData['dspacings'][iHKL], + tThetaLo=latPlaneData['tThetasLo'][iHKL], + tThetaHi=latPlaneData['tThetasHi'][iHKL], + latPlnNrmls=unitVector(latPlnNrmls), + symHKLs=symHKLs, + centrosym=csRefl, + ) + ) + + revertOutputDegrees() + return latPlaneData, latVecOps, hklDataList + + @property + def laueGroup(self) -> str: + """ + This is the Schoenflies tag, describing symmetry group of the lattice. + Note that setting this with incompatible lattice parameters will + cause an error. If changing both, use set_laue_and_lparms. + + str + """ + return self._laueGroup + + @laueGroup.setter + def laueGroup(self, laueGroup: str) -> None: + self._laueGroup = laueGroup + self._calc() + + def set_laue_and_lparms( + self, laueGroup: str, lparms: List[Union[valunits.valWUnit, float]] + ) -> None: + """ + Set the Laue group and lattice parameters simultaneously + + When the Laue group changes, the lattice parameters may be + incompatible, and cause an error in self._calc(). This function + allows us to update both the Laue group and lattice parameters + simultaneously to avoid this issue. + + Parameters: + ----------- + laueGroup : str + The symmetry (Laue) group to be set + lparms : List[valunits.valWUnit | float] + Lattice parameters to be set + """ + self._laueGroup = laueGroup + self._lparms = self._parseLParms(lparms) + self._calc() + + @property + def q_sym(self) -> np.ndarray: + """ + Quaternions of symmetry for each hkl, generated from the Laue group + + np.ndarray((4, n)) + """ + return self._q_sym # rotations.quatOfLaueGroup(self._laueGroup) + + def getPlaneSpacings(self) -> List[float]: + """ + Plane spacings for each hkl. 
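+
+        Each spacing follows the reciprocal-lattice relation used in
+        latticePlanes above: d = 1 / ||B @ hkl||.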
+
+        Returns:
+        -------
+        List[float]
+            List of plane spacings for each hkl
+        """
+        dspacings = []
+        for iHKLr, hklData in enumerate(self.hklDataList):
+            if not self._thisHKL(iHKLr):
+                continue
+            dspacings.append(hklData['dSpacings'])
+        return dspacings
+
+    @property
+    def latVecOps(self) -> Dict[str, Union[np.ndarray, float]]:
+        """
+        gets lattice vector operators as a new (deepcopy)
+
+        Returns:
+        -------
+        Dict[str, np.ndarray | float]
+            Dictionary containing lattice vector operators
+        """
+        return copy.deepcopy(self._latVecOps)
+
+    def _thisHKL(self, iHKLr: int) -> bool:
+        hklData = self.hklDataList[iHKLr]
+        if self._exclusions is not None:
+            if self._exclusions[self.tThSortInv[iHKLr]]:
+                return False
+        if self._tThMax is not None:
+            if hklData['tTheta'] > self._tThMax or np.isnan(hklData['tTheta']):
+                return False
+        return True
+
+    def _getTThRange(self, iHKLr: int) -> Tuple[float, float]:
+        hklData = self.hklDataList[iHKLr]
+        if self.tThWidth is not None:  # tThHi-tThLo < self.tThWidth
+            tTh = hklData['tTheta']
+            tThHi = tTh + self.tThWidth * 0.5
+            tThLo = tTh - self.tThWidth * 0.5
+        else:
+            tThHi = hklData['tThetaHi']
+            tThLo = hklData['tThetaLo']
+        return (tThLo, tThHi)
+
+    def getTThRanges(self, strainMag: Optional[float] = None) -> np.ndarray:
+        """
+        Get the 2-theta ranges for included hkls
+
+        Parameters:
+        -----------
+        strainMag : Optional[float]
+            Optional swag of strain magnitude
+
+        Returns:
+        -------
+        np.ndarray:
+            hstacked array of hstacked tThLo and tThHi for each hkl (n x 2)
+        """
+        tThRanges = []
+        for iHKLr, hklData in enumerate(self.hklDataList):
+            if not self._thisHKL(iHKLr):
+                continue
+            if strainMag is None:
+                tThRanges.append(self._getTThRange(iHKLr))
+            else:
+                hklData = self.hklDataList[iHKLr]
+                d = hklData['dSpacings']
+                tThLo = 2.0 * np.arcsin(
+                    self._wavelength / 2.0 / (d * (1.0 + strainMag))
+                )
+                tThHi = 2.0 * np.arcsin(
+                    self._wavelength / 2.0 / (d * (1.0 - strainMag))
+                )
+                tThRanges.append((tThLo, tThHi))
+        return np.array(tThRanges)
+
+    def getMergedRanges(
+        self, cullDupl: Optional[bool] = False
+    ) -> Tuple[List[List[int]], List[List[float]]]:
+        """
+        Return indices and ranges for specified planeData, merging where
+        there is overlap based on the tThWidth and line positions
+
+        Parameters:
+        -----------
+        cullDupl : (optional) bool
+            If True, cull duplicate 2-theta values (within sqrt_epsf).
+            Defaults to False.
+
+        Returns:
+        --------
+        List[List[int]]
+            List of indices for each merged range
+
+        List[List[float]]
+            List of merged ranges, (n x 2)
+        """
+        tThs = self.getTTh()
+        tThRanges = self.getTThRanges()
+
+        # if you end exclusions in a doublet (or multiple close rings),
+        # then this will 'fail'.  May need to revisit...
+        nonoverlapNexts = np.hstack(
+            (tThRanges[:-1, 1] < tThRanges[1:, 0], True)
+        )
+        iHKLLists = []
+        mergedRanges = []
+        hklsCur = []
+        tThLoIdx = 0
+        tThHiCur = 0.0
+        for iHKL, nonoverlapNext in enumerate(nonoverlapNexts):
+            tThHi = tThRanges[iHKL, -1]
+            if not nonoverlapNext:
+                if cullDupl and abs(tThs[iHKL] - tThs[iHKL + 1]) < sqrt_epsf:
+                    continue
+                else:
+                    hklsCur.append(iHKL)
+                    tThHiCur = tThHi
+            else:
+                hklsCur.append(iHKL)
+                tThHiCur = tThHi
+                iHKLLists.append(hklsCur)
+                mergedRanges.append([tThRanges[tThLoIdx, 0], tThHiCur])
+                tThLoIdx = iHKL + 1
+                hklsCur = []
+        return iHKLLists, mergedRanges
+
+    def getTTh(self, allHKLs: Optional[bool] = False) -> np.ndarray:
+        """
+        Get the 2-theta values for each hkl.
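+
+        The values follow from the Bragg condition,
+        tTheta = 2 * arcsin(wavelength / (2 * d)).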
+
+        Parameters:
+        -----------
+        allHKLs : (optional) bool
+            If True, return all 2-theta values, even if they are excluded in
+            the current planeData. Default is False.
+
+        Returns:
+        -------
+        np.ndarray
+            Array of 2-theta values for each hkl
+        """
+        tTh = []
+        for iHKLr, hklData in enumerate(self.hklDataList):
+            if not allHKLs and not self._thisHKL(iHKLr):
+                continue
+            tTh.append(hklData['tTheta'])
+        return np.array(tTh)
+
+    def getMultiplicity(self, allHKLs: Optional[bool] = False) -> np.ndarray:
+        """
+        Get the multiplicity for each hkl (number of symHKLs).
+
+        Parameters:
+        ----------
+        allHKLs : (optional) bool
+            If True, return all multiplicities, even if they are excluded in
+            the current planeData. Defaults to False.
+
+        Returns
+        -------
+        np.ndarray
+            Array of multiplicities for each hkl
+        """
+        # ... JVB: is this incorrect?
+        multip = []
+        for iHKLr, hklData in enumerate(self.hklDataList):
+            if allHKLs or self._thisHKL(iHKLr):
+                multip.append(hklData['symHKLs'].shape[1])
+        return np.array(multip)
+
+    def getHKLID(
+        self,
+        hkl: Union[int, Tuple[int, int, int], np.ndarray],
+        master: Optional[bool] = False,
+    ) -> Union[List[int], int]:
+        """
+        Return the unique ID of a list of hkls.
+
+        Parameters
+        ----------
+        hkl : int | tuple | list | numpy.ndarray
+            The input hkl. If an int, or a list of ints, it just passes
+            through (FIXME).
+            If a tuple, treated as a single (h, k, l).
+            If a list of lists/tuples, each is treated as an (h, k, l).
+            If a numpy.ndarray, it is assumed to have shape (3, N) with the
+            N (h, k, l) vectors stacked column-wise.
+
+        master : bool, optional
+            If True, return the master hklID, else return the index from the
+            external (sorted and reduced) list.
+
+        Returns
+        -------
+        hkl_ids : list
+            The list of requested hklID values associated with the input.
+
+        Notes
+        -----
+        TODO: revisit this weird API???
+
+        Changes:
+        -------
+        2020-05-21 (JVB) -- modified to handle all symmetric equivalent
+        reprs.
+        """
+        if hasattr(hkl, '__setitem__'):  # tuple does not have __setitem__
+            if isinstance(hkl, np.ndarray):
+                # if is ndarray, assume is 3xN
+                return [self._getHKLID(x, master=master) for x in hkl.T]
+            else:
+                return [self._getHKLID(x, master=master) for x in hkl]
+        else:
+            return self._getHKLID(hkl, master=master)
+
+    def _getHKLID(
+        self,
+        hkl: Union[int, Tuple[int, int, int], np.ndarray],
+        master: Optional[bool] = False,
+    ) -> int:
+        """
+        for hkl that is a tuple, return externally visible hkl index
+        """
+        if isinstance(hkl, int):
+            return hkl
+        else:
+            hklList = self.getSymHKLs()  # !!! list, reduced by exclusions
+            intl_hklIDs = np.asarray([i['hklID'] for i in self.hklDataList])
+            intl_hklIDs_sorted = intl_hklIDs[~self.exclusions[self.tThSortInv]]
+            dHKLInv = {}
+            for iHKL, symHKLs in enumerate(hklList):
+                idx = intl_hklIDs_sorted[iHKL] if master else iHKL
+                for thisHKL in symHKLs.T:
+                    dHKLInv[tuple(thisHKL)] = idx
+            try:
+                return dHKLInv[tuple(hkl)]
+            except KeyError:
+                raise RuntimeError(
+                    f"hkl '{tuple(hkl)}' is not present in this material!"
+                )
+
+    def getHKLs(self, *hkl_ids: int, **kwargs) -> Union[List[str], np.ndarray]:
+        """
+        Returns the powder HKLs subject to specified options.
+
+        Parameters
+        ----------
+        *hkl_ids : int
+            Optional list of specific master hklIDs.
+        **kwargs : dict
+            One or more of the following keyword arguments:
+            asStr : bool
+                If True, return a list of strings. The default is False.
+            thisTTh : scalar | None
+                If not None, only return hkls overlapping the specified
+                2-theta (in radians). The default is None.
+            allHKLs : bool
+                If True, then ignore exclusions. The default is False.
+
+        Raises
+        ------
+        TypeError
+            If an unknown kwarg is passed.
+        RuntimeError
+            If an invalid hklID is passed.
+
+        Returns
+        -------
+        hkls : list | numpy.ndarray
+            Either a list of hkls as strings (if asStr=True) or a vstacked
+            array of hkls.
+
+        Notes
+        -----
+        !!! the shape of the return value when asStr=False is the
+            _transpose_ of the typical return value for self.get_hkls()
+            and self.hkls!  This _may_ change to avoid confusion, but
+            going to leave it for now so as not to break anything.
+
+        2022/08/05 JVB:
+            - Added functionality to handle optional hklID args
+            - Updated docstring
+        """
+        # kwarg parsing
+        opts = dict(asStr=False, thisTTh=None, allHKLs=False)
+        if len(kwargs) > 0:
+            # check keys
+            for k, v in kwargs.items():
+                if k not in opts:
+                    raise TypeError(
+                        f"getHKLs() got an unexpected keyword argument '{k}'"
+                    )
+            opts.update(kwargs)
+
+        hkls = []
+        if len(hkl_ids) == 0:
+            for iHKLr, hklData in enumerate(self.hklDataList):
+                if not opts['allHKLs']:
+                    if not self._thisHKL(iHKLr):
+                        continue
+                if opts['thisTTh'] is not None:
+                    tThLo, tThHi = self._getTThRange(iHKLr)
+                    if opts['thisTTh'] < tThHi and opts['thisTTh'] > tThLo:
+                        hkls.append(hklData['hkl'])
+                else:
+                    hkls.append(hklData['hkl'])
+        else:
+            # !!! changing behavior here; if the hkl_id is invalid, raises
+            #     RuntimeError, and if allHKLs=True and the hkl_id is
+            #     excluded, it also raises a RuntimeError
+            all_hkl_ids = np.asarray([i['hklID'] for i in self.hklDataList])
+            sorted_excl = self.exclusions[self.tThSortInv]
+            idx = np.zeros(len(self.hklDataList), dtype=int)
+            for i, hkl_id in enumerate(hkl_ids):
+                # find ordinal index of current hklID
+                try:
+                    idx[i] = int(np.where(all_hkl_ids == hkl_id)[0])
+                except TypeError:
+                    raise RuntimeError(
+                        f"Requested hklID '{hkl_id}' is invalid!"
+                    )
+                if sorted_excl[idx[i]] and not opts['allHKLs']:
+                    raise RuntimeError(
+                        f"Requested hklID '{hkl_id}' is excluded!"
+                    )
+                hkls.append(self.hklDataList[idx[i]]['hkl'])
+
+        # handle output kwarg
+        if opts['asStr']:
+            return list(map(hklToStr, np.array(hkls)))
+        else:
+            return np.array(hkls)
+
+    def getSymHKLs(
+        self,
+        asStr: Optional[bool] = False,
+        withID: Optional[bool] = False,
+        indices: Optional[List[int]] = None,
+    ) -> Union[List[List[str]], List[np.ndarray]]:
+        """
+        Return all symmetry HKLs.
+
+        Parameters
+        ----------
+        asStr : bool, optional
+            If True, return the symmetry HKLs as strings. The default is
+            False.
+        withID : bool, optional
+            If True, return the symmetry HKLs with the hklID. The default
+            is False. Does nothing if asStr is True.
+        indices : list[int], optional
+            Optional list of indices of hkls to include.
+
+        Returns
+        -------
+        sym_hkls : list of lists of strings, or list of numpy.ndarray
+            List of symmetry HKLs for each HKL, either as strings or as a
+            vstacked array.
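+
+            With withID=True, each returned array is (4, m): row 0 holds
+            the hklID and rows 1-3 the m symmetric hkls (this matches the
+            np.vstack construction in the body below).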
+ """ + sym_hkls = [] + hkl_index = 0 + if indices is not None: + indB = np.zeros(self.nHKLs, dtype=bool) + indB[np.array(indices)] = True + else: + indB = np.ones(self.nHKLs, dtype=bool) + for iHKLr, hklData in enumerate(self.hklDataList): + if not self._thisHKL(iHKLr): + continue + if indB[hkl_index]: + hkls = hklData['symHKLs'] + if asStr: + sym_hkls.append(list(map(hklToStr, np.array(hkls).T))) + elif withID: + sym_hkls.append( + np.vstack( + [ + np.tile(hklData['hklID'], (1, hkls.shape[1])), + hkls, + ] + ) + ) + else: + sym_hkls.append(np.array(hkls)) + hkl_index += 1 + return sym_hkls + + @staticmethod + def makeScatteringVectors( + hkls: np.ndarray, + rMat_c: np.ndarray, + bMat: np.ndarray, + wavelength: float, + chiTilt: Optional[float] = None, + ) -> Tuple[np.ndarray, np.ndarray, np.ndarray]: + """ + Static method for calculating g-vectors and scattering vector angles + for specified hkls, subject to the bragg conditions specified by + lattice vectors, orientation matrix, and wavelength + + Parameters + ---------- + hkls : np.ndarray + (3, n) array of hkls. + rMat_c : np.ndarray + (3, 3) rotation matrix from the crystal to the sample frame. + bMat : np.ndarray, optional + (3, 3) COB from reciprocal lattice frame to the crystal frame. + wavelength : float + xray wavelength in Angstroms. + chiTilt : float, optional + 0 <= chiTilt <= 90 degrees, defaults to 0 + + Returns + ------- + gVec_s : np.ndarray + (3, n) array of g-vectors (reciprocal lattice) in the sample frame. + oangs0 : np.ndarray + (3, n) array containing the feasible (2-theta, eta, ome) triplets + for each input hkl (first solution) + oangs1 : np.ndarray + (3, n) array containing the feasible (2-theta, eta, ome) triplets + for each input hkl (second solution) + + FIXME: must do testing on strained bMat + """ + # arg munging + chi = float(chiTilt) if chiTilt is not None else 0.0 + rMat_c = rMat_c.squeeze() + + # these are the reciprocal lattice vectors in the SAMPLE FRAME + # ** NOTE ** + # if strained, assumes that you handed it a bMat calculated from + # strained [a, b, c] in the CRYSTAL FRAME + gVec_s = np.dot(rMat_c, np.dot(bMat, hkls)) + + dim0 = gVec_s.shape[0] + if dim0 != 3: + raise ValueError(f'Number of lattice plane normal dims is {dim0}') + + # call model from transforms now + oangs0, oangs1 = xfcapi.oscill_angles_of_hkls( + hkls.T, chi, rMat_c, bMat, wavelength + ) + + return gVec_s, oangs0.T, oangs1.T + + def _makeScatteringVectors( + self, + rMat: np.ndarray, + bMat: Optional[np.ndarray] = None, + chiTilt: Optional[float] = None, + ) -> Tuple[List[np.ndarray], List[np.ndarray], List[np.ndarray]]: + """ + modeled after QFromU.m + """ + + if bMat is None: + bMat = self._latVecOps['B'] + + Qs_vec = [] + Qs_ang0 = [] + Qs_ang1 = [] + for iHKLr, hklData in enumerate(self.hklDataList): + if not self._thisHKL(iHKLr): + continue + thisQs, thisAng0, thisAng1 = PlaneData.makeScatteringVectors( + hklData['symHKLs'], + rMat, + bMat, + self._wavelength, + chiTilt=chiTilt, + ) + Qs_vec.append(thisQs) + Qs_ang0.append(thisAng0) + Qs_ang1.append(thisAng1) + + return Qs_vec, Qs_ang0, Qs_ang1 + + def calcStructFactor(self, atominfo): + """ + Calculates unit cell structure factors as a function of hkl + USAGE: + FSquared = calcStructFactor(atominfo,hkls,B) + INPUTS: + 1) atominfo (m x 1 float ndarray) the first threee columns of the + matrix contain fractional atom positions [uvw] of atoms in the unit + cell. 
The last column contains the number of electrons for a given atom + 2) hkls (3 x n float ndarray) is the array of Miller indices for + the planes of interest. The vectors are assumed to be + concatenated along the 1-axis (horizontal) + 3) B (3 x 3 float ndarray) is a matrix of reciprocal lattice basis + vectors,where each column contains a reciprocal lattice basis vector + ({g}=[B]*{hkl}) + OUTPUTS: + 1) FSquared (n x 1 float ndarray) array of structure factors, + one for each hkl passed into the function + """ + r = atominfo[:, 0:3] + elecNum = atominfo[:, 3] + hkls = self.hkls + B = self.latVecOps['B'] + sinThOverLamdaList, ffDataList = LoadFormFactorData() + FSquared = np.zeros(hkls.shape[1]) + + for jj in np.arange(0, hkls.shape[1]): + # ???: probably have other functions for this + # Calculate G for each hkl + # Calculate magnitude of G for each hkl + G = ( + hkls[0, jj] * B[:, 0] + + hkls[1, jj] * B[:, 1] + + hkls[2, jj] * B[:, 2] + ) + magG = np.sqrt(G[0] ** 2 + G[1] ** 2 + G[2] ** 2) + + # Begin calculating form factor + F = 0 + for ii in np.arange(0, r.shape[0]): + ff = RetrieveAtomicFormFactor( + elecNum[ii], magG, sinThOverLamdaList, ffDataList + ) + exparg = complex( + 0.0, + 2.0 + * np.pi + * ( + hkls[0, jj] * r[ii, 0] + + hkls[1, jj] * r[ii, 1] + + hkls[2, jj] * r[ii, 2] + ), + ) + F += ff * np.exp(exparg) + + """ + F = sum_atoms(ff(Q)*e^(2*pi*i(hu+kv+lw))) + """ + FSquared[jj] = np.real(F * np.conj(F)) + + return FSquared + + # OLD DEPRECATED PLANE_DATA STUFF ==================================== + @deprecated(new_func="len(self.hkls.T)", removal_date="2025-08-01") + def getNHKLs(self): + return len(self.getHKLs()) + + @deprecated(new_func="self.exclusions", removal_date="2025-08-01") + def get_exclusions(self): + return self.exclusions + + @deprecated(new_func="self.exclusions=...", removal_date="2025-08-01") + def set_exclusions(self, exclusions): + self.exclusions = exclusions + + @deprecated(new_func="rotations.ltypeOfLaueGroup(self.laueGroup)", + removal_date="2025-08-01") + def getLatticeType(self): + return ltypeOfLaueGroup(self.laueGroup) + + @deprecated(new_func="self.q_sym", removal_date="2025-08-01") + def getQSym(self): + return self.q_sym + + +@deprecated(removal_date='2025-01-01') +def getFriedelPair(tth0, eta0, *ome0, **kwargs): + """ + Get the diffractometer angular coordinates in degrees for + the Friedel pair of a given reflection (min angular distance). + + AUTHORS: + + J. V. Bernier -- 10 Nov 2009 + + USAGE: + + ome1, eta1 = getFriedelPair(tth0, eta0, *ome0, + display=False, + units='degrees', + convention='hexrd') + + INPUTS: + + 1) tth0 is a list (or ndarray) of 1 or n the bragg angles (2theta) for + the n reflections (tiled to match eta0 if only 1 is given). + + 2) eta0 is a list (or ndarray) of 1 or n azimuthal coordinates for the n + reflections (tiled to match tth0 if only 1 is given). + + 3) ome0 is a list (or ndarray) of 1 or n reference oscillation + angles for the n reflections (denoted omega in [1]). This argument + is optional. 
+
+    4) Keyword arguments may be one of the following:
+
+    Keyword           Values|{default}        Action
+    --------------    --------------          --------------
+    'display'         True|{False}            toggles display to cmd line
+    'units'           'radians'|{'degrees'}   sets units for input angles
+    'convention'      'fable'|{'hexrd'}       sets conventions defining
+                                              the angles (see below)
+    'chiTilt'         None                    the inclination (about Xlab)
+                                              of the oscillation axis
+
+    OUTPUTS:
+
+    1) ome1 contains the oscillation angle coordinates of the
+       Friedel pairs associated with the n input reflections, relative to
+       ome0 (i.e. the relative pair angle is already added to ome0 in the
+       returned ome1).  Output is in DEGREES!
+
+    2) eta1 contains the azimuthal coordinates of the Friedel
+       pairs associated with the n input reflections.  Output units are
+       controlled via the module variable 'outputDegrees'
+
+    NOTES:
+
+    !!!: The outputs ome1, eta1 are written using the selected convention,
+         but the units are always degrees.  May change this to work with
+         Nathan's global...
+
+    !!!: In the 'fable' convention [1], {XYZ} form a RHON basis where X is
+         downstream, Z is vertical, and eta is CCW with +Z defining
+         eta = 0.
+
+    !!!: In the 'hexrd' convention [2], {XYZ} form a RHON basis where Z is
+         upstream, Y is vertical, and eta is CCW with +X defining eta = 0.
+
+    REFERENCES:
+
+    [1] E. M. Lauridsen, S. Schmidt, R. M. Suter, and H. F. Poulsen,
+        ``Tracking: a method for structural characterization of grains in
+        powders or polycrystals''. J. Appl. Cryst. (2001). 34, 744--750
+
+    [2] J. V. Bernier, M. P. Miller, J. -S. Park, and U. Lienert,
+        ``Quantitative Stress Analysis of Recrystallized OFHC Cu Subject
+        to Deformed In Situ'', J. Eng. Mater. Technol. (2008). 130.
+        DOI:10.1115/1.2870234
+    """
+
+    dispFlag = False
+    fableFlag = False
+    chi = None
+    c1 = 1.0
+    c2 = pi / 180.0
+
+    eta0 = np.atleast_1d(eta0)
+    tth0 = np.atleast_1d(tth0)
+    ome0 = np.atleast_1d(ome0)
+
+    if eta0.ndim != 1:
+        raise RuntimeError('azimuthal input must be 1-D')
+
+    npts = len(eta0)
+
+    if tth0.ndim != 1:
+        raise RuntimeError('Bragg angle input must be 1-D')
+    else:
+        if len(tth0) != npts:
+            if len(tth0) == 1:
+                # out-of-place multiply: in-place (*=) cannot broadcast a
+                # length-1 array up to length npts
+                tth0 = tth0 * np.ones(npts)
+            elif npts == 1:
+                npts = len(tth0)
+                eta0 = eta0 * np.ones(npts)
+            else:
+                raise RuntimeError(
+                    'the azimuthal and Bragg angle inputs are inconsistent'
+                )
+
+    if len(ome0) == 0:
+        ome0 = np.zeros(npts)  # dummy ome0
+    elif len(ome0) == 1 and npts > 1:
+        ome0 = ome0 * np.ones(npts)
+    else:
+        if len(ome0) != npts:
+            raise RuntimeError(
+                'your oscillation angle input is inconsistent; '
+                + f'it has length {len(ome0)} while it should be {npts}'
+            )
+
+    # keyword args processing
+    kwarglen = len(kwargs)
+    if kwarglen > 0:
+        argkeys = list(kwargs.keys())
+        for i in range(kwarglen):
+            if argkeys[i] == 'display':
+                dispFlag = kwargs[argkeys[i]]
+            elif argkeys[i] == 'convention':
+                if kwargs[argkeys[i]].lower() == 'fable':
+                    fableFlag = True
+            elif argkeys[i] == 'units':
+                if kwargs[argkeys[i]] == 'radians':
+                    c1 = 180.0 / pi
+                    c2 = 1.0
+            elif argkeys[i] == 'chiTilt':
+                if kwargs[argkeys[i]] is not None:
+                    chi = kwargs[argkeys[i]]
+
+    # a little talkback...
+ if dispFlag: + if fableFlag: + print('\nUsing Fable angle convention\n') + else: + print('\nUsing image-based angle convention\n') + + # mapped eta input + # - in DEGREES, thanks to c1 + eta0 = mapAngle(c1 * eta0, [-180, 180], units='degrees') + if fableFlag: + eta0 = 90 - eta0 + + # must put args into RADIANS + # - eta0 is in DEGREES, + # - the others are in whatever was entered, hence c2 + eta0 = d2r * eta0 + tht0 = c2 * tth0 / 2 + if chi is not None: + chi = c2 * chi + else: + chi = 0 + + """ + SYSTEM SOLVE + + + cos(chi)cos(eta)cos(theta)sin(x) - cos(chi)sin(theta)cos(x) \ + = sin(theta) - sin(chi)sin(eta)cos(theta) + + + Identity: a sin x + b cos x = sqrt(a**2 + b**2) sin (x + alpha) + + / + | atan(b/a) for a > 0 + alpha < + | pi + atan(b/a) for a < 0 + \ + + => sin (x + alpha) = c / sqrt(a**2 + b**2) + + must use both branches for sin(x) = n: + x = u (+ 2k*pi) | x = pi - u (+ 2k*pi) + """ + cchi = np.cos(chi) + schi = np.sin(chi) + ceta = np.cos(eta0) + seta = np.sin(eta0) + ctht = np.cos(tht0) + stht = np.sin(tht0) + + nchi = np.c_[0.0, cchi, schi].T + + gHat0_l = -np.vstack([ceta * ctht, seta * ctht, stht]) + + a = cchi * ceta * ctht + b = -cchi * stht + c = stht + schi * seta * ctht + + # form solution + abMag = np.sqrt(a * a + b * b) + assert np.all(abMag > 0), "Beam vector specification is infeasible!" + phaseAng = np.arctan2(b, a) + rhs = c / abMag + rhs[abs(rhs) > 1.0] = np.nan + rhsAng = np.arcsin(rhs) + + # write ome angle output arrays (NaNs persist here) + ome1 = rhsAng - phaseAng + ome2 = np.pi - rhsAng - phaseAng + + ome1 = mapAngle(ome1, [-np.pi, np.pi], units='radians') + ome2 = mapAngle(ome2, [-np.pi, np.pi], units='radians') + + ome_stack = np.vstack([ome1, ome2]) + + min_idx = np.argmin(abs(ome_stack), axis=0) + + ome_min = ome_stack[min_idx, list(range(len(ome1)))] + eta_min = np.nan * np.ones_like(ome_min) + + # mark feasible reflections + goodOnes = ~np.isnan(ome_min) + + numGood = np.sum(goodOnes) + tmp_eta = np.empty(numGood) + tmp_gvec = gHat0_l[:, goodOnes] + for i in range(numGood): + rchi = rotMatOfExpMap(np.tile(ome_min[goodOnes][i], (3, 1)) * nchi) + gHat_l = np.dot(rchi, tmp_gvec[:, i].reshape(3, 1)) + tmp_eta[i] = np.arctan2(gHat_l[1], gHat_l[0]) + eta_min[goodOnes] = tmp_eta + + # everybody back to DEGREES! + # - ome1 is in RADIANS here + # - convert and put into [-180, 180] + ome1 = mapAngle( + mapAngle(r2d * ome_min, [-180, 180], units='degrees') + c1 * ome0, + [-180, 180], + units='degrees', + ) + + # put eta1 in [-180, 180] + eta1 = mapAngle(r2d * eta_min, [-180, 180], units='degrees') + + if not outputDegrees: + ome1 *= d2r + eta1 *= d2r + + return ome1, eta1 + + +def getDparms( + lp: np.ndarray, lpTag: str, radians: Optional[bool] = True +) -> np.ndarray: + """ + Utility routine for getting dparms, that is the lattice parameters + without symmetry -- 'triclinic' + + Parameters + ---------- + lp : np.ndarray + Parsed lattice parameters + lpTag : str + Tag for the symmetry group of the lattice (from Laue group) + radians : bool, optional + Whether or not to use radians for angles, default is True + + Returns + ------- + np.ndarray + The lattice parameters without symmetry. + """ + latVecOps = latticeVectors(lp, tag=lpTag, radians=radians) + return latVecOps['dparms'] + + +def LoadFormFactorData(): + """ + Script to read in a csv file containing information relating the + magnitude of Q (sin(th)/lambda) to atomic form factor + Notes: + Atomic form factor data gathered from the International Tables of + Crystallography: + P. J. Brown, A. G. Fox, E. 
+    N. Maslen, M. A. O'Keefe and B. T. M. Willis,
+    "Chapter 6.1. Intensity of diffracted intensities", International Tables
+    for Crystallography (2006). Vol. C, ch. 6.1, pp. 554-595
+    """
+
+    dir1 = os.path.split(valunits.__file__)
+    dataloc = os.path.join(dir1[0], 'data', 'FormFactorVsQ.csv')
+
+    data = np.zeros((62, 99), float)
+
+    # NOTE: this loader had been marked broken (FIXME by DP) because of the
+    # removed 'rU' file mode; mode 'r' with newline='' restores the old
+    # universal-newline behavior for the csv reader
+    jj = 0
+    with open(dataloc, 'r', newline='') as csvfile:
+        datareader = csv.reader(csvfile, dialect=csv.excel)
+        for row in datareader:
+            ii = 0
+            for val in row:
+                data[jj, ii] = float(val)
+                ii += 1
+            jj += 1
+
+    sinThOverLamdaList = data[:, 0]
+    ffDataList = data[:, 1:]
+
+    return sinThOverLamdaList, ffDataList
+
+
+def RetrieveAtomicFormFactor(elecNum, magG, sinThOverLamdaList, ffDataList):
+    """Interpolates between tabulated data to find the atomic form factor
+    for an atom with elecNum electrons for a given magnitude of Q
+    USAGE:
+    ff = RetrieveAtomicFormFactor(elecNum,magG,sinThOverLamdaList,ffDataList)
+    INPUTS:
+    1) elecNum, (1 x 1 float) number of electrons for atom of interest
+    2) magG (1 x 1 float) magnitude of G
+    3) sinThOverLamdaList (n x 1 float ndarray) form factor data is
+       tabulated in terms of sin(theta)/lambda (A^-1).
+    4) ffDataList (n x m float ndarray) form factor data is tabulated in
+       terms of sin(theta)/lambda (A^-1).  Each column corresponds to a
+       different number of electrons
+    OUTPUTS:
+    1) ff (n x 1 float) atomic form factor for atom and hkl of interest
+    NOTES:
+    Data should be calculated in terms of G at some point
+    """
+    sinThOverLambda = 0.5 * magG
+    # lambda = 2*d*sin(th)
+    # lambda = 2*sin(th)/G
+    # 1/2*G = sin(th)/lambda
+
+    ff = np.interp(
+        sinThOverLambda, sinThOverLamdaList, ffDataList[:, (elecNum - 1)]
+    )
+
+    return ff
+
+
+def lorentz_factor(tth: np.ndarray) -> np.ndarray:
+    """
+    05/26/2022 SS adding lorentz factor computation
+    to the detector so that it can be compensated for in the
+    intensity correction
+
+    Parameters
+    ----------
+    tth: np.ndarray
+        2-theta of every pixel in radians
+
+    Returns
+    -------
+    np.ndarray
+        Lorentz factor for each pixel
+    """
+
+    theta = 0.5 * tth
+
+    cth = np.cos(theta)
+    sth2 = np.sin(theta) ** 2
+
+    return 1.0 / (4.0 * cth * sth2)
+
+
+def polarization_factor(
+    tth: np.ndarray,
+    unpolarized: Optional[bool] = True,
+    eta: Optional[np.ndarray] = None,
+    f_hor: Optional[float] = None,
+    f_vert: Optional[float] = None,
+) -> np.ndarray:
+    """
+    06/14/2021 SS adding lorentz polarization factor computation
+    to the detector so that it can be compensated for in the
+    intensity correction
+
+    05/26/2022 decoupling lorentz factor from polarization factor
+
+    parameters: tth     two theta of every pixel in radians
+                if unpolarized is True, all subsequent arguments are
+                optional
+                eta     azimuthal angle of every pixel
+                f_hor   fraction of horizontal polarization
+                        (~1 for XFELs)
+                f_vert  fraction of vertical polarization
+                        (~0 for XFELs)
+                notice f_hor + f_vert = 1
+
+    FIXME: when unpolarized is False, eta, f_hor and f_vert must all be
+    supplied; they default to None in the current implementation, which
+    will throw an error if any of them is omitted.
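+
+    The relations implemented in the body below are
+        unpolarized:  P = (1 + cos^2(2*theta)) / 2
+        polarized:    P = f_hor*(sin^2(eta) + cos^2(eta)*cos^2(2*theta))
+                          + f_vert*(cos^2(eta) + sin^2(eta)*cos^2(2*theta))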
+ """ + + ctth2 = np.cos(tth) ** 2 + + if unpolarized: + return (1 + ctth2) / 2 + + seta2 = np.sin(eta) ** 2 + ceta2 = np.cos(eta) ** 2 + return f_hor * (seta2 + ceta2 * ctth2) + f_vert * (ceta2 + seta2 * ctth2) diff --git a/hexrd/wppf/LeBailCalibration.py b/hexrd/powder/wppf/LeBailCalibration.py similarity index 100% rename from hexrd/wppf/LeBailCalibration.py rename to hexrd/powder/wppf/LeBailCalibration.py diff --git a/hexrd/wppf/RietveldHEDM.py b/hexrd/powder/wppf/RietveldHEDM.py similarity index 100% rename from hexrd/wppf/RietveldHEDM.py rename to hexrd/powder/wppf/RietveldHEDM.py diff --git a/hexrd/wppf/WPPF.py b/hexrd/powder/wppf/WPPF.py similarity index 100% rename from hexrd/wppf/WPPF.py rename to hexrd/powder/wppf/WPPF.py diff --git a/hexrd/wppf/__init__.py b/hexrd/powder/wppf/__init__.py similarity index 100% rename from hexrd/wppf/__init__.py rename to hexrd/powder/wppf/__init__.py diff --git a/hexrd/wppf/derivatives.py b/hexrd/powder/wppf/derivatives.py similarity index 100% rename from hexrd/wppf/derivatives.py rename to hexrd/powder/wppf/derivatives.py diff --git a/hexrd/wppf/parameters.py b/hexrd/powder/wppf/parameters.py similarity index 100% rename from hexrd/wppf/parameters.py rename to hexrd/powder/wppf/parameters.py diff --git a/hexrd/wppf/peakfunctions.py b/hexrd/powder/wppf/peakfunctions.py similarity index 100% rename from hexrd/wppf/peakfunctions.py rename to hexrd/powder/wppf/peakfunctions.py diff --git a/hexrd/wppf/phase.py b/hexrd/powder/wppf/phase.py similarity index 100% rename from hexrd/wppf/phase.py rename to hexrd/powder/wppf/phase.py diff --git a/hexrd/wppf/spectrum.py b/hexrd/powder/wppf/spectrum.py similarity index 100% rename from hexrd/wppf/spectrum.py rename to hexrd/powder/wppf/spectrum.py diff --git a/hexrd/wppf/texture.py b/hexrd/powder/wppf/texture.py similarity index 100% rename from hexrd/wppf/texture.py rename to hexrd/powder/wppf/texture.py diff --git a/hexrd/wppf/wppfsupport.py b/hexrd/powder/wppf/wppfsupport.py similarity index 100% rename from hexrd/wppf/wppfsupport.py rename to hexrd/powder/wppf/wppfsupport.py diff --git a/hexrd/wppf/xtal.py b/hexrd/powder/wppf/xtal.py similarity index 100% rename from hexrd/wppf/xtal.py rename to hexrd/powder/wppf/xtal.py From 9d54c295f3e1b1a606e9d44ea3b744a5e30e1d97 Mon Sep 17 00:00:00 2001 From: Kevin Welsh Date: Mon, 31 Mar 2025 11:21:04 -0400 Subject: [PATCH 02/19] Update Imports --- hexrd/core/config/beam.py | 2 +- hexrd/core/config/imageseries.py | 4 +- hexrd/core/config/instrument.py | 2 +- hexrd/core/config/material.py | 6 +-- hexrd/core/config/root.py | 4 +- hexrd/core/distortion/dexela_2923.py | 2 +- hexrd/core/distortion/dexela_2923_quad.py | 2 +- hexrd/core/distortion/ge_41rt.py | 4 +- .../fitting/calibration/abstract_grain.py | 17 ++---- hexrd/core/fitting/calibration/grain.py | 8 +-- .../calibration/relative_constraints.py | 2 +- hexrd/core/fitting/fitpeak.py | 6 +-- hexrd/core/fitting/peakfunctions.py | 5 +- hexrd/core/fitting/spectrum.py | 18 ++----- hexrd/core/fitting/utils.py | 6 +-- hexrd/core/gridutil.py | 2 +- hexrd/core/imageseries/load/__init__.py | 4 +- .../core/imageseries/load/eiger_stream_v1.py | 2 +- hexrd/core/imageseries/load/framecache.py | 4 +- hexrd/core/imageseries/load/hdf5.py | 2 +- hexrd/core/imageseries/save.py | 4 +- hexrd/core/imageutil.py | 4 +- hexrd/core/instrument/__init__.py | 13 +---- hexrd/core/instrument/constants.py | 2 +- hexrd/core/instrument/cylindrical_detector.py | 14 +++-- hexrd/core/instrument/detector.py | 52 +++++++++--------- 
hexrd/core/instrument/detector_coatings.py | 3 +- hexrd/core/instrument/hedm_instrument.py | 53 ++++++++----------- hexrd/core/instrument/physics_package.py | 2 +- hexrd/core/instrument/planar_detector.py | 22 +++----- hexrd/core/material/__init__.py | 8 +-- hexrd/core/material/crystallography.py | 25 ++++----- hexrd/core/material/material.py | 28 +++++----- hexrd/core/material/mksupport.py | 10 ++-- hexrd/core/material/spacegroup.py | 4 +- hexrd/core/material/symmetry.py | 14 ++--- hexrd/core/material/unitcell.py | 12 ++--- hexrd/core/material/utils.py | 7 ++- hexrd/core/matrixutil.py | 2 +- hexrd/core/projections/polar.py | 16 +++--- hexrd/core/projections/spherical.py | 2 +- hexrd/core/rotations.py | 14 ++--- hexrd/core/transforms/new_capi/xf_new_capi.py | 8 +-- hexrd/core/transforms/old_xfcapi.py | 6 +-- hexrd/core/transforms/xf.py | 8 ++- hexrd/core/transforms/xfcapi.py | 43 +-------------- hexrd/core/utils/hdf5.py | 2 +- hexrd/core/utils/hkl.py | 5 +- hexrd/core/utils/panel_buffer.py | 6 ++- hexrd/core/valunits.py | 2 +- hexrd/hed/instrument/detector.py | 52 +++++++++--------- hexrd/hed/instrument/hedm_instrument.py | 53 ++++++++----------- hexrd/hed/xrdutil/phutil.py | 8 +-- hexrd/hed/xrdutil/utils.py | 28 ++++++---- hexrd/hedm/cli/find_orientations.py | 11 ++-- hexrd/hedm/cli/fit_grains.py | 15 +++--- hexrd/hedm/cli/main.py | 18 +++---- hexrd/hedm/cli/preprocess.py | 4 +- hexrd/hedm/config/instrument.py | 2 +- hexrd/hedm/config/root.py | 4 +- hexrd/hedm/findorientations.py | 16 +++--- hexrd/hedm/fitgrains.py | 8 +-- hexrd/hedm/fitting/grains.py | 10 ++-- hexrd/hedm/grainmap/nfutil.py | 14 ++--- hexrd/hedm/indexer.py | 6 +-- hexrd/hedm/instrument/detector.py | 52 +++++++++--------- hexrd/hedm/instrument/hedm_instrument.py | 53 ++++++++----------- hexrd/hedm/instrument/physics_package.py | 2 +- hexrd/hedm/ipfcolor/colorspace.py | 2 +- hexrd/hedm/ipfcolor/sphere_sector.py | 4 +- hexrd/hedm/material/crystallography.py | 25 ++++----- hexrd/hedm/material/unitcell.py | 12 ++--- .../preprocess/argument_classes_factory.py | 2 +- hexrd/hedm/preprocess/preprocessors.py | 14 ++--- hexrd/hedm/preprocess/profiles.py | 9 +--- hexrd/hedm/sampleOrientations/__init__.py | 2 +- hexrd/hedm/sampleOrientations/conversions.py | 2 +- hexrd/hedm/sampleOrientations/rfz.py | 4 +- hexrd/hedm/sampleOrientations/sampleRFZ.py | 6 +-- hexrd/hedm/xrdutil/utils.py | 28 ++++++---- hexrd/laue/fitting/calibration/laue.py | 8 +-- hexrd/laue/instrument/detector.py | 52 +++++++++--------- hexrd/laue/instrument/hedm_instrument.py | 53 ++++++++----------- hexrd/laue/material/crystallography.py | 25 ++++----- hexrd/laue/xrdutil/utils.py | 28 ++++++---- .../powder/fitting/calibration/instrument.py | 14 +---- .../calibration/lmfit_param_handling.py | 24 ++------- hexrd/powder/fitting/calibration/powder.py | 11 ++-- .../fitting/calibration/structureless.py | 19 ++----- hexrd/powder/instrument/detector.py | 52 +++++++++--------- hexrd/powder/instrument/hedm_instrument.py | 53 ++++++++----------- hexrd/powder/material/crystallography.py | 25 ++++----- hexrd/powder/wppf/LeBailCalibration.py | 34 +++++------- hexrd/powder/wppf/WPPF.py | 33 ++++-------- hexrd/powder/wppf/__init__.py | 4 +- hexrd/powder/wppf/derivatives.py | 2 +- hexrd/powder/wppf/peakfunctions.py | 4 +- hexrd/powder/wppf/phase.py | 20 +++---- hexrd/powder/wppf/texture.py | 2 +- hexrd/powder/wppf/wppfsupport.py | 13 ++--- hexrd/powder/wppf/xtal.py | 2 +- 101 files changed, 609 insertions(+), 828 deletions(-) diff --git a/hexrd/core/config/beam.py 

diff --git a/hexrd/core/config/beam.py b/hexrd/core/config/beam.py
index 820dc7d4b..572b25b52 100644
--- a/hexrd/core/config/beam.py
+++ b/hexrd/core/config/beam.py
@@ -3,7 +3,7 @@
 import numpy as np

 from .config import Config
-from hexrd import imageseries
+from hexrd.core import imageseries


 class Beam(Config):
diff --git a/hexrd/core/config/imageseries.py b/hexrd/core/config/imageseries.py
index 63bffff07..0a133399c 100644
--- a/hexrd/core/config/imageseries.py
+++ b/hexrd/core/config/imageseries.py
@@ -1,7 +1,7 @@
 from .config import Config
-from hexrd import imageseries
+from hexrd.core import imageseries

-from hexrd.constants import shared_ims_key
+from hexrd.core.constants import shared_ims_key


 class ImageSeries(Config):
diff --git a/hexrd/core/config/instrument.py b/hexrd/core/config/instrument.py
index 8f0a87daa..31dbbb310 100644
--- a/hexrd/core/config/instrument.py
+++ b/hexrd/core/config/instrument.py
@@ -4,7 +4,7 @@
 from .config import Config
 from .loader import NumPyIncludeLoader

-from hexrd import instrument
+from hexrd.core import instrument


 class Instrument(Config):
diff --git a/hexrd/core/config/material.py b/hexrd/core/config/material.py
index c55a985c6..100bc515f 100644
--- a/hexrd/core/config/material.py
+++ b/hexrd/core/config/material.py
@@ -2,9 +2,9 @@

 import numpy as np

-from hexrd import material
-from hexrd.constants import keVToAngstrom
-from hexrd.valunits import valWUnit
+from hexrd.core import material
+from hexrd.core.constants import keVToAngstrom
+from hexrd.core.valunits import valWUnit

 from .config import Config
 from .utils import get_exclusion_parameters
diff --git a/hexrd/core/config/root.py b/hexrd/core/config/root.py
index 0fca50ed7..e698a3f37 100644
--- a/hexrd/core/config/root.py
+++ b/hexrd/core/config/root.py
@@ -3,8 +3,8 @@
 import logging
 import multiprocessing as mp

-from hexrd.constants import shared_ims_key
-from hexrd import imageseries
+from hexrd.core.constants import shared_ims_key
+from hexrd.core import imageseries

 from .config import Config
 from .instrument import Instrument
diff --git a/hexrd/core/distortion/dexela_2923.py b/hexrd/core/distortion/dexela_2923.py
index bff2a0cd3..e4cb92dfe 100644
--- a/hexrd/core/distortion/dexela_2923.py
+++ b/hexrd/core/distortion/dexela_2923.py
@@ -7,7 +7,7 @@
 import numpy as np
 import numba

-from hexrd import constants
+from hexrd.core import constants

 from .distortionabc import DistortionABC
 from .registry import _RegisterDistortionClass
diff --git a/hexrd/core/distortion/dexela_2923_quad.py b/hexrd/core/distortion/dexela_2923_quad.py
index 6f61f3961..ab84da33e 100644
--- a/hexrd/core/distortion/dexela_2923_quad.py
+++ b/hexrd/core/distortion/dexela_2923_quad.py
@@ -1,6 +1,6 @@
 import numpy as np
 import numba
-from hexrd import constants
+from hexrd.core import constants

 from .distortionabc import DistortionABC
 from .registry import _RegisterDistortionClass
diff --git a/hexrd/core/distortion/ge_41rt.py b/hexrd/core/distortion/ge_41rt.py
index 8c52aee95..241e87d40 100644
--- a/hexrd/core/distortion/ge_41rt.py
+++ b/hexrd/core/distortion/ge_41rt.py
@@ -8,8 +8,8 @@
 from .registry import _RegisterDistortionClass
 from .utils import newton

-from hexrd import constants as cnst
-from hexrd.extensions import inverse_distortion
+from hexrd.core import constants as cnst
+from hexrd.core.extensions import inverse_distortion

 RHO_MAX = 204.8  # max radius in mm for ge detector
diff --git a/hexrd/core/fitting/calibration/abstract_grain.py b/hexrd/core/fitting/calibration/abstract_grain.py
index dfc5e0262..143b8b6ae 100644
--- a/hexrd/core/fitting/calibration/abstract_grain.py
+++ b/hexrd/core/fitting/calibration/abstract_grain.py
@@ -4,20 +4,13 @@
 import lmfit
 import numpy as np

-import hexrd.constants as cnst
-from hexrd.rotations import (
-    angleAxisOfRotMat,
-    RotMatEuler,
-)
-from hexrd.transforms import xfcapi
-from hexrd.utils.hkl import hkl_to_str, str_to_hkl
+import hexrd.core.constants as cnst
+from hexrd.core.rotations import angleAxisOfRotMat, RotMatEuler
+from hexrd.core.transforms import xfcapi
+from hexrd.core.utils.hkl import hkl_to_str, str_to_hkl

 from .calibrator import Calibrator
-from .lmfit_param_handling import (
-    create_grain_params,
-    DEFAULT_EULER_CONVENTION,
-    rename_to_avoid_collision,
-)
+from .lmfit_param_handling import create_grain_params, DEFAULT_EULER_CONVENTION, rename_to_avoid_collision

 logger = logging.getLogger(__name__)
diff --git a/hexrd/core/fitting/calibration/grain.py b/hexrd/core/fitting/calibration/grain.py
index 082b59d0a..1fd9e8f5f 100644
--- a/hexrd/core/fitting/calibration/grain.py
+++ b/hexrd/core/fitting/calibration/grain.py
@@ -2,13 +2,13 @@

 import numpy as np

-from hexrd import matrixutil as mutil
-from hexrd.rotations import angularDifference
-from hexrd.transforms import xfcapi
+from hexrd.core import matrixutil as mutil
+from hexrd.core.rotations import angularDifference
+from hexrd.core.transforms import xfcapi

 from .abstract_grain import AbstractGrainCalibrator
 from .lmfit_param_handling import DEFAULT_EULER_CONVENTION
-from .. import grains as grainutil
+from . import grains as grainutil

 logger = logging.getLogger(__name__)
diff --git a/hexrd/core/fitting/calibration/relative_constraints.py b/hexrd/core/fitting/calibration/relative_constraints.py
index f094b3212..547fdd29b 100644
--- a/hexrd/core/fitting/calibration/relative_constraints.py
+++ b/hexrd/core/fitting/calibration/relative_constraints.py
@@ -3,7 +3,7 @@

 import numpy as np

-from hexrd.instrument import HEDMInstrument
+from hexrd.core.instrument import HEDMInstrument


 class RelativeConstraintsType(Enum):
diff --git a/hexrd/core/fitting/fitpeak.py b/hexrd/core/fitting/fitpeak.py
index ff641685a..77215f38b 100644
--- a/hexrd/core/fitting/fitpeak.py
+++ b/hexrd/core/fitting/fitpeak.py
@@ -32,9 +32,9 @@
 from scipy import ndimage as imgproc
 from scipy import optimize

-from hexrd import constants
-from hexrd.imageutil import snip1d
-from hexrd.fitting import peakfunctions as pkfuncs
+from hexrd.core import constants
+from hexrd.core.imageutil import snip1d
+from hexrd.core.fitting import peakfunctions as pkfuncs

 import matplotlib.pyplot as plt
diff --git a/hexrd/core/fitting/peakfunctions.py b/hexrd/core/fitting/peakfunctions.py
index 40d74c3dd..8ae64ca31 100644
--- a/hexrd/core/fitting/peakfunctions.py
+++ b/hexrd/core/fitting/peakfunctions.py
@@ -28,9 +28,8 @@
 import numpy as np
 from numba import njit
 import copy
-from hexrd import constants
-from hexrd.constants import \
-    c_erf, cnum_exp1exp, cden_exp1exp, c_coeff_exp1exp
+from hexrd.core import constants
+from hexrd.core.constants import c_erf, cnum_exp1exp, cden_exp1exp, c_coeff_exp1exp

 gauss_width_fact = constants.sigma_to_fwhm
 lorentz_width_fact = 2.
diff --git a/hexrd/core/fitting/spectrum.py b/hexrd/core/fitting/spectrum.py
index 7e6d60f75..4f35b3ad3 100644
--- a/hexrd/core/fitting/spectrum.py
+++ b/hexrd/core/fitting/spectrum.py
@@ -3,20 +3,10 @@

 from lmfit import Model, Parameters

-from hexrd.constants import fwhm_to_sigma
-from hexrd.imageutil import snip1d
-
-from .utils import (_calc_alpha, _calc_beta,
-                    _mixing_factor_pv,
-                    _gaussian_pink_beam,
-                    _lorentzian_pink_beam,
-                    _parameter_arg_constructor,
-                    _extract_parameters_by_name,
-                    _set_bound_constraints,
-                    _set_refinement_by_name,
-                    _set_width_mixing_bounds,
-                    _set_equality_constraints,
-                    _set_peak_center_bounds)
+from hexrd.core.constants import fwhm_to_sigma
+from hexrd.core.imageutil import snip1d
+
+from .utils import _calc_alpha, _calc_beta, _mixing_factor_pv, _gaussian_pink_beam, _lorentzian_pink_beam, _parameter_arg_constructor, _extract_parameters_by_name, _set_bound_constraints, _set_refinement_by_name, _set_width_mixing_bounds, _set_equality_constraints, _set_peak_center_bounds

 # =============================================================================
 # PARAMETERS
diff --git a/hexrd/core/fitting/utils.py b/hexrd/core/fitting/utils.py
index 47f72c953..d0421ff17 100644
--- a/hexrd/core/fitting/utils.py
+++ b/hexrd/core/fitting/utils.py
@@ -3,10 +3,8 @@
 import numpy as np
 from numba import njit

-from hexrd.constants import (
-    c_erf, cnum_exp1exp, cden_exp1exp, c_coeff_exp1exp
-)
-from hexrd.matrixutil import uniqueVectors
+from hexrd.core.constants import c_erf, cnum_exp1exp, cden_exp1exp, c_coeff_exp1exp
+from hexrd.core.matrixutil import uniqueVectors


 # =============================================================================
diff --git a/hexrd/core/gridutil.py b/hexrd/core/gridutil.py
index 22ff4eb0f..3455400f5 100644
--- a/hexrd/core/gridutil.py
+++ b/hexrd/core/gridutil.py
@@ -29,7 +29,7 @@
 from numpy.linalg import det
 import numba

-from hexrd.constants import sqrt_epsf
+from hexrd.core.constants import sqrt_epsf
diff --git a/hexrd/core/imageseries/load/__init__.py b/hexrd/core/imageseries/load/__init__.py
index 3f891781e..e03a1e717 100644
--- a/hexrd/core/imageseries/load/__init__.py
+++ b/hexrd/core/imageseries/load/__init__.py
@@ -26,9 +26,7 @@ def __getitem__(self, _):

 # import all adapter modules
-from . import (
-    array, framecache, function, hdf5, imagefiles, rawimage, metadata, trivial
-)
+from . import array, framecache, function, hdf5, imagefiles, rawimage, metadata, trivial

 try:
     from dectris.compression import decompress
diff --git a/hexrd/core/imageseries/load/eiger_stream_v1.py b/hexrd/core/imageseries/load/eiger_stream_v1.py
index 14ad28c5c..d8222e9a7 100644
--- a/hexrd/core/imageseries/load/eiger_stream_v1.py
+++ b/hexrd/core/imageseries/load/eiger_stream_v1.py
@@ -6,7 +6,7 @@
 import h5py
 import numpy as np

-from hexrd.utils.hdf5 import unwrap_h5_to_dict
+from hexrd.core.utils.hdf5 import unwrap_h5_to_dict

 from . import ImageSeriesAdapter
 from ..imageseriesiter import ImageSeriesIterator
diff --git a/hexrd/core/imageseries/load/framecache.py b/hexrd/core/imageseries/load/framecache.py
index 9552e1165..485d0770e 100644
--- a/hexrd/core/imageseries/load/framecache.py
+++ b/hexrd/core/imageseries/load/framecache.py
@@ -11,8 +11,8 @@
 from . import ImageSeriesAdapter, RegionType
 from ..imageseriesiter import ImageSeriesIterator
 from .metadata import yamlmeta
-from hexrd.utils.hdf5 import unwrap_h5_to_dict
-from hexrd.utils.compatibility import h5py_read_string
+from hexrd.core.utils.hdf5 import unwrap_h5_to_dict
+from hexrd.core.utils.compatibility import h5py_read_string

 import multiprocessing
 from concurrent.futures import ThreadPoolExecutor
diff --git a/hexrd/core/imageseries/load/hdf5.py b/hexrd/core/imageseries/load/hdf5.py
index f9fa909c7..d5efde499 100644
--- a/hexrd/core/imageseries/load/hdf5.py
+++ b/hexrd/core/imageseries/load/hdf5.py
@@ -5,7 +5,7 @@

 import numpy as np

-from . import ImageSeriesAdapter,RegionType
+from . import ImageSeriesAdapter, RegionType
 from ..imageseriesiter import ImageSeriesIterator
diff --git a/hexrd/core/imageseries/save.py b/hexrd/core/imageseries/save.py
index ea669c9a0..90746eb92 100644
--- a/hexrd/core/imageseries/save.py
+++ b/hexrd/core/imageseries/save.py
@@ -12,8 +12,8 @@
 import hdf5plugin
 import yaml

-from hexrd.matrixutil import extract_ijv
-from hexrd.utils.hdf5 import unwrap_dict_to_h5
+from hexrd.core.matrixutil import extract_ijv
+from hexrd.core.utils.hdf5 import unwrap_dict_to_h5

 MAX_NZ_FRACTION = 0.1    # 10% sparsity trigger for frame-cache write
diff --git a/hexrd/core/imageutil.py b/hexrd/core/imageutil.py
index 24412f354..7d6e04691 100644
--- a/hexrd/core/imageutil.py
+++ b/hexrd/core/imageutil.py
@@ -8,8 +8,8 @@
 from skimage.feature import blob_dog, blob_log
 from skimage.exposure import rescale_intensity

-from hexrd import convolution
-from hexrd.constants import fwhm_to_sigma
+from hexrd.core import convolution
+from hexrd.core.constants import fwhm_to_sigma


 # =============================================================================
diff --git a/hexrd/core/instrument/__init__.py b/hexrd/core/instrument/__init__.py
index c5de3a79b..10f7fda33 100644
--- a/hexrd/core/instrument/__init__.py
+++ b/hexrd/core/instrument/__init__.py
@@ -1,15 +1,4 @@
-from .hedm_instrument import (
-    calc_angles_from_beam_vec,
-    calc_beam_vec,
-    centers_of_edge_vec,
-    GenerateEtaOmeMaps,
-    GrainDataWriter,
-    HEDMInstrument,
-    max_tth,
-    switch_xray_source,
-    unwrap_dict_to_h5,
-    unwrap_h5_to_dict,
-)
+from .hedm_instrument import calc_angles_from_beam_vec, calc_beam_vec, centers_of_edge_vec, GenerateEtaOmeMaps, GrainDataWriter, HEDMInstrument, max_tth, switch_xray_source, unwrap_dict_to_h5, unwrap_h5_to_dict
 from .cylindrical_detector import CylindricalDetector
 from .detector import Detector
 from .planar_detector import PlanarDetector
diff --git a/hexrd/core/instrument/constants.py b/hexrd/core/instrument/constants.py
index 3e1996ee5..98ce323b5 100644
--- a/hexrd/core/instrument/constants.py
+++ b/hexrd/core/instrument/constants.py
@@ -1,4 +1,4 @@
-from hexrd.constants import DENSITY, DENSITY_COMPOUNDS
+from hexrd.core.constants import DENSITY, DENSITY_COMPOUNDS


 # default filter and coating materials
diff --git a/hexrd/core/instrument/cylindrical_detector.py b/hexrd/core/instrument/cylindrical_detector.py
index a1833130f..93d7499e2 100644
--- a/hexrd/core/instrument/cylindrical_detector.py
+++ b/hexrd/core/instrument/cylindrical_detector.py
@@ -2,17 +2,15 @@

 import numpy as np

-from hexrd import constants as ct
-from hexrd import xrdutil
-from hexrd.utils.decorators import memoize
+from hexrd.core import constants as ct
+from hexrd.hedm import xrdutil
+from hexrd.core.utils.decorators import memoize

-from .detector import (
-    Detector, _solid_angle_of_triangle, _row_edge_vec, _col_edge_vec
-)
+from .detector import Detector, _solid_angle_of_triangle, _row_edge_vec, _col_edge_vec

 from functools import partial
-from hexrd.gridutil import cellConnectivity
-from hexrd.utils.concurrent import distribute_tasks
+from hexrd.core.gridutil import cellConnectivity
+from hexrd.core.utils.concurrent import distribute_tasks
 from concurrent.futures import ProcessPoolExecutor

 logger = logging.getLogger(__name__)
diff --git a/hexrd/core/instrument/detector.py b/hexrd/core/instrument/detector.py
index db4f95d1a..76aeb9e26 100644
--- a/hexrd/core/instrument/detector.py
+++ b/hexrd/core/instrument/detector.py
@@ -3,37 +3,33 @@
 import os
 from typing import Optional

-from hexrd.instrument.constants import (
-    COATING_DEFAULT, FILTER_DEFAULTS, PHOSPHOR_DEFAULT
-)
-from hexrd.instrument.physics_package import AbstractPhysicsPackage
+from hexrd.core.instrument.constants import COATING_DEFAULT, FILTER_DEFAULTS, PHOSPHOR_DEFAULT
+from hexrd.hedm.instrument.physics_package import AbstractPhysicsPackage
+from hexrd.core.instrument.physics_package import AbstractPhysicsPackage
 import numpy as np
 import numba

-from hexrd import constants as ct
-from hexrd import distortion as distortion_pkg
-from hexrd import matrixutil as mutil
-from hexrd import xrdutil
-from hexrd.rotations import mapAngle
-
-from hexrd.material import crystallography
-from hexrd.material.crystallography import PlaneData
-
-from hexrd.transforms.xfcapi import (
-    xy_to_gvec,
-    gvec_to_xy,
-    make_beam_rmat,
-    make_rmat_of_expmap,
-    oscill_angles_of_hkls,
-    angles_to_dvec,
-)
-
-from hexrd.utils.decorators import memoize
-from hexrd.gridutil import cellIndices
-from hexrd.instrument import detector_coatings
-from hexrd.material.utils import (
-    calculate_linear_absorption_length,
-    calculate_incoherent_scattering)
+from hexrd.core import constants as ct
+from hexrd.core import distortion as distortion_pkg
+from hexrd.core import matrixutil as mutil
+from hexrd.hedm import xrdutil
+from hexrd.core.rotations import mapAngle
+
+from hexrd.hedm.material import crystallography
+from hexrd.laue.material import crystallography
+from hexrd.powder.material import crystallography
+from hexrd.core.material import crystallography
+from hexrd.hedm.material.crystallography import PlaneData
+from hexrd.laue.material.crystallography import PlaneData
+from hexrd.powder.material.crystallography import PlaneData
+from hexrd.core.material.crystallography import PlaneData
+
+from hexrd.core.transforms.xfcapi import xy_to_gvec, gvec_to_xy, make_beam_rmat, make_rmat_of_expmap, oscill_angles_of_hkls, angles_to_dvec
+
+from hexrd.core.utils.decorators import memoize
+from hexrd.core.gridutil import cellIndices
+from hexrd.core.instrument import detector_coatings
+from hexrd.core.material.utils import calculate_linear_absorption_length, calculate_incoherent_scattering

 distortion_registry = distortion_pkg.Registry()
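
NOTE: Several rewritten import blocks in this patch (crystallography, PlaneData, and AbstractPhysicsPackage just above, and hklToStr and Detector further down) bind the same name from the hedm, laue, powder, and core subpackages in sequence. Python rebinds the name on each import, so only the last binding (the hexrd.core variant here) is visible afterwards; the earlier imports still execute and pull in their whole dependency chains. A runnable stand-in using stdlib modules:

    # Successive "from ... import X as Y" statements rebind Y; the last wins.
    from json import dumps as serialize    # serialize -> json.dumps
    from pickle import dumps as serialize  # serialize -> pickle.dumps (rebound)

    import pickle
    assert serialize is pickle.dumps       # the json binding is no longer reachable
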
diff --git a/hexrd/core/instrument/detector_coatings.py b/hexrd/core/instrument/detector_coatings.py
index 4dc5d854b..44b331ae3 100644
--- a/hexrd/core/instrument/detector_coatings.py
+++ b/hexrd/core/instrument/detector_coatings.py
@@ -1,6 +1,5 @@
 import numpy as np
-from hexrd.material.utils import (calculate_linear_absorption_length,
-                                  calculate_energy_absorption_length)
+from hexrd.core.material.utils import calculate_linear_absorption_length, calculate_energy_absorption_length


 class AbstractLayer:
diff --git a/hexrd/core/instrument/hedm_instrument.py b/hexrd/core/instrument/hedm_instrument.py
index 1d768b47c..fe6bd619a 100644
--- a/hexrd/core/instrument/hedm_instrument.py
+++ b/hexrd/core/instrument/hedm_instrument.py
@@ -52,42 +52,35 @@
 from scipy.linalg import logm
 from skimage.measure import regionprops

-from hexrd import constants
-from hexrd.imageseries import ImageSeries
-from hexrd.imageseries.process import ProcessedImageSeries
-from hexrd.imageseries.omega import OmegaImageSeries
-from hexrd.fitting.utils import fit_ring
-from hexrd.gridutil import make_tolerance_grid
-from hexrd import matrixutil as mutil
-from hexrd.transforms.xfcapi import (
-    angles_to_gvec,
-    gvec_to_xy,
-    make_sample_rmat,
-    make_rmat_of_expmap,
-    unit_vector,
-)
-from hexrd import xrdutil
-from hexrd.material.crystallography import PlaneData
-from hexrd import constants as ct
-from hexrd.rotations import mapAngle
-from hexrd import distortion as distortion_pkg
-from hexrd.utils.concurrent import distribute_tasks
-from hexrd.utils.hdf5 import unwrap_dict_to_h5, unwrap_h5_to_dict
-from hexrd.utils.yaml import NumpyToNativeDumper
-from hexrd.valunits import valWUnit
-from hexrd.wppf import LeBail
+from hexrd.core import constants
+from hexrd.core.imageseries import ImageSeries
+from hexrd.core.imageseries.process import ProcessedImageSeries
+from hexrd.core.imageseries.omega import OmegaImageSeries
+from hexrd.core.fitting.utils import fit_ring
+from hexrd.core.gridutil import make_tolerance_grid
+from hexrd.core import matrixutil as mutil
+from hexrd.core.transforms.xfcapi import angles_to_gvec, gvec_to_xy, make_sample_rmat, make_rmat_of_expmap, unit_vector
+from hexrd.hedm import xrdutil
+from hexrd.hedm.material.crystallography import PlaneData
+from hexrd.laue.material.crystallography import PlaneData
+from hexrd.powder.material.crystallography import PlaneData
+from hexrd.core.material.crystallography import PlaneData
+from hexrd.core import constants as ct
+from hexrd.core.rotations import mapAngle
+from hexrd.core import distortion as distortion_pkg
+from hexrd.core.utils.concurrent import distribute_tasks
+from hexrd.core.utils.hdf5 import unwrap_dict_to_h5, unwrap_h5_to_dict
+from hexrd.core.utils.yaml import NumpyToNativeDumper
+from hexrd.core.valunits import valWUnit
+from hexrd.powder.wppf import LeBail

 from .cylindrical_detector import CylindricalDetector
-from .detector import (
-    beam_energy_DFLT,
-    Detector,
-    max_workers_DFLT,
-)
+from .detector import beam_energy_DFLT, Detector, max_workers_DFLT
 from .planar_detector import PlanarDetector

 from skimage.draw import polygon
 from skimage.util import random_noise
-from hexrd.wppf import wppfsupport
+from hexrd.powder.wppf import wppfsupport

 try:
     from fast_histogram import histogram1d
diff --git a/hexrd/core/instrument/physics_package.py b/hexrd/core/instrument/physics_package.py
index d5837d99a..0a50258b9 100644
--- a/hexrd/core/instrument/physics_package.py
+++ b/hexrd/core/instrument/physics_package.py
@@ -1,6 +1,6 @@
 from abc import abstractmethod
 import numpy as np
-from hexrd.material.utils import calculate_linear_absorption_length
+from hexrd.core.material.utils import calculate_linear_absorption_length


 class AbstractPhysicsPackage:
diff --git a/hexrd/core/instrument/planar_detector.py b/hexrd/core/instrument/planar_detector.py
index 218aef23b..fe0aa104a 100644
--- a/hexrd/core/instrument/planar_detector.py
+++ b/hexrd/core/instrument/planar_detector.py
@@ -1,22 +1,14 @@
 import numpy as np

-from hexrd import constants as ct
-from hexrd.transforms.xfcapi import (
-    angles_to_gvec,
-    xy_to_gvec,
-    gvec_to_xy,
-    make_beam_rmat,
-    angles_to_dvec,
-)
-from hexrd.utils.decorators import memoize
-
-from .detector import (
-    Detector, _solid_angle_of_triangle, _row_edge_vec, _col_edge_vec
-)
+from hexrd.core import constants as ct
+from hexrd.core.transforms.xfcapi import angles_to_gvec, xy_to_gvec, gvec_to_xy, make_beam_rmat, angles_to_dvec
+from hexrd.core.utils.decorators import memoize
+
+from .detector import Detector, _solid_angle_of_triangle, _row_edge_vec, _col_edge_vec

 from functools import partial
-from hexrd.gridutil import cellConnectivity
-from hexrd.utils.concurrent import distribute_tasks
+from hexrd.core.gridutil import cellConnectivity
+from hexrd.core.utils.concurrent import distribute_tasks
 from concurrent.futures import ProcessPoolExecutor
diff --git a/hexrd/core/material/__init__.py b/hexrd/core/material/__init__.py
index 8ce0c9625..b46571887 100644
--- a/hexrd/core/material/__init__.py
+++ b/hexrd/core/material/__init__.py
@@ -1,7 +1 @@
-from .material import (
-    _angstroms,
-    _kev,
-    load_materials_hdf5,
-    Material,
-    save_materials_hdf5,
-)
+from .material import _angstroms, _kev, load_materials_hdf5, Material, save_materials_hdf5
diff --git a/hexrd/core/material/crystallography.py b/hexrd/core/material/crystallography.py
index 574225e67..48eb857e8 100644
--- a/hexrd/core/material/crystallography.py
+++ b/hexrd/core/material/crystallography.py
@@ -34,21 +34,16 @@

 import numpy as np

-from hexrd.material.unitcell import unitcell
-from hexrd.deprecation import deprecated
-from hexrd import constants
-from hexrd.matrixutil import unitVector
-from hexrd.rotations import (
-    rotMatOfExpMap,
-    mapAngle,
-    applySym,
-    ltypeOfLaueGroup,
-    quatOfLaueGroup,
-)
-from hexrd.transforms import xfcapi
-from hexrd import valunits
-from hexrd.valunits import toFloat
-from hexrd.constants import d2r, r2d, sqrt3by2, epsf, sqrt_epsf
+from hexrd.hedm.material.unitcell import unitcell
+from hexrd.core.material.unitcell import unitcell
+from hexrd.core.deprecation import deprecated
+from hexrd.core import constants
+from hexrd.core.matrixutil import unitVector
+from hexrd.core.rotations import rotMatOfExpMap, mapAngle, applySym, ltypeOfLaueGroup, quatOfLaueGroup
+from hexrd.core.transforms import xfcapi
+from hexrd.core import valunits
+from hexrd.core.valunits import toFloat
+from hexrd.core.constants import d2r, r2d, sqrt3by2, epsf, sqrt_epsf

 """module vars"""
diff --git a/hexrd/core/material/material.py b/hexrd/core/material/material.py
index 438252c38..fd21f278a 100644
--- a/hexrd/core/material/material.py
+++ b/hexrd/core/material/material.py
@@ -34,27 +34,25 @@
 from configparser import SafeConfigParser as Parser
 import numpy as np

-from hexrd.material.crystallography import PlaneData as PData
-from hexrd.material import symmetry, unitcell
-from hexrd.material.symbols import two_origin_choice
-from hexrd.valunits import valWUnit
-from hexrd.constants import (ptable,
-                             ptableinverse,
-                             chargestate)
+from hexrd.hedm.material.crystallography import PlaneData as PData
+from hexrd.laue.material.crystallography import PlaneData as PData
+from hexrd.powder.material.crystallography import PlaneData as PData
+from hexrd.core.material.crystallography import PlaneData as PData
+from hexrd.core.material import symmetry, unitcell
+from hexrd.hedm.material import unitcell
+from hexrd.core.material.symbols import two_origin_choice
+from hexrd.core.valunits import valWUnit
+from hexrd.core.constants import ptable, ptableinverse, chargestate

 from os import path
 from pathlib import Path
 from CifFile import ReadCif
 import h5py
 from warnings import warn
-from hexrd.material.mksupport import Write2H5File
-from hexrd.material.symbols import (
-    xtal_sys_dict,
-    Hall_to_sgnum,
-    HM_to_sgnum,
-)
-from hexrd.utils.compatibility import h5py_read_string
-from hexrd.fitting.peakfunctions import _unit_gaussian
+from hexrd.core.material.mksupport import Write2H5File
+from hexrd.core.material.symbols import xtal_sys_dict, Hall_to_sgnum, HM_to_sgnum
+from hexrd.core.utils.compatibility import h5py_read_string
+from hexrd.core.fitting.peakfunctions import _unit_gaussian

 __all__ = ['Material', 'loadMaterialList']
diff --git a/hexrd/core/material/mksupport.py b/hexrd/core/material/mksupport.py
index 73f718700..d205ce3e7 100644
--- a/hexrd/core/material/mksupport.py
+++ b/hexrd/core/material/mksupport.py
@@ -1,15 +1,11 @@
-from hexrd.material.symbols import (pstr_Elements,
-                                    two_origin_choice,
-                                    PrintPossibleSG,
-                                    TRIG,
-                                    pstr_spacegroup,
-                                    pstr_mkxtal)
+from hexrd.core.material.symbols import pstr_Elements, two_origin_choice, PrintPossibleSG, TRIG, pstr_spacegroup, pstr_mkxtal
 import h5py
 import os
 import numpy as np
 import datetime
 import getpass
-from hexrd.material.unitcell import _StiffnessDict, _pgDict
+from hexrd.hedm.material.unitcell import _StiffnessDict, _pgDict
+from hexrd.core.material.unitcell import _StiffnessDict, _pgDict


 def mk(filename, xtalname):
diff --git a/hexrd/core/material/spacegroup.py b/hexrd/core/material/spacegroup.py
index 0fd69894e..768227f2d 100644
--- a/hexrd/core/material/spacegroup.py
+++ b/hexrd/core/material/spacegroup.py
@@ -72,8 +72,8 @@
 from collections import OrderedDict
 from math import sqrt, floor

-from hexrd import constants
-from hexrd.material import symbols, symmetry
+from hexrd.core import constants
+from hexrd.core.material import symbols, symmetry
 import numpy as np

 #
diff --git a/hexrd/core/material/symmetry.py b/hexrd/core/material/symmetry.py
index 0d614f7ff..a1a7fd0da 100644
--- a/hexrd/core/material/symmetry.py
+++ b/hexrd/core/material/symmetry.py
@@ -31,19 +31,15 @@
 import numpy as np
 from numba import njit
-from numpy import (array, sqrt, pi,
-                   vstack, c_, dot,
-                   argmax)
+from numpy import array, sqrt, pi, vstack, c_, dot, argmax

 # from hexrd.rotations import quatOfAngleAxis, quatProductMatrix, fixQuat
-from hexrd import rotations as rot
-from hexrd import constants
-from hexrd.utils.decorators import memoize
+from hexrd.core import rotations as rot
+from hexrd.core import constants
+from hexrd.core.utils.decorators import memoize

 # Imports in case others are importing from here
-from hexrd.rotations import (toFundamentalRegion,
-                             ltypeOfLaueGroup,
-                             quatOfLaueGroup)
+from hexrd.core.rotations import toFundamentalRegion, ltypeOfLaueGroup, quatOfLaueGroup


 # =============================================================================
diff --git a/hexrd/core/material/unitcell.py b/hexrd/core/material/unitcell.py
index 26f3a2fef..c6debaa10 100644
--- a/hexrd/core/material/unitcell.py
+++ b/hexrd/core/material/unitcell.py
@@ -1,11 +1,11 @@
 import importlib.resources
 import numpy as np
 from numba import njit
-from hexrd import constants
-from hexrd.material import spacegroup, symbols, symmetry
-from hexrd.ipfcolor import sphere_sector, colorspace
-from hexrd.valunits import valWUnit
-import hexrd.resources
+from hexrd.core import constants
+from hexrd.core.material import spacegroup, symbols, symmetry
+from hexrd.hedm.ipfcolor import sphere_sector, colorspace
+from hexrd.core.valunits import valWUnit
+import hexrd.core.resources
 import warnings
 import h5py
 from pathlib import Path
@@ -788,7 +788,7 @@ def InitializeInterpTable(self):
             self.f_anomalous_data[i, :nd, :] = f_anomalous_data[i]

     def CalcXRSF(self, hkl):
-        from hexrd.wppf.xtal import _calcxrsf
+        from hexrd.powder.wppf.xtal import _calcxrsf
         '''
         the 1E-2 is to convert to A^-2 since the fitting is done in those units
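
NOTE: The CalcXRSF hunk above rewrites an import that sits inside the method body rather than at module top level. Deferring an import to call time is a common way to avoid a circular dependency between packages; a hedged sketch of the equivalent lazy lookup via importlib (helper name hypothetical, and _calcxrsf's signature is not shown in this patch):

    import importlib

    def calc_xrsf_dependency():
        # Resolve the wppf extension only when first needed, so the importing
        # module does not require hexrd.powder at import time.
        xtal = importlib.import_module("hexrd.powder.wppf.xtal")
        return xtal._calcxrsf
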
diff --git a/hexrd/core/material/utils.py b/hexrd/core/material/utils.py
index da51c1b84..9d1269ac7 100644
--- a/hexrd/core/material/utils.py
+++ b/hexrd/core/material/utils.py
@@ -1,13 +1,12 @@
 import importlib.resources
-import hexrd.resources
-from hexrd.constants import cClassicalelectronRad as re,\
-cAvogadro, ATOM_WEIGHTS_DICT
+import hexrd.core.resources
+from hexrd.core.constants import cClassicalelectronRad as re, cAvogadro, ATOM_WEIGHTS_DICT
 import chemparse
 import numpy as np
 import h5py
 from copy import deepcopy
 from scipy.interpolate import interp1d
-from hexrd import constants
+from hexrd.core import constants

 """
 calculate the molecular weight given the formula unit
diff --git a/hexrd/core/matrixutil.py b/hexrd/core/matrixutil.py
index d48c9f87d..f94c1b048 100644
--- a/hexrd/core/matrixutil.py
+++ b/hexrd/core/matrixutil.py
@@ -34,7 +34,7 @@

 import numba

-from hexrd import constants
+from hexrd.core import constants

 # module variables
 sqr6i = 1./np.sqrt(6.)
diff --git a/hexrd/core/projections/polar.py b/hexrd/core/projections/polar.py
index 3b28b2599..9ffc249ef 100644
--- a/hexrd/core/projections/polar.py
+++ b/hexrd/core/projections/polar.py
@@ -1,12 +1,14 @@
 import numpy as np

-from hexrd import constants
-from hexrd.material.crystallography import PlaneData
-from hexrd.xrdutil.utils import (
-    _project_on_detector_cylinder,
-    _project_on_detector_plane,
-)
-from hexrd.utils.panel_buffer import panel_buffer_as_2d_array
+from hexrd.core import constants
+from hexrd.hedm.material.crystallography import PlaneData
+from hexrd.laue.material.crystallography import PlaneData
+from hexrd.powder.material.crystallography import PlaneData
+from hexrd.core.material.crystallography import PlaneData
+from hexrd.hedm.xrdutil.utils import _project_on_detector_cylinder, _project_on_detector_plane
+from hexrd.hed.xrdutil.utils import _project_on_detector_cylinder, _project_on_detector_plane
+from hexrd.laue.xrdutil.utils import _project_on_detector_cylinder, _project_on_detector_plane
+from hexrd.core.utils.panel_buffer import panel_buffer_as_2d_array


 class PolarView:
diff --git a/hexrd/core/projections/spherical.py b/hexrd/core/projections/spherical.py
index e0a35fe56..1cb7ece9d 100644
--- a/hexrd/core/projections/spherical.py
+++ b/hexrd/core/projections/spherical.py
@@ -1,7 +1,7 @@
 import numpy as np
 from skimage.transform import PiecewiseAffineTransform, warp

-from hexrd import constants
+from hexrd.core import constants
 from hexrd.xrdutils.util import zproject_sph_angles
diff --git a/hexrd/core/rotations.py b/hexrd/core/rotations.py
index bf8cfc828..582cdefac 100644
--- a/hexrd/core/rotations.py
+++ b/hexrd/core/rotations.py
@@ -35,16 +35,10 @@
 from scipy.optimize import leastsq
 from scipy.spatial.transform import Rotation as R

-from hexrd.deprecation import deprecated
-from hexrd import constants as cnst
-from hexrd.matrixutil import (
-    columnNorm,
-    unitVector,
-    findDuplicateVectors,
-    multMatArray,
-    nullSpace,
-)
-from hexrd.utils.warnings import ignore_warnings
+from hexrd.core.deprecation import deprecated
+from hexrd.core import constants as cnst
+from hexrd.core.matrixutil import columnNorm, unitVector, findDuplicateVectors, multMatArray, nullSpace
+from hexrd.core.utils.warnings import ignore_warnings

 # =============================================================================
diff --git a/hexrd/core/transforms/new_capi/xf_new_capi.py b/hexrd/core/transforms/new_capi/xf_new_capi.py
index 33f31da75..287a829c7 100644
--- a/hexrd/core/transforms/new_capi/xf_new_capi.py
+++ b/hexrd/core/transforms/new_capi/xf_new_capi.py
@@ -26,10 +26,10 @@
 from typing import Optional, Tuple, Union

 import numpy as np

-from hexrd.extensions import _new_transforms_capi as _impl
-from hexrd.extensions import transforms as cpp_transforms
-from hexrd.distortion.distortionabc import DistortionABC
-from hexrd import constants as cnst
+from hexrd.core.extensions import _new_transforms_capi as _impl
+from hexrd.core.extensions import transforms as cpp_transforms
+from hexrd.core.distortion.distortionabc import DistortionABC
+from hexrd.core import constants as cnst


 def angles_to_gvec(
diff --git a/hexrd/core/transforms/old_xfcapi.py b/hexrd/core/transforms/old_xfcapi.py
index 1b328af6a..3179fd640 100644
--- a/hexrd/core/transforms/old_xfcapi.py
+++ b/hexrd/core/transforms/old_xfcapi.py
@@ -29,10 +29,10 @@
 import numpy as np
 import sys

-from hexrd.extensions import _transforms_CAPI
+from hexrd.core.extensions import _transforms_CAPI

 # Imports so that others can import from this module
-from hexrd.rotations import mapAngle
-from hexrd.matrixutil import columnNorm, rowNorm
+from hexrd.core.rotations import mapAngle
+from hexrd.core.matrixutil import columnNorm, rowNorm

 # ######################################################################
 # Module Data
diff --git a/hexrd/core/transforms/xf.py b/hexrd/core/transforms/xf.py
index 74c62de70..2941b0f88 100644
--- a/hexrd/core/transforms/xf.py
+++ b/hexrd/core/transforms/xf.py
@@ -34,14 +34,12 @@

 import scipy.sparse as sparse

-from hexrd import matrixutil as mutil
+from hexrd.core import matrixutil as mutil

 # Added to not break people importing these methods
-from hexrd.rotations import (mapAngle,
-                             quatProductMatrix as quat_product_matrix,
-                             arccosSafe, angularDifference)
-from hexrd.matrixutil import columnNorm, rowNorm
+from hexrd.core.rotations import mapAngle, quatProductMatrix as quat_product_matrix, arccosSafe, angularDifference
+from hexrd.core.matrixutil import columnNorm, rowNorm


 # =============================================================================
diff --git a/hexrd/core/transforms/xfcapi.py b/hexrd/core/transforms/xfcapi.py
index af0ca3257..8255fc9f5 100644
--- a/hexrd/core/transforms/xfcapi.py
+++ b/hexrd/core/transforms/xfcapi.py
@@ -2,46 +2,7 @@
 # add and test them.
 # NOTE: we are only importing what is currently being used in hexrd
 # and hexrdgui. This is so that we can see clearly what is in use.
-from .old_xfcapi import (
-    # Old transform functions still in use
-    anglesToDVec,  # new version provided below
-    anglesToGVec,  # new version provided below
-    detectorXYToGvec,  # new version provided below
-    gvecToDetectorXY,  # new version provided below
-    gvecToDetectorXYArray,  # new version provided below
-    oscillAnglesOfHKLs,
-    # Utility functions
-    angularDifference,
-    makeDetectorRotMat,  # New version provided below
-    makeEtaFrameRotMat,  # new version provided below
-    makeOscillRotMat,  # new version provided below
-    makeOscillRotMatArray,  # new version provided below
-    makeRotMatOfExpMap,
-    makeRotMatOfQuat,  # Use rotations.rotMatOfQuat instead
-    mapAngle,  # Use rotations.mapAngle instead
-    rowNorm,  # use numpy.linalg.norm(..., axis=1) instead
-    unitRowVector,  # new version below
-    # Constants,
-    bVec_ref,
-    eta_ref,
-    Xl,
-    Yl,
-)
+from .old_xfcapi import anglesToDVec, anglesToGVec, detectorXYToGvec, gvecToDetectorXY, gvecToDetectorXYArray, oscillAnglesOfHKLs, angularDifference, makeDetectorRotMat, makeEtaFrameRotMat, makeOscillRotMat, makeOscillRotMatArray, makeRotMatOfExpMap, makeRotMatOfQuat, mapAngle, rowNorm, unitRowVector, bVec_ref, eta_ref, Xl, Yl

-from .new_capi.xf_new_capi import (
-    # New transform functions
-    angles_to_dvec,
-    angles_to_gvec,
-    gvec_to_xy,  # this is gvecToDetectorXY and gvecToDetectorXYArray
-    make_beam_rmat,  # this is makeEtaFrameRotMat
-    make_detector_rmat,
-    make_rmat_of_expmap,
-    make_sample_rmat,  # this is makeOscillRotMat and makeOscillRotMatArray
-    oscill_angles_of_hkls,
-    quat_distance,
-    rotate_vecs_about_axis,
-    unit_vector,  # this is unitRowVector
-    validate_angle_ranges,
-    xy_to_gvec,
-)
+from .new_capi.xf_new_capi import angles_to_dvec, angles_to_gvec, gvec_to_xy, make_beam_rmat, make_detector_rmat, make_rmat_of_expmap, make_sample_rmat, oscill_angles_of_hkls, quat_distance, rotate_vecs_about_axis, unit_vector, validate_angle_ranges, xy_to_gvec
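
NOTE: hexrd/core/transforms/xfcapi.py remains a pure re-export facade, so downstream callers only change the package root while both API generations stay importable from one module:

    # After this patch, both the legacy and the new transform names resolve
    # through the same facade module:
    from hexrd.core.transforms.xfcapi import angles_to_gvec  # new-style API
    from hexrd.core.transforms.xfcapi import anglesToGVec    # legacy name, still re-exported
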
diff --git a/hexrd/core/utils/hdf5.py b/hexrd/core/utils/hdf5.py
index f9380daa0..954d1af74 100644
--- a/hexrd/core/utils/hdf5.py
+++ b/hexrd/core/utils/hdf5.py
@@ -1,4 +1,4 @@
-from hexrd.utils.compatibility import h5py_read_string
+from hexrd.core.utils.compatibility import h5py_read_string

 import numpy as np
diff --git a/hexrd/core/utils/hkl.py b/hexrd/core/utils/hkl.py
index 10706c603..d25ea5165 100644
--- a/hexrd/core/utils/hkl.py
+++ b/hexrd/core/utils/hkl.py
@@ -1,6 +1,9 @@
 import numpy as np

-from hexrd.material.crystallography import hklToStr
+from hexrd.hedm.material.crystallography import hklToStr
+from hexrd.laue.material.crystallography import hklToStr
+from hexrd.powder.material.crystallography import hklToStr
+from hexrd.core.material.crystallography import hklToStr


 def hkl_to_str(hkl):
diff --git a/hexrd/core/utils/panel_buffer.py b/hexrd/core/utils/panel_buffer.py
index 273eca27d..192eeb3d5 100644
--- a/hexrd/core/utils/panel_buffer.py
+++ b/hexrd/core/utils/panel_buffer.py
@@ -1,6 +1,10 @@
 import numpy as np

-from hexrd.instrument.detector import Detector
+from hexrd.core.instrument.detector import Detector
+from hexrd.hed.instrument.detector import Detector
+from hexrd.hedm.instrument.detector import Detector
+from hexrd.powder.instrument.detector import Detector
+from hexrd.laue.instrument.detector import Detector


 def panel_buffer_as_2d_array(panel: Detector) -> np.ndarray:
diff --git a/hexrd/core/valunits.py b/hexrd/core/valunits.py
index 3dd0b9462..50184326d 100644
--- a/hexrd/core/valunits.py
+++ b/hexrd/core/valunits.py
@@ -37,7 +37,7 @@
 import doctest
 import math

-from hexrd.constants import keVToAngstrom
+from hexrd.core.constants import keVToAngstrom

 __all__ = ['valWUnit', 'toFloat', 'valWithDflt']
diff --git a/hexrd/hed/instrument/detector.py b/hexrd/hed/instrument/detector.py
index db4f95d1a..76aeb9e26 100644
--- a/hexrd/hed/instrument/detector.py
+++ b/hexrd/hed/instrument/detector.py
@@ -3,37 +3,33 @@
 import os
 from typing import Optional

-from hexrd.instrument.constants import (
-    COATING_DEFAULT, FILTER_DEFAULTS, PHOSPHOR_DEFAULT
-)
-from hexrd.instrument.physics_package import AbstractPhysicsPackage
+from hexrd.core.instrument.constants import COATING_DEFAULT, FILTER_DEFAULTS, PHOSPHOR_DEFAULT
+from hexrd.hedm.instrument.physics_package import AbstractPhysicsPackage
+from hexrd.core.instrument.physics_package import AbstractPhysicsPackage
 import numpy as np
 import numba

-from hexrd import constants as ct
-from hexrd import distortion as distortion_pkg
-from hexrd import matrixutil as mutil
-from hexrd import xrdutil
-from hexrd.rotations import mapAngle
-
-from hexrd.material import crystallography
-from hexrd.material.crystallography import PlaneData
-
-from hexrd.transforms.xfcapi import (
-    xy_to_gvec,
-    gvec_to_xy,
-    make_beam_rmat,
-    make_rmat_of_expmap,
-    oscill_angles_of_hkls,
-    angles_to_dvec,
-)
-
-from hexrd.utils.decorators import memoize
-from hexrd.gridutil import cellIndices
-from hexrd.instrument import detector_coatings
-from hexrd.material.utils import (
-    calculate_linear_absorption_length,
-    calculate_incoherent_scattering)
+from hexrd.core import constants as ct
+from hexrd.core import distortion as distortion_pkg
+from hexrd.core import matrixutil as mutil
+from hexrd.hedm import xrdutil
+from hexrd.core.rotations import mapAngle
+
+from hexrd.hedm.material import crystallography
+from hexrd.laue.material import crystallography
+from hexrd.powder.material import crystallography
+from hexrd.core.material import crystallography
+from hexrd.hedm.material.crystallography import PlaneData
+from hexrd.laue.material.crystallography import PlaneData
+from hexrd.powder.material.crystallography import PlaneData
+from hexrd.core.material.crystallography import PlaneData
+
+from hexrd.core.transforms.xfcapi import xy_to_gvec, gvec_to_xy, make_beam_rmat, make_rmat_of_expmap, oscill_angles_of_hkls, angles_to_dvec
+
+from hexrd.core.utils.decorators import memoize
+from hexrd.core.gridutil import cellIndices
+from hexrd.core.instrument import detector_coatings
+from hexrd.core.material.utils import calculate_linear_absorption_length, calculate_incoherent_scattering

 distortion_registry = distortion_pkg.Registry()
diff --git a/hexrd/hed/instrument/hedm_instrument.py b/hexrd/hed/instrument/hedm_instrument.py
index 1d768b47c..fe6bd619a 100644
--- a/hexrd/hed/instrument/hedm_instrument.py
+++ b/hexrd/hed/instrument/hedm_instrument.py
@@ -52,42 +52,35 @@
 from scipy.linalg import logm
 from skimage.measure import regionprops

-from hexrd import constants
-from hexrd.imageseries import ImageSeries
-from hexrd.imageseries.process import ProcessedImageSeries
-from hexrd.imageseries.omega import OmegaImageSeries
-from hexrd.fitting.utils import fit_ring
-from hexrd.gridutil import make_tolerance_grid
-from hexrd import matrixutil as mutil
-from hexrd.transforms.xfcapi import (
-    angles_to_gvec,
-    gvec_to_xy,
-    make_sample_rmat,
-    make_rmat_of_expmap,
-    unit_vector,
-)
-from hexrd import xrdutil
-from hexrd.material.crystallography import PlaneData
-from hexrd import constants as ct
-from hexrd.rotations import mapAngle
-from hexrd import distortion as distortion_pkg
-from hexrd.utils.concurrent import distribute_tasks
-from hexrd.utils.hdf5 import unwrap_dict_to_h5, unwrap_h5_to_dict
-from hexrd.utils.yaml import NumpyToNativeDumper
-from hexrd.valunits import valWUnit
-from hexrd.wppf import LeBail
+from hexrd.core import constants
+from hexrd.core.imageseries import ImageSeries
+from hexrd.core.imageseries.process import ProcessedImageSeries
+from hexrd.core.imageseries.omega import OmegaImageSeries
+from hexrd.core.fitting.utils import fit_ring
+from hexrd.core.gridutil import make_tolerance_grid
+from hexrd.core import matrixutil as mutil
+from hexrd.core.transforms.xfcapi import angles_to_gvec, gvec_to_xy, make_sample_rmat, make_rmat_of_expmap, unit_vector
+from hexrd.hedm import xrdutil
+from hexrd.hedm.material.crystallography import PlaneData
+from hexrd.laue.material.crystallography import PlaneData
+from hexrd.powder.material.crystallography import PlaneData
+from hexrd.core.material.crystallography import PlaneData
+from hexrd.core import constants as ct
+from hexrd.core.rotations import mapAngle
+from hexrd.core import distortion as distortion_pkg
+from hexrd.core.utils.concurrent import distribute_tasks
+from hexrd.core.utils.hdf5 import unwrap_dict_to_h5, unwrap_h5_to_dict
+from hexrd.core.utils.yaml import NumpyToNativeDumper
+from hexrd.core.valunits import valWUnit
+from hexrd.powder.wppf import LeBail

 from .cylindrical_detector import CylindricalDetector
-from .detector import (
-    beam_energy_DFLT,
-    Detector,
-    max_workers_DFLT,
-)
+from .detector import beam_energy_DFLT, Detector, max_workers_DFLT
 from .planar_detector import PlanarDetector

 from skimage.draw import polygon
 from skimage.util import random_noise
-from hexrd.wppf import wppfsupport
+from hexrd.powder.wppf import wppfsupport

 try:
     from fast_histogram import histogram1d
diff --git a/hexrd/hed/xrdutil/phutil.py b/hexrd/hed/xrdutil/phutil.py
index 512c3399e..277c0fc3c 100644
--- a/hexrd/hed/xrdutil/phutil.py
+++ b/hexrd/hed/xrdutil/phutil.py
@@ -12,10 +12,10 @@
 import numpy as np
 from numba import njit

-from hexrd import constants as ct
-from hexrd.instrument import Detector
-from hexrd.transforms import xfcapi
-from hexrd.utils.concurrent import distribute_tasks
+from hexrd.core import constants as ct
+from hexrd.core.instrument import Detector
+from hexrd.core.transforms import xfcapi
+from hexrd.core.utils.concurrent import distribute_tasks


 class SampleLayerDistortion:
diff --git a/hexrd/hed/xrdutil/utils.py b/hexrd/hed/xrdutil/utils.py
index 2cbae2b6f..4c31e29ea 100644
--- a/hexrd/hed/xrdutil/utils.py
+++ b/hexrd/hed/xrdutil/utils.py
@@ -28,26 +28,32 @@

 from typing import Optional, Union, Any, Generator

-from hexrd.material.crystallography import PlaneData
-from hexrd.distortion.distortionabc import DistortionABC
+from hexrd.hedm.material.crystallography import PlaneData
+from hexrd.laue.material.crystallography import PlaneData
+from hexrd.powder.material.crystallography import PlaneData
+from hexrd.core.material.crystallography import PlaneData
+from hexrd.core.distortion.distortionabc import DistortionABC

 import numba
 import numpy as np
 import numba

-from hexrd import constants
-from hexrd import matrixutil as mutil
-from hexrd import rotations as rot
-from hexrd import gridutil as gutil
+from hexrd.core import constants
+from hexrd.core import matrixutil as mutil
+from hexrd.core import rotations as rot
+from hexrd.core import gridutil as gutil

-from hexrd.material.crystallography import processWavelength, PlaneData
+from hexrd.hedm.material.crystallography import processWavelength, PlaneData
+from hexrd.laue.material.crystallography import processWavelength, PlaneData
+from hexrd.powder.material.crystallography import processWavelength, PlaneData
+from hexrd.core.material.crystallography import processWavelength, PlaneData

-from hexrd.transforms import xfcapi
-from hexrd.valunits import valWUnit
+from hexrd.core.transforms import xfcapi
+from hexrd.core.valunits import valWUnit

-from hexrd import distortion as distortion_pkg
+from hexrd.core import distortion as distortion_pkg

-from hexrd.deprecation import deprecated
+from hexrd.core.deprecation import deprecated


 simlp = 'hexrd.instrument.hedm_instrument.HEDMInstrument.simulate_laue_pattern'
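
NOTE: The simlp context line above stores a dotted path as a plain string ('hexrd.instrument....'), which no import rewrite can see; such strings go stale silently when modules move. A hedged sketch of a runtime resolver that would surface the breakage (helper name hypothetical):

    import importlib

    def resolve_dotted(path: str):
        """Resolve 'pkg.module.attr' at runtime; raises if the path is stale.
        (Handles a module-level attribute; a class-method path like simlp
        would need one more getattr step.)"""
        module_path, _, attr = path.rpartition(".")
        return getattr(importlib.import_module(module_path), attr)
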
import preprocess +from hexrd.hedm.preprocess.profiles import HexrdPPScript_Arguments +from hexrd.hedm.preprocess.preprocessors import preprocess from dataclasses import fields import json import copy diff --git a/hexrd/hedm/config/instrument.py b/hexrd/hedm/config/instrument.py index 8f0a87daa..31dbbb310 100644 --- a/hexrd/hedm/config/instrument.py +++ b/hexrd/hedm/config/instrument.py @@ -4,7 +4,7 @@ from .config import Config from .loader import NumPyIncludeLoader -from hexrd import instrument +from hexrd.core import instrument class Instrument(Config): diff --git a/hexrd/hedm/config/root.py b/hexrd/hedm/config/root.py index 0fca50ed7..e698a3f37 100644 --- a/hexrd/hedm/config/root.py +++ b/hexrd/hedm/config/root.py @@ -3,8 +3,8 @@ import logging import multiprocessing as mp -from hexrd.constants import shared_ims_key -from hexrd import imageseries +from hexrd.core.constants import shared_ims_key +from hexrd.core import imageseries from .config import Config from .instrument import Instrument diff --git a/hexrd/hedm/findorientations.py b/hexrd/hedm/findorientations.py index f6f7cce0f..96750cc03 100644 --- a/hexrd/hedm/findorientations.py +++ b/hexrd/hedm/findorientations.py @@ -12,14 +12,14 @@ import scipy.cluster as cluster from scipy import ndimage -from hexrd import constants as const -from hexrd import matrixutil as mutil -from hexrd import indexer -from hexrd import instrument -from hexrd.imageutil import find_peaks_2d -from hexrd import rotations as rot -from hexrd.transforms import xfcapi -from hexrd.xrdutil import EtaOmeMaps +from hexrd.core import constants as const +from hexrd.core import matrixutil as mutil +from hexrd.hedm import indexer +from hexrd.core import instrument +from hexrd.core.imageutil import find_peaks_2d +from hexrd.core import rotations as rot +from hexrd.core.transforms import xfcapi +from hexrd.hedm.xrdutil import EtaOmeMaps # just require scikit-learn? 
have_sklearn = False diff --git a/hexrd/hedm/fitgrains.py b/hexrd/hedm/fitgrains.py index 26a3e1bc4..461aaa639 100644 --- a/hexrd/hedm/fitgrains.py +++ b/hexrd/hedm/fitgrains.py @@ -12,10 +12,10 @@ import timeit import warnings -from hexrd import instrument -from hexrd.transforms import xfcapi -from hexrd import rotations -from hexrd.fitting import fitGrain, objFuncFitGrain, gFlag_ref +from hexrd.core import instrument +from hexrd.core.transforms import xfcapi +from hexrd.core import rotations +from hexrd.core.fitting import fitGrain, objFuncFitGrain, gFlag_ref logger = logging.getLogger(__name__) diff --git a/hexrd/hedm/fitting/grains.py b/hexrd/hedm/fitting/grains.py index c65ae37d5..9549f1c05 100644 --- a/hexrd/hedm/fitting/grains.py +++ b/hexrd/hedm/fitting/grains.py @@ -4,13 +4,13 @@ from scipy import optimize -from hexrd import matrixutil as mutil +from hexrd.core import matrixutil as mutil -from hexrd.transforms import xfcapi -from hexrd import constants -from hexrd import rotations +from hexrd.core.transforms import xfcapi +from hexrd.core import constants +from hexrd.core import rotations -from hexrd.xrdutil import extract_detector_transformation +from hexrd.hedm.xrdutil import extract_detector_transformation return_value_flag = None diff --git a/hexrd/hedm/grainmap/nfutil.py b/hexrd/hedm/grainmap/nfutil.py index 4c20a3cb9..a59b8c8c7 100644 --- a/hexrd/hedm/grainmap/nfutil.py +++ b/hexrd/hedm/grainmap/nfutil.py @@ -22,13 +22,13 @@ # import of hexrd modules # import hexrd -from hexrd import constants -from hexrd import instrument -from hexrd import material -from hexrd import rotations -from hexrd.transforms import xfcapi -from hexrd import valunits -from hexrd import xrdutil +from hexrd.core import constants +from hexrd.core import instrument +from hexrd.core import material +from hexrd.core import rotations +from hexrd.core.transforms import xfcapi +from hexrd.core import valunits +from hexrd.hedm import xrdutil from skimage.morphology import dilation as ski_dilation diff --git a/hexrd/hedm/indexer.py b/hexrd/hedm/indexer.py index 8e388e1e9..7033e4754 100644 --- a/hexrd/hedm/indexer.py +++ b/hexrd/hedm/indexer.py @@ -36,9 +36,9 @@ import timeit -from hexrd import constants -from hexrd import rotations -from hexrd.transforms import xfcapi +from hexrd.core import constants +from hexrd.core import rotations +from hexrd.core.transforms import xfcapi # ============================================================================= diff --git a/hexrd/hedm/instrument/detector.py b/hexrd/hedm/instrument/detector.py index db4f95d1a..76aeb9e26 100644 --- a/hexrd/hedm/instrument/detector.py +++ b/hexrd/hedm/instrument/detector.py @@ -3,37 +3,33 @@ import os from typing import Optional -from hexrd.instrument.constants import ( - COATING_DEFAULT, FILTER_DEFAULTS, PHOSPHOR_DEFAULT -) -from hexrd.instrument.physics_package import AbstractPhysicsPackage +from hexrd.core.instrument.constants import COATING_DEFAULT, FILTER_DEFAULTS, PHOSPHOR_DEFAULT +from hexrd.hedm.instrument.physics_package import AbstractPhysicsPackage +from hexrd.core.instrument.physics_package import AbstractPhysicsPackage import numpy as np import numba -from hexrd import constants as ct -from hexrd import distortion as distortion_pkg -from hexrd import matrixutil as mutil -from hexrd import xrdutil -from hexrd.rotations import mapAngle - -from hexrd.material import crystallography -from hexrd.material.crystallography import PlaneData - -from hexrd.transforms.xfcapi import ( - xy_to_gvec, - gvec_to_xy, - make_beam_rmat, - 
make_rmat_of_expmap, - oscill_angles_of_hkls, - angles_to_dvec, -) - -from hexrd.utils.decorators import memoize -from hexrd.gridutil import cellIndices -from hexrd.instrument import detector_coatings -from hexrd.material.utils import ( - calculate_linear_absorption_length, - calculate_incoherent_scattering) +from hexrd.core import constants as ct +from hexrd.core import distortion as distortion_pkg +from hexrd.core import matrixutil as mutil +from hexrd.hedm import xrdutil +from hexrd.core.rotations import mapAngle + +from hexrd.hedm.material import crystallography +from hexrd.laue.material import crystallography +from hexrd.powder.material import crystallography +from hexrd.core.material import crystallography +from hexrd.hedm.material.crystallography import PlaneData +from hexrd.laue.material.crystallography import PlaneData +from hexrd.powder.material.crystallography import PlaneData +from hexrd.core.material.crystallography import PlaneData + +from hexrd.core.transforms.xfcapi import xy_to_gvec, gvec_to_xy, make_beam_rmat, make_rmat_of_expmap, oscill_angles_of_hkls, angles_to_dvec + +from hexrd.core.utils.decorators import memoize +from hexrd.core.gridutil import cellIndices +from hexrd.core.instrument import detector_coatings +from hexrd.core.material.utils import calculate_linear_absorption_length, calculate_incoherent_scattering distortion_registry = distortion_pkg.Registry() diff --git a/hexrd/hedm/instrument/hedm_instrument.py b/hexrd/hedm/instrument/hedm_instrument.py index 1d768b47c..fe6bd619a 100644 --- a/hexrd/hedm/instrument/hedm_instrument.py +++ b/hexrd/hedm/instrument/hedm_instrument.py @@ -52,42 +52,35 @@ from scipy.linalg import logm from skimage.measure import regionprops -from hexrd import constants -from hexrd.imageseries import ImageSeries -from hexrd.imageseries.process import ProcessedImageSeries -from hexrd.imageseries.omega import OmegaImageSeries -from hexrd.fitting.utils import fit_ring -from hexrd.gridutil import make_tolerance_grid -from hexrd import matrixutil as mutil -from hexrd.transforms.xfcapi import ( - angles_to_gvec, - gvec_to_xy, - make_sample_rmat, - make_rmat_of_expmap, - unit_vector, -) -from hexrd import xrdutil -from hexrd.material.crystallography import PlaneData -from hexrd import constants as ct -from hexrd.rotations import mapAngle -from hexrd import distortion as distortion_pkg -from hexrd.utils.concurrent import distribute_tasks -from hexrd.utils.hdf5 import unwrap_dict_to_h5, unwrap_h5_to_dict -from hexrd.utils.yaml import NumpyToNativeDumper -from hexrd.valunits import valWUnit -from hexrd.wppf import LeBail +from hexrd.core import constants +from hexrd.core.imageseries import ImageSeries +from hexrd.core.imageseries.process import ProcessedImageSeries +from hexrd.core.imageseries.omega import OmegaImageSeries +from hexrd.core.fitting.utils import fit_ring +from hexrd.core.gridutil import make_tolerance_grid +from hexrd.core import matrixutil as mutil +from hexrd.core.transforms.xfcapi import angles_to_gvec, gvec_to_xy, make_sample_rmat, make_rmat_of_expmap, unit_vector +from hexrd.hedm import xrdutil +from hexrd.hedm.material.crystallography import PlaneData +from hexrd.laue.material.crystallography import PlaneData +from hexrd.powder.material.crystallography import PlaneData +from hexrd.core.material.crystallography import PlaneData +from hexrd.core import constants as ct +from hexrd.core.rotations import mapAngle +from hexrd.core import distortion as distortion_pkg +from hexrd.core.utils.concurrent import distribute_tasks +from 
hexrd.core.utils.hdf5 import unwrap_dict_to_h5, unwrap_h5_to_dict +from hexrd.core.utils.yaml import NumpyToNativeDumper +from hexrd.core.valunits import valWUnit +from hexrd.powder.wppf import LeBail from .cylindrical_detector import CylindricalDetector -from .detector import ( - beam_energy_DFLT, - Detector, - max_workers_DFLT, -) +from .detector import beam_energy_DFLT, Detector, max_workers_DFLT from .planar_detector import PlanarDetector from skimage.draw import polygon from skimage.util import random_noise -from hexrd.wppf import wppfsupport +from hexrd.powder.wppf import wppfsupport try: from fast_histogram import histogram1d diff --git a/hexrd/hedm/instrument/physics_package.py b/hexrd/hedm/instrument/physics_package.py index d5837d99a..0a50258b9 100644 --- a/hexrd/hedm/instrument/physics_package.py +++ b/hexrd/hedm/instrument/physics_package.py @@ -1,6 +1,6 @@ from abc import abstractmethod import numpy as np -from hexrd.material.utils import calculate_linear_absorption_length +from hexrd.core.material.utils import calculate_linear_absorption_length class AbstractPhysicsPackage: diff --git a/hexrd/hedm/ipfcolor/colorspace.py b/hexrd/hedm/ipfcolor/colorspace.py index 615d04b03..34fa59408 100644 --- a/hexrd/hedm/ipfcolor/colorspace.py +++ b/hexrd/hedm/ipfcolor/colorspace.py @@ -26,7 +26,7 @@ # Boston, MA 02111-1307 USA or visit <http://www.gnu.org/licenses/>. # ============================================================================= -from hexrd import constants +from hexrd.core import constants import numpy as np eps = constants.sqrt_epsf diff --git a/hexrd/hedm/ipfcolor/sphere_sector.py b/hexrd/hedm/ipfcolor/sphere_sector.py index 8c3366379..99d18e938 100644 --- a/hexrd/hedm/ipfcolor/sphere_sector.py +++ b/hexrd/hedm/ipfcolor/sphere_sector.py @@ -25,9 +25,9 @@ # the Free Software Foundation, Inc., 59 Temple Place, Suite 330, # Boston, MA 02111-1307 USA or visit <http://www.gnu.org/licenses/>. 
# ============================================================================= -from hexrd import constants +from hexrd.core import constants import numpy as np -from hexrd.ipfcolor import colorspace +from hexrd.hedm.ipfcolor import colorspace eps = constants.sqrt_epsf diff --git a/hexrd/hedm/material/crystallography.py b/hexrd/hedm/material/crystallography.py index 574225e67..48eb857e8 100644 --- a/hexrd/hedm/material/crystallography.py +++ b/hexrd/hedm/material/crystallography.py @@ -34,21 +34,16 @@ import numpy as np -from hexrd.material.unitcell import unitcell -from hexrd.deprecation import deprecated -from hexrd import constants -from hexrd.matrixutil import unitVector -from hexrd.rotations import ( - rotMatOfExpMap, - mapAngle, - applySym, - ltypeOfLaueGroup, - quatOfLaueGroup, -) -from hexrd.transforms import xfcapi -from hexrd import valunits -from hexrd.valunits import toFloat -from hexrd.constants import d2r, r2d, sqrt3by2, epsf, sqrt_epsf +from hexrd.hedm.material.unitcell import unitcell +from hexrd.core.material.unitcell import unitcell +from hexrd.core.deprecation import deprecated +from hexrd.core import constants +from hexrd.core.matrixutil import unitVector +from hexrd.core.rotations import rotMatOfExpMap, mapAngle, applySym, ltypeOfLaueGroup, quatOfLaueGroup +from hexrd.core.transforms import xfcapi +from hexrd.core import valunits +from hexrd.core.valunits import toFloat +from hexrd.core.constants import d2r, r2d, sqrt3by2, epsf, sqrt_epsf """module vars""" diff --git a/hexrd/hedm/material/unitcell.py b/hexrd/hedm/material/unitcell.py index 26f3a2fef..c6debaa10 100644 --- a/hexrd/hedm/material/unitcell.py +++ b/hexrd/hedm/material/unitcell.py @@ -1,11 +1,11 @@ import importlib.resources import numpy as np from numba import njit -from hexrd import constants -from hexrd.material import spacegroup, symbols, symmetry -from hexrd.ipfcolor import sphere_sector, colorspace -from hexrd.valunits import valWUnit -import hexrd.resources +from hexrd.core import constants +from hexrd.core.material import spacegroup, symbols, symmetry +from hexrd.hedm.ipfcolor import sphere_sector, colorspace +from hexrd.core.valunits import valWUnit +import hexrd.core.resources import warnings import h5py from pathlib import Path @@ -788,7 +788,7 @@ def InitializeInterpTable(self): self.f_anomalous_data[i, :nd, :] = f_anomalous_data[i] def CalcXRSF(self, hkl): - from hexrd.wppf.xtal import _calcxrsf + from hexrd.powder.wppf.xtal import _calcxrsf ''' the 1E-2 is to convert to A^-2 since the fitting is done in those units diff --git a/hexrd/hedm/preprocess/argument_classes_factory.py b/hexrd/hedm/preprocess/argument_classes_factory.py index a62dd583a..b188bd9f5 100644 --- a/hexrd/hedm/preprocess/argument_classes_factory.py +++ b/hexrd/hedm/preprocess/argument_classes_factory.py @@ -1,4 +1,4 @@ -import hexrd.preprocess.profiles as profiles +import hexrd.hedm.preprocess.profiles as profiles from typing import Type diff --git a/hexrd/hedm/preprocess/preprocessors.py b/hexrd/hedm/preprocess/preprocessors.py index 4c96297b1..9b09e922e 100644 --- a/hexrd/hedm/preprocess/preprocessors.py +++ b/hexrd/hedm/preprocess/preprocessors.py @@ -1,12 +1,8 @@ -from hexrd.imageseries.baseclass import ImageSeries -from hexrd.imageseries.omega import OmegaWedges -from hexrd.preprocess.profiles import ( - Eiger_Arguments, - Dexelas_Arguments, - HexrdPPScript_Arguments, -) -from hexrd import imageseries -from hexrd.imageseries.process import ProcessedImageSeries +from hexrd.core.imageseries.baseclass import ImageSeries 
+from hexrd.core.imageseries.omega import OmegaWedges +from hexrd.hedm.preprocess.profiles import Eiger_Arguments, Dexelas_Arguments, HexrdPPScript_Arguments +from hexrd.core import imageseries +from hexrd.core.imageseries.process import ProcessedImageSeries import os import time from typing import Any, Optional, Union, Sequence, cast diff --git a/hexrd/hedm/preprocess/profiles.py b/hexrd/hedm/preprocess/profiles.py index d36e3a012..747abfbbc 100644 --- a/hexrd/hedm/preprocess/profiles.py +++ b/hexrd/hedm/preprocess/profiles.py @@ -2,13 +2,8 @@ import glob import os import yaml -from hexrd.preprocess.argument_classes_factory import ( - ArgumentClassesFactory, - autoregister, -) -from hexrd.preprocess.yaml_internals import ( - HexrdPPScriptArgumentsDumper, -) +from hexrd.hedm.preprocess.argument_classes_factory import ArgumentClassesFactory, autoregister +from hexrd.hedm.preprocess.yaml_internals import HexrdPPScriptArgumentsDumper from typing import Any, Union, Optional, cast diff --git a/hexrd/hedm/sampleOrientations/__init__.py b/hexrd/hedm/sampleOrientations/__init__.py index 29d998782..a4f6e5491 100644 --- a/hexrd/hedm/sampleOrientations/__init__.py +++ b/hexrd/hedm/sampleOrientations/__init__.py @@ -1 +1 @@ -from hexrd.sampleOrientations.sampleRFZ import sampleRFZ \ No newline at end of file +from hexrd.hedm.sampleOrientations.sampleRFZ import sampleRFZ diff --git a/hexrd/hedm/sampleOrientations/conversions.py b/hexrd/hedm/sampleOrientations/conversions.py index 2694fe6cc..f4a85beb0 100644 --- a/hexrd/hedm/sampleOrientations/conversions.py +++ b/hexrd/hedm/sampleOrientations/conversions.py @@ -1,6 +1,6 @@ import numpy as np from numba import njit -from hexrd import constants +from hexrd.core import constants ap_2 = constants.cuA_2 sc = constants.sc diff --git a/hexrd/hedm/sampleOrientations/rfz.py b/hexrd/hedm/sampleOrientations/rfz.py index 36556ca4e..17fbd833a 100644 --- a/hexrd/hedm/sampleOrientations/rfz.py +++ b/hexrd/hedm/sampleOrientations/rfz.py @@ -1,8 +1,8 @@ import numpy as np import numba -from hexrd.constants import FZtypeArray, FZorderArray -from hexrd import constants +from hexrd.core.constants import FZtypeArray, FZorderArray +from hexrd.core import constants @numba.njit(cache=True, nogil=True) diff --git a/hexrd/hedm/sampleOrientations/sampleRFZ.py b/hexrd/hedm/sampleOrientations/sampleRFZ.py index 27b50fd4b..2168b490d 100644 --- a/hexrd/hedm/sampleOrientations/sampleRFZ.py +++ b/hexrd/hedm/sampleOrientations/sampleRFZ.py @@ -2,9 +2,9 @@ import numba from numba import prange -from hexrd.sampleOrientations.conversions import cu2ro, ro2qu -from hexrd.sampleOrientations.rfz import insideFZ -from hexrd import constants +from hexrd.hedm.sampleOrientations.conversions import cu2ro, ro2qu +from hexrd.hedm.sampleOrientations.rfz import insideFZ +from hexrd.core import constants @numba.njit(cache=True, nogil=True, parallel=True) diff --git a/hexrd/hedm/xrdutil/utils.py b/hexrd/hedm/xrdutil/utils.py index 2cbae2b6f..4c31e29ea 100644 --- a/hexrd/hedm/xrdutil/utils.py +++ b/hexrd/hedm/xrdutil/utils.py @@ -28,26 +28,32 @@ from typing import Optional, Union, Any, Generator -from hexrd.material.crystallography import PlaneData -from hexrd.distortion.distortionabc import DistortionABC +from hexrd.hedm.material.crystallography import PlaneData +from hexrd.laue.material.crystallography import PlaneData +from hexrd.powder.material.crystallography import PlaneData +from hexrd.core.material.crystallography import PlaneData +from hexrd.core.distortion.distortionabc import DistortionABC 
import numba import numpy as np import numba -from hexrd import constants -from hexrd import matrixutil as mutil -from hexrd import rotations as rot -from hexrd import gridutil as gutil +from hexrd.core import constants +from hexrd.core import matrixutil as mutil +from hexrd.core import rotations as rot +from hexrd.core import gridutil as gutil -from hexrd.material.crystallography import processWavelength, PlaneData +from hexrd.hedm.material.crystallography import processWavelength, PlaneData +from hexrd.laue.material.crystallography import processWavelength, PlaneData +from hexrd.powder.material.crystallography import processWavelength, PlaneData +from hexrd.core.material.crystallography import processWavelength, PlaneData -from hexrd.transforms import xfcapi -from hexrd.valunits import valWUnit +from hexrd.core.transforms import xfcapi +from hexrd.core.valunits import valWUnit -from hexrd import distortion as distortion_pkg +from hexrd.core import distortion as distortion_pkg -from hexrd.deprecation import deprecated +from hexrd.core.deprecation import deprecated simlp = 'hexrd.instrument.hedm_instrument.HEDMInstrument.simulate_laue_pattern' diff --git a/hexrd/laue/fitting/calibration/laue.py b/hexrd/laue/fitting/calibration/laue.py index 1603fa0bc..03224acf3 100644 --- a/hexrd/laue/fitting/calibration/laue.py +++ b/hexrd/laue/fitting/calibration/laue.py @@ -8,10 +8,10 @@ from skimage import filters from skimage.feature import blob_log -from hexrd import xrdutil -from hexrd.constants import fwhm_to_sigma -from hexrd.instrument import switch_xray_source -from hexrd.transforms import xfcapi +from hexrd.hedm import xrdutil +from hexrd.core.constants import fwhm_to_sigma +from hexrd.core.instrument import switch_xray_source +from hexrd.core.transforms import xfcapi from .abstract_grain import AbstractGrainCalibrator from .lmfit_param_handling import DEFAULT_EULER_CONVENTION diff --git a/hexrd/laue/instrument/detector.py b/hexrd/laue/instrument/detector.py index db4f95d1a..76aeb9e26 100644 --- a/hexrd/laue/instrument/detector.py +++ b/hexrd/laue/instrument/detector.py @@ -3,37 +3,33 @@ import os from typing import Optional -from hexrd.instrument.constants import ( - COATING_DEFAULT, FILTER_DEFAULTS, PHOSPHOR_DEFAULT -) -from hexrd.instrument.physics_package import AbstractPhysicsPackage +from hexrd.core.instrument.constants import COATING_DEFAULT, FILTER_DEFAULTS, PHOSPHOR_DEFAULT +from hexrd.hedm.instrument.physics_package import AbstractPhysicsPackage +from hexrd.core.instrument.physics_package import AbstractPhysicsPackage import numpy as np import numba -from hexrd import constants as ct -from hexrd import distortion as distortion_pkg -from hexrd import matrixutil as mutil -from hexrd import xrdutil -from hexrd.rotations import mapAngle - -from hexrd.material import crystallography -from hexrd.material.crystallography import PlaneData - -from hexrd.transforms.xfcapi import ( - xy_to_gvec, - gvec_to_xy, - make_beam_rmat, - make_rmat_of_expmap, - oscill_angles_of_hkls, - angles_to_dvec, -) - -from hexrd.utils.decorators import memoize -from hexrd.gridutil import cellIndices -from hexrd.instrument import detector_coatings -from hexrd.material.utils import ( - calculate_linear_absorption_length, - calculate_incoherent_scattering) +from hexrd.core import constants as ct +from hexrd.core import distortion as distortion_pkg +from hexrd.core import matrixutil as mutil +from hexrd.hedm import xrdutil +from hexrd.core.rotations import mapAngle + +from hexrd.hedm.material import crystallography 
+from hexrd.laue.material import crystallography +from hexrd.powder.material import crystallography +from hexrd.core.material import crystallography +from hexrd.hedm.material.crystallography import PlaneData +from hexrd.laue.material.crystallography import PlaneData +from hexrd.powder.material.crystallography import PlaneData +from hexrd.core.material.crystallography import PlaneData + +from hexrd.core.transforms.xfcapi import xy_to_gvec, gvec_to_xy, make_beam_rmat, make_rmat_of_expmap, oscill_angles_of_hkls, angles_to_dvec + +from hexrd.core.utils.decorators import memoize +from hexrd.core.gridutil import cellIndices +from hexrd.core.instrument import detector_coatings +from hexrd.core.material.utils import calculate_linear_absorption_length, calculate_incoherent_scattering distortion_registry = distortion_pkg.Registry() diff --git a/hexrd/laue/instrument/hedm_instrument.py b/hexrd/laue/instrument/hedm_instrument.py index 1d768b47c..fe6bd619a 100644 --- a/hexrd/laue/instrument/hedm_instrument.py +++ b/hexrd/laue/instrument/hedm_instrument.py @@ -52,42 +52,35 @@ from scipy.linalg import logm from skimage.measure import regionprops -from hexrd import constants -from hexrd.imageseries import ImageSeries -from hexrd.imageseries.process import ProcessedImageSeries -from hexrd.imageseries.omega import OmegaImageSeries -from hexrd.fitting.utils import fit_ring -from hexrd.gridutil import make_tolerance_grid -from hexrd import matrixutil as mutil -from hexrd.transforms.xfcapi import ( - angles_to_gvec, - gvec_to_xy, - make_sample_rmat, - make_rmat_of_expmap, - unit_vector, -) -from hexrd import xrdutil -from hexrd.material.crystallography import PlaneData -from hexrd import constants as ct -from hexrd.rotations import mapAngle -from hexrd import distortion as distortion_pkg -from hexrd.utils.concurrent import distribute_tasks -from hexrd.utils.hdf5 import unwrap_dict_to_h5, unwrap_h5_to_dict -from hexrd.utils.yaml import NumpyToNativeDumper -from hexrd.valunits import valWUnit -from hexrd.wppf import LeBail +from hexrd.core import constants +from hexrd.core.imageseries import ImageSeries +from hexrd.core.imageseries.process import ProcessedImageSeries +from hexrd.core.imageseries.omega import OmegaImageSeries +from hexrd.core.fitting.utils import fit_ring +from hexrd.core.gridutil import make_tolerance_grid +from hexrd.core import matrixutil as mutil +from hexrd.core.transforms.xfcapi import angles_to_gvec, gvec_to_xy, make_sample_rmat, make_rmat_of_expmap, unit_vector +from hexrd.hedm import xrdutil +from hexrd.hedm.material.crystallography import PlaneData +from hexrd.laue.material.crystallography import PlaneData +from hexrd.powder.material.crystallography import PlaneData +from hexrd.core.material.crystallography import PlaneData +from hexrd.core import constants as ct +from hexrd.core.rotations import mapAngle +from hexrd.core import distortion as distortion_pkg +from hexrd.core.utils.concurrent import distribute_tasks +from hexrd.core.utils.hdf5 import unwrap_dict_to_h5, unwrap_h5_to_dict +from hexrd.core.utils.yaml import NumpyToNativeDumper +from hexrd.core.valunits import valWUnit +from hexrd.powder.wppf import LeBail from .cylindrical_detector import CylindricalDetector -from .detector import ( - beam_energy_DFLT, - Detector, - max_workers_DFLT, -) +from .detector import beam_energy_DFLT, Detector, max_workers_DFLT from .planar_detector import PlanarDetector from skimage.draw import polygon from skimage.util import random_noise -from hexrd.wppf import wppfsupport +from hexrd.powder.wppf 
import wppfsupport try: from fast_histogram import histogram1d diff --git a/hexrd/laue/material/crystallography.py b/hexrd/laue/material/crystallography.py index 574225e67..48eb857e8 100644 --- a/hexrd/laue/material/crystallography.py +++ b/hexrd/laue/material/crystallography.py @@ -34,21 +34,16 @@ import numpy as np -from hexrd.material.unitcell import unitcell -from hexrd.deprecation import deprecated -from hexrd import constants -from hexrd.matrixutil import unitVector -from hexrd.rotations import ( - rotMatOfExpMap, - mapAngle, - applySym, - ltypeOfLaueGroup, - quatOfLaueGroup, -) -from hexrd.transforms import xfcapi -from hexrd import valunits -from hexrd.valunits import toFloat -from hexrd.constants import d2r, r2d, sqrt3by2, epsf, sqrt_epsf +from hexrd.hedm.material.unitcell import unitcell +from hexrd.core.material.unitcell import unitcell +from hexrd.core.deprecation import deprecated +from hexrd.core import constants +from hexrd.core.matrixutil import unitVector +from hexrd.core.rotations import rotMatOfExpMap, mapAngle, applySym, ltypeOfLaueGroup, quatOfLaueGroup +from hexrd.core.transforms import xfcapi +from hexrd.core import valunits +from hexrd.core.valunits import toFloat +from hexrd.core.constants import d2r, r2d, sqrt3by2, epsf, sqrt_epsf """module vars""" diff --git a/hexrd/laue/xrdutil/utils.py b/hexrd/laue/xrdutil/utils.py index 2cbae2b6f..4c31e29ea 100644 --- a/hexrd/laue/xrdutil/utils.py +++ b/hexrd/laue/xrdutil/utils.py @@ -28,26 +28,32 @@ from typing import Optional, Union, Any, Generator -from hexrd.material.crystallography import PlaneData -from hexrd.distortion.distortionabc import DistortionABC +from hexrd.hedm.material.crystallography import PlaneData +from hexrd.laue.material.crystallography import PlaneData +from hexrd.powder.material.crystallography import PlaneData +from hexrd.core.material.crystallography import PlaneData +from hexrd.core.distortion.distortionabc import DistortionABC import numba import numpy as np import numba -from hexrd import constants -from hexrd import matrixutil as mutil -from hexrd import rotations as rot -from hexrd import gridutil as gutil +from hexrd.core import constants +from hexrd.core import matrixutil as mutil +from hexrd.core import rotations as rot +from hexrd.core import gridutil as gutil -from hexrd.material.crystallography import processWavelength, PlaneData +from hexrd.hedm.material.crystallography import processWavelength, PlaneData +from hexrd.laue.material.crystallography import processWavelength, PlaneData +from hexrd.powder.material.crystallography import processWavelength, PlaneData +from hexrd.core.material.crystallography import processWavelength, PlaneData -from hexrd.transforms import xfcapi -from hexrd.valunits import valWUnit +from hexrd.core.transforms import xfcapi +from hexrd.core.valunits import valWUnit -from hexrd import distortion as distortion_pkg +from hexrd.core import distortion as distortion_pkg -from hexrd.deprecation import deprecated +from hexrd.core.deprecation import deprecated simlp = 'hexrd.instrument.hedm_instrument.HEDMInstrument.simulate_laue_pattern' diff --git a/hexrd/powder/fitting/calibration/instrument.py b/hexrd/powder/fitting/calibration/instrument.py index 8309452aa..7ca9ecd1b 100644 --- a/hexrd/powder/fitting/calibration/instrument.py +++ b/hexrd/powder/fitting/calibration/instrument.py @@ -4,18 +4,8 @@ import lmfit import numpy as np -from .lmfit_param_handling import ( - add_engineering_constraints, - create_instr_params, - DEFAULT_EULER_CONVENTION, - 
update_instrument_from_params, - validate_params_list, -) -from .relative_constraints import ( - create_relative_constraints, - RelativeConstraints, - RelativeConstraintsType, -) +from .lmfit_param_handling import add_engineering_constraints, create_instr_params, DEFAULT_EULER_CONVENTION, update_instrument_from_params, validate_params_list +from .relative_constraints import create_relative_constraints, RelativeConstraints, RelativeConstraintsType logger = logging.getLogger() logger.setLevel('INFO') diff --git a/hexrd/powder/fitting/calibration/lmfit_param_handling.py b/hexrd/powder/fitting/calibration/lmfit_param_handling.py index bd2a9db87..311fcc7da 100644 --- a/hexrd/powder/fitting/calibration/lmfit_param_handling.py +++ b/hexrd/powder/fitting/calibration/lmfit_param_handling.py @@ -3,25 +3,11 @@ import lmfit import numpy as np -from hexrd.instrument import ( - calc_angles_from_beam_vec, - calc_beam_vec, - Detector, - HEDMInstrument, -) -from hexrd.rotations import ( - angleAxisOfRotMat, - expMapOfQuat, - make_rmat_euler, - quatOfRotMat, - RotMatEuler, - rotMatOfExpMap, -) -from hexrd.material.unitcell import _lpname -from .relative_constraints import ( - RelativeConstraints, - RelativeConstraintsType, -) +from hexrd.core.instrument import calc_angles_from_beam_vec, calc_beam_vec, Detector, HEDMInstrument +from hexrd.core.rotations import angleAxisOfRotMat, expMapOfQuat, make_rmat_euler, quatOfRotMat, RotMatEuler, rotMatOfExpMap +from hexrd.hedm.material.unitcell import _lpname +from hexrd.core.material.unitcell import _lpname +from .relative_constraints import RelativeConstraints, RelativeConstraintsType # First is the axes_order, second is extrinsic diff --git a/hexrd/powder/fitting/calibration/powder.py b/hexrd/powder/fitting/calibration/powder.py index 0f09a25c0..dc25f32f3 100644 --- a/hexrd/powder/fitting/calibration/powder.py +++ b/hexrd/powder/fitting/calibration/powder.py @@ -3,15 +3,12 @@ import numpy as np -from hexrd import matrixutil as mutil -from hexrd.instrument import calc_angles_from_beam_vec, switch_xray_source -from hexrd.utils.hkl import hkl_to_str, str_to_hkl +from hexrd.core import matrixutil as mutil +from hexrd.core.instrument import calc_angles_from_beam_vec, switch_xray_source +from hexrd.core.utils.hkl import hkl_to_str, str_to_hkl from .calibrator import Calibrator -from .lmfit_param_handling import ( - create_material_params, - update_material_from_params, -) +from .lmfit_param_handling import create_material_params, update_material_from_params nfields_powder_data = 8 diff --git a/hexrd/powder/fitting/calibration/structureless.py b/hexrd/powder/fitting/calibration/structureless.py index 4bc743594..5316a1fd3 100644 --- a/hexrd/powder/fitting/calibration/structureless.py +++ b/hexrd/powder/fitting/calibration/structureless.py @@ -4,21 +4,10 @@ import lmfit import numpy as np -from hexrd.instrument import switch_xray_source - -from .lmfit_param_handling import ( - add_engineering_constraints, - create_instr_params, - create_tth_parameters, - DEFAULT_EULER_CONVENTION, - tth_parameter_prefixes, - update_instrument_from_params, -) -from .relative_constraints import ( - create_relative_constraints, - RelativeConstraints, - RelativeConstraintsType, -) +from hexrd.core.instrument import switch_xray_source + +from .lmfit_param_handling import add_engineering_constraints, create_instr_params, create_tth_parameters, DEFAULT_EULER_CONVENTION, tth_parameter_prefixes, update_instrument_from_params +from .relative_constraints import create_relative_constraints, 
RelativeConstraints, RelativeConstraintsType class StructurelessCalibrator: diff --git a/hexrd/powder/instrument/detector.py b/hexrd/powder/instrument/detector.py index db4f95d1a..76aeb9e26 100644 --- a/hexrd/powder/instrument/detector.py +++ b/hexrd/powder/instrument/detector.py @@ -3,37 +3,33 @@ import os from typing import Optional -from hexrd.instrument.constants import ( - COATING_DEFAULT, FILTER_DEFAULTS, PHOSPHOR_DEFAULT -) -from hexrd.instrument.physics_package import AbstractPhysicsPackage +from hexrd.core.instrument.constants import COATING_DEFAULT, FILTER_DEFAULTS, PHOSPHOR_DEFAULT +from hexrd.hedm.instrument.physics_package import AbstractPhysicsPackage +from hexrd.core.instrument.physics_package import AbstractPhysicsPackage import numpy as np import numba -from hexrd import constants as ct -from hexrd import distortion as distortion_pkg -from hexrd import matrixutil as mutil -from hexrd import xrdutil -from hexrd.rotations import mapAngle - -from hexrd.material import crystallography -from hexrd.material.crystallography import PlaneData - -from hexrd.transforms.xfcapi import ( - xy_to_gvec, - gvec_to_xy, - make_beam_rmat, - make_rmat_of_expmap, - oscill_angles_of_hkls, - angles_to_dvec, -) - -from hexrd.utils.decorators import memoize -from hexrd.gridutil import cellIndices -from hexrd.instrument import detector_coatings -from hexrd.material.utils import ( - calculate_linear_absorption_length, - calculate_incoherent_scattering) +from hexrd.core import constants as ct +from hexrd.core import distortion as distortion_pkg +from hexrd.core import matrixutil as mutil +from hexrd.hedm import xrdutil +from hexrd.core.rotations import mapAngle + +from hexrd.hedm.material import crystallography +from hexrd.laue.material import crystallography +from hexrd.powder.material import crystallography +from hexrd.core.material import crystallography +from hexrd.hedm.material.crystallography import PlaneData +from hexrd.laue.material.crystallography import PlaneData +from hexrd.powder.material.crystallography import PlaneData +from hexrd.core.material.crystallography import PlaneData + +from hexrd.core.transforms.xfcapi import xy_to_gvec, gvec_to_xy, make_beam_rmat, make_rmat_of_expmap, oscill_angles_of_hkls, angles_to_dvec + +from hexrd.core.utils.decorators import memoize +from hexrd.core.gridutil import cellIndices +from hexrd.core.instrument import detector_coatings +from hexrd.core.material.utils import calculate_linear_absorption_length, calculate_incoherent_scattering distortion_registry = distortion_pkg.Registry() diff --git a/hexrd/powder/instrument/hedm_instrument.py b/hexrd/powder/instrument/hedm_instrument.py index 1d768b47c..fe6bd619a 100644 --- a/hexrd/powder/instrument/hedm_instrument.py +++ b/hexrd/powder/instrument/hedm_instrument.py @@ -52,42 +52,35 @@ from scipy.linalg import logm from skimage.measure import regionprops -from hexrd import constants -from hexrd.imageseries import ImageSeries -from hexrd.imageseries.process import ProcessedImageSeries -from hexrd.imageseries.omega import OmegaImageSeries -from hexrd.fitting.utils import fit_ring -from hexrd.gridutil import make_tolerance_grid -from hexrd import matrixutil as mutil -from hexrd.transforms.xfcapi import ( - angles_to_gvec, - gvec_to_xy, - make_sample_rmat, - make_rmat_of_expmap, - unit_vector, -) -from hexrd import xrdutil -from hexrd.material.crystallography import PlaneData -from hexrd import constants as ct -from hexrd.rotations import mapAngle -from hexrd import distortion as distortion_pkg -from 
hexrd.utils.concurrent import distribute_tasks -from hexrd.utils.hdf5 import unwrap_dict_to_h5, unwrap_h5_to_dict -from hexrd.utils.yaml import NumpyToNativeDumper -from hexrd.valunits import valWUnit -from hexrd.wppf import LeBail +from hexrd.core import constants +from hexrd.core.imageseries import ImageSeries +from hexrd.core.imageseries.process import ProcessedImageSeries +from hexrd.core.imageseries.omega import OmegaImageSeries +from hexrd.core.fitting.utils import fit_ring +from hexrd.core.gridutil import make_tolerance_grid +from hexrd.core import matrixutil as mutil +from hexrd.core.transforms.xfcapi import angles_to_gvec, gvec_to_xy, make_sample_rmat, make_rmat_of_expmap, unit_vector +from hexrd.hedm import xrdutil +from hexrd.hedm.material.crystallography import PlaneData +from hexrd.laue.material.crystallography import PlaneData +from hexrd.powder.material.crystallography import PlaneData +from hexrd.core.material.crystallography import PlaneData +from hexrd.core import constants as ct +from hexrd.core.rotations import mapAngle +from hexrd.core import distortion as distortion_pkg +from hexrd.core.utils.concurrent import distribute_tasks +from hexrd.core.utils.hdf5 import unwrap_dict_to_h5, unwrap_h5_to_dict +from hexrd.core.utils.yaml import NumpyToNativeDumper +from hexrd.core.valunits import valWUnit +from hexrd.powder.wppf import LeBail from .cylindrical_detector import CylindricalDetector -from .detector import ( - beam_energy_DFLT, - Detector, - max_workers_DFLT, -) +from .detector import beam_energy_DFLT, Detector, max_workers_DFLT from .planar_detector import PlanarDetector from skimage.draw import polygon from skimage.util import random_noise -from hexrd.wppf import wppfsupport +from hexrd.powder.wppf import wppfsupport try: from fast_histogram import histogram1d diff --git a/hexrd/powder/material/crystallography.py b/hexrd/powder/material/crystallography.py index 574225e67..48eb857e8 100644 --- a/hexrd/powder/material/crystallography.py +++ b/hexrd/powder/material/crystallography.py @@ -34,21 +34,16 @@ import numpy as np -from hexrd.material.unitcell import unitcell -from hexrd.deprecation import deprecated -from hexrd import constants -from hexrd.matrixutil import unitVector -from hexrd.rotations import ( - rotMatOfExpMap, - mapAngle, - applySym, - ltypeOfLaueGroup, - quatOfLaueGroup, -) -from hexrd.transforms import xfcapi -from hexrd import valunits -from hexrd.valunits import toFloat -from hexrd.constants import d2r, r2d, sqrt3by2, epsf, sqrt_epsf +from hexrd.hedm.material.unitcell import unitcell +from hexrd.core.material.unitcell import unitcell +from hexrd.core.deprecation import deprecated +from hexrd.core import constants +from hexrd.core.matrixutil import unitVector +from hexrd.core.rotations import rotMatOfExpMap, mapAngle, applySym, ltypeOfLaueGroup, quatOfLaueGroup +from hexrd.core.transforms import xfcapi +from hexrd.core import valunits +from hexrd.core.valunits import toFloat +from hexrd.core.constants import d2r, r2d, sqrt3by2, epsf, sqrt_epsf """module vars""" diff --git a/hexrd/powder/wppf/LeBailCalibration.py b/hexrd/powder/wppf/LeBailCalibration.py index 878a9c17e..794f3005d 100644 --- a/hexrd/powder/wppf/LeBailCalibration.py +++ b/hexrd/powder/wppf/LeBailCalibration.py @@ -3,27 +3,21 @@ from numpy.polynomial.chebyshev import Chebyshev import lmfit import warnings -from hexrd.wppf.peakfunctions import \ -calc_rwp, computespectrum_pvfcj, \ -computespectrum_pvtch,\ -computespectrum_pvpink,\ -calc_Iobs_pvfcj,\ -calc_Iobs_pvtch,\ -calc_Iobs_pvpink 
-from hexrd.wppf.spectrum import Spectrum -from hexrd.wppf import wppfsupport, LeBail -from hexrd.wppf.parameters import Parameters +from hexrd.powder.wppf.peakfunctions import calc_rwp, computespectrum_pvfcj, computespectrum_pvtch, computespectrum_pvpink, calc_Iobs_pvfcj, calc_Iobs_pvtch, calc_Iobs_pvpink +from hexrd.powder.wppf.spectrum import Spectrum +from hexrd.powder.wppf import wppfsupport, LeBail +from hexrd.powder.wppf.parameters import Parameters from lmfit import Parameters as Parameters_lmfit -from hexrd.wppf.phase import Phases_LeBail, Material_LeBail -from hexrd.imageutil import snip1d, snip1d_quad -from hexrd.material import Material -from hexrd.valunits import valWUnit -from hexrd.constants import keVToAngstrom - -from hexrd import instrument -from hexrd import imageseries -from hexrd.imageseries import omega -from hexrd.projections.polar import PolarView +from hexrd.powder.wppf.phase import Phases_LeBail, Material_LeBail +from hexrd.core.imageutil import snip1d, snip1d_quad +from hexrd.core.material import Material +from hexrd.core.valunits import valWUnit +from hexrd.core.constants import keVToAngstrom + +from hexrd.core import instrument +from hexrd.core import imageseries +from hexrd.core.imageseries import omega +from hexrd.core.projections.polar import PolarView import time class LeBailCalibrator: diff --git a/hexrd/powder/wppf/WPPF.py b/hexrd/powder/wppf/WPPF.py index 5dd8bbddd..1ec8797c1 100644 --- a/hexrd/powder/wppf/WPPF.py +++ b/hexrd/powder/wppf/WPPF.py @@ -17,29 +17,16 @@ # hexrd imports # ------------- -from hexrd import constants -from hexrd.imageutil import snip1d_quad -from hexrd.material import Material -from hexrd.utils.multiprocess_generic import GenericMultiprocessing -from hexrd.valunits import valWUnit -from hexrd.wppf.peakfunctions import ( - calc_rwp, - computespectrum_pvfcj, - computespectrum_pvtch, - computespectrum_pvpink, - calc_Iobs_pvfcj, - calc_Iobs_pvtch, - calc_Iobs_pvpink, -) -from hexrd.wppf import wppfsupport -from hexrd.wppf.spectrum import Spectrum -from hexrd.wppf.parameters import Parameters -from hexrd.wppf.phase import ( - Phases_LeBail, - Phases_Rietveld, - Material_LeBail, - Material_Rietveld, -) +from hexrd.core import constants +from hexrd.core.imageutil import snip1d_quad +from hexrd.core.material import Material +from hexrd.core.utils.multiprocess_generic import GenericMultiprocessing +from hexrd.core.valunits import valWUnit +from hexrd.powder.wppf.peakfunctions import calc_rwp, computespectrum_pvfcj, computespectrum_pvtch, computespectrum_pvpink, calc_Iobs_pvfcj, calc_Iobs_pvtch, calc_Iobs_pvpink +from hexrd.powder.wppf import wppfsupport +from hexrd.powder.wppf.spectrum import Spectrum +from hexrd.powder.wppf.parameters import Parameters +from hexrd.powder.wppf.phase import Phases_LeBail, Phases_Rietveld, Material_LeBail, Material_Rietveld class LeBail: diff --git a/hexrd/powder/wppf/__init__.py b/hexrd/powder/wppf/__init__.py index 40887f145..fb501c91e 100644 --- a/hexrd/powder/wppf/__init__.py +++ b/hexrd/powder/wppf/__init__.py @@ -1,2 +1,2 @@ -from hexrd.wppf.WPPF import LeBail -from hexrd.wppf.WPPF import Rietveld \ No newline at end of file +from hexrd.powder.wppf.WPPF import LeBail +from hexrd.powder.wppf.WPPF import Rietveld diff --git a/hexrd/powder/wppf/derivatives.py b/hexrd/powder/wppf/derivatives.py index d1e4cce33..9ea96ef57 100644 --- a/hexrd/powder/wppf/derivatives.py +++ b/hexrd/powder/wppf/derivatives.py @@ -1,6 +1,6 @@ import numpy as np from numba import njit -from hexrd.wppf.peakfunctions import 
_unit_gaussian, _unit_lorentzian +from hexrd.powder.wppf.peakfunctions import _unit_gaussian, _unit_lorentzian """ naming convention for the derivative is as follows: diff --git a/hexrd/powder/wppf/peakfunctions.py b/hexrd/powder/wppf/peakfunctions.py index 0eecdce7d..283cace0b 100644 --- a/hexrd/powder/wppf/peakfunctions.py +++ b/hexrd/powder/wppf/peakfunctions.py @@ -27,9 +27,9 @@ import numpy as np import copy -from hexrd import constants +from hexrd.core import constants from numba import vectorize, float64, njit, prange -from hexrd.fitting.peakfunctions import erfc, exp1exp +from hexrd.core.fitting.peakfunctions import erfc, exp1exp # from scipy.special import erfc, exp1 diff --git a/hexrd/powder/wppf/phase.py b/hexrd/powder/wppf/phase.py index 48f9dc08c..5ea0d819d 100644 --- a/hexrd/powder/wppf/phase.py +++ b/hexrd/powder/wppf/phase.py @@ -1,16 +1,16 @@ import numpy as np -from hexrd.valunits import valWUnit -from hexrd.material.spacegroup import Allowed_HKLs, SpaceGroup -from hexrd import constants -from hexrd.material import symmetry, symbols -from hexrd.material import Material -from hexrd.material.unitcell import _rqpDict -from hexrd.wppf import wppfsupport -from hexrd.wppf.xtal import _calc_dspacing, _get_tth, _calcxrsf,\ -_calc_extinction_factor, _calc_absorption_factor +from hexrd.core.valunits import valWUnit +from hexrd.core.material.spacegroup import Allowed_HKLs, SpaceGroup +from hexrd.core import constants +from hexrd.core.material import symmetry, symbols +from hexrd.core.material import Material +from hexrd.hedm.material.unitcell import _rqpDict +from hexrd.core.material.unitcell import _rqpDict +from hexrd.powder.wppf import wppfsupport +from hexrd.powder.wppf.xtal import _calc_dspacing, _get_tth, _calcxrsf, _calc_extinction_factor, _calc_absorption_factor import h5py import importlib.resources -import hexrd.resources +import hexrd.core.resources class Material_LeBail: """ diff --git a/hexrd/powder/wppf/texture.py b/hexrd/powder/wppf/texture.py index 33c5e9f07..e9d3ae6ff 100644 --- a/hexrd/powder/wppf/texture.py +++ b/hexrd/powder/wppf/texture.py @@ -15,7 +15,7 @@ from scipy.spatial import Delaunay # HEXRD imports -import hexrd.resources +import hexrd.core.resources # FIXME: unused imports @saransh13? # from hexrd.transforms.xfcapi import angles_to_gvec # from hexrd.wppf import phase diff --git a/hexrd/powder/wppf/wppfsupport.py b/hexrd/powder/wppf/wppfsupport.py index 81ffffbd7..e21693045 100644 --- a/hexrd/powder/wppf/wppfsupport.py +++ b/hexrd/powder/wppf/wppfsupport.py @@ -32,15 +32,16 @@ classes are put here to minimize code duplication. Some examples include initialize background, generate_default_parameter list etc. 
""" -from hexrd.material.symbols import pstr_spacegroup -from hexrd.wppf.parameters import Parameters +from hexrd.core.material.symbols import pstr_spacegroup +from hexrd.powder.wppf.parameters import Parameters from lmfit import Parameters as Parameters_lmfit -from hexrd.wppf.phase import Phases_LeBail, Phases_Rietveld -from hexrd.material import Material -from hexrd.material.unitcell import _rqpDict +from hexrd.powder.wppf.phase import Phases_LeBail, Phases_Rietveld +from hexrd.core.material import Material +from hexrd.hedm.material.unitcell import _rqpDict +from hexrd.core.material.unitcell import _rqpDict import hexrd import numpy as np -from hexrd import constants +from hexrd.core import constants import warnings def _generate_default_parameters_pseudovoight(params): diff --git a/hexrd/powder/wppf/xtal.py b/hexrd/powder/wppf/xtal.py index a59a7421d..65b35b642 100644 --- a/hexrd/powder/wppf/xtal.py +++ b/hexrd/powder/wppf/xtal.py @@ -1,7 +1,7 @@ import numpy as np from numba import njit, prange -from hexrd import constants +from hexrd.core import constants @njit(cache=True, nogil=True) From eb2164cc4806c9eb24ba28e62f7b19aecec0f8a9 Mon Sep 17 00:00:00 2001 From: Kevin Welsh Date: Mon, 20 Jan 2025 11:41:39 -0500 Subject: [PATCH 03/19] fix existing broken import "xrdutils.utils" --- hexrd/core/projections/spherical.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/hexrd/core/projections/spherical.py b/hexrd/core/projections/spherical.py index 1cb7ece9d..c2636f499 100644 --- a/hexrd/core/projections/spherical.py +++ b/hexrd/core/projections/spherical.py @@ -2,7 +2,7 @@ from skimage.transform import PiecewiseAffineTransform, warp from hexrd.core import constants -from hexrd.xrdutils.util import zproject_sph_angles +from hexrd.hedm.xrdutil.utils import zproject_sph_angles class SphericalView: From fa992e296111896947dcde9a68f9f658f66da15b Mon Sep 17 00:00:00 2001 From: Kevin Welsh Date: Mon, 20 Jan 2025 12:23:02 -0500 Subject: [PATCH 04/19] Patch imports Fix duplicate imports update cli build entry point fix this relative import that didn't seem to get patched. All other similar "form . import X" statements are fine. 
patch extension installation Fix some ast.Name references to hexrd modules Updated documentation that references old modules --- conda.recipe/meta.yaml | 2 +- docs/source/conf.py | 2 +- hexrd/__init__.py | 14 +- hexrd/core/config/root.py | 5 +- hexrd/core/fitting/__init__.py | 3 +- hexrd/core/fitting/calibration/__init__.py | 12 +- .../fitting/calibration/calibrator.py | 0 .../calibration/lmfit_param_handling.py | 3 +- hexrd/core/instrument/cylindrical_detector.py | 1 + hexrd/core/instrument/detector.py | 8 +- hexrd/core/instrument/detector_coatings.py | 2 +- hexrd/core/instrument/hedm_instrument.py | 12 +- hexrd/core/instrument/physics_package.py | 6 +- hexrd/core/material/crystallography.py | 1 - hexrd/core/material/material.py | 4 - hexrd/core/material/mksupport.py | 1 - hexrd/core/material/symmetry.py | 2 +- hexrd/core/material/unitcell.py | 4 +- hexrd/core/material/utils.py | 4 +- hexrd/core/projections/polar.py | 153 ++++--- hexrd/core/projections/spherical.py | 1 + hexrd/core/utils/decorators.py | 4 +- hexrd/core/utils/hkl.py | 3 - hexrd/hed/instrument/detector.py | 7 - hexrd/hed/instrument/hedm_instrument.py | 20 +- hexrd/hed/xrdutil/phutil.py | 8 +- hexrd/hed/xrdutil/utils.py | 9 +- hexrd/hedm/cli/find_orientations.py | 1 - hexrd/hedm/cli/fit_grains.py | 2 +- hexrd/hedm/cli/main.py | 1 + hexrd/hedm/config/findorientations.py | 2 +- hexrd/hedm/config/fitgrains.py | 2 +- hexrd/hedm/config/instrument.py | 2 +- hexrd/hedm/config/root.py | 4 +- hexrd/hedm/findorientations.py | 2 +- .../fitting/calibration/grain.py | 6 +- hexrd/hedm/fitting/calibration/multigrain.py | 393 ++++++++++++++++++ hexrd/hedm/grainmap/nfutil.py | 2 +- hexrd/hedm/indexer.py | 2 +- hexrd/hedm/instrument/detector.py | 7 - hexrd/hedm/instrument/hedm_instrument.py | 21 +- hexrd/hedm/instrument/physics_package.py | 6 +- hexrd/hedm/material/unitcell.py | 2 +- hexrd/hedm/xrdutil/utils.py | 8 +- hexrd/laue/fitting/calibration/laue.py | 9 +- hexrd/laue/instrument/detector.py | 7 - hexrd/laue/instrument/hedm_instrument.py | 22 +- hexrd/laue/material/crystallography.py | 1 - hexrd/laue/xrdutil/utils.py | 8 +- .../powder/fitting/calibration/instrument.py | 4 +- hexrd/powder/fitting/calibration/powder.py | 4 +- .../fitting/calibration/structureless.py | 4 +- hexrd/powder/instrument/detector.py | 7 - hexrd/powder/instrument/hedm_instrument.py | 20 +- hexrd/powder/material/crystallography.py | 1 - hexrd/powder/wppf/phase.py | 3 +- hexrd/powder/wppf/texture.py | 8 +- hexrd/powder/wppf/wppfsupport.py | 5 +- setup.py | 30 +- tests/calibration/test_2xrs_calibration.py | 9 +- tests/calibration/test_calibration.py | 12 +- .../test_instrument_relative_constraints.py | 21 +- tests/common.py | 2 +- tests/config/common.py | 2 +- tests/config/test_instrument.py | 13 +- tests/config/test_material.py | 4 +- tests/config/test_root.py | 2 +- tests/find_orientations_testing.py | 6 +- tests/fit_grains_check.py | 8 +- tests/imageseries/common.py | 2 +- tests/imageseries/test_formats.py | 2 +- tests/imageseries/test_omega.py | 4 +- tests/imageseries/test_pickleable.py | 6 +- tests/imageseries/test_process.py | 4 +- tests/imageseries/test_stats.py | 4 +- tests/matrix_util/test_norms.py | 2 +- tests/matrix_util/test_strain_stress_reps.py | 2 +- .../test_vector_and_matrix_math.py | 2 +- tests/planedata/test_exclusion.py | 3 +- tests/planedata/test_init.py | 3 +- tests/planedata/test_misc.py | 7 +- tests/planedata/test_with_data.py | 6 +- tests/rotations/test_eulers.py | 2 +- tests/rotations/test_quat_math.py | 2 +- tests/rotations/test_utilities.py 
| 2 +- tests/test_absorption_correction.py | 4 +- tests/test_concurrent.py | 2 +- tests/test_find_orientations.py | 8 +- tests/test_fit-grains.py | 4 +- tests/test_graindata.py | 2 +- tests/test_inverse_distortion.py | 2 +- tests/test_material.py | 2 +- tests/test_matrix_utils.py | 2 +- tests/test_memoize.py | 2 +- tests/test_polar_view.py | 8 +- tests/test_rotations.py | 4 +- tests/test_transforms.py | 2 +- tests/test_utils_json.py | 2 +- tests/test_utils_yaml.py | 2 +- tests/transforms/common.py | 4 +- .../test_angles_to_dvec_from_file.py | 2 +- .../test_angles_to_gvec_from_file.py | 2 +- tests/transforms/test_gvec_to_xy.py | 2 +- tests/transforms/test_gvec_to_xy_from_file.py | 2 +- .../test_make_beam_rmat_from_file.py | 2 +- .../test_make_detector_rmat_from_file.py | 2 +- .../test_make_rmat_of_expmap_from_file.py | 2 +- .../test_make_sample_rmat_from_file.py | 2 +- .../test_quat_distance_from_file.py | 4 +- .../transforms/test_rotate_vecs_about_axis.py | 2 +- tests/transforms/test_unit_vector.py | 2 +- .../test_validate_angle_ranges_from_file.py | 2 +- tests/transforms/test_xy_to_gvec.py | 2 +- tests/transforms/test_xy_to_gvec_from_file.py | 2 +- tests/unitcell/test_vec_math.py | 2 +- 115 files changed, 729 insertions(+), 372 deletions(-) rename hexrd/{powder => core}/fitting/calibration/calibrator.py (100%) rename hexrd/{powder => core}/fitting/calibration/lmfit_param_handling.py (99%) rename hexrd/{core => hedm}/fitting/calibration/grain.py (96%) create mode 100644 hexrd/hedm/fitting/calibration/multigrain.py diff --git a/conda.recipe/meta.yaml b/conda.recipe/meta.yaml index b8c64eedc..57f0945fb 100644 --- a/conda.recipe/meta.yaml +++ b/conda.recipe/meta.yaml @@ -9,7 +9,7 @@ build: number: {{ environ.get('GIT_DESCRIBE_NUMBER', 0) }} detect_binary_files_with_prefix: true entry_points: - - hexrd = hexrd.cli.main:main + - hexrd = hexrd.hedm.cli.main:main requirements: build: diff --git a/docs/source/conf.py b/docs/source/conf.py index 4d7928762..8d41c2073 100644 --- a/docs/source/conf.py +++ b/docs/source/conf.py @@ -13,7 +13,7 @@ import os import sys -from hexrd.constants import __version__ as version +from hexrd.core.constants import __version__ as version sys.path.insert(0, os.path.abspath('../..')) diff --git a/hexrd/__init__.py b/hexrd/__init__.py index 77ca9b19a..a301cb321 100644 --- a/hexrd/__init__.py +++ b/hexrd/__init__.py @@ -1,13 +1,13 @@ import importlib import sys -from .material import crystallography -from .material import jcpds -from .material import mksupport -from .material import spacegroup -from .material import symbols -from .material import symmetry -from .material import unitcell +from .core.material import crystallography +from .core.material import jcpds +from .core.material import mksupport +from .core.material import spacegroup +from .core.material import symbols +from .core.material import symmetry +from .core.material import unitcell # These are aliases for import paths, so we don't break old HEXRD scripts. # We will verify the alias files *do not* exist, to avoid confusion. 
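The comments at the end of the hexrd/__init__.py hunk above refer to an alias layer that keeps old hexrd.* import paths working after the move to hexrd.core; the layer itself is not shown in this hunk. A minimal sketch of the usual sys.modules technique, with a hypothetical alias table rather than the actual hexrd mapping:

    # Sketch only (not the actual hexrd implementation): register each
    # relocated module under its old dotted name in sys.modules so that
    # legacy `import hexrd.material` statements keep working.
    import importlib
    import sys

    _ALIASES = {  # hypothetical old-path -> new-path table
        'hexrd.material': 'hexrd.core.material',
        'hexrd.constants': 'hexrd.core.constants',
    }

    for _old, _new in _ALIASES.items():
        sys.modules[_old] = importlib.import_module(_new)

Aliasing through sys.modules keeps a single canonical module object per path, so the old and new names refer to the same module and module-level state is never duplicated.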
diff --git a/hexrd/core/config/root.py b/hexrd/core/config/root.py index e698a3f37..a2fed0f6b 100644 --- a/hexrd/core/config/root.py +++ b/hexrd/core/config/root.py @@ -8,8 +8,9 @@ from .config import Config from .instrument import Instrument -from .findorientations import FindOrientationsConfig -from .fitgrains import FitGrainsConfig +# TODO: Resolve extra-core-dependency +from ...hedm.config.findorientations import FindOrientationsConfig +from ...hedm.config.fitgrains import FitGrainsConfig from .material import MaterialConfig logger = logging.getLogger('hexrd.config') diff --git a/hexrd/core/fitting/__init__.py b/hexrd/core/fitting/__init__.py index 139c20cbd..56e658c12 100644 --- a/hexrd/core/fitting/__init__.py +++ b/hexrd/core/fitting/__init__.py @@ -27,7 +27,8 @@ """ Functions for peak fitting """ -from . import grains +# TODO: Resolve extra-workflow dependency +from ...hedm.fitting import grains fitGrain = grains.fitGrain objFuncFitGrain = grains.objFuncFitGrain diff --git a/hexrd/core/fitting/calibration/__init__.py b/hexrd/core/fitting/calibration/__init__.py index f9a099541..47f419cc3 100644 --- a/hexrd/core/fitting/calibration/__init__.py +++ b/hexrd/core/fitting/calibration/__init__.py @@ -1,9 +1,9 @@ -from .grain import GrainCalibrator -from .instrument import InstrumentCalibrator -from .laue import LaueCalibrator -from .lmfit_param_handling import fix_detector_y -from .powder import PowderCalibrator -from .structureless import StructurelessCalibrator +# TODO: Resolve extra-core dependencies +from ....powder.fitting.calibration.instrument import InstrumentCalibrator +from ....laue.fitting.calibration.laue import LaueCalibrator +from ....hedm.fitting.calibration.multigrain import calibrate_instrument_from_sx, generate_parameter_names +from ....powder.fitting.calibration.powder import PowderCalibrator +from ....powder.fitting.calibration.structureless import StructurelessCalibrator # For backward-compatibility, since it used to be named this: StructureLessCalibrator = StructurelessCalibrator diff --git a/hexrd/powder/fitting/calibration/calibrator.py b/hexrd/core/fitting/calibration/calibrator.py similarity index 100% rename from hexrd/powder/fitting/calibration/calibrator.py rename to hexrd/core/fitting/calibration/calibrator.py diff --git a/hexrd/powder/fitting/calibration/lmfit_param_handling.py b/hexrd/core/fitting/calibration/lmfit_param_handling.py similarity index 99% rename from hexrd/powder/fitting/calibration/lmfit_param_handling.py rename to hexrd/core/fitting/calibration/lmfit_param_handling.py index 311fcc7da..98eeb71fc 100644 --- a/hexrd/powder/fitting/calibration/lmfit_param_handling.py +++ b/hexrd/core/fitting/calibration/lmfit_param_handling.py @@ -3,9 +3,8 @@ import lmfit import numpy as np -from hexrd.core.instrument import calc_angles_from_beam_vec, calc_beam_vec, Detector, HEDMInstrument +from hexrd.core.instrument import calc_angles_from_beam_vec, calc_beam_vec, HEDMInstrument from hexrd.core.rotations import angleAxisOfRotMat, expMapOfQuat, make_rmat_euler, quatOfRotMat, RotMatEuler, rotMatOfExpMap -from hexrd.hedm.material.unitcell import _lpname from hexrd.core.material.unitcell import _lpname from .relative_constraints import RelativeConstraints, RelativeConstraintsType diff --git a/hexrd/core/instrument/cylindrical_detector.py b/hexrd/core/instrument/cylindrical_detector.py index 93d7499e2..1edab44f4 100644 --- a/hexrd/core/instrument/cylindrical_detector.py +++ b/hexrd/core/instrument/cylindrical_detector.py @@ -3,6 +3,7 @@ import numpy as np from 
hexrd.core import constants as ct +# TODO: Resolve extra-core dependency from hexrd.hedm import xrdutil from hexrd.core.utils.decorators import memoize diff --git a/hexrd/core/instrument/detector.py b/hexrd/core/instrument/detector.py index 76aeb9e26..710ac6686 100644 --- a/hexrd/core/instrument/detector.py +++ b/hexrd/core/instrument/detector.py @@ -4,7 +4,6 @@ from typing import Optional from hexrd.core.instrument.constants import COATING_DEFAULT, FILTER_DEFAULTS, PHOSPHOR_DEFAULT -from hexrd.hedm.instrument.physics_package import AbstractPhysicsPackage from hexrd.core.instrument.physics_package import AbstractPhysicsPackage import numpy as np import numba @@ -12,16 +11,11 @@ from hexrd.core import constants as ct from hexrd.core import distortion as distortion_pkg from hexrd.core import matrixutil as mutil +# TODO: Resolve extra-core-dependency from hexrd.hedm import xrdutil from hexrd.core.rotations import mapAngle -from hexrd.hedm.material import crystallography -from hexrd.laue.material import crystallography -from hexrd.powder.material import crystallography from hexrd.core.material import crystallography -from hexrd.hedm.material.crystallography import PlaneData -from hexrd.laue.material.crystallography import PlaneData -from hexrd.powder.material.crystallography import PlaneData from hexrd.core.material.crystallography import PlaneData from hexrd.core.transforms.xfcapi import xy_to_gvec, gvec_to_xy, make_beam_rmat, make_rmat_of_expmap, oscill_angles_of_hkls, angles_to_dvec diff --git a/hexrd/core/instrument/detector_coatings.py b/hexrd/core/instrument/detector_coatings.py index 44b331ae3..11a563f8b 100644 --- a/hexrd/core/instrument/detector_coatings.py +++ b/hexrd/core/instrument/detector_coatings.py @@ -9,7 +9,7 @@ class AbstractLayer: Parameters ---------- - material : str or hexrd.material.Material + material : str or hexrd.core.material.Material either the formula or a hexrd material instance density : float density of element in g/cc diff --git a/hexrd/core/instrument/hedm_instrument.py b/hexrd/core/instrument/hedm_instrument.py index fe6bd619a..144931acc 100644 --- a/hexrd/core/instrument/hedm_instrument.py +++ b/hexrd/core/instrument/hedm_instrument.py @@ -60,10 +60,8 @@ from hexrd.core.gridutil import make_tolerance_grid from hexrd.core import matrixutil as mutil from hexrd.core.transforms.xfcapi import angles_to_gvec, gvec_to_xy, make_sample_rmat, make_rmat_of_expmap, unit_vector +# TODO: Resolve extra-core-dependency from hexrd.hedm import xrdutil -from hexrd.hedm.material.crystallography import PlaneData -from hexrd.laue.material.crystallography import PlaneData -from hexrd.powder.material.crystallography import PlaneData from hexrd.core.material.crystallography import PlaneData from hexrd.core import constants as ct from hexrd.core.rotations import mapAngle @@ -263,7 +261,7 @@ def _parse_imgser_dict(imgser_dict, det_key, roi=None): Returns ------- - ims : hexrd.imageseries + ims : hexrd.core.imageseries The desired imageseries object. """ @@ -398,7 +396,7 @@ def max_tth(instr): Parameters ---------- - instr : hexrd.instrument.HEDMInstrument instance + instr : hexrd.hedm.instrument.HEDMInstrument instance the instrument class to evalutate. Returns @@ -1136,9 +1134,9 @@ def extract_line_positions(self, plane_data, imgser_dict, to have any effect. The default is False. tth_distortion : special class, optional for special case of pinhole camera distortions. 
See - hexrd.xrdutil.phutil.SampleLayerDistortion (only type supported) + hexrd.hedm.xrdutil.phutil.SampleLayerDistortion (only type supported) fitting_kwargs : dict, optional - kwargs passed to hexrd.fitting.utils.fit_ring if do_fitting is True + kwargs passed to hexrd.core.fitting.utils.fit_ring if do_fitting is True Raises ------ diff --git a/hexrd/core/instrument/physics_package.py b/hexrd/core/instrument/physics_package.py index 0a50258b9..7b77f5e10 100644 --- a/hexrd/core/instrument/physics_package.py +++ b/hexrd/core/instrument/physics_package.py @@ -11,7 +11,7 @@ class AbstractPhysicsPackage: Parameters ---------- - sample_material : str or hexrd.material.Material + sample_material : str or hexrd.core.material.Material either the formula or a hexrd material instance sample_density : float density of sample material in g/cc @@ -19,7 +19,7 @@ class AbstractPhysicsPackage: sample thickness in microns sample_geometry : FIXME FIXME - pinhole_material : str or hexrd.material.Material, optional + pinhole_material : str or hexrd.core.material.Material, optional either the formula or a hexrd material instance pinhole_density : float density of pinhole material in g/cc @@ -27,7 +27,7 @@ class AbstractPhysicsPackage: pinhole thickness in microns pinhole_diameter : float pinhole diameter in microns - window_material : str or hexrd.material.Material + window_material : str or hexrd.core.material.Material either the formula or a hexrd material instance window_density : float density of window material in g/cc diff --git a/hexrd/core/material/crystallography.py b/hexrd/core/material/crystallography.py index 48eb857e8..482c625d9 100644 --- a/hexrd/core/material/crystallography.py +++ b/hexrd/core/material/crystallography.py @@ -34,7 +34,6 @@ import numpy as np -from hexrd.hedm.material.unitcell import unitcell from hexrd.core.material.unitcell import unitcell from hexrd.core.deprecation import deprecated from hexrd.core import constants diff --git a/hexrd/core/material/material.py b/hexrd/core/material/material.py index fd21f278a..4500fdadb 100644 --- a/hexrd/core/material/material.py +++ b/hexrd/core/material/material.py @@ -34,12 +34,8 @@ from configparser import SafeConfigParser as Parser import numpy as np -from hexrd.hedm.material.crystallography import PlaneData as PData -from hexrd.laue.material.crystallography import PlaneData as PData -from hexrd.powder.material.crystallography import PlaneData as PData from hexrd.core.material.crystallography import PlaneData as PData from hexrd.core.material import symmetry, unitcell -from hexrd.hedm.material import unitcell from hexrd.core.material.symbols import two_origin_choice from hexrd.core.valunits import valWUnit from hexrd.core.constants import ptable, ptableinverse, chargestate diff --git a/hexrd/core/material/mksupport.py b/hexrd/core/material/mksupport.py index d205ce3e7..17354b39f 100644 --- a/hexrd/core/material/mksupport.py +++ b/hexrd/core/material/mksupport.py @@ -4,7 +4,6 @@ import numpy as np import datetime import getpass -from hexrd.hedm.material.unitcell import _StiffnessDict, _pgDict from hexrd.core.material.unitcell import _StiffnessDict, _pgDict diff --git a/hexrd/core/material/symmetry.py b/hexrd/core/material/symmetry.py index a1a7fd0da..5cb5dfc0f 100644 --- a/hexrd/core/material/symmetry.py +++ b/hexrd/core/material/symmetry.py @@ -33,7 +33,7 @@ from numba import njit from numpy import array, sqrt, pi, vstack, c_, dot, argmax -# from hexrd.rotations import quatOfAngleAxis, quatProductMatrix, fixQuat +# from 
hexrd.core.rotations import quatOfAngleAxis, quatProductMatrix, fixQuat from hexrd.core import rotations as rot from hexrd.core import constants from hexrd.core.utils.decorators import memoize diff --git a/hexrd/core/material/unitcell.py b/hexrd/core/material/unitcell.py index c6debaa10..5904732e0 100644 --- a/hexrd/core/material/unitcell.py +++ b/hexrd/core/material/unitcell.py @@ -3,6 +3,7 @@ from numba import njit from hexrd.core import constants from hexrd.core.material import spacegroup, symbols, symmetry +# TODO: Resolve extra-core-dependency from hexrd.hedm.ipfcolor import sphere_sector, colorspace from hexrd.core.valunits import valWUnit import hexrd.core.resources @@ -752,7 +753,7 @@ def InitializeInterpTable(self): f_anomalous_data = [] self.pe_cs = {} data = ( - importlib.resources.files(hexrd.resources) + importlib.resources.files(hexrd.core.resources) .joinpath('Anomalous.h5') .open('rb') ) @@ -788,6 +789,7 @@ def InitializeInterpTable(self): self.f_anomalous_data[i, :nd, :] = f_anomalous_data[i] def CalcXRSF(self, hkl): + # TODO: Resolve extra-core dependency from hexrd.powder.wppf.xtal import _calcxrsf ''' the 1E-2 is to convert to A^-2 diff --git a/hexrd/core/material/utils.py b/hexrd/core/material/utils.py index 9d1269ac7..afbdd8f63 100644 --- a/hexrd/core/material/utils.py +++ b/hexrd/core/material/utils.py @@ -83,7 +83,7 @@ def calculate_linear_absorption_length(density, the attenuation length in microns """ - data = importlib.resources.open_binary(hexrd.resources, 'mu_en.h5') + data = importlib.resources.open_binary(hexrd.core.resources, 'mu_en.h5') fid = h5py.File(data, 'r') formula_dict = interpret_formula(formula) @@ -145,7 +145,7 @@ def calculate_energy_absorption_length(density, the attenuation length in microns """ - data = importlib.resources.open_binary(hexrd.resources, 'mu_en.h5') + data = importlib.resources.open_binary(hexrd.core.resources, 'mu_en.h5') fid = h5py.File(data, 'r') formula_dict = interpret_formula(formula) diff --git a/hexrd/core/projections/polar.py b/hexrd/core/projections/polar.py index 9ffc249ef..fc89e9b7b 100644 --- a/hexrd/core/projections/polar.py +++ b/hexrd/core/projections/polar.py @@ -1,13 +1,13 @@ import numpy as np from hexrd.core import constants -from hexrd.hedm.material.crystallography import PlaneData -from hexrd.laue.material.crystallography import PlaneData -from hexrd.powder.material.crystallography import PlaneData from hexrd.core.material.crystallography import PlaneData -from hexrd.hedm.xrdutil.utils import _project_on_detector_cylinder, _project_on_detector_plane -from hexrd.hed.xrdutil.utils import _project_on_detector_cylinder, _project_on_detector_plane -from hexrd.laue.xrdutil.utils import _project_on_detector_cylinder, _project_on_detector_plane + +# TODO: Resolve extra-core-dependency +from hexrd.hedm.xrdutil.utils import ( + _project_on_detector_cylinder, + _project_on_detector_plane, +) from hexrd.core.utils.panel_buffer import panel_buffer_as_2d_array @@ -16,10 +16,15 @@ class PolarView: Create (two-theta, eta) plot of detector images. """ - def __init__(self, plane_data, instrument, - eta_min=0., eta_max=360., - pixel_size=(0.1, 0.25), - cache_coordinate_map=False): + def __init__( + self, + plane_data, + instrument, + eta_min=0.0, + eta_max=360.0, + pixel_size=(0.1, 0.25), + cache_coordinate_map=False, + ): """ Instantiates a PolarView class. @@ -31,7 +36,7 @@ def __init__(self, plane_data, instrument, as defined but the active hkls and the tThWidth (or strainMag). 
If array_like, the input must be (2, ) specifying the [min, maz] 2theta values explicitly in degrees. - instrument : hexrd.instrument.HEDMInstrument + instrument : hexrd.hedm.instrument.HEDMInstrument The instruemnt object. eta_min : scalar, optional The minimum azimuthal extent in degrees. The default is 0. @@ -71,8 +76,9 @@ def __init__(self, plane_data, instrument, self._eta_min = np.radians(eta_min) self._eta_max = np.radians(eta_max) - assert np.all(np.asarray(pixel_size) > 0), \ - 'pixel sizes must be non-negative' + assert np.all( + np.asarray(pixel_size) > 0 + ), 'pixel sizes must be non-negative' self._tth_pixel_size = pixel_size[0] self._eta_pixel_size = pixel_size[1] @@ -172,12 +178,12 @@ def eta_pixel_size(self, x): @property def ntth(self): # return int(np.ceil(np.degrees(self.tth_range)/self.tth_pixel_size)) - return int(round(np.degrees(self.tth_range)/self.tth_pixel_size)) + return int(round(np.degrees(self.tth_range) / self.tth_pixel_size)) @property def neta(self): # return int(np.ceil(np.degrees(self.eta_range)/self.eta_pixel_size)) - return int(round(np.degrees(self.eta_range)/self.eta_pixel_size)) + return int(round(np.degrees(self.eta_range) / self.eta_pixel_size)) @property def shape(self): @@ -185,19 +191,29 @@ def shape(self): @property def angular_grid(self): - tth_vec = np.radians(self.tth_pixel_size*(np.arange(self.ntth)))\ - + self.tth_min + 0.5*np.radians(self.tth_pixel_size) - eta_vec = np.radians(self.eta_pixel_size*(np.arange(self.neta)))\ - + self.eta_min + 0.5*np.radians(self.eta_pixel_size) + tth_vec = ( + np.radians(self.tth_pixel_size * (np.arange(self.ntth))) + + self.tth_min + + 0.5 * np.radians(self.tth_pixel_size) + ) + eta_vec = ( + np.radians(self.eta_pixel_size * (np.arange(self.neta))) + + self.eta_min + + 0.5 * np.radians(self.eta_pixel_size) + ) return np.meshgrid(eta_vec, tth_vec, indexing='ij') @property def extent(self): ev, tv = self.angular_grid - heps = np.radians(0.5*self.eta_pixel_size) - htps = np.radians(0.5*self.tth_pixel_size) - return [np.min(tv) - htps, np.max(tv) + htps, - np.max(ev) + heps, np.min(ev) - heps] + heps = np.radians(0.5 * self.eta_pixel_size) + htps = np.radians(0.5 * self.tth_pixel_size) + return [ + np.min(tv) - htps, + np.max(tv) + htps, + np.max(ev) + heps, + np.min(ev) - heps, + ] def _func_project_on_detector(self, detector): ''' @@ -211,32 +227,37 @@ def _func_project_on_detector(self, detector): def _args_project_on_detector(self, gvec_angs, detector): kwargs = {'beamVec': detector.bvec} - arg = (gvec_angs, - detector.rmat, - constants.identity_3x3, - self.chi, - detector.tvec, - constants.zeros_3, - self.tvec, - detector.distortion) + arg = ( + gvec_angs, + detector.rmat, + constants.identity_3x3, + self.chi, + detector.tvec, + constants.zeros_3, + self.tvec, + detector.distortion, + ) if detector.detector_type == 'cylindrical': - arg = (gvec_angs, - self.chi, - detector.tvec, - detector.caxis, - detector.paxis, - detector.radius, - detector.physical_size, - detector.angle_extent, - detector.distortion) + arg = ( + gvec_angs, + self.chi, + detector.tvec, + detector.caxis, + detector.paxis, + detector.radius, + detector.physical_size, + detector.angle_extent, + detector.distortion, + ) return arg, kwargs # ========================================================================= # ####### METHODS ####### # ========================================================================= - def warp_image(self, image_dict, pad_with_nans=False, - do_interpolation=True): + def warp_image( + self, image_dict, 
pad_with_nans=False, do_interpolation=True + ): """ Performs the polar mapping of the input images. @@ -295,22 +316,21 @@ def _generate_coordinate_mapping(self) -> dict[str, dict[str, np.ndarray]]: respective arrays as the values. """ angpts = self.angular_grid - dummy_ome = np.zeros((self.ntth*self.neta)) + dummy_ome = np.zeros((self.ntth * self.neta)) mapping = {} for detector_id, panel in self.detectors.items(): _project_on_detector = self._func_project_on_detector(panel) - gvec_angs = np.vstack([ - angpts[1].flatten(), - angpts[0].flatten(), - dummy_ome]).T + gvec_angs = np.vstack( + [angpts[1].flatten(), angpts[0].flatten(), dummy_ome] + ).T - args, kwargs = self._args_project_on_detector(gvec_angs, - panel) + args, kwargs = self._args_project_on_detector(gvec_angs, panel) - xypts = np.nan*np.ones((len(gvec_angs), 2)) - valid_xys, rmats_s, on_plane = _project_on_detector(*args, - **kwargs) + xypts = np.nan * np.ones((len(gvec_angs), 2)) + valid_xys, rmats_s, on_plane = _project_on_detector( + *args, **kwargs + ) xypts[on_plane, :] = valid_xys _, on_panel = panel.clip_to_panel(xypts, buffer_edges=True) @@ -323,11 +343,12 @@ def _generate_coordinate_mapping(self) -> dict[str, dict[str, np.ndarray]]: return mapping def _warp_image_from_coordinate_map( - self, - image_dict: dict[str, np.ndarray], - coordinate_map: dict[str, dict[str, np.ndarray]], - pad_with_nans: bool = False, - do_interpolation=True) -> np.ma.MaskedArray: + self, + image_dict: dict[str, np.ndarray], + coordinate_map: dict[str, dict[str, np.ndarray]], + pad_with_nans: bool = False, + do_interpolation=True, + ) -> np.ma.MaskedArray: panel_buffer_fill_value = np.nan img_dict = dict.fromkeys(self.detectors) @@ -339,8 +360,9 @@ def _warp_image_from_coordinate_map( # Before warping, mask out any pixels that are invalid, # so that they won't affect the results. buffer = panel_buffer_as_2d_array(panel) - if (np.issubdtype(type(panel_buffer_fill_value), np.floating) and - not np.issubdtype(img.dtype, np.floating)): + if np.issubdtype( + type(panel_buffer_fill_value), np.floating + ) and not np.issubdtype(img.dtype, np.floating): # Convert to float. This is especially important # for nan, since it is a float... img = img.astype(float) @@ -352,13 +374,12 @@ def _warp_image_from_coordinate_map( if do_interpolation: this_img = panel.interpolate_bilinear( - xypts, img, - pad_with_nans=pad_with_nans, - on_panel=on_panel).reshape(self.shape) + xypts, img, pad_with_nans=pad_with_nans, on_panel=on_panel + ).reshape(self.shape) else: this_img = panel.interpolate_nearest( - xypts, img, - pad_with_nans=pad_with_nans).reshape(self.shape) + xypts, img, pad_with_nans=pad_with_nans + ).reshape(self.shape) # It is faster to keep track of the global nans like this # rather than the previous way we were doing it... @@ -373,11 +394,11 @@ def _warp_image_from_coordinate_map( summed_img = np.sum(list(img_dict.values()), axis=0) return np.ma.masked_array( - data=summed_img, mask=nan_mask, fill_value=0. 
+ data=summed_img, mask=nan_mask, fill_value=0.0 ) def tth_to_pixel(self, tth): """ convert two-theta value to pixel value (float) along two-theta axis """ - return np.degrees(tth - self.tth_min)/self.tth_pixel_size + return np.degrees(tth - self.tth_min) / self.tth_pixel_size diff --git a/hexrd/core/projections/spherical.py b/hexrd/core/projections/spherical.py index c2636f499..c98c2b704 100644 --- a/hexrd/core/projections/spherical.py +++ b/hexrd/core/projections/spherical.py @@ -2,6 +2,7 @@ from skimage.transform import PiecewiseAffineTransform, warp from hexrd.core import constants +# TODO: Resolve extra-core-dependency from hexrd.hedm.xrdutil.utils import zproject_sph_angles diff --git a/hexrd/core/utils/decorators.py b/hexrd/core/utils/decorators.py index 965e0b48e..9916562d7 100644 --- a/hexrd/core/utils/decorators.py +++ b/hexrd/core/utils/decorators.py @@ -2,8 +2,8 @@ """Decorators that don't go anywhere else. This module contains decorators that don't really go with another module -in :mod:`hexrd.utils`. Before putting something here please see if it should -go into another topical module in :mod:`hexrd.utils`. +in :mod:`hexrd.core.utils`. Before putting something here please see if it should +go into another topical module in :mod:`hexrd.core.utils`. """ from collections import OrderedDict diff --git a/hexrd/core/utils/hkl.py b/hexrd/core/utils/hkl.py index d25ea5165..ef9a44155 100644 --- a/hexrd/core/utils/hkl.py +++ b/hexrd/core/utils/hkl.py @@ -1,8 +1,5 @@ import numpy as np -from hexrd.hedm.material.crystallography import hklToStr -from hexrd.laue.material.crystallography import hklToStr -from hexrd.powder.material.crystallography import hklToStr from hexrd.core.material.crystallography import hklToStr diff --git a/hexrd/hed/instrument/detector.py b/hexrd/hed/instrument/detector.py index 76aeb9e26..0c2d89d52 100644 --- a/hexrd/hed/instrument/detector.py +++ b/hexrd/hed/instrument/detector.py @@ -4,7 +4,6 @@ from typing import Optional from hexrd.core.instrument.constants import COATING_DEFAULT, FILTER_DEFAULTS, PHOSPHOR_DEFAULT -from hexrd.hedm.instrument.physics_package import AbstractPhysicsPackage from hexrd.core.instrument.physics_package import AbstractPhysicsPackage import numpy as np import numba @@ -15,13 +14,7 @@ from hexrd.hedm import xrdutil from hexrd.core.rotations import mapAngle -from hexrd.hedm.material import crystallography -from hexrd.laue.material import crystallography -from hexrd.powder.material import crystallography from hexrd.core.material import crystallography -from hexrd.hedm.material.crystallography import PlaneData -from hexrd.laue.material.crystallography import PlaneData -from hexrd.powder.material.crystallography import PlaneData from hexrd.core.material.crystallography import PlaneData from hexrd.core.transforms.xfcapi import xy_to_gvec, gvec_to_xy, make_beam_rmat, make_rmat_of_expmap, oscill_angles_of_hkls, angles_to_dvec diff --git a/hexrd/hed/instrument/hedm_instrument.py b/hexrd/hed/instrument/hedm_instrument.py index fe6bd619a..1c85154d1 100644 --- a/hexrd/hed/instrument/hedm_instrument.py +++ b/hexrd/hed/instrument/hedm_instrument.py @@ -60,11 +60,9 @@ from hexrd.core.gridutil import make_tolerance_grid from hexrd.core import matrixutil as mutil from hexrd.core.transforms.xfcapi import angles_to_gvec, gvec_to_xy, make_sample_rmat, make_rmat_of_expmap, unit_vector +# TODO: Resolve extra-workflow-dependency from hexrd.hedm import xrdutil from hexrd.hedm.material.crystallography import PlaneData -from hexrd.laue.material.crystallography 
import PlaneData -from hexrd.powder.material.crystallography import PlaneData -from hexrd.core.material.crystallography import PlaneData from hexrd.core import constants as ct from hexrd.core.rotations import mapAngle from hexrd.core import distortion as distortion_pkg @@ -72,14 +70,16 @@ from hexrd.core.utils.hdf5 import unwrap_dict_to_h5, unwrap_h5_to_dict from hexrd.core.utils.yaml import NumpyToNativeDumper from hexrd.core.valunits import valWUnit +# TODO: Resolve extra-workflow-dependency from hexrd.powder.wppf import LeBail -from .cylindrical_detector import CylindricalDetector -from .detector import beam_energy_DFLT, Detector, max_workers_DFLT -from .planar_detector import PlanarDetector +from ...core.instrument.cylindrical_detector import CylindricalDetector +from ...core.instrument.detector import beam_energy_DFLT, max_workers_DFLT +from ...core.instrument.planar_detector import PlanarDetector from skimage.draw import polygon from skimage.util import random_noise +# TODO: Resolve extra-workflow-dependency from hexrd.powder.wppf import wppfsupport try: @@ -263,7 +263,7 @@ def _parse_imgser_dict(imgser_dict, det_key, roi=None): Returns ------- - ims : hexrd.imageseries + ims : hexrd.core.imageseries The desired imageseries object. """ @@ -398,7 +398,7 @@ def max_tth(instr): Parameters ---------- - instr : hexrd.instrument.HEDMInstrument instance + instr : hexrd.hedm.instrument.HEDMInstrument instance the instrument class to evalutate. Returns @@ -1136,9 +1136,9 @@ def extract_line_positions(self, plane_data, imgser_dict, to have any effect. The default is False. tth_distortion : special class, optional for special case of pinhole camera distortions. See - hexrd.xrdutil.phutil.SampleLayerDistortion (only type supported) + hexrd.hedm.xrdutil.phutil.SampleLayerDistortion (only type supported) fitting_kwargs : dict, optional - kwargs passed to hexrd.fitting.utils.fit_ring if do_fitting is True + kwargs passed to hexrd.core.fitting.utils.fit_ring if do_fitting is True Raises ------ diff --git a/hexrd/hed/xrdutil/phutil.py b/hexrd/hed/xrdutil/phutil.py index 277c0fc3c..2740bbfb8 100644 --- a/hexrd/hed/xrdutil/phutil.py +++ b/hexrd/hed/xrdutil/phutil.py @@ -151,7 +151,7 @@ def tth_corr_sample_layer(panel, xy_pts, Parameters ---------- - panel : hexrd.instrument.Detector + panel : hexrd.core.instrument.Detector A panel instance. xy_pts : array_like The (n, 2) array of n (x, y) coordinates to be transformed in the raw @@ -221,7 +221,7 @@ def tth_corr_map_sample_layer(instrument, Parameters ---------- - instrument : hexrd.instrument.HEDMInstrument + instrument : hexrd.hedm.instrument.HEDMInstrument The pionhole camera instrument object. layer_standoff : scalar The sample layer standoff from the upstream face of the pinhole @@ -277,7 +277,7 @@ def tth_corr_pinhole(panel, xy_pts, Parameters ---------- - panel : hexrd.instrument.Detector + panel : hexrd.core.instrument.Detector A detector instance. xy_pts : array_like The (n, 2) array of n (x, y) coordinates to be transformed in the raw @@ -342,7 +342,7 @@ def tth_corr_map_pinhole(instrument, pinhole_thickness, pinhole_radius): Parameters ---------- - instrument : hexrd.instrument.HEDMInstrument + instrument : hexrd.hedm.instrument.HEDMInstrument The pionhole camera instrument object. 
pinhole_thickness : scalar The thickenss (height) of the pinhole (cylinder) in mm diff --git a/hexrd/hed/xrdutil/utils.py b/hexrd/hed/xrdutil/utils.py index 4c31e29ea..2ebb5c936 100644 --- a/hexrd/hed/xrdutil/utils.py +++ b/hexrd/hed/xrdutil/utils.py @@ -28,10 +28,8 @@ from typing import Optional, Union, Any, Generator +# TODO: Resolve extra-workflow dependency from hexrd.hedm.material.crystallography import PlaneData -from hexrd.laue.material.crystallography import PlaneData -from hexrd.powder.material.crystallography import PlaneData -from hexrd.core.material.crystallography import PlaneData from hexrd.core.distortion.distortionabc import DistortionABC import numba @@ -44,9 +42,6 @@ from hexrd.core import gridutil as gutil from hexrd.hedm.material.crystallography import processWavelength, PlaneData -from hexrd.laue.material.crystallography import processWavelength, PlaneData -from hexrd.powder.material.crystallography import processWavelength, PlaneData -from hexrd.core.material.crystallography import processWavelength, PlaneData from hexrd.core.transforms import xfcapi from hexrd.core.valunits import valWUnit @@ -56,7 +51,7 @@ from hexrd.core.deprecation import deprecated -simlp = 'hexrd.instrument.hedm_instrument.HEDMInstrument.simulate_laue_pattern' +simlp = 'hexrd.hedm.instrument.hedm_instrument.HEDMInstrument.simulate_laue_pattern' # ============================================================================= # PARAMETERS diff --git a/hexrd/hedm/cli/find_orientations.py b/hexrd/hedm/cli/find_orientations.py index 5d7bf768f..efad7ec46 100644 --- a/hexrd/hedm/cli/find_orientations.py +++ b/hexrd/hedm/cli/find_orientations.py @@ -8,7 +8,6 @@ from hexrd.core import constants as const from hexrd.hedm import config -from hexrd.core import config from hexrd.core import instrument from hexrd.core.transforms import xfcapi from hexrd.hedm.findorientations import find_orientations, write_scored_orientations diff --git a/hexrd/hedm/cli/fit_grains.py b/hexrd/hedm/cli/fit_grains.py index f99039fb3..1018f433e 100644 --- a/hexrd/hedm/cli/fit_grains.py +++ b/hexrd/hedm/cli/fit_grains.py @@ -32,7 +32,7 @@ class GrainData(_BaseGrainData): """Simple class for storing grain output data To read the grains file, use the `load` method, like this: - > from hexrd.fitgrains import GrainData + > from hexrd.hedm.fitgrains import GrainData > gd = GrainData.load("grains.npz") """ diff --git a/hexrd/hedm/cli/main.py b/hexrd/hedm/cli/main.py index fa09d69f5..6e827b5d1 100644 --- a/hexrd/hedm/cli/main.py +++ b/hexrd/hedm/cli/main.py @@ -71,6 +71,7 @@ def main(): try: import argcomplete + argcomplete.autocomplete(p) except ImportError: pass diff --git a/hexrd/hedm/config/findorientations.py b/hexrd/hedm/config/findorientations.py index 8c75829b3..212e4d6d6 100644 --- a/hexrd/hedm/config/findorientations.py +++ b/hexrd/hedm/config/findorientations.py @@ -4,7 +4,7 @@ import numpy as np -from .config import Config +from ...core.config.config import Config logger = logging.getLogger('hexrd.config') diff --git a/hexrd/hedm/config/fitgrains.py b/hexrd/hedm/config/fitgrains.py index 8a708efec..22b04a067 100644 --- a/hexrd/hedm/config/fitgrains.py +++ b/hexrd/hedm/config/fitgrains.py @@ -1,7 +1,7 @@ import logging import os -from .config import Config +from ...core.config.config import Config from .utils import get_exclusion_parameters diff --git a/hexrd/hedm/config/instrument.py b/hexrd/hedm/config/instrument.py index 31dbbb310..f284fb044 100644 --- a/hexrd/hedm/config/instrument.py +++ b/hexrd/hedm/config/instrument.py @@ 
-1,7 +1,7 @@ import h5py import yaml -from .config import Config +from ...core.config.config import Config from .loader import NumPyIncludeLoader from hexrd.core import instrument diff --git a/hexrd/hedm/config/root.py b/hexrd/hedm/config/root.py index e698a3f37..cfac10e7c 100644 --- a/hexrd/hedm/config/root.py +++ b/hexrd/hedm/config/root.py @@ -6,11 +6,11 @@ from hexrd.core.constants import shared_ims_key from hexrd.core import imageseries -from .config import Config +from ...core.config.config import Config from .instrument import Instrument from .findorientations import FindOrientationsConfig from .fitgrains import FitGrainsConfig -from .material import MaterialConfig +from ...core.config.material import MaterialConfig logger = logging.getLogger('hexrd.config') diff --git a/hexrd/hedm/findorientations.py b/hexrd/hedm/findorientations.py index 96750cc03..043cd5fe1 100644 --- a/hexrd/hedm/findorientations.py +++ b/hexrd/hedm/findorientations.py @@ -479,7 +479,7 @@ def generate_eta_ome_maps(cfg, hkls=None, save=True): Parameters ---------- - cfg : hexrd.config.root.RootConfig + cfg : hexrd.core.config.root.RootConfig A hexrd far-field HEDM config instance. hkls : array_like, optional If not None, an override for the hkls used to generate maps. This can diff --git a/hexrd/core/fitting/calibration/grain.py b/hexrd/hedm/fitting/calibration/grain.py similarity index 96% rename from hexrd/core/fitting/calibration/grain.py rename to hexrd/hedm/fitting/calibration/grain.py index 1fd9e8f5f..6044a5c13 100644 --- a/hexrd/core/fitting/calibration/grain.py +++ b/hexrd/hedm/fitting/calibration/grain.py @@ -6,9 +6,9 @@ from hexrd.core.rotations import angularDifference from hexrd.core.transforms import xfcapi -from .abstract_grain import AbstractGrainCalibrator -from .lmfit_param_handling import DEFAULT_EULER_CONVENTION -from . import grains as grainutil +from ....core.fitting.calibration.abstract_grain import AbstractGrainCalibrator +from ....core.fitting.calibration.lmfit_param_handling import DEFAULT_EULER_CONVENTION +from .. import grains as grainutil logger = logging.getLogger(__name__) diff --git a/hexrd/hedm/fitting/calibration/multigrain.py b/hexrd/hedm/fitting/calibration/multigrain.py new file mode 100644 index 000000000..afa1ef198 --- /dev/null +++ b/hexrd/hedm/fitting/calibration/multigrain.py @@ -0,0 +1,393 @@ +import logging +import os + +import numpy as np +from scipy.optimize import leastsq, least_squares + +from hexrd.core import constants as cnst +from hexrd.core import matrixutil as mutil +from hexrd.core import rotations +from hexrd.core.transforms import xfcapi + +from .. import grains as grainutil + +logger = logging.getLogger() +logger.setLevel('INFO') + +# grains +grain_flags_DFLT = np.array( + [1, 1, 1, + 1, 0, 1, + 0, 0, 0, 0, 0, 0], + dtype=bool +) + +ext_eta_tol = np.radians(5.) 
# for HEDM cal, may make this a user param
+
+
+def calibrate_instrument_from_sx(
+        instr, grain_params, bmat, xyo_det, hkls_idx,
+        param_flags=None, grain_flags=None,
+        ome_period=None,
+        xtol=cnst.sqrt_epsf, ftol=cnst.sqrt_epsf,
+        factor=10., sim_only=False, use_robust_lsq=False):
+    """
+    arguments xyo_det, hkls_idx are dicts keyed by panel id
+
+    """
+    grain_params = np.atleast_2d(grain_params)
+    ngrains = len(grain_params)
+    pnames = generate_parameter_names(instr, grain_params)
+
+    # reset parameter flags for instrument as specified
+    if param_flags is None:
+        param_flags = instr.calibration_flags
+    else:
+        # will throw an AssertionError if wrong length
+        instr.calibration_flags = param_flags
+
+    # re-map omegas if need be
+    if ome_period is not None:
+        for det_key in instr.detectors:
+            for ig in range(ngrains):
+                xyo_det[det_key][ig][:, 2] = rotations.mapAngle(
+                    xyo_det[det_key][ig][:, 2],
+                    ome_period
+                )
+
+    # first grab the instrument parameters
+    # 7 global
+    # 6*num_panels for the detectors
+    # num_panels*ndp in case of distortion
+    plist_full = instr.calibration_parameters
+
+    # now handle grains
+    # reset parameter flags for grains as specified
+    if grain_flags is None:
+        grain_flags = np.tile(grain_flags_DFLT, ngrains)
+
+    plist_full = np.concatenate(
+        [plist_full, np.hstack(grain_params)]
+    )
+    plf_copy = np.copy(plist_full)
+
+    # concatenate refinement flags
+    refine_flags = np.hstack([param_flags, grain_flags])
+    plist_fit = plist_full[refine_flags]
+    fit_args = (plist_full,
+                param_flags, grain_flags,
+                instr, xyo_det, hkls_idx,
+                bmat, ome_period)
+    if sim_only:
+        return sxcal_obj_func(
+            plist_fit, plist_full,
+            param_flags, grain_flags,
+            instr, xyo_det, hkls_idx,
+            bmat, ome_period,
+            sim_only=True)
+    else:
+        logger.info("Set up to refine:")
+        for i in np.where(refine_flags)[0]:
+            logger.info("\t%s = %1.7e" % (pnames[i], plist_full[i]))
+
+        # run optimization
+        if use_robust_lsq:
+            result = least_squares(
+                sxcal_obj_func, plist_fit, args=fit_args,
+                xtol=xtol, ftol=ftol,
+                loss='soft_l1', method='trf'
+            )
+            x = result.x
+            resd = result.fun
+            mesg = result.message
+            ierr = result.status
+        else:
+            # do least squares problem
+            x, cov_x, infodict, mesg, ierr = leastsq(
+                sxcal_obj_func, plist_fit, args=fit_args,
+                factor=factor, xtol=xtol, ftol=ftol,
+                full_output=1
+            )
+            resd = infodict['fvec']
+        if ierr not in [1, 2, 3, 4]:
+            raise RuntimeError(f"solution not found: {ierr=}")
+        else:
+            logger.info(f"optimization finished successfully with {ierr=}")
+            logger.info(mesg)
+
+        # ??? output message handling?
+        fit_params = plist_full
+        fit_params[refine_flags] = x
+
+        # run simulation with optimized results
+        sim_final = sxcal_obj_func(
+            x, plist_full,
+            param_flags, grain_flags,
+            instr, xyo_det, hkls_idx,
+            bmat, ome_period,
+            sim_only=True)
+
+        # ??? reset instrument here?
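+        # update_from_parameter_list pushes the fitted values back onto the
+        # instrument object, so the caller's instr is mutated in place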
+        instr.update_from_parameter_list(fit_params)
+
+        # report final
+        logger.info("Optimization Results:")
+        for i in np.where(refine_flags)[0]:
+            logger.info("\t%s = %1.7e --> %1.7e"
+                        % (pnames[i], plf_copy[i], fit_params[i]))
+
+        return fit_params, resd, sim_final
+
+
+def generate_parameter_names(instr, grain_params):
+    pnames = [
+        '{:>24s}'.format('beam energy'),
+        '{:>24s}'.format('beam azimuth'),
+        '{:>24s}'.format('beam polar'),
+        '{:>24s}'.format('chi'),
+        '{:>24s}'.format('tvec_s[0]'),
+        '{:>24s}'.format('tvec_s[1]'),
+        '{:>24s}'.format('tvec_s[2]'),
+    ]
+
+    for det_key, panel in instr.detectors.items():
+        pnames += [
+            '{:>24s}'.format('%s tilt[0]' % det_key),
+            '{:>24s}'.format('%s tilt[1]' % det_key),
+            '{:>24s}'.format('%s tilt[2]' % det_key),
+            '{:>24s}'.format('%s tvec[0]' % det_key),
+            '{:>24s}'.format('%s tvec[1]' % det_key),
+            '{:>24s}'.format('%s tvec[2]' % det_key),
+        ]
+        # now add distortion if present
+        if panel.distortion is not None:
+            for j in range(len(panel.distortion.params)):
+                pnames.append(
+                    '{:>24s}'.format('%s dparam[%d]' % (det_key, j))
+                )
+
+    grain_params = np.atleast_2d(grain_params)
+    for ig, grain in enumerate(grain_params):
+        pnames += [
+            '{:>24s}'.format('grain %d xi[0]' % ig),
+            '{:>24s}'.format('grain %d xi[1]' % ig),
+            '{:>24s}'.format('grain %d xi[2]' % ig),
+            '{:>24s}'.format('grain %d tvec_c[0]' % ig),
+            '{:>24s}'.format('grain %d tvec_c[1]' % ig),
+            '{:>24s}'.format('grain %d tvec_c[2]' % ig),
+            '{:>24s}'.format('grain %d vinv_s[0]' % ig),
+            '{:>24s}'.format('grain %d vinv_s[1]' % ig),
+            '{:>24s}'.format('grain %d vinv_s[2]' % ig),
+            '{:>24s}'.format('grain %d vinv_s[3]' % ig),
+            '{:>24s}'.format('grain %d vinv_s[4]' % ig),
+            '{:>24s}'.format('grain %d vinv_s[5]' % ig)
+        ]
+
+    return pnames
+
+
+def sxcal_obj_func(plist_fit, plist_full,
+                   param_flags, grain_flags,
+                   instr, xyo_det, hkls_idx,
+                   bmat, ome_period,
+                   sim_only=False, return_value_flag=None):
+    """
+    Residual function for calibrate_instrument_from_sx.
+    """
+    npi = len(instr.calibration_parameters)
+    NP_GRN = 12
+
+    # stack flags and force bool repr
+    refine_flags = np.array(
+        np.hstack([param_flags, grain_flags]),
+        dtype=bool)
+
+    # fill out full parameter list
+    # !!! no scaling for now
+    plist_full[refine_flags] = plist_fit
+
+    # instrument update
+    instr.update_from_parameter_list(plist_full)
+
+    # assign some useful params
+    wavelength = instr.beam_wavelength
+    bvec = instr.beam_vector
+    chi = instr.chi
+    tvec_s = instr.tvec
+
+    # right now just stuck on the end and assumed
+    # to all be the same length... FIX THIS
+    xy_unwarped = {}
+    meas_omes = {}
+    calc_omes = {}
+    calc_xy = {}
+
+    # grain params
+    grain_params = plist_full[npi:]
+    if np.mod(len(grain_params), NP_GRN) != 0:
+        raise RuntimeError("parameter list length is not consistent")
+    ngrains = len(grain_params) // NP_GRN
+    grain_params = grain_params.reshape((ngrains, NP_GRN))
+
+    # loop over panels
+    npts_tot = 0
+    for det_key, panel in instr.detectors.items():
+        rmat_d = panel.rmat
+        tvec_d = panel.tvec
+
+        xy_unwarped[det_key] = []
+        meas_omes[det_key] = []
+        calc_omes[det_key] = []
+        calc_xy[det_key] = []
+
+        for ig, grain in enumerate(grain_params):
+            ghkls = hkls_idx[det_key][ig]
+            xyo = xyo_det[det_key][ig]
+
+            npts_tot += len(xyo)
+
+            xy_unwarped[det_key].append(xyo[:, :2])
+            meas_omes[det_key].append(xyo[:, 2])
+            if panel.distortion is not None:  # do unwarping
+                xy_unwarped[det_key][ig] = panel.distortion.apply(
+                    xy_unwarped[det_key][ig]
+                )
+
+            # transform G-vectors:
+            # 1) convert inv. stretch tensor from MV notation into 3x3
+            # 2) take reciprocal lattice vectors from CRYSTAL to SAMPLE frame
+            # 3) apply stretch tensor
+            # 4) normalize reciprocal lattice vectors in SAMPLE frame
+            # 5) transform unit reciprocal lattice vectors back to CRYSTAL frame
+            rmat_c = xfcapi.make_rmat_of_expmap(grain[:3])
+            tvec_c = grain[3:6]
+            vinv_s = grain[6:]
+            gvec_c = np.dot(bmat, ghkls.T)
+            vmat_s = mutil.vecMVToSymm(vinv_s)
+            ghat_s = mutil.unitVector(np.dot(vmat_s, np.dot(rmat_c, gvec_c)))
+            ghat_c = np.dot(rmat_c.T, ghat_s)
+
+            match_omes, calc_omes_tmp = grainutil.matchOmegas(
+                xyo, ghkls.T,
+                chi, rmat_c, bmat, wavelength,
+                vInv=vinv_s,
+                beamVec=bvec,
+                omePeriod=ome_period)
+
+            rmat_s_arr = xfcapi.make_sample_rmat(
+                chi, np.ascontiguousarray(calc_omes_tmp)
+            )
+            calc_xy_tmp = xfcapi.gvec_to_xy(
+                ghat_c.T, rmat_d, rmat_s_arr, rmat_c,
+                tvec_d, tvec_s, tvec_c
+            )
+            if np.any(np.isnan(calc_xy_tmp)):
+                logger.warning("infeasible parameters: may want to scale back "
+                               "finite difference step size")
+
+            calc_omes[det_key].append(calc_omes_tmp)
+            calc_xy[det_key].append(calc_xy_tmp)
+
+    # return values
+    if sim_only:
+        retval = {}
+        for det_key in calc_xy.keys():
+            # ??? calc_xy is always 2-d
+            retval[det_key] = []
+            for ig in range(ngrains):
+                retval[det_key].append(
+                    np.vstack(
+                        [calc_xy[det_key][ig].T, calc_omes[det_key][ig]]
+                    ).T
+                )
+    else:
+        meas_xy_all = []
+        calc_xy_all = []
+        meas_omes_all = []
+        calc_omes_all = []
+        for det_key in xy_unwarped.keys():
+            meas_xy_all.append(np.vstack(xy_unwarped[det_key]))
+            calc_xy_all.append(np.vstack(calc_xy[det_key]))
+            meas_omes_all.append(np.hstack(meas_omes[det_key]))
+            calc_omes_all.append(np.hstack(calc_omes[det_key]))
+        meas_xy_all = np.vstack(meas_xy_all)
+        calc_xy_all = np.vstack(calc_xy_all)
+        meas_omes_all = np.hstack(meas_omes_all)
+        calc_omes_all = np.hstack(calc_omes_all)
+
+        diff_vecs_xy = calc_xy_all - meas_xy_all
+        diff_ome = rotations.angularDifference(calc_omes_all, meas_omes_all)
+        retval = np.hstack(
+            [diff_vecs_xy,
+             diff_ome.reshape(npts_tot, 1)]
+        ).flatten()
+        if return_value_flag == 1:
+            retval = sum(abs(retval))
+        elif return_value_flag == 2:
+            denom = npts_tot - len(plist_fit) - 1.
+            if denom != 0:
+                nu_fac = 1. / denom
+            else:
+                nu_fac = 1.
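+            # nu_fac ~ 1/(N - m - 1): a reduced-chi-square style scaling of
+            # the squared-residual sum (zero denominator guarded above)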
+            retval = nu_fac * sum(retval**2)
+    return retval
+
+
+def parse_reflection_tables(cfg, instr, grain_ids, refit_idx=None):
+    """
+    make spot dictionaries
+    """
+    hkls = {}
+    xyo_det = {}
+    idx_0 = {}
+    for det_key, panel in instr.detectors.items():
+        hkls[det_key] = []
+        xyo_det[det_key] = []
+        idx_0[det_key] = []
+        for ig, grain_id in enumerate(grain_ids):
+            spots_filename = os.path.join(
+                cfg.analysis_dir, os.path.join(
+                    det_key, 'spots_%05d.out' % grain_id
+                )
+            )
+
+            # load pull_spots output table
+            gtable = np.loadtxt(spots_filename, ndmin=2)
+            if len(gtable) == 0:
+                gtable = np.nan*np.ones((1, 17))
+
+            # apply conditions for accepting valid data
+            valid_reflections = gtable[:, 0] >= 0  # is indexed
+            not_saturated = gtable[:, 6] < panel.saturation_level
+            # throw away extreme etas
+            p90 = rotations.angularDifference(gtable[:, 8], cnst.piby2)
+            m90 = rotations.angularDifference(gtable[:, 8], -cnst.piby2)
+            accept_etas = np.logical_or(p90 > ext_eta_tol,
+                                        m90 > ext_eta_tol)
+            logger.info(f"panel '{det_key}', grain {grain_id}")
+            logger.info(f"{sum(valid_reflections)} of {len(gtable)} "
+                        "reflections are indexed")
+            logger.info(f"{sum(not_saturated)} of {sum(valid_reflections)}"
+                        " valid reflections are below" +
+                        f" saturation threshold of {panel.saturation_level}")
+            logger.info(f"{sum(accept_etas)} of {len(gtable)}"
+                        " reflections are more than" +
+                        f" {np.degrees(ext_eta_tol)} degrees from the rotation axis")
+
+            # valid reflections index
+            if refit_idx is None:
+                idx = np.logical_and(
+                    valid_reflections,
+                    np.logical_and(not_saturated, accept_etas)
+                )
+                idx_0[det_key].append(idx)
+            else:
+                idx = refit_idx[det_key][ig]
+                idx_0[det_key].append(idx)
+                logger.info(f"input reflections specify {sum(idx)} of "
+                            f"{len(gtable)} total valid reflections")
+
+            hkls[det_key].append(gtable[idx, 2:5])
+            meas_omes = gtable[idx, 12].reshape(sum(idx), 1)
+            xyo_det[det_key].append(np.hstack([gtable[idx, -2:], meas_omes]))
+    return hkls, xyo_det, idx_0
diff --git a/hexrd/hedm/grainmap/nfutil.py b/hexrd/hedm/grainmap/nfutil.py
index a59b8c8c7..8030622d8 100644
--- a/hexrd/hedm/grainmap/nfutil.py
+++ b/hexrd/hedm/grainmap/nfutil.py
@@ -1754,7 +1754,7 @@ def output_grain_map(data_location,data_stems,output_stem,vol_spacing,top_down=T
 # args = parse_args()
 
 # if len(args.inst_profile) > 0:
-#     from hexrd.utils import profiler
+#     from hexrd.core.utils import profiler
 
 #     logging.debug("Instrumenting functions")
 #     profiler.instrument_all(args.inst_profile)
diff --git a/hexrd/hedm/indexer.py b/hexrd/hedm/indexer.py
index 7033e4754..1c9419a3f 100644
--- a/hexrd/hedm/indexer.py
+++ b/hexrd/hedm/indexer.py
@@ -80,7 +80,7 @@ def paintGrid(
     quats : (4, N) ndarray
         hstacked array of trial orientations in the form of unit quaternions.
     etaOmeMaps : object
-        an spherical map object of type `hexrd.instrument.GenerateEtaOmeMaps`.
+        a spherical map object of type `hexrd.hedm.instrument.GenerateEtaOmeMaps`.
     threshold : float, optional
         threshold value on the etaOmeMaps.
bMat : (3, 3) ndarray, optional diff --git a/hexrd/hedm/instrument/detector.py b/hexrd/hedm/instrument/detector.py index 76aeb9e26..757d5f44d 100644 --- a/hexrd/hedm/instrument/detector.py +++ b/hexrd/hedm/instrument/detector.py @@ -5,7 +5,6 @@ from hexrd.core.instrument.constants import COATING_DEFAULT, FILTER_DEFAULTS, PHOSPHOR_DEFAULT from hexrd.hedm.instrument.physics_package import AbstractPhysicsPackage -from hexrd.core.instrument.physics_package import AbstractPhysicsPackage import numpy as np import numba @@ -16,13 +15,7 @@ from hexrd.core.rotations import mapAngle from hexrd.hedm.material import crystallography -from hexrd.laue.material import crystallography -from hexrd.powder.material import crystallography -from hexrd.core.material import crystallography from hexrd.hedm.material.crystallography import PlaneData -from hexrd.laue.material.crystallography import PlaneData -from hexrd.powder.material.crystallography import PlaneData -from hexrd.core.material.crystallography import PlaneData from hexrd.core.transforms.xfcapi import xy_to_gvec, gvec_to_xy, make_beam_rmat, make_rmat_of_expmap, oscill_angles_of_hkls, angles_to_dvec diff --git a/hexrd/hedm/instrument/hedm_instrument.py b/hexrd/hedm/instrument/hedm_instrument.py index fe6bd619a..48237211b 100644 --- a/hexrd/hedm/instrument/hedm_instrument.py +++ b/hexrd/hedm/instrument/hedm_instrument.py @@ -62,24 +62,23 @@ from hexrd.core.transforms.xfcapi import angles_to_gvec, gvec_to_xy, make_sample_rmat, make_rmat_of_expmap, unit_vector from hexrd.hedm import xrdutil from hexrd.hedm.material.crystallography import PlaneData -from hexrd.laue.material.crystallography import PlaneData -from hexrd.powder.material.crystallography import PlaneData -from hexrd.core.material.crystallography import PlaneData from hexrd.core import constants as ct -from hexrd.core.rotations import mapAngle +from hexrd.core.rotations import angleAxisOfRotMat, RotMatEuler, mapAngle from hexrd.core import distortion as distortion_pkg from hexrd.core.utils.concurrent import distribute_tasks from hexrd.core.utils.hdf5 import unwrap_dict_to_h5, unwrap_h5_to_dict from hexrd.core.utils.yaml import NumpyToNativeDumper from hexrd.core.valunits import valWUnit +# TODO: Resolve extra-workflow-dependency from hexrd.powder.wppf import LeBail -from .cylindrical_detector import CylindricalDetector -from .detector import beam_energy_DFLT, Detector, max_workers_DFLT -from .planar_detector import PlanarDetector +from ...core.instrument.cylindrical_detector import CylindricalDetector +from ...core.instrument.detector import beam_energy_DFLT, max_workers_DFLT +from ...core.instrument.planar_detector import PlanarDetector from skimage.draw import polygon from skimage.util import random_noise +# TODO: Resolve extra-workflow-dependency from hexrd.powder.wppf import wppfsupport try: @@ -263,7 +262,7 @@ def _parse_imgser_dict(imgser_dict, det_key, roi=None): Returns ------- - ims : hexrd.imageseries + ims : hexrd.core.imageseries The desired imageseries object. """ @@ -398,7 +397,7 @@ def max_tth(instr): Parameters ---------- - instr : hexrd.instrument.HEDMInstrument instance + instr : hexrd.hedm.instrument.HEDMInstrument instance the instrument class to evalutate. Returns @@ -1136,9 +1135,9 @@ def extract_line_positions(self, plane_data, imgser_dict, to have any effect. The default is False. tth_distortion : special class, optional for special case of pinhole camera distortions. 
See - hexrd.xrdutil.phutil.SampleLayerDistortion (only type supported) + hexrd.hedm.xrdutil.phutil.SampleLayerDistortion (only type supported) fitting_kwargs : dict, optional - kwargs passed to hexrd.fitting.utils.fit_ring if do_fitting is True + kwargs passed to hexrd.core.fitting.utils.fit_ring if do_fitting is True Raises ------ diff --git a/hexrd/hedm/instrument/physics_package.py b/hexrd/hedm/instrument/physics_package.py index 0a50258b9..7b77f5e10 100644 --- a/hexrd/hedm/instrument/physics_package.py +++ b/hexrd/hedm/instrument/physics_package.py @@ -11,7 +11,7 @@ class AbstractPhysicsPackage: Parameters ---------- - sample_material : str or hexrd.material.Material + sample_material : str or hexrd.core.material.Material either the formula or a hexrd material instance sample_density : float density of sample material in g/cc @@ -19,7 +19,7 @@ class AbstractPhysicsPackage: sample thickness in microns sample_geometry : FIXME FIXME - pinhole_material : str or hexrd.material.Material, optional + pinhole_material : str or hexrd.core.material.Material, optional either the formula or a hexrd material instance pinhole_density : float density of pinhole material in g/cc @@ -27,7 +27,7 @@ class AbstractPhysicsPackage: pinhole thickness in microns pinhole_diameter : float pinhole diameter in microns - window_material : str or hexrd.material.Material + window_material : str or hexrd.core.material.Material either the formula or a hexrd material instance window_density : float density of window material in g/cc diff --git a/hexrd/hedm/material/unitcell.py b/hexrd/hedm/material/unitcell.py index c6debaa10..3b7c1e594 100644 --- a/hexrd/hedm/material/unitcell.py +++ b/hexrd/hedm/material/unitcell.py @@ -752,7 +752,7 @@ def InitializeInterpTable(self): f_anomalous_data = [] self.pe_cs = {} data = ( - importlib.resources.files(hexrd.resources) + importlib.resources.files(hexrd.core.resources) .joinpath('Anomalous.h5') .open('rb') ) diff --git a/hexrd/hedm/xrdutil/utils.py b/hexrd/hedm/xrdutil/utils.py index 4c31e29ea..ae21ed756 100644 --- a/hexrd/hedm/xrdutil/utils.py +++ b/hexrd/hedm/xrdutil/utils.py @@ -29,9 +29,6 @@ from typing import Optional, Union, Any, Generator from hexrd.hedm.material.crystallography import PlaneData -from hexrd.laue.material.crystallography import PlaneData -from hexrd.powder.material.crystallography import PlaneData -from hexrd.core.material.crystallography import PlaneData from hexrd.core.distortion.distortionabc import DistortionABC import numba @@ -44,9 +41,6 @@ from hexrd.core import gridutil as gutil from hexrd.hedm.material.crystallography import processWavelength, PlaneData -from hexrd.laue.material.crystallography import processWavelength, PlaneData -from hexrd.powder.material.crystallography import processWavelength, PlaneData -from hexrd.core.material.crystallography import processWavelength, PlaneData from hexrd.core.transforms import xfcapi from hexrd.core.valunits import valWUnit @@ -56,7 +50,7 @@ from hexrd.core.deprecation import deprecated -simlp = 'hexrd.instrument.hedm_instrument.HEDMInstrument.simulate_laue_pattern' +simlp = 'hexrd.hedm.instrument.hedm_instrument.HEDMInstrument.simulate_laue_pattern' # ============================================================================= # PARAMETERS diff --git a/hexrd/laue/fitting/calibration/laue.py b/hexrd/laue/fitting/calibration/laue.py index 03224acf3..972b25d26 100644 --- a/hexrd/laue/fitting/calibration/laue.py +++ b/hexrd/laue/fitting/calibration/laue.py @@ -8,13 +8,18 @@ from skimage import filters from 
skimage.feature import blob_log +# TODO: Resolve extra-workflow-dependency from hexrd.hedm import xrdutil from hexrd.core.constants import fwhm_to_sigma from hexrd.core.instrument import switch_xray_source +from hexrd.core.rotations import angleAxisOfRotMat, RotMatEuler from hexrd.core.transforms import xfcapi +from hexrd.core.utils.hkl import hkl_to_str, str_to_hkl -from .abstract_grain import AbstractGrainCalibrator -from .lmfit_param_handling import DEFAULT_EULER_CONVENTION +# TODO: Resolve extra-workflow-dependency +from ....core.fitting.calibration.calibrator import Calibrator +from ....core.fitting.calibration.abstract_grain import AbstractGrainCalibrator +from ....core.fitting.calibration.lmfit_param_handling import create_grain_params, DEFAULT_EULER_CONVENTION, rename_to_avoid_collision class LaueCalibrator(AbstractGrainCalibrator): diff --git a/hexrd/laue/instrument/detector.py b/hexrd/laue/instrument/detector.py index 76aeb9e26..adf6ef82f 100644 --- a/hexrd/laue/instrument/detector.py +++ b/hexrd/laue/instrument/detector.py @@ -4,7 +4,6 @@ from typing import Optional from hexrd.core.instrument.constants import COATING_DEFAULT, FILTER_DEFAULTS, PHOSPHOR_DEFAULT -from hexrd.hedm.instrument.physics_package import AbstractPhysicsPackage from hexrd.core.instrument.physics_package import AbstractPhysicsPackage import numpy as np import numba @@ -15,14 +14,8 @@ from hexrd.hedm import xrdutil from hexrd.core.rotations import mapAngle -from hexrd.hedm.material import crystallography from hexrd.laue.material import crystallography -from hexrd.powder.material import crystallography -from hexrd.core.material import crystallography -from hexrd.hedm.material.crystallography import PlaneData from hexrd.laue.material.crystallography import PlaneData -from hexrd.powder.material.crystallography import PlaneData -from hexrd.core.material.crystallography import PlaneData from hexrd.core.transforms.xfcapi import xy_to_gvec, gvec_to_xy, make_beam_rmat, make_rmat_of_expmap, oscill_angles_of_hkls, angles_to_dvec diff --git a/hexrd/laue/instrument/hedm_instrument.py b/hexrd/laue/instrument/hedm_instrument.py index fe6bd619a..5eb4dda47 100644 --- a/hexrd/laue/instrument/hedm_instrument.py +++ b/hexrd/laue/instrument/hedm_instrument.py @@ -60,26 +60,26 @@ from hexrd.core.gridutil import make_tolerance_grid from hexrd.core import matrixutil as mutil from hexrd.core.transforms.xfcapi import angles_to_gvec, gvec_to_xy, make_sample_rmat, make_rmat_of_expmap, unit_vector +# TODO: Resolve extra-workflow dependency from hexrd.hedm import xrdutil -from hexrd.hedm.material.crystallography import PlaneData from hexrd.laue.material.crystallography import PlaneData -from hexrd.powder.material.crystallography import PlaneData -from hexrd.core.material.crystallography import PlaneData from hexrd.core import constants as ct -from hexrd.core.rotations import mapAngle +from hexrd.core.rotations import angleAxisOfRotMat, RotMatEuler, mapAngle from hexrd.core import distortion as distortion_pkg from hexrd.core.utils.concurrent import distribute_tasks from hexrd.core.utils.hdf5 import unwrap_dict_to_h5, unwrap_h5_to_dict from hexrd.core.utils.yaml import NumpyToNativeDumper from hexrd.core.valunits import valWUnit +# TODO: Resolve extra-workflow-dependency from hexrd.powder.wppf import LeBail -from .cylindrical_detector import CylindricalDetector -from .detector import beam_energy_DFLT, Detector, max_workers_DFLT -from .planar_detector import PlanarDetector +from ...core.instrument.cylindrical_detector import 
CylindricalDetector +from ...core.instrument.detector import beam_energy_DFLT, max_workers_DFLT +from ...core.instrument.planar_detector import PlanarDetector from skimage.draw import polygon from skimage.util import random_noise +# TODO: Resolve extra-workflow-dependency from hexrd.powder.wppf import wppfsupport try: @@ -263,7 +263,7 @@ def _parse_imgser_dict(imgser_dict, det_key, roi=None): Returns ------- - ims : hexrd.imageseries + ims : hexrd.core.imageseries The desired imageseries object. """ @@ -398,7 +398,7 @@ def max_tth(instr): Parameters ---------- - instr : hexrd.instrument.HEDMInstrument instance + instr : hexrd.hedm.instrument.HEDMInstrument instance the instrument class to evalutate. Returns @@ -1136,9 +1136,9 @@ def extract_line_positions(self, plane_data, imgser_dict, to have any effect. The default is False. tth_distortion : special class, optional for special case of pinhole camera distortions. See - hexrd.xrdutil.phutil.SampleLayerDistortion (only type supported) + hexrd.hedm.xrdutil.phutil.SampleLayerDistortion (only type supported) fitting_kwargs : dict, optional - kwargs passed to hexrd.fitting.utils.fit_ring if do_fitting is True + kwargs passed to hexrd.core.fitting.utils.fit_ring if do_fitting is True Raises ------ diff --git a/hexrd/laue/material/crystallography.py b/hexrd/laue/material/crystallography.py index 48eb857e8..482c625d9 100644 --- a/hexrd/laue/material/crystallography.py +++ b/hexrd/laue/material/crystallography.py @@ -34,7 +34,6 @@ import numpy as np -from hexrd.hedm.material.unitcell import unitcell from hexrd.core.material.unitcell import unitcell from hexrd.core.deprecation import deprecated from hexrd.core import constants diff --git a/hexrd/laue/xrdutil/utils.py b/hexrd/laue/xrdutil/utils.py index 4c31e29ea..fbc929901 100644 --- a/hexrd/laue/xrdutil/utils.py +++ b/hexrd/laue/xrdutil/utils.py @@ -28,10 +28,7 @@ from typing import Optional, Union, Any, Generator -from hexrd.hedm.material.crystallography import PlaneData from hexrd.laue.material.crystallography import PlaneData -from hexrd.powder.material.crystallography import PlaneData -from hexrd.core.material.crystallography import PlaneData from hexrd.core.distortion.distortionabc import DistortionABC import numba @@ -43,10 +40,7 @@ from hexrd.core import rotations as rot from hexrd.core import gridutil as gutil -from hexrd.hedm.material.crystallography import processWavelength, PlaneData from hexrd.laue.material.crystallography import processWavelength, PlaneData -from hexrd.powder.material.crystallography import processWavelength, PlaneData -from hexrd.core.material.crystallography import processWavelength, PlaneData from hexrd.core.transforms import xfcapi from hexrd.core.valunits import valWUnit @@ -56,7 +50,7 @@ from hexrd.core.deprecation import deprecated -simlp = 'hexrd.instrument.hedm_instrument.HEDMInstrument.simulate_laue_pattern' +simlp = 'hexrd.hedm.instrument.hedm_instrument.HEDMInstrument.simulate_laue_pattern' # ============================================================================= # PARAMETERS diff --git a/hexrd/powder/fitting/calibration/instrument.py b/hexrd/powder/fitting/calibration/instrument.py index 7ca9ecd1b..790eee365 100644 --- a/hexrd/powder/fitting/calibration/instrument.py +++ b/hexrd/powder/fitting/calibration/instrument.py @@ -4,8 +4,8 @@ import lmfit import numpy as np -from .lmfit_param_handling import add_engineering_constraints, create_instr_params, DEFAULT_EULER_CONVENTION, update_instrument_from_params, validate_params_list -from 
.relative_constraints import create_relative_constraints, RelativeConstraints, RelativeConstraintsType +from ....core.fitting.calibration.lmfit_param_handling import add_engineering_constraints, create_instr_params, DEFAULT_EULER_CONVENTION, update_instrument_from_params, validate_params_list +from ....core.fitting.calibration.relative_constraints import create_relative_constraints, RelativeConstraints, RelativeConstraintsType logger = logging.getLogger() logger.setLevel('INFO') diff --git a/hexrd/powder/fitting/calibration/powder.py b/hexrd/powder/fitting/calibration/powder.py index dc25f32f3..6820433db 100644 --- a/hexrd/powder/fitting/calibration/powder.py +++ b/hexrd/powder/fitting/calibration/powder.py @@ -7,8 +7,8 @@ from hexrd.core.instrument import calc_angles_from_beam_vec, switch_xray_source from hexrd.core.utils.hkl import hkl_to_str, str_to_hkl -from .calibrator import Calibrator -from .lmfit_param_handling import create_material_params, update_material_from_params +from ....core.fitting.calibration.calibrator import Calibrator +from ....core.fitting.calibration.lmfit_param_handling import create_material_params, update_material_from_params nfields_powder_data = 8 diff --git a/hexrd/powder/fitting/calibration/structureless.py b/hexrd/powder/fitting/calibration/structureless.py index 5316a1fd3..6c5724c0c 100644 --- a/hexrd/powder/fitting/calibration/structureless.py +++ b/hexrd/powder/fitting/calibration/structureless.py @@ -6,8 +6,8 @@ from hexrd.core.instrument import switch_xray_source -from .lmfit_param_handling import add_engineering_constraints, create_instr_params, create_tth_parameters, DEFAULT_EULER_CONVENTION, tth_parameter_prefixes, update_instrument_from_params -from .relative_constraints import create_relative_constraints, RelativeConstraints, RelativeConstraintsType +from ....core.fitting.calibration.lmfit_param_handling import add_engineering_constraints, create_instr_params, create_tth_parameters, DEFAULT_EULER_CONVENTION, tth_parameter_prefixes, update_instrument_from_params +from ....core.fitting.calibration.relative_constraints import create_relative_constraints, RelativeConstraints, RelativeConstraintsType class StructurelessCalibrator: diff --git a/hexrd/powder/instrument/detector.py b/hexrd/powder/instrument/detector.py index 76aeb9e26..0ebdd9c81 100644 --- a/hexrd/powder/instrument/detector.py +++ b/hexrd/powder/instrument/detector.py @@ -4,7 +4,6 @@ from typing import Optional from hexrd.core.instrument.constants import COATING_DEFAULT, FILTER_DEFAULTS, PHOSPHOR_DEFAULT -from hexrd.hedm.instrument.physics_package import AbstractPhysicsPackage from hexrd.core.instrument.physics_package import AbstractPhysicsPackage import numpy as np import numba @@ -15,14 +14,8 @@ from hexrd.hedm import xrdutil from hexrd.core.rotations import mapAngle -from hexrd.hedm.material import crystallography -from hexrd.laue.material import crystallography from hexrd.powder.material import crystallography -from hexrd.core.material import crystallography -from hexrd.hedm.material.crystallography import PlaneData -from hexrd.laue.material.crystallography import PlaneData from hexrd.powder.material.crystallography import PlaneData -from hexrd.core.material.crystallography import PlaneData from hexrd.core.transforms.xfcapi import xy_to_gvec, gvec_to_xy, make_beam_rmat, make_rmat_of_expmap, oscill_angles_of_hkls, angles_to_dvec diff --git a/hexrd/powder/instrument/hedm_instrument.py b/hexrd/powder/instrument/hedm_instrument.py index fe6bd619a..149fb0adc 100644 --- 
a/hexrd/powder/instrument/hedm_instrument.py +++ b/hexrd/powder/instrument/hedm_instrument.py @@ -60,13 +60,11 @@ from hexrd.core.gridutil import make_tolerance_grid from hexrd.core import matrixutil as mutil from hexrd.core.transforms.xfcapi import angles_to_gvec, gvec_to_xy, make_sample_rmat, make_rmat_of_expmap, unit_vector +# TODO: Resolve extra-workflow dependency from hexrd.hedm import xrdutil -from hexrd.hedm.material.crystallography import PlaneData -from hexrd.laue.material.crystallography import PlaneData from hexrd.powder.material.crystallography import PlaneData -from hexrd.core.material.crystallography import PlaneData from hexrd.core import constants as ct -from hexrd.core.rotations import mapAngle +from hexrd.core.rotations import angleAxisOfRotMat, RotMatEuler, mapAngle from hexrd.core import distortion as distortion_pkg from hexrd.core.utils.concurrent import distribute_tasks from hexrd.core.utils.hdf5 import unwrap_dict_to_h5, unwrap_h5_to_dict @@ -74,9 +72,9 @@ from hexrd.core.valunits import valWUnit from hexrd.powder.wppf import LeBail -from .cylindrical_detector import CylindricalDetector -from .detector import beam_energy_DFLT, Detector, max_workers_DFLT -from .planar_detector import PlanarDetector +from ...core.instrument.cylindrical_detector import CylindricalDetector +from ...core.instrument.detector import beam_energy_DFLT, max_workers_DFLT +from ...core.instrument.planar_detector import PlanarDetector from skimage.draw import polygon from skimage.util import random_noise @@ -263,7 +261,7 @@ def _parse_imgser_dict(imgser_dict, det_key, roi=None): Returns ------- - ims : hexrd.imageseries + ims : hexrd.core.imageseries The desired imageseries object. """ @@ -398,7 +396,7 @@ def max_tth(instr): Parameters ---------- - instr : hexrd.instrument.HEDMInstrument instance + instr : hexrd.hedm.instrument.HEDMInstrument instance the instrument class to evalutate. Returns @@ -1136,9 +1134,9 @@ def extract_line_positions(self, plane_data, imgser_dict, to have any effect. The default is False. tth_distortion : special class, optional for special case of pinhole camera distortions. 
See - hexrd.xrdutil.phutil.SampleLayerDistortion (only type supported) + hexrd.hedm.xrdutil.phutil.SampleLayerDistortion (only type supported) fitting_kwargs : dict, optional - kwargs passed to hexrd.fitting.utils.fit_ring if do_fitting is True + kwargs passed to hexrd.core.fitting.utils.fit_ring if do_fitting is True Raises ------ diff --git a/hexrd/powder/material/crystallography.py b/hexrd/powder/material/crystallography.py index 48eb857e8..482c625d9 100644 --- a/hexrd/powder/material/crystallography.py +++ b/hexrd/powder/material/crystallography.py @@ -34,7 +34,6 @@ import numpy as np -from hexrd.hedm.material.unitcell import unitcell from hexrd.core.material.unitcell import unitcell from hexrd.core.deprecation import deprecated from hexrd.core import constants diff --git a/hexrd/powder/wppf/phase.py b/hexrd/powder/wppf/phase.py index 5ea0d819d..ed1c93840 100644 --- a/hexrd/powder/wppf/phase.py +++ b/hexrd/powder/wppf/phase.py @@ -4,7 +4,6 @@ from hexrd.core import constants from hexrd.core.material import symmetry, symbols from hexrd.core.material import Material -from hexrd.hedm.material.unitcell import _rqpDict from hexrd.core.material.unitcell import _rqpDict from hexrd.powder.wppf import wppfsupport from hexrd.powder.wppf.xtal import _calc_dspacing, _get_tth, _calcxrsf, _calc_extinction_factor, _calc_absorption_factor @@ -1275,7 +1274,7 @@ def CalcPositions(self): def InitializeInterpTable(self): f_anomalous_data = [] - data = importlib.resources.open_binary(hexrd.resources, 'Anomalous.h5') + data = importlib.resources.open_binary(hexrd.core.resources, 'Anomalous.h5') with h5py.File(data, 'r') as fid: for i in range(0, self.atom_ntype): diff --git a/hexrd/powder/wppf/texture.py b/hexrd/powder/wppf/texture.py index e9d3ae6ff..88d5f8061 100644 --- a/hexrd/powder/wppf/texture.py +++ b/hexrd/powder/wppf/texture.py @@ -17,8 +17,8 @@ # HEXRD imports import hexrd.core.resources # FIXME: unused imports @saransh13? -# from hexrd.transforms.xfcapi import angles_to_gvec -# from hexrd.wppf import phase +# from hexrd.core.transforms.xfcapi import angles_to_gvec +# from hexrd.powder.wppf import phase """ =============================================================================== @@ -39,7 +39,7 @@ =============================================================================== """ -# FIXME: these are available in hexrd.constants @saransh13 +# FIXME: these are available in hexrd.core.constants @saransh13 I3 = np.eye(3) Xl = np.ascontiguousarray(I3[:, 0].reshape(3, 1)) # X in the lab frame @@ -61,7 +61,7 @@ class is initialized just based on the symmetry. 
 the main functions
 
     def __init__(self, symmetry):
-        data = importlib.resources.open_binary(hexrd.resources, "surface_harmonics.h5")
+        data = importlib.resources.open_binary(hexrd.core.resources, "surface_harmonics.h5")
 
         with h5py.File(data, 'r') as fid:
             gname = f"{symmetry}"
diff --git a/hexrd/powder/wppf/wppfsupport.py b/hexrd/powder/wppf/wppfsupport.py
index e21693045..6320c3208 100644
--- a/hexrd/powder/wppf/wppfsupport.py
+++ b/hexrd/powder/wppf/wppfsupport.py
@@ -37,9 +37,8 @@
 from lmfit import Parameters as Parameters_lmfit
 from hexrd.powder.wppf.phase import Phases_LeBail, Phases_Rietveld
 from hexrd.core.material import Material
-from hexrd.hedm.material.unitcell import _rqpDict
 from hexrd.core.material.unitcell import _rqpDict
-import hexrd
+import hexrd.core
 import numpy as np
 from hexrd.core import constants
 import warnings
@@ -793,7 +792,7 @@ def _add_detector_geometry(params, instr):
     detector as a parameter to the LeBail class such that those
     can be refined as well
     """
-    if isinstance(instr, hexrd.instrument.HEDMInstrument):
+    if isinstance(instr, hexrd.core.instrument.HEDMInstrument):
         for key,det in instr.detectors.items():
             tvec = det.tvec
             tilt = det.tilt
diff --git a/setup.py b/setup.py
index e80b3c5f4..23a4788c4 100644
--- a/setup.py
+++ b/setup.py
@@ -17,7 +17,7 @@
     'fabio>=0.11',
     'fast-histogram',
     'h5py<3.12',  # Currently, h5py 3.12 on Windows fails to import.
-    # We can remove this version pin when that is fixed.
+                  # We can remove this version pin when that is fixed.
     'hdf5plugin',
     'lmfit',
     'matplotlib',
@@ -49,9 +49,10 @@
 else:
     compiler_flags = []
 
+
 # Extension for convolution from astropy
 def get_convolution_extensions():
-    c_convolve_pkgdir = Path('hexrd') / 'convolution'
+    c_convolve_pkgdir = Path('hexrd') / 'core/convolution'
 
     src_files = [str(c_convolve_pkgdir / 'src/convolve.c')]
 
@@ -59,15 +60,16 @@
     # Add '-Rpass-missed=.*' to ``extra_compile_args`` when compiling with
     # clang to report missed optimizations
     _convolve_ext = Extension(
-        name='hexrd.convolution._convolve',
+        name='hexrd.core.convolution._convolve',
         sources=src_files,
         extra_compile_args=extra_compile_args,
         include_dirs=[numpy.get_include()],
-        language='c'
+        language='c',
    )
 
     return [_convolve_ext]
 
+
 def get_include_path(library_name):
     env_var_hint = os.getenv(f"{library_name.upper()}_INCLUDE_DIR")
     if env_var_hint is not None and os.path.exists(env_var_hint):
@@ -100,6 +102,7 @@ def get_include_path(library_name):
     # It should exist now
     return full_path
 
+
 def get_pybind11_include_path():
     # If we can import pybind11, use that include path
     try:
@@ -112,8 +115,9 @@
     # Otherwise, we will download the source and include that
     return get_include_path('pybind11')
 
+
 def get_cpp_extensions():
-    cpp_transform_pkgdir = Path('hexrd') / 'transforms/cpp_sublibrary'
+    cpp_transform_pkgdir = Path('hexrd') / 'core/transforms/cpp_sublibrary'
 
     extra_compile_args = [
         '-O3',
@@ -142,7 +146,7 @@
     )
 
     inverse_distortion_ext = Extension(
-        name='hexrd.extensions.inverse_distortion',
+        name='hexrd.core.extensions.inverse_distortion',
         sources=[str(cpp_transform_pkgdir / 'src/inverse_distortion.cpp')],
         extra_compile_args=extra_compile_args,
         include_dirs=include_dirs,
@@ -151,12 +155,13 @@
     return [transforms_ext, inverse_distortion_ext]
 
+
 def get_old_xfcapi_extension_modules():
     # for transforms
     srclist = ['transforms_CAPI.c', 'transforms_CFUNC.c']
-    srclist = [os.path.join('hexrd/transforms', f) for f in srclist]
+    srclist = 
[os.path.join('hexrd/core/transforms', f) for f in srclist] transforms_mod = Extension( - 'hexrd.extensions._transforms_CAPI', + 'hexrd.core.extensions._transforms_CAPI', sources=srclist, include_dirs=[np_include_dir], extra_compile_args=compiler_flags, @@ -164,16 +169,18 @@ def get_old_xfcapi_extension_modules(): return [transforms_mod] + def get_new_xfcapi_extension_modules(): transforms_mod = Extension( - 'hexrd.extensions._new_transforms_capi', - sources=['hexrd/transforms/new_capi/module.c'], + 'hexrd.core.extensions._new_transforms_capi', + sources=['hexrd/core/transforms/new_capi/module.c'], include_dirs=[np_include_dir], extra_compile_args=compiler_flags, ) return [transforms_mod] + def get_extension_modules(): # Flatten the lists return [ @@ -187,10 +194,11 @@ def get_extension_modules(): for item in sublist ] + ext_modules = get_extension_modules() # use entry_points, not scripts: -entry_points = {'console_scripts': ["hexrd = hexrd.cli.main:main"]} +entry_points = {'console_scripts': ["hexrd = hexrd.hedm.cli.main:main"]} setup( name='hexrd', diff --git a/tests/calibration/test_2xrs_calibration.py b/tests/calibration/test_2xrs_calibration.py index b9e4ba852..4c5f66cd0 100644 --- a/tests/calibration/test_2xrs_calibration.py +++ b/tests/calibration/test_2xrs_calibration.py @@ -4,13 +4,10 @@ import pytest -from hexrd.material.material import load_materials_hdf5 -from hexrd.instrument.hedm_instrument import HEDMInstrument +from hexrd.core.material.material import load_materials_hdf5 +from hexrd.hedm.instrument.hedm_instrument import HEDMInstrument -from hexrd.fitting.calibration import ( - InstrumentCalibrator, - PowderCalibrator, -) +from hexrd.core.fitting.calibration import InstrumentCalibrator, PowderCalibrator @pytest.fixture diff --git a/tests/calibration/test_calibration.py b/tests/calibration/test_calibration.py index cae0cf81b..b204fb639 100644 --- a/tests/calibration/test_calibration.py +++ b/tests/calibration/test_calibration.py @@ -6,14 +6,10 @@ import pytest -from hexrd.material.material import load_materials_hdf5 -from hexrd.instrument.hedm_instrument import HEDMInstrument - -from hexrd.fitting.calibration import ( - InstrumentCalibrator, - LaueCalibrator, - PowderCalibrator, -) +from hexrd.core.material.material import load_materials_hdf5 +from hexrd.hedm.instrument.hedm_instrument import HEDMInstrument + +from hexrd.core.fitting.calibration import InstrumentCalibrator, LaueCalibrator, PowderCalibrator @pytest.fixture diff --git a/tests/calibration/test_instrument_relative_constraints.py b/tests/calibration/test_instrument_relative_constraints.py index 208551100..c3a7beb58 100644 --- a/tests/calibration/test_instrument_relative_constraints.py +++ b/tests/calibration/test_instrument_relative_constraints.py @@ -5,19 +5,14 @@ import numpy as np import pytest -from hexrd import imageseries -from hexrd.fitting.calibration import ( - InstrumentCalibrator, - PowderCalibrator, -) -from hexrd.fitting.calibration.relative_constraints import ( - RelativeConstraintsType, -) -from hexrd.imageseries.process import ProcessedImageSeries -from hexrd.instrument import HEDMInstrument -from hexrd.material import load_materials_hdf5, Material -from hexrd.rotations import rotMatOfExpMap -from hexrd.utils.hdf5 import unwrap_h5_to_dict +from hexrd.core import imageseries +from hexrd.core.fitting.calibration import InstrumentCalibrator, PowderCalibrator +from hexrd.core.fitting.calibration.relative_constraints import RelativeConstraintsType +from hexrd.core.imageseries.process import 
ProcessedImageSeries +from hexrd.core.instrument import HEDMInstrument +from hexrd.core.material import load_materials_hdf5, Material +from hexrd.core.rotations import rotMatOfExpMap +from hexrd.core.utils.hdf5 import unwrap_h5_to_dict @pytest.fixture diff --git a/tests/common.py b/tests/common.py index a56163ca9..0da121375 100644 --- a/tests/common.py +++ b/tests/common.py @@ -2,7 +2,7 @@ import numpy as np -import hexrd.constants as ct +import hexrd.core.constants as ct def convert_axis_angle_to_rmat(axis, angle): diff --git a/tests/config/common.py b/tests/config/common.py index de8b25878..cd1644fa4 100644 --- a/tests/config/common.py +++ b/tests/config/common.py @@ -4,7 +4,7 @@ import logging import unittest -from hexrd import config +from hexrd.hedm import config test_data = { diff --git a/tests/config/test_instrument.py b/tests/config/test_instrument.py index 6bd4d0072..ace5661d0 100644 --- a/tests/config/test_instrument.py +++ b/tests/config/test_instrument.py @@ -1,10 +1,9 @@ import os -import hexrd.instrument +import hexrd.core.instrument from .common import TestConfig, test_data try: - from hexrd.config.instrument import (Instrument, Beam, OscillationStage, - Detector) + from hexrd.hedm.config.instrument import Instrument, Beam, OscillationStage, Detector except: pass @@ -68,19 +67,19 @@ def get_reference_data(cls): def test_beam(self): icfg = Instrument(self.cfgs[1]) b = icfg.beam - self.assertTrue(isinstance(b, hexrd.instrument.beam.Beam), "Failed to produce a Beam instance") + self.assertTrue(isinstance(b, hexrd.core.instrument.beam.Beam), "Failed to produce a Beam instance") def test_oscillation_stage(self): icfg = Instrument(self.cfgs[2]) ostage = icfg.oscillation_stage - self.assertTrue(isinstance(ostage, hexrd.instrument.oscillation_stage.OscillationStage), + self.assertTrue(isinstance(ostage, hexrd.core.instrument.oscillation_stage.OscillationStage), "Failed to produce an OscillationStage instance") def test_detector(self): cfg = self.cfgs[3] icfg = Detector(cfg, 'GE1') det = icfg.detector(Beam(cfg).beam) - self.assertTrue(isinstance(det, hexrd.instrument.PlanarDetector), + self.assertTrue(isinstance(det, hexrd.core.instrument.PlanarDetector), "Failed to produce an Detector instance") def test_detector_dict(self): @@ -90,7 +89,7 @@ def test_detector_dict(self): "Failed to produce an Detector Dictionary instance") for k in dd: d = dd[k] - self.assertTrue(isinstance(d, hexrd.instrument.PlanarDetector), + self.assertTrue(isinstance(d, hexrd.core.instrument.PlanarDetector), "Detector dictionary values are not detector instances") diff --git a/tests/config/test_material.py b/tests/config/test_material.py index b3d6e77bd..dfdfbc36c 100644 --- a/tests/config/test_material.py +++ b/tests/config/test_material.py @@ -1,6 +1,6 @@ from .common import TestConfig, test_data -from hexrd.config.material import TTHW_DFLT, DMIN_DFLT -from hexrd.config.utils import get_exclusion_parameters +from hexrd.core.config.material import TTHW_DFLT, DMIN_DFLT +from hexrd.hedm.config.utils import get_exclusion_parameters reference_data = \ diff --git a/tests/config/test_root.py b/tests/config/test_root.py index db829dde1..b68ad2096 100644 --- a/tests/config/test_root.py +++ b/tests/config/test_root.py @@ -4,7 +4,7 @@ from unittest import skipIf from .common import TestConfig, test_data -from hexrd import config +from hexrd.hedm import config reference_data = \ diff --git a/tests/find_orientations_testing.py b/tests/find_orientations_testing.py index d8a0907f3..7d7fa43b4 100755 --- 
a/tests/find_orientations_testing.py +++ b/tests/find_orientations_testing.py @@ -11,8 +11,10 @@ import numpy as np -from hexrd.material.crystallography import PlaneData -from hexrd.rotations import misorientation + +# TODO: Check that this test is still sensible after PlaneData change. +from hexrd.core.material.crystallography import PlaneData +from hexrd.core.rotations import misorientation # ============================================================================= diff --git a/tests/fit_grains_check.py b/tests/fit_grains_check.py index 0a89d62ed..e37930706 100755 --- a/tests/fit_grains_check.py +++ b/tests/fit_grains_check.py @@ -9,10 +9,10 @@ import numpy as np -from hexrd import config -from hexrd.fitgrains import fit_grains -from hexrd import matrixutil as mutil -from hexrd import rotations as rot +from hexrd.hedm import config +from hexrd.hedm.fitgrains import fit_grains +from hexrd.core import matrixutil as mutil +from hexrd.core import rotations as rot def compare_grain_fits(fit_grain_params, ref_grain_params, diff --git a/tests/imageseries/common.py b/tests/imageseries/common.py index 1cd81d5a7..540f8f614 100644 --- a/tests/imageseries/common.py +++ b/tests/imageseries/common.py @@ -1,7 +1,7 @@ import numpy as np import unittest -from hexrd import imageseries +from hexrd.core import imageseries _NFXY = (3, 7, 5) diff --git a/tests/imageseries/test_formats.py b/tests/imageseries/test_formats.py index 6d03c6453..bece458fc 100644 --- a/tests/imageseries/test_formats.py +++ b/tests/imageseries/test_formats.py @@ -7,7 +7,7 @@ from .common import ImageSeriesTest from .common import make_array_ims, compare, compare_meta -from hexrd import imageseries +from hexrd.core import imageseries class ImageSeriesFormatTest(ImageSeriesTest): diff --git a/tests/imageseries/test_omega.py b/tests/imageseries/test_omega.py index 85d54e927..51575196c 100644 --- a/tests/imageseries/test_omega.py +++ b/tests/imageseries/test_omega.py @@ -2,8 +2,8 @@ from .common import ImageSeriesTest -from hexrd import imageseries -from hexrd.imageseries.omega import OmegaSeriesError, OmegaImageSeries +from hexrd.core import imageseries +from hexrd.core.imageseries.omega import OmegaSeriesError, OmegaImageSeries class TestOmegaSeries(ImageSeriesTest): diff --git a/tests/imageseries/test_pickleable.py b/tests/imageseries/test_pickleable.py index ebaa05e28..f8f400b3c 100644 --- a/tests/imageseries/test_pickleable.py +++ b/tests/imageseries/test_pickleable.py @@ -4,9 +4,9 @@ import unittest from .common import make_array_ims -from hexrd.imageseries.load.hdf5 import HDF5ImageSeriesAdapter -from hexrd.imageseries.load.framecache import FrameCacheImageSeriesAdapter -from hexrd import imageseries +from hexrd.core.imageseries.load.hdf5 import HDF5ImageSeriesAdapter +from hexrd.core.imageseries.load.framecache import FrameCacheImageSeriesAdapter +from hexrd.core import imageseries class ImageSeriesPickleableTest(unittest.TestCase): diff --git a/tests/imageseries/test_process.py b/tests/imageseries/test_process.py index b80cfca4a..322117fce 100644 --- a/tests/imageseries/test_process.py +++ b/tests/imageseries/test_process.py @@ -2,8 +2,8 @@ from .common import ImageSeriesTest, make_array_ims, make_omega_meta, compare -from hexrd import imageseries -from hexrd.imageseries import process, ImageSeries +from hexrd.core import imageseries +from hexrd.core.imageseries import process, ImageSeries class TestImageSeriesProcess(ImageSeriesTest): diff --git a/tests/imageseries/test_stats.py b/tests/imageseries/test_stats.py index 
e22254bf0..76e3c0c8a 100644 --- a/tests/imageseries/test_stats.py +++ b/tests/imageseries/test_stats.py @@ -1,7 +1,7 @@ import numpy as np -from hexrd import imageseries -from hexrd.imageseries import stats +from hexrd.core import imageseries +from hexrd.core.imageseries import stats from .common import ImageSeriesTest, make_array_ims diff --git a/tests/matrix_util/test_norms.py b/tests/matrix_util/test_norms.py index 2a33498f4..46e4bd21c 100644 --- a/tests/matrix_util/test_norms.py +++ b/tests/matrix_util/test_norms.py @@ -1,5 +1,5 @@ import numpy as np -from hexrd import matrixutil as mu +from hexrd.core import matrixutil as mu def test_column_norm(n_dim): diff --git a/tests/matrix_util/test_strain_stress_reps.py b/tests/matrix_util/test_strain_stress_reps.py index 0755d2065..ace5f0fde 100644 --- a/tests/matrix_util/test_strain_stress_reps.py +++ b/tests/matrix_util/test_strain_stress_reps.py @@ -4,7 +4,7 @@ """ import numpy as np -from hexrd import matrixutil as mu +from hexrd.core import matrixutil as mu def test_stress_repr(): diff --git a/tests/matrix_util/test_vector_and_matrix_math.py b/tests/matrix_util/test_vector_and_matrix_math.py index ae2ff7ae3..a6a86a247 100644 --- a/tests/matrix_util/test_vector_and_matrix_math.py +++ b/tests/matrix_util/test_vector_and_matrix_math.py @@ -1,5 +1,5 @@ import numpy as np -from hexrd import matrixutil as mu +from hexrd.core import matrixutil as mu def test_cross(): diff --git a/tests/planedata/test_exclusion.py b/tests/planedata/test_exclusion.py index f85c0fc47..4d611c6a5 100644 --- a/tests/planedata/test_exclusion.py +++ b/tests/planedata/test_exclusion.py @@ -1,6 +1,7 @@ import numpy as np -from hexrd.material.crystallography import PlaneData +# TODO: Check that this test is still sensible after PlaneData change. +from hexrd.core.material.crystallography import PlaneData def test_exclusion(): diff --git a/tests/planedata/test_init.py b/tests/planedata/test_init.py index 531424475..673f5a651 100644 --- a/tests/planedata/test_init.py +++ b/tests/planedata/test_init.py @@ -1,7 +1,8 @@ import numpy as np import pytest -from hexrd.material.crystallography import PlaneData +# TODO: Check that this test is still sensible after PlaneData change. +from hexrd.core.material.crystallography import PlaneData def test_init_with_data_and_from_copy(): diff --git a/tests/planedata/test_misc.py b/tests/planedata/test_misc.py index 78a977039..335cfa024 100644 --- a/tests/planedata/test_misc.py +++ b/tests/planedata/test_misc.py @@ -1,9 +1,10 @@ import os import numpy as np -from hexrd.material.crystallography import PlaneData -from hexrd.rotations import quatOfLaueGroup -from hexrd.valunits import valWUnit +# TODO: Check that this test is still sensible after PlaneData change. 
+from hexrd.core.material.crystallography import PlaneData +from hexrd.core.rotations import quatOfLaueGroup +from hexrd.core.valunits import valWUnit def test_misc(): diff --git a/tests/planedata/test_with_data.py b/tests/planedata/test_with_data.py index 0b4e9d539..82a8d2ccf 100644 --- a/tests/planedata/test_with_data.py +++ b/tests/planedata/test_with_data.py @@ -3,9 +3,9 @@ import pytest -from hexrd.material.crystallography import ltypeOfLaueGroup -from hexrd.material.material import Material -from hexrd.rotations import rotMatOfQuat +from hexrd.laue.material.crystallography import ltypeOfLaueGroup +from hexrd.core.material.material import Material +from hexrd.core.rotations import rotMatOfQuat @pytest.fixture diff --git a/tests/rotations/test_eulers.py b/tests/rotations/test_eulers.py index 48fd957b4..988ba1ed0 100644 --- a/tests/rotations/test_eulers.py +++ b/tests/rotations/test_eulers.py @@ -2,7 +2,7 @@ import numpy as np -from hexrd import rotations +from hexrd.core import rotations def random_rot_mat_euler(): diff --git a/tests/rotations/test_quat_math.py b/tests/rotations/test_quat_math.py index b5bb9e3f7..98751445f 100644 --- a/tests/rotations/test_quat_math.py +++ b/tests/rotations/test_quat_math.py @@ -2,7 +2,7 @@ import numpy as np -from hexrd import rotations +from hexrd.core import rotations def allclose(a, b): diff --git a/tests/rotations/test_utilities.py b/tests/rotations/test_utilities.py index c128be3af..ecb57500c 100644 --- a/tests/rotations/test_utilities.py +++ b/tests/rotations/test_utilities.py @@ -1,4 +1,4 @@ -from hexrd import rotations +from hexrd.core import rotations import numpy as np diff --git a/tests/test_absorption_correction.py b/tests/test_absorption_correction.py index 0c98b5cc0..c8fa5c840 100644 --- a/tests/test_absorption_correction.py +++ b/tests/test_absorption_correction.py @@ -2,8 +2,8 @@ import pytest import yaml -from hexrd.instrument.hedm_instrument import HEDMInstrument -from hexrd.instrument.physics_package import HEDPhysicsPackage +from hexrd.hedm.instrument.hedm_instrument import HEDMInstrument +from hexrd.hedm.instrument.physics_package import HEDPhysicsPackage @pytest.fixture diff --git a/tests/test_concurrent.py b/tests/test_concurrent.py index 8a9a7508b..de69bacb2 100644 --- a/tests/test_concurrent.py +++ b/tests/test_concurrent.py @@ -1,4 +1,4 @@ -from hexrd.utils.concurrent import distribute_tasks +from hexrd.core.utils.concurrent import distribute_tasks def test_distribute_tasks(): diff --git a/tests/test_find_orientations.py b/tests/test_find_orientations.py index c6208f012..e74ac2c60 100644 --- a/tests/test_find_orientations.py +++ b/tests/test_find_orientations.py @@ -8,9 +8,11 @@ import coloredlogs -from hexrd.findorientations import find_orientations, generate_eta_ome_maps -from hexrd import config -from hexrd.material.crystallography import PlaneData +from hexrd.hedm.findorientations import find_orientations, generate_eta_ome_maps +from hexrd.hedm import config + +# TODO: Check that this test is still sensible after PlaneData change. 
+from hexrd.hedm.material.crystallography import PlaneData import find_orientations_testing as test_utils diff --git a/tests/test_fit-grains.py b/tests/test_fit-grains.py index 307a35354..f7f2a67a0 100644 --- a/tests/test_fit-grains.py +++ b/tests/test_fit-grains.py @@ -8,8 +8,8 @@ import coloredlogs -from hexrd import config -from hexrd.fitgrains import fit_grains +from hexrd.hedm import config +from hexrd.hedm.fitgrains import fit_grains from fit_grains_check import compare_grain_fits diff --git a/tests/test_graindata.py b/tests/test_graindata.py index 80df56041..ad582c2e1 100644 --- a/tests/test_graindata.py +++ b/tests/test_graindata.py @@ -4,7 +4,7 @@ import pytest import numpy as np -from hexrd.cli.fit_grains import GrainData +from hexrd.hedm.cli.fit_grains import GrainData @pytest.fixture diff --git a/tests/test_inverse_distortion.py b/tests/test_inverse_distortion.py index 9029d1bfa..522c638f8 100644 --- a/tests/test_inverse_distortion.py +++ b/tests/test_inverse_distortion.py @@ -1,7 +1,7 @@ import json import numpy as np -from hexrd.extensions import inverse_distortion +from hexrd.core.extensions import inverse_distortion RHO_MAX = 204.8 params = [ diff --git a/tests/test_material.py b/tests/test_material.py index f10a36629..a84179f18 100644 --- a/tests/test_material.py +++ b/tests/test_material.py @@ -2,7 +2,7 @@ import h5py import pytest -from hexrd.material import Material, load_materials_hdf5 +from hexrd.core.material import Material, load_materials_hdf5 # Tolerance for comparing floats FLOAT_TOL = 1.e-8 diff --git a/tests/test_matrix_utils.py b/tests/test_matrix_utils.py index 510eb0f25..a7840025a 100644 --- a/tests/test_matrix_utils.py +++ b/tests/test_matrix_utils.py @@ -1,6 +1,6 @@ import numpy as np -from hexrd import matrixutil as mutil +from hexrd.core import matrixutil as mutil def test_vec_mv_cob_matrix(): diff --git a/tests/test_memoize.py b/tests/test_memoize.py index fbd2f0a86..c67382afb 100644 --- a/tests/test_memoize.py +++ b/tests/test_memoize.py @@ -2,7 +2,7 @@ import numpy as np -from hexrd.utils.decorators import memoize +from hexrd.core.utils.decorators import memoize def test_memoize(): diff --git a/tests/test_polar_view.py b/tests/test_polar_view.py index 3203849d8..501ed00a1 100644 --- a/tests/test_polar_view.py +++ b/tests/test_polar_view.py @@ -4,10 +4,10 @@ import numpy as np import pytest -from hexrd import imageseries -from hexrd.imageseries.process import ProcessedImageSeries -from hexrd.instrument import HEDMInstrument -from hexrd.projections.polar import PolarView +from hexrd.core import imageseries +from hexrd.core.imageseries.process import ProcessedImageSeries +from hexrd.core.instrument import HEDMInstrument +from hexrd.core.projections.polar import PolarView @pytest.fixture diff --git a/tests/test_rotations.py b/tests/test_rotations.py index 1abe5f668..325cca4f0 100644 --- a/tests/test_rotations.py +++ b/tests/test_rotations.py @@ -2,8 +2,8 @@ import numpy as np import pytest -from hexrd.material import symmetry -from hexrd import rotations +from hexrd.core.material import symmetry +from hexrd.core import rotations def test_misorientations(): diff --git a/tests/test_transforms.py b/tests/test_transforms.py index 79417834a..7b4b50256 100644 --- a/tests/test_transforms.py +++ b/tests/test_transforms.py @@ -3,7 +3,7 @@ import numpy as np -from hexrd.transforms.xfcapi import gvec_to_xy +from hexrd.core.transforms.xfcapi import gvec_to_xy from common import convert_axis_angle_to_rmat diff --git a/tests/test_utils_json.py 
b/tests/test_utils_json.py index 3824c9640..068906a7a 100644 --- a/tests/test_utils_json.py +++ b/tests/test_utils_json.py @@ -2,7 +2,7 @@ import numpy as np -from hexrd.utils.json import NumpyDecoder, NumpyEncoder, NumpyToNativeEncoder +from hexrd.core.utils.json import NumpyDecoder, NumpyEncoder, NumpyToNativeEncoder def test_decode_encode(): diff --git a/tests/test_utils_yaml.py b/tests/test_utils_yaml.py index cc8ff1c1f..2b9a385a8 100644 --- a/tests/test_utils_yaml.py +++ b/tests/test_utils_yaml.py @@ -1,7 +1,7 @@ import numpy as np import yaml -from hexrd.utils.yaml import NumpyToNativeDumper +from hexrd.core.utils.yaml import NumpyToNativeDumper def test_numpy_to_native(): diff --git a/tests/transforms/common.py b/tests/transforms/common.py index f50d7fc08..5f10d79af 100644 --- a/tests/transforms/common.py +++ b/tests/transforms/common.py @@ -2,8 +2,8 @@ import numpy as np -import hexrd.constants as ct -from hexrd.transforms.new_capi.xf_new_capi import unit_vector +import hexrd.core.constants as ct +from hexrd.core.transforms.new_capi.xf_new_capi import unit_vector def convert_axis_angle_to_rmat(axis, angle): diff --git a/tests/transforms/test_angles_to_dvec_from_file.py b/tests/transforms/test_angles_to_dvec_from_file.py index e716dd90b..1d9a12338 100644 --- a/tests/transforms/test_angles_to_dvec_from_file.py +++ b/tests/transforms/test_angles_to_dvec_from_file.py @@ -4,7 +4,7 @@ from __future__ import absolute_import import numpy as np -from hexrd.transforms.new_capi.xf_new_capi import angles_to_dvec +from hexrd.core.transforms.new_capi.xf_new_capi import angles_to_dvec # from common import random_rotation_matrix, random_unit_vectors diff --git a/tests/transforms/test_angles_to_gvec_from_file.py b/tests/transforms/test_angles_to_gvec_from_file.py index 5056e90dd..a0c4eee5f 100644 --- a/tests/transforms/test_angles_to_gvec_from_file.py +++ b/tests/transforms/test_angles_to_gvec_from_file.py @@ -5,7 +5,7 @@ from __future__ import absolute_import import numpy as np -from hexrd.transforms.new_capi.xf_new_capi import angles_to_gvec +from hexrd.core.transforms.new_capi.xf_new_capi import angles_to_gvec # from common import random_rotation_matrix, random_unit_vectors diff --git a/tests/transforms/test_gvec_to_xy.py b/tests/transforms/test_gvec_to_xy.py index 8cfc9ff62..a681ca01d 100644 --- a/tests/transforms/test_gvec_to_xy.py +++ b/tests/transforms/test_gvec_to_xy.py @@ -12,7 +12,7 @@ from common import convert_axis_angle_to_rmat -from hexrd.transforms.new_capi.xf_new_capi import gvec_to_xy +from hexrd.core.transforms.new_capi.xf_new_capi import gvec_to_xy # gvec_to_xy intersects vectors from crystal position with the detector plane. 
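For reference, a minimal sketch of the renamed gvec_to_xy entry point under hexrd.core. The call signature matches its uses elsewhere in this patch, but the geometry values below are invented purely for illustration and are not part of the test data:

    import numpy as np
    from hexrd.core.transforms.new_capi.xf_new_capi import gvec_to_xy

    # One unit G-vector in the crystal frame, at theta = 30 deg to the
    # default beam direction [0, 0, -1]; identity orientation matrices;
    # detector plane 10 mm downstream of the lab origin (all illustrative).
    gvec_c = np.array([[0.0, np.sqrt(3.0) / 2.0, -0.5]])
    rmat_d = rmat_s = rmat_c = np.eye(3)
    tvec_d = np.array([0.0, 0.0, -10.0])
    tvec_s = tvec_c = np.zeros(3)

    xy = gvec_to_xy(gvec_c, rmat_d, rmat_s, rmat_c, tvec_d, tvec_s, tvec_c)
    # One (x, y) row in detector-plane coordinates; rows come back NaN for
    # G-vectors that cannot satisfy the diffraction condition on this panel.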
diff --git a/tests/transforms/test_gvec_to_xy_from_file.py b/tests/transforms/test_gvec_to_xy_from_file.py index 8b4dedca1..1b402bf19 100644 --- a/tests/transforms/test_gvec_to_xy_from_file.py +++ b/tests/transforms/test_gvec_to_xy_from_file.py @@ -4,7 +4,7 @@ from __future__ import absolute_import import numpy as np -from hexrd.transforms.new_capi.xf_new_capi import gvec_to_xy +from hexrd.core.transforms.new_capi.xf_new_capi import gvec_to_xy # from common import random_rotation_matrix, random_unit_vectors diff --git a/tests/transforms/test_make_beam_rmat_from_file.py b/tests/transforms/test_make_beam_rmat_from_file.py index 07b4589b4..663c5505b 100644 --- a/tests/transforms/test_make_beam_rmat_from_file.py +++ b/tests/transforms/test_make_beam_rmat_from_file.py @@ -5,7 +5,7 @@ from __future__ import absolute_import import numpy as np -from hexrd.transforms.new_capi.xf_new_capi import make_beam_rmat +from hexrd.core.transforms.new_capi.xf_new_capi import make_beam_rmat # from common import random_unit_vectors diff --git a/tests/transforms/test_make_detector_rmat_from_file.py b/tests/transforms/test_make_detector_rmat_from_file.py index a0074dc7e..cc418e988 100644 --- a/tests/transforms/test_make_detector_rmat_from_file.py +++ b/tests/transforms/test_make_detector_rmat_from_file.py @@ -5,7 +5,7 @@ from __future__ import absolute_import import numpy as np -from hexrd.transforms.new_capi.xf_new_capi import make_detector_rmat +from hexrd.core.transforms.new_capi.xf_new_capi import make_detector_rmat def test_make_detector_rmat_from_file(test_data_dir): diff --git a/tests/transforms/test_make_rmat_of_expmap_from_file.py b/tests/transforms/test_make_rmat_of_expmap_from_file.py index a89a2355b..9a2b749d9 100644 --- a/tests/transforms/test_make_rmat_of_expmap_from_file.py +++ b/tests/transforms/test_make_rmat_of_expmap_from_file.py @@ -5,7 +5,7 @@ from __future__ import absolute_import import numpy as np -from hexrd.transforms.new_capi.xf_new_capi import make_rmat_of_expmap +from hexrd.core.transforms.new_capi.xf_new_capi import make_rmat_of_expmap def test_make_rmat_of_expmap_from_file(test_data_dir): diff --git a/tests/transforms/test_make_sample_rmat_from_file.py b/tests/transforms/test_make_sample_rmat_from_file.py index bd7c765af..abad45188 100644 --- a/tests/transforms/test_make_sample_rmat_from_file.py +++ b/tests/transforms/test_make_sample_rmat_from_file.py @@ -4,7 +4,7 @@ from __future__ import absolute_import import numpy as np -from hexrd.transforms.new_capi.xf_new_capi import make_sample_rmat +from hexrd.core.transforms.new_capi.xf_new_capi import make_sample_rmat def test_make_sample_rmat_from_file(test_data_dir): diff --git a/tests/transforms/test_quat_distance_from_file.py b/tests/transforms/test_quat_distance_from_file.py index f34b284d3..808f3dca9 100644 --- a/tests/transforms/test_quat_distance_from_file.py +++ b/tests/transforms/test_quat_distance_from_file.py @@ -4,9 +4,9 @@ from __future__ import absolute_import import numpy as np -from hexrd.transforms.new_capi.xf_new_capi import quat_distance +from hexrd.core.transforms.new_capi.xf_new_capi import quat_distance # from common import random_unit_vectors -# from hexrd.rotations import quatOfLaueGroup +# from hexrd.core.rotations import quatOfLaueGroup def test_quat_distance_from_file(test_data_dir): diff --git a/tests/transforms/test_rotate_vecs_about_axis.py b/tests/transforms/test_rotate_vecs_about_axis.py index 9bb3177f7..94200285f 100644 --- a/tests/transforms/test_rotate_vecs_about_axis.py +++ 
b/tests/transforms/test_rotate_vecs_about_axis.py @@ -1,4 +1,4 @@ -from hexrd.transforms.new_capi.xf_new_capi import rotate_vecs_about_axis +from hexrd.core.transforms.new_capi.xf_new_capi import rotate_vecs_about_axis import numpy as np diff --git a/tests/transforms/test_unit_vector.py b/tests/transforms/test_unit_vector.py index 440f5885d..bd92d9591 100644 --- a/tests/transforms/test_unit_vector.py +++ b/tests/transforms/test_unit_vector.py @@ -1,4 +1,4 @@ -from hexrd.transforms.xfcapi import unit_vector +from hexrd.core.transforms.xfcapi import unit_vector import numpy as np diff --git a/tests/transforms/test_validate_angle_ranges_from_file.py b/tests/transforms/test_validate_angle_ranges_from_file.py index 289fb9318..49d42d51d 100644 --- a/tests/transforms/test_validate_angle_ranges_from_file.py +++ b/tests/transforms/test_validate_angle_ranges_from_file.py @@ -4,7 +4,7 @@ from __future__ import absolute_import import numpy as np -from hexrd.transforms.new_capi.xf_new_capi import validate_angle_ranges +from hexrd.core.transforms.new_capi.xf_new_capi import validate_angle_ranges def test_validate_angle_ranges_from_file(test_data_dir): diff --git a/tests/transforms/test_xy_to_gvec.py b/tests/transforms/test_xy_to_gvec.py index f831f7e27..54cddf1cf 100644 --- a/tests/transforms/test_xy_to_gvec.py +++ b/tests/transforms/test_xy_to_gvec.py @@ -9,7 +9,7 @@ from collections import namedtuple import pytest import numpy as np -from hexrd.transforms.new_capi.xf_new_capi import xy_to_gvec +from hexrd.core.transforms.new_capi.xf_new_capi import xy_to_gvec Experiment = namedtuple( diff --git a/tests/transforms/test_xy_to_gvec_from_file.py b/tests/transforms/test_xy_to_gvec_from_file.py index 299b638ff..0e366e6ec 100644 --- a/tests/transforms/test_xy_to_gvec_from_file.py +++ b/tests/transforms/test_xy_to_gvec_from_file.py @@ -4,7 +4,7 @@ from __future__ import absolute_import import numpy as np -from hexrd.transforms.xfcapi import xy_to_gvec +from hexrd.core.transforms.xfcapi import xy_to_gvec # from common import random_rotation_matrix, random_unit_vectors diff --git a/tests/unitcell/test_vec_math.py b/tests/unitcell/test_vec_math.py index 794ad4da5..f2a633a17 100644 --- a/tests/unitcell/test_vec_math.py +++ b/tests/unitcell/test_vec_math.py @@ -1,5 +1,5 @@ from pytest import fixture -from hexrd.material import Material, unitcell +from hexrd.core.material import Material, unitcell import numpy as np From 7abf6de04482c37a4aa5418ee8c2abd164bfc08c Mon Sep 17 00:00:00 2001 From: Kevin Welsh Date: Mon, 20 Jan 2025 15:26:50 -0500 Subject: [PATCH 05/19] Add project test dependencies so they can be installed and run easily --- pyproject.toml | 21 +++++++++++++++++++++ 1 file changed, 21 insertions(+) diff --git a/pyproject.toml b/pyproject.toml index 6b2e323bd..4de0e1784 100644 --- a/pyproject.toml +++ b/pyproject.toml @@ -1,6 +1,27 @@ +[project] +name = "hexrd" +dynamic = [ + "version", + "authors", + "description", + "license", + "dependencies", + "scripts", + "classifiers", + "readme", + "requires-python" +] + + [build-system] requires = ["setuptools", "wheel", "numpy<2.0", "setuptools_scm[toml]", "pybind11>=2.11.0"] [tool.black] line-length = 79 skip-string-normalization = true + +[project.optional-dependencies] +test = [ + "pytest", + "coloredlogs", +] \ No newline at end of file From a2b226f3d2ed2417e140f7f22dd4148b7cabffe4 Mon Sep 17 00:00:00 2001 From: Kevin Welsh Date: Wed, 12 Feb 2025 15:13:59 -0500 Subject: [PATCH 06/19] These files need to be moved to resolve circular dependencies --- 
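For context, the failure mode this move avoids is the usual Python circular-import pattern, sketched below with illustrative module names (not the actual offenders):

    # pkg/alpha.py
    from pkg.beta import helper       # alpha needs beta at import time

    def alpha_api():
        return helper()

    # pkg/beta.py
    from pkg.alpha import alpha_api   # beta needs alpha at import time;
                                      # whichever module loads first sees a
                                      # partially initialized partner and
                                      # raises ImportError

    def helper():
        return 42

Relocating the shared pieces into a lower-level package (here, hexrd.core) lets both sides import "downward" instead of at each other.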
hexrd/core/fitting/calibration/instrument.py | 216 +++ hexrd/core/fitting/calibration/laue.py | 576 ++++++++++++++++++ hexrd/core/fitting/calibration/multigrain.py | 393 ++++++++++++ hexrd/core/fitting/calibration/powder.py | 380 ++++++++++++ .../core/fitting/calibration/structureless.py | 283 +++++++++ 5 files changed, 1848 insertions(+) create mode 100644 hexrd/core/fitting/calibration/instrument.py create mode 100644 hexrd/core/fitting/calibration/laue.py create mode 100644 hexrd/core/fitting/calibration/multigrain.py create mode 100644 hexrd/core/fitting/calibration/powder.py create mode 100644 hexrd/core/fitting/calibration/structureless.py diff --git a/hexrd/core/fitting/calibration/instrument.py b/hexrd/core/fitting/calibration/instrument.py new file mode 100644 index 000000000..6d4b5f111 --- /dev/null +++ b/hexrd/core/fitting/calibration/instrument.py @@ -0,0 +1,216 @@ +import logging +from typing import Optional + +import lmfit +import numpy as np + +from .lmfit_param_handling import add_engineering_constraints, create_instr_params, DEFAULT_EULER_CONVENTION, update_instrument_from_params, validate_params_list +from ....core.fitting.calibration.relative_constraints import create_relative_constraints, RelativeConstraints, RelativeConstraintsType + +logger = logging.getLogger() +logger.setLevel('INFO') + + +def _normalized_ssqr(resd): + return np.sum(resd * resd) / len(resd) + + +class InstrumentCalibrator: + def __init__(self, *args, engineering_constraints=None, + set_refinements_from_instrument_flags=True, + euler_convention=DEFAULT_EULER_CONVENTION, + relative_constraints_type=RelativeConstraintsType.none): + """ + Model for instrument calibration as a function of the attached calibrators' parameters + + Parameters + ---------- + *args : TYPE + DESCRIPTION. + + Returns + ------- + None. + + Notes + ----- + Flags are set on calibrators + """ + assert len(args) > 0, "must have at least one calibrator" + self.calibrators = args + for calib in self.calibrators: + assert calib.instr is self.instr, \ + "all calibrators must refer to the same instrument" + self._engineering_constraints = engineering_constraints + self._relative_constraints = create_relative_constraints( + relative_constraints_type, self.instr) + self.euler_convention = euler_convention + + self.params = self.make_lmfit_params() + if set_refinements_from_instrument_flags: + self.instr.set_calibration_flags_to_lmfit_params(self.params) + + self.fitter = lmfit.Minimizer(self.minimizer_function, + self.params, + nan_policy='omit') + + def make_lmfit_params(self): + params = create_instr_params( + self.instr, + euler_convention=self.euler_convention, + relative_constraints=self.relative_constraints, + ) + + for calibrator in self.calibrators: + # We pass the params to the calibrator so it can ensure it + # creates unique parameter names. The calibrator will keep + # track of the names it chooses itself.
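+            # (Each entry is an add_many()-style lmfit tuple; see the
+            # rename_to_avoid_collision() calls in the calibrators added
+            # later in this patch.)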
+ params += calibrator.create_lmfit_params(params) + + # Perform validation on the params before proceeding + validate_params_list(params) + + params_dict = lmfit.Parameters() + params_dict.add_many(*params) + + add_engineering_constraints(params_dict, self.engineering_constraints) + return params_dict + + def update_all_from_params(self, params): + # Update instrument and material from the lmfit parameters + update_instrument_from_params( + self.instr, + params, + self.euler_convention, + self.relative_constraints, + ) + + for calibrator in self.calibrators: + calibrator.update_from_lmfit_params(params) + + @property + def instr(self): + return self.calibrators[0].instr + + @property + def tth_distortion(self): + return self.calibrators[0].tth_distortion + + @tth_distortion.setter + def tth_distortion(self, v): + for calibrator in self.calibrators: + calibrator.tth_distortion = v + + def minimizer_function(self, params): + self.update_all_from_params(params) + return self.residual() + + def residual(self): + return np.hstack([x.residual() for x in self.calibrators]) + + def minimize(self, method='least_squares', odict=None): + if odict is None: + odict = {} + + if method == 'least_squares': + # Set defaults to the odict, if they are missing + odict = { + "ftol": 1e-8, + "xtol": 1e-8, + "gtol": 1e-8, + "verbose": 2, + "max_nfev": 1000, + "x_scale": "jac", + "method": "trf", + "jac": "3-point", + **odict, + } + + result = self.fitter.least_squares(self.params, **odict) + else: + result = self.fitter.scalar_minimize(method=method, + params=self.params, + max_nfev=50000, + **odict) + + return result + + @property + def engineering_constraints(self): + return self._engineering_constraints + + @engineering_constraints.setter + def engineering_constraints(self, v): + if v == self._engineering_constraints: + return + + valid_settings = [ + None, + 'None', + 'TARDIS', + ] + if v not in valid_settings: + valid_str = ', '.join(map(valid_settings, str)) + msg = ( + f'Invalid engineering constraint "{v}". Valid constraints ' + f'are: "{valid_str}"' + ) + raise Exception(msg) + + self._engineering_constraints = v + self.params = self.make_lmfit_params() + + @property + def relative_constraints_type(self): + return self._relative_constraints.type + + @relative_constraints_type.setter + def relative_constraints_type(self, v: Optional[RelativeConstraintsType]): + v = v if v is not None else RelativeConstraintsType.none + + current = getattr(self, '_relative_constraints', None) + if current is None or current.type != v: + self.relative_constraints = create_relative_constraints( + v, self.instr) + + @property + def relative_constraints(self) -> RelativeConstraints: + return self._relative_constraints + + @relative_constraints.setter + def relative_constraints(self, v: RelativeConstraints): + self._relative_constraints = v + self.params = self.make_lmfit_params() + + def reset_lmfit_params(self): + self.params = self.make_lmfit_params() + + def reset_relative_constraint_params(self): + # Set them back to zero. + self.relative_constraints.reset() + + def run_calibration(self, odict): + resd0 = self.residual() + nrm_ssr_0 = _normalized_ssqr(resd0) + + result = self.minimize(odict=odict) + + resd1 = self.residual() + + nrm_ssr_1 = _normalized_ssqr(resd1) + + delta_r = 1. 
- nrm_ssr_1/nrm_ssr_0 + + if delta_r > 0: + logger.info('OPTIMIZATION SUCCESSFUL') + else: + logger.warning('no improvement in residual') + + logger.info('normalized initial ssr: %.4e' % nrm_ssr_0) + logger.info('normalized final ssr: %.4e' % nrm_ssr_1) + logger.info('change in residual: %.4e' % delta_r) + + self.params = result.params + self.update_all_from_params(self.params) + + return result diff --git a/hexrd/core/fitting/calibration/laue.py b/hexrd/core/fitting/calibration/laue.py new file mode 100644 index 000000000..d775119ba --- /dev/null +++ b/hexrd/core/fitting/calibration/laue.py @@ -0,0 +1,576 @@ +import copy +from typing import Optional + +import numpy as np +from scipy import ndimage +from scipy.integrate import nquad +from scipy.optimize import leastsq +from skimage import filters +from skimage.feature import blob_log + +# TODO: Resolve extra-workflow-dependency +from hexrd.hedm import xrdutil +from hexrd.core.constants import fwhm_to_sigma +from hexrd.core.instrument import switch_xray_source +from hexrd.core.rotations import angleAxisOfRotMat, RotMatEuler +from hexrd.core.transforms import xfcapi +from hexrd.core.utils.hkl import hkl_to_str, str_to_hkl + +# TODO: Resolve extra-workflow-dependency +from ....powder.fitting.calibration.calibrator import Calibrator +from ....powder.fitting.calibration.lmfit_param_handling import create_grain_params, DEFAULT_EULER_CONVENTION, rename_to_avoid_collision + + +class LaueCalibrator(Calibrator): + type = 'laue' + + def __init__(self, instr, material, grain_params, default_refinements=None, + min_energy=5, max_energy=25, tth_distortion=None, + calibration_picks=None, + euler_convention=DEFAULT_EULER_CONVENTION, + xray_source: Optional[str] = None): + self.instr = instr + self.material = material + self.grain_params = grain_params + self.default_refinements = default_refinements + self.energy_cutoffs = [min_energy, max_energy] + self.euler_convention = euler_convention + self.xray_source = xray_source + + self.data_dict = None + if calibration_picks is not None: + self.calibration_picks = calibration_picks + + self._tth_distortion = tth_distortion + self._update_tth_distortion_panels() + + self.param_names = [] + + @property + def tth_distortion(self): + return self._tth_distortion + + @tth_distortion.setter + def tth_distortion(self, v): + self._tth_distortion = v + self._update_tth_distortion_panels() + + def _update_tth_distortion_panels(self): + # Make sure the panels in the tth distortion are the same + # as those on the instrument, so their beam vectors get modified + # accordingly.
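+        # (Deep-copied below so the caller's tth_distortion mapping is not
+        # mutated when each entry is re-pointed at this instrument's panels.)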
+ if self._tth_distortion is None: + return + + self._tth_distortion = copy.deepcopy(self._tth_distortion) + for det_key, obj in self._tth_distortion.items(): + obj.panel = self.instr.detectors[det_key] + + def create_lmfit_params(self, current_params): + params = create_grain_params( + self.material.name, + self.grain_params_euler, + self.default_refinements, + ) + + # Ensure there are no name collisions + params, _ = rename_to_avoid_collision(params, current_params) + self.param_names = [x[0] for x in params] + + return params + + def update_from_lmfit_params(self, params_dict): + grain_params = [] + for i, name in enumerate(self.param_names): + grain_params.append(params_dict[name].value) + + self.grain_params_euler = np.asarray(grain_params) + + @property + def grain_params_euler(self): + # Grain parameters with orientation set using Euler angle convention + if self.euler_convention is None: + return self.grain_params + + grain_params = self.grain_params.copy() + rme = RotMatEuler(np.zeros(3), **self.euler_convention) + rme.rmat = xfcapi.make_rmat_of_expmap(grain_params[:3]) + grain_params[:3] = np.degrees(rme.angles) + return grain_params + + @grain_params_euler.setter + def grain_params_euler(self, v): + # Grain parameters with orientation set using Euler angle convention + grain_params = v.copy() + if self.euler_convention is not None: + rme = RotMatEuler(np.zeros(3,), **self.euler_convention) + rme.angles = np.radians(grain_params[:3]) + phi, n = angleAxisOfRotMat(rme.rmat) + grain_params[:3] = phi * n.flatten() + + self.grain_params = grain_params + + @property + def plane_data(self): + return self.material.planeData + + @property + def bmatx(self): + return self.plane_data.latVecOps['B'] + + @property + def energy_cutoffs(self): + return self._energy_cutoffs + + @energy_cutoffs.setter + def energy_cutoffs(self, x): + assert len(x) == 2, "input must have 2 elements" + assert x[1] > x[0], "first element must be less than the second" + self._energy_cutoffs = x + self.plane_data.wavelength = self.energy_cutoffs[-1] + self.plane_data.exclusions = None + + @property + def calibration_picks(self): + # Convert this from our internal data dict format + picks = {} + for det_key in self.instr.detectors: + picks[det_key] = {} + + # find valid reflections and recast hkls to int + xys = self.data_dict['pick_xys'][det_key] + hkls = self.data_dict['hkls'][det_key] + + for hkl, xy in zip(hkls, xys): + picks[det_key][hkl_to_str(hkl)] = xy + + return picks + + @calibration_picks.setter + def calibration_picks(self, v): + # Convert this to our internal data dict format + data_dict = { + 'pick_xys': {}, + 'hkls': {}, + } + for det_key, det_picks in v.items(): + data_dict['hkls'][det_key] = [str_to_hkl(x) for x in det_picks] + data_dict['pick_xys'][det_key] = list(det_picks.values()) + + self.data_dict = data_dict + + def autopick_points(self, raw_img_dict, tth_tol=5., eta_tol=5., + npdiv=2, do_smoothing=True, smoothing_sigma=2, + use_blob_detection=True, blob_threshold=0.25, + fit_peaks=True, min_peak_int=1., fit_tth_tol=0.1): + """ + Parameters + ---------- + raw_img_dict : TYPE + DESCRIPTION. + tth_tol : TYPE, optional + DESCRIPTION. The default is 5.. + eta_tol : TYPE, optional + DESCRIPTION. The default is 5.. + npdiv : TYPE, optional + DESCRIPTION. The default is 2. + do_smoothing : TYPE, optional + DESCRIPTION. The default is True. + smoothing_sigma : TYPE, optional + DESCRIPTION. The default is 2. + use_blob_detection : TYPE, optional + DESCRIPTION. The default is True.
+ blob_threshold : TYPE, optional + DESCRIPTION. The default is 0.25. + fit_peaks : TYPE, optional + DESCRIPTION. The default is True. + + Returns + ------- + None. + + """ + + with switch_xray_source(self.instr, self.xray_source): + return self._autopick_points( + raw_img_dict=raw_img_dict, + tth_tol=tth_tol, + eta_tol=eta_tol, + npdiv=npdiv, + do_smoothing=do_smoothing, + smoothing_sigma=smoothing_sigma, + use_blob_detection=use_blob_detection, + blob_threshold=blob_threshold, + fit_peaks=fit_peaks, + min_peak_int=min_peak_int, + fit_tth_tol=fit_tth_tol, + ) + + def _autopick_points(self, raw_img_dict, tth_tol=5., eta_tol=5., + npdiv=2, do_smoothing=True, smoothing_sigma=2, + use_blob_detection=True, blob_threshold=0.25, + fit_peaks=True, min_peak_int=1., fit_tth_tol=0.1): + labelStructure = ndimage.generate_binary_structure(2, 1) + rmat_s = np.eye(3) # !!! forcing to identity + omega = 0. # !!! same ^^^ + + rmat_c = xfcapi.make_rmat_of_expmap(self.grain_params[:3]) + tvec_c = self.grain_params[3:6] + # vinv_s = self.grain_params[6:12] # !!!: patches don't take this yet + + # run simulation + # ???: could we get this from overlays? + laue_sim = self.instr.simulate_laue_pattern( + self.plane_data, + minEnergy=self.energy_cutoffs[0], + maxEnergy=self.energy_cutoffs[1], + rmat_s=None, grain_params=np.atleast_2d(self.grain_params), + ) + + # loop over detectors for results + refl_dict = dict.fromkeys(self.instr.detectors) + for det_key, det in self.instr.detectors.items(): + det_config = det.config_dict( + chi=self.instr.chi, + tvec=self.instr.tvec, + beam_vector=self.instr.beam_vector + ) + + xy_det, hkls, angles, dspacing, energy = laue_sim[det_key] + ''' + valid_xy = [] + valid_hkls = [] + valid_angs = [] + valid_energy = [] + ''' + # !!! not necessary to loop over grains since we can only handle 1 + # for gid in range(len(xy_det)): + gid = 0 + # find valid reflections + valid_refl = ~np.isnan(xy_det[gid][:, 0]) + valid_xy = xy_det[gid][valid_refl, :] + valid_hkls = hkls[gid][:, valid_refl] + valid_angs = angles[gid][valid_refl, :] + valid_energy = energy[gid][valid_refl] + + # make patches + refl_patches = xrdutil.make_reflection_patches( + det_config, + valid_angs, det.angularPixelSize(valid_xy), + rmat_c=rmat_c, tvec_c=tvec_c, + tth_tol=tth_tol, eta_tol=eta_tol, + npdiv=npdiv, quiet=True) + + reflInfoList = [] + img = raw_img_dict[det_key] + native_area = det.pixel_area + num_patches = len(valid_angs) + meas_xy = np.nan*np.ones((num_patches, 2)) + meas_angs = np.nan*np.ones((num_patches, 2)) + for iRefl, patch in enumerate(refl_patches): + # check for overrun + irow = patch[-1][0] + jcol = patch[-1][1] + if np.any([irow < 0, irow >= det.rows, + jcol < 0, jcol >= det.cols]): + continue + if not np.all( + det.clip_to_panel( + np.vstack([patch[1][0].flatten(), + patch[1][1].flatten()]).T + )[1] + ): + continue + # use nearest interpolation + spot_data = img[irow, jcol] * patch[3] * npdiv**2 / native_area + spot_data -= np.amin(spot_data) + patch_size = spot_data.shape + + sigmax = 0.25*np.min(spot_data.shape) * fwhm_to_sigma + + # optional gaussian smoothing + if do_smoothing: + spot_data = filters.gaussian(spot_data, smoothing_sigma) + + if use_blob_detection: + spot_data_scl = 2.*spot_data/np.max(spot_data) - 1. + + # Compute radii in the 3rd column. 
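+                    # skimage's blob_log returns one row per detected blob as
+                    # (row, col, sigma); for a 2-D image the blob radius is
+                    # approximately sigma * sqrt(2).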
+ blobs_log = blob_log(spot_data_scl, + min_sigma=2, + max_sigma=min(sigmax, 20), + num_sigma=10, + threshold=blob_threshold, + overlap=0.1) + numPeaks = len(blobs_log) + else: + labels, numPeaks = ndimage.label( + spot_data > np.percentile(spot_data, 99), + structure=labelStructure + ) + slabels = np.arange(1, numPeaks + 1) + tth_edges = patch[0][0][0, :] + eta_edges = patch[0][1][:, 0] + delta_tth = tth_edges[1] - tth_edges[0] + delta_eta = eta_edges[1] - eta_edges[0] + if numPeaks > 0: + peakId = iRefl + if use_blob_detection: + coms = blobs_log[:, :2] + else: + coms = np.array( + ndimage.center_of_mass( + spot_data, labels=labels, index=slabels + ) + ) + if numPeaks > 1: + # + center = np.r_[spot_data.shape]*0.5 + com_diff = coms - np.tile(center, (numPeaks, 1)) + closest_peak_idx = np.argmin( + np.sum(com_diff**2, axis=1) + ) + # + else: + closest_peak_idx = 0 + # + coms = coms[closest_peak_idx] + # + if fit_peaks: + sigm = 0.2*np.min(spot_data.shape) + if use_blob_detection: + sigm = min(blobs_log[closest_peak_idx, 2], sigm) + y0, x0 = coms.flatten() + ampl = float(spot_data[int(y0), int(x0)]) + # y0, x0 = 0.5*np.array(spot_data.shape) + # ampl = np.max(spot_data) + a_par = c_par = 0.5/float(sigm**2) + b_par = 0. + bgx = bgy = 0. + bkg = np.min(spot_data) + params = [ampl, + a_par, b_par, c_par, + x0, y0, bgx, bgy, bkg] + # + result = leastsq(gaussian_2d, params, args=(spot_data,)) + # + fit_par = result[0] + # + coms = np.array([fit_par[5], fit_par[4]]) + ''' + print("%s, %d, (%.2f, %.2f), (%d, %d)" + % (det_key, iRefl, coms[0], coms[1], + patch_size[0], patch_size[1])) + ''' + row_cen = fit_tth_tol * patch_size[0] + col_cen = fit_tth_tol * patch_size[1] + if np.any( + [coms[0] < row_cen, + coms[0] >= patch_size[0] - row_cen, + coms[1] < col_cen, + coms[1] >= patch_size[1] - col_cen] + ): + continue + if (fit_par[0] < min_peak_int): + continue + + # intensities + spot_intensity, int_err = nquad( + gaussian_2d_int, + [[0., 2.*y0], [0., 2.*x0]], + args=fit_par) + com_angs = np.hstack([ + tth_edges[0] + (0.5 + coms[1])*delta_tth, + eta_edges[0] + (0.5 + coms[0])*delta_eta + ]) + + # grab intensities + if not fit_peaks: + if use_blob_detection: + spot_intensity = 10 + max_intensity = 10 + else: + spot_intensity = np.sum( + spot_data[labels == slabels[closest_peak_idx]] + ) + max_intensity = np.max( + spot_data[labels == slabels[closest_peak_idx]] + ) + else: + max_intensity = np.max(spot_data) + # need xy coords + # !!! forcing ome = 0.
-- could be inconsistent with rmat_s + cmv = np.atleast_2d(np.hstack([com_angs, omega])) + gvec_c = xfcapi.angles_to_gvec( + cmv, + chi=self.instr.chi, + rmat_c=rmat_c, + beam_vec=self.instr.beam_vector) + new_xy = xfcapi.gvec_to_xy( + gvec_c, + det.rmat, rmat_s, rmat_c, + det.tvec, self.instr.tvec, tvec_c, + beam_vec=self.instr.beam_vector) + meas_xy[iRefl, :] = new_xy + if det.distortion is not None: + meas_xy[iRefl, :] = det.distortion.apply_inverse( + meas_xy[iRefl, :] + ) + meas_angs[iRefl, :] = com_angs + else: + peakId = -999 + # + spot_intensity = np.nan + max_intensity = np.nan + reflInfoList.append([peakId, valid_hkls[:, iRefl], + (spot_intensity, max_intensity), + valid_energy[iRefl], + valid_angs[iRefl, :], + meas_angs[iRefl, :], + meas_xy[iRefl, :]]) + reflInfo = np.array( + [tuple(i) for i in reflInfoList], + dtype=reflInfo_dtype) + refl_dict[det_key] = reflInfo + + # Convert to our data_dict format + data_dict = { + 'pick_xys': {}, + 'hkls': {}, + } + for det, det_picks in refl_dict.items(): + data_dict['pick_xys'].setdefault(det, []) + data_dict['hkls'].setdefault(det, []) + for entry in det_picks: + hkl = entry[1].astype(int).tolist() + cart = entry[6] + data_dict['hkls'][det].append(hkl) + data_dict['pick_xys'][det].append(cart) + + self.data_dict = data_dict + return data_dict + + def _evaluate(self): + data_dict = self.data_dict + + # grab reflection data from picks input + pick_hkls_dict = {} + pick_xys_dict = {} + for det_key in self.instr.detectors: + # find valid reflections and recast hkls to int + xys = np.asarray(data_dict['pick_xys'][det_key], dtype=float) + hkls = np.asarray(data_dict['hkls'][det_key], dtype=int) + + valid_idx = ~np.isnan(xys[:, 0]) + + # fill local dicts + pick_hkls_dict[det_key] = np.atleast_2d(hkls[valid_idx, :]).T + pick_xys_dict[det_key] = np.atleast_2d(xys[valid_idx, :]) + + return pick_hkls_dict, pick_xys_dict + + def residual(self): + with switch_xray_source(self.instr, self.xray_source): + return self._residual() + + def _residual(self): + # need this for laue obj + pick_hkls_dict, pick_xys_dict = self._evaluate() + + # munge energy cutoffs + energy_cutoffs = np.r_[0.5, 1.5] * np.asarray(self.energy_cutoffs) + + return sxcal_obj_func( + [self.grain_params], self.instr, pick_xys_dict, pick_hkls_dict, + self.bmatx, energy_cutoffs + ) + + def model(self): + with switch_xray_source(self.instr, self.xray_source): + return self._model() + + def _model(self): + # need this for laue obj + pick_hkls_dict, pick_xys_dict = self._evaluate() + + return sxcal_obj_func( + [self.grain_params], self.instr, pick_xys_dict, pick_hkls_dict, + self.bmatx, self.energy_cutoffs, sim_only=True + ) + + +# Objective function for Laue fitting +def sxcal_obj_func(grain_params, instr, meas_xy, hkls_idx, + bmat, energy_cutoffs, sim_only=False): + """ + Objective function for Laue-based fitting. + + + energy_cutoffs are [minEnergy, maxEnergy] where min/maxEnergy can be lists + + """ + # right now just stuck on the end and assumed + # to all be the same length... 
FIX THIS + calc_xy = {} + calc_ang = {} + for det_key, panel in instr.detectors.items(): + # Simulate Laue pattern: + # returns xy_det, hkls_in, angles, dspacing, energy + sim_results = panel.simulate_laue_pattern( + [hkls_idx[det_key], bmat], + minEnergy=energy_cutoffs[0], maxEnergy=energy_cutoffs[1], + grain_params=grain_params, + beam_vec=instr.beam_vector + ) + + calc_xy_tmp = sim_results[0][0] + + idx = ~np.isnan(calc_xy_tmp[:, 0]) + calc_xy[det_key] = calc_xy_tmp[idx, :] + + if sim_only: + # Grab angles too. We don't use them otherwise. + # FIXME: might need tth correction if there is a distortion. + calc_angs_tmp = sim_results[2][0] + calc_ang[det_key] = calc_angs_tmp[idx, :] + + # return values + if sim_only: + return {k: [calc_xy[k], calc_ang[k]] for k in calc_xy} + + meas_xy_all = np.vstack(list(meas_xy.values())) + calc_xy_all = np.vstack(list(calc_xy.values())) + + diff_vecs_xy = calc_xy_all - meas_xy_all + return diff_vecs_xy.flatten() + + +def gaussian_2d(p, data): + shape = data.shape + x, y = np.meshgrid(range(shape[1]), range(shape[0])) + func = p[0]*np.exp( + -(p[1]*(x-p[4])*(x-p[4]) + + p[2]*(x-p[4])*(y-p[5]) + + p[3]*(y-p[5])*(y-p[5])) + ) + p[6]*(x-p[4]) + p[7]*(y-p[5]) + p[8] + return func.flatten() - data.flatten() + + +def gaussian_2d_int(y, x, *p): + func = p[0]*np.exp( + -(p[1]*(x-p[4])*(x-p[4]) + + p[2]*(x-p[4])*(y-p[5]) + + p[3]*(y-p[5])*(y-p[5])) + ) + return func.flatten() + + +reflInfo_dtype = [ + ('iRefl', int), + ('hkl', (int, 3)), + ('intensity', (float, 2)), + ('energy', float), + ('predAngles', (float, 2)), + ('measAngles', (float, 2)), + ('measXY', (float, 2)), +] diff --git a/hexrd/core/fitting/calibration/multigrain.py b/hexrd/core/fitting/calibration/multigrain.py new file mode 100644 index 000000000..afa1ef198 --- /dev/null +++ b/hexrd/core/fitting/calibration/multigrain.py @@ -0,0 +1,393 @@ +import logging +import os + +import numpy as np +from scipy.optimize import leastsq, least_squares + +from hexrd.core import constants as cnst +from hexrd.core import matrixutil as mutil +from hexrd.core import rotations +from hexrd.core.transforms import xfcapi + +from .. import grains as grainutil + +logger = logging.getLogger() +logger.setLevel('INFO') + +# grains +grain_flags_DFLT = np.array( + [1, 1, 1, + 1, 0, 1, + 0, 0, 0, 0, 0, 0], + dtype=bool +) + +ext_eta_tol = np.radians(5.)
# for HEDM cal, may make this a user param + + +def calibrate_instrument_from_sx( + instr, grain_params, bmat, xyo_det, hkls_idx, + param_flags=None, grain_flags=None, + ome_period=None, + xtol=cnst.sqrt_epsf, ftol=cnst.sqrt_epsf, + factor=10., sim_only=False, use_robust_lsq=False): + """ + arguments xyo_det, hkls_idx are DICTs over panels + + """ + grain_params = np.atleast_2d(grain_params) + ngrains = len(grain_params) + pnames = generate_parameter_names(instr, grain_params) + + # reset parameter flags for instrument as specified + if param_flags is None: + param_flags = instr.calibration_flags + else: + # will throw an AssertionError if wrong length + instr.calibration_flags = param_flags + + # re-map omegas if need be + if ome_period is not None: + for det_key in instr.detectors: + for ig in range(ngrains): + xyo_det[det_key][ig][:, 2] = rotations.mapAngle( + xyo_det[det_key][ig][:, 2], + ome_period + ) + + # first grab the instrument parameters + # 7 global + # 6*num_panels for the detectors + # num_panels*ndp in case of distortion + plist_full = instr.calibration_parameters + + # now handle grains + # reset parameter flags for grains as specified + if grain_flags is None: + grain_flags = np.tile(grain_flags_DFLT, ngrains) + + plist_full = np.concatenate( + [plist_full, np.hstack(grain_params)] + ) + plf_copy = np.copy(plist_full) + + # concatenate refinement flags + refine_flags = np.hstack([param_flags, grain_flags]) + plist_fit = plist_full[refine_flags] + fit_args = (plist_full, + param_flags, grain_flags, + instr, xyo_det, hkls_idx, + bmat, ome_period) + if sim_only: + return sxcal_obj_func( + plist_fit, plist_full, + param_flags, grain_flags, + instr, xyo_det, hkls_idx, + bmat, ome_period, + sim_only=True) + else: + logger.info("Set up to refine:") + for i in np.where(refine_flags)[0]: + logger.info("\t%s = %1.7e" % (pnames[i], plist_full[i])) + + # run optimization + if use_robust_lsq: + result = least_squares( + sxcal_obj_func, plist_fit, args=fit_args, + xtol=xtol, ftol=ftol, + loss='soft_l1', method='trf' + ) + x = result.x + resd = result.fun + mesg = result.message + ierr = result.status + else: + # do least squares problem + x, cov_x, infodict, mesg, ierr = leastsq( + sxcal_obj_func, plist_fit, args=fit_args, + factor=factor, xtol=xtol, ftol=ftol, + full_output=1 + ) + resd = infodict['fvec'] + if ierr not in [1, 2, 3, 4]: + raise RuntimeError(f"solution not found: {ierr=}") + else: + logger.info(f"optimization finished successfully with {ierr=}") + logger.info(mesg) + + # ??? output message handling? + fit_params = plist_full + fit_params[refine_flags] = x + + # run simulation with optimized results + sim_final = sxcal_obj_func( + x, plist_full, + param_flags, grain_flags, + instr, xyo_det, hkls_idx, + bmat, ome_period, + sim_only=True) + + # ??? reset instrument here?
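+    # push the full fitted parameter vector back onto the instrument so its
+    # state matches the optimized solution reported below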
+ instr.update_from_parameter_list(fit_params) + + # report final + logger.info("Optimization Results:") + for i in np.where(refine_flags)[0]: + logger.info("\t%s = %1.7e --> %1.7e" + % (pnames[i], plf_copy[i], fit_params[i])) + + return fit_params, resd, sim_final + + +def generate_parameter_names(instr, grain_params): + pnames = [ + '{:>24s}'.format('beam energy'), + '{:>24s}'.format('beam azimuth'), + '{:>24s}'.format('beam polar'), + '{:>24s}'.format('chi'), + '{:>24s}'.format('tvec_s[0]'), + '{:>24s}'.format('tvec_s[1]'), + '{:>24s}'.format('tvec_s[2]'), + ] + + for det_key, panel in instr.detectors.items(): + pnames += [ + '{:>24s}'.format('%s tilt[0]' % det_key), + '{:>24s}'.format('%s tilt[1]' % det_key), + '{:>24s}'.format('%s tilt[2]' % det_key), + '{:>24s}'.format('%s tvec[0]' % det_key), + '{:>24s}'.format('%s tvec[1]' % det_key), + '{:>24s}'.format('%s tvec[2]' % det_key), + ] + # now add distortion if there + if panel.distortion is not None: + for j in range(len(panel.distortion.params)): + pnames.append( + '{:>24s}'.format('%s dparam[%d]' % (det_key, j)) + ) + + grain_params = np.atleast_2d(grain_params) + for ig, grain in enumerate(grain_params): + pnames += [ + '{:>24s}'.format('grain %d xi[0]' % ig), + '{:>24s}'.format('grain %d xi[1]' % ig), + '{:>24s}'.format('grain %d xi[2]' % ig), + '{:>24s}'.format('grain %d tvec_c[0]' % ig), + '{:>24s}'.format('grain %d tvec_c[1]' % ig), + '{:>24s}'.format('grain %d tvec_c[2]' % ig), + '{:>24s}'.format('grain %d vinv_s[0]' % ig), + '{:>24s}'.format('grain %d vinv_s[1]' % ig), + '{:>24s}'.format('grain %d vinv_s[2]' % ig), + '{:>24s}'.format('grain %d vinv_s[3]' % ig), + '{:>24s}'.format('grain %d vinv_s[4]' % ig), + '{:>24s}'.format('grain %d vinv_s[5]' % ig) + ] + + return pnames + + +def sxcal_obj_func(plist_fit, plist_full, + param_flags, grain_flags, + instr, xyo_det, hkls_idx, + bmat, ome_period, + sim_only=False, return_value_flag=None): + """ + """ + npi = len(instr.calibration_parameters) + NP_GRN = 12 + + # stack flags and force bool repr + refine_flags = np.array( + np.hstack([param_flags, grain_flags]), + dtype=bool) + + # fill out full parameter list + # !!! no scaling for now + plist_full[refine_flags] = plist_fit + + # instrument update + instr.update_from_parameter_list(plist_full) + + # assign some useful params + wavelength = instr.beam_wavelength + bvec = instr.beam_vector + chi = instr.chi + tvec_s = instr.tvec + + # right now just stuck on the end and assumed + # to all be the same length... FIX THIS + xy_unwarped = {} + meas_omes = {} + calc_omes = {} + calc_xy = {} + + # grain params + grain_params = plist_full[npi:] + if np.mod(len(grain_params), NP_GRN) != 0: + raise RuntimeError("parameter list length is not consistent") + ngrains = len(grain_params) // NP_GRN + grain_params = grain_params.reshape((ngrains, NP_GRN)) + + # loop over panels + npts_tot = 0 + for det_key, panel in instr.detectors.items(): + rmat_d = panel.rmat + tvec_d = panel.tvec + + xy_unwarped[det_key] = [] + meas_omes[det_key] = [] + calc_omes[det_key] = [] + calc_xy[det_key] = [] + + for ig, grain in enumerate(grain_params): + ghkls = hkls_idx[det_key][ig] + xyo = xyo_det[det_key][ig] + + npts_tot += len(xyo) + + xy_unwarped[det_key].append(xyo[:, :2]) + meas_omes[det_key].append(xyo[:, 2]) + if panel.distortion is not None: # do unwarping + xy_unwarped[det_key][ig] = panel.distortion.apply( + xy_unwarped[det_key][ig] + ) + + # transform G-vectors: + # 1) convert inv.
+
+
+def sxcal_obj_func(plist_fit, plist_full,
+                   param_flags, grain_flags,
+                   instr, xyo_det, hkls_idx,
+                   bmat, ome_period,
+                   sim_only=False, return_value_flag=None):
+    """
+    Objective function for single-crystal instrument calibration.
+
+    Returns the stacked (x, y, omega) residuals, unless sim_only is
+    True or a return_value_flag is given.
+    """
+    npi = len(instr.calibration_parameters)
+    NP_GRN = 12
+
+    # stack flags and force bool repr
+    refine_flags = np.array(
+        np.hstack([param_flags, grain_flags]),
+        dtype=bool)
+
+    # fill out full parameter list
+    # !!! no scaling for now
+    plist_full[refine_flags] = plist_fit
+
+    # instrument update
+    instr.update_from_parameter_list(plist_full)
+
+    # assign some useful params
+    wavelength = instr.beam_wavelength
+    bvec = instr.beam_vector
+    chi = instr.chi
+    tvec_s = instr.tvec
+
+    # right now just stuck on the end and assumed
+    # to all be the same length... FIX THIS
+    xy_unwarped = {}
+    meas_omes = {}
+    calc_omes = {}
+    calc_xy = {}
+
+    # grain params
+    grain_params = plist_full[npi:]
+    if np.mod(len(grain_params), NP_GRN) != 0:
+        raise RuntimeError("parameter list length is not consistent")
+    ngrains = len(grain_params) // NP_GRN
+    grain_params = grain_params.reshape((ngrains, NP_GRN))
+
+    # loop over panels
+    npts_tot = 0
+    for det_key, panel in instr.detectors.items():
+        rmat_d = panel.rmat
+        tvec_d = panel.tvec
+
+        xy_unwarped[det_key] = []
+        meas_omes[det_key] = []
+        calc_omes[det_key] = []
+        calc_xy[det_key] = []
+
+        for ig, grain in enumerate(grain_params):
+            ghkls = hkls_idx[det_key][ig]
+            xyo = xyo_det[det_key][ig]
+
+            npts_tot += len(xyo)
+
+            xy_unwarped[det_key].append(xyo[:, :2])
+            meas_omes[det_key].append(xyo[:, 2])
+            if panel.distortion is not None:  # do unwarping
+                xy_unwarped[det_key][ig] = panel.distortion.apply(
+                    xy_unwarped[det_key][ig]
+                )
+
+            # transform G-vectors:
+            # 1) convert inv. stretch tensor from MV notation into 3x3
+            # 2) take reciprocal lattice vectors from CRYSTAL to SAMPLE frame
+            # 3) apply stretch tensor
+            # 4) normalize reciprocal lattice vectors in SAMPLE frame
+            # 5) transform unit reciprocal lattice vectors back to CRYSTAL frame
+            rmat_c = xfcapi.make_rmat_of_expmap(grain[:3])
+            tvec_c = grain[3:6]
+            vinv_s = grain[6:]
+            gvec_c = np.dot(bmat, ghkls.T)
+            vmat_s = mutil.vecMVToSymm(vinv_s)
+            ghat_s = mutil.unitVector(np.dot(vmat_s, np.dot(rmat_c, gvec_c)))
+            ghat_c = np.dot(rmat_c.T, ghat_s)
+
+            match_omes, calc_omes_tmp = grainutil.matchOmegas(
+                xyo, ghkls.T,
+                chi, rmat_c, bmat, wavelength,
+                vInv=vinv_s,
+                beamVec=bvec,
+                omePeriod=ome_period)
+
+            rmat_s_arr = xfcapi.make_sample_rmat(
+                chi, np.ascontiguousarray(calc_omes_tmp)
+            )
+            calc_xy_tmp = xfcapi.gvec_to_xy(
+                ghat_c.T, rmat_d, rmat_s_arr, rmat_c,
+                tvec_d, tvec_s, tvec_c
+            )
+            if np.any(np.isnan(calc_xy_tmp)):
+                logger.warning("infeasible parameters: may want to scale back "
+                               "finite difference step size")
+
+            calc_omes[det_key].append(calc_omes_tmp)
+            calc_xy[det_key].append(calc_xy_tmp)
+
+    # return values
+    if sim_only:
+        retval = {}
+        for det_key in calc_xy.keys():
+            # ??? calc_xy is always 2-d
+            retval[det_key] = []
+            for ig in range(ngrains):
+                retval[det_key].append(
+                    np.vstack(
+                        [calc_xy[det_key][ig].T, calc_omes[det_key][ig]]
+                    ).T
+                )
+    else:
+        meas_xy_all = []
+        calc_xy_all = []
+        meas_omes_all = []
+        calc_omes_all = []
+        for det_key in xy_unwarped.keys():
+            meas_xy_all.append(np.vstack(xy_unwarped[det_key]))
+            calc_xy_all.append(np.vstack(calc_xy[det_key]))
+            meas_omes_all.append(np.hstack(meas_omes[det_key]))
+            calc_omes_all.append(np.hstack(calc_omes[det_key]))
+        meas_xy_all = np.vstack(meas_xy_all)
+        calc_xy_all = np.vstack(calc_xy_all)
+        meas_omes_all = np.hstack(meas_omes_all)
+        calc_omes_all = np.hstack(calc_omes_all)
+
+        diff_vecs_xy = calc_xy_all - meas_xy_all
+        diff_ome = rotations.angularDifference(calc_omes_all, meas_omes_all)
+        retval = np.hstack(
+            [diff_vecs_xy,
+             diff_ome.reshape(npts_tot, 1)]
+        ).flatten()
+        if return_value_flag == 1:
+            retval = sum(abs(retval))
+        elif return_value_flag == 2:
+            denom = npts_tot - len(plist_fit) - 1.
+            if denom != 0:
+                nu_fac = 1. / denom
+            else:
+                nu_fac = 1.
+            retval = nu_fac * sum(retval**2)
+    return retval
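+
+# A hedged sketch (made-up numbers) of the return_value_flag handling
+# above: flag 2 collapses the residual vector to a reduced sum of
+# squares over (npts_tot - len(plist_fit) - 1) degrees of freedom.
+#
+#     residual = np.array([0.01, -0.02, 0.015, 0.005])
+#     npts_tot, n_fit = residual.size, 2
+#     denom = npts_tot - n_fit - 1.
+#     nu_fac = 1. / denom if denom != 0 else 1.
+#     reduced_ssq = nu_fac * np.sum(residual**2)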
+
+
+def parse_reflection_tables(cfg, instr, grain_ids, refit_idx=None):
+    """
+    make spot dictionaries
+    """
+    hkls = {}
+    xyo_det = {}
+    idx_0 = {}
+    for det_key, panel in instr.detectors.items():
+        hkls[det_key] = []
+        xyo_det[det_key] = []
+        idx_0[det_key] = []
+        for ig, grain_id in enumerate(grain_ids):
+            spots_filename = os.path.join(
+                cfg.analysis_dir, os.path.join(
+                    det_key, 'spots_%05d.out' % grain_id
+                )
+            )
+
+            # load pull_spots output table
+            gtable = np.loadtxt(spots_filename, ndmin=2)
+            if len(gtable) == 0:
+                gtable = np.nan*np.ones((1, 17))
+
+            # apply conditions for accepting valid data
+            valid_reflections = gtable[:, 0] >= 0  # is indexed
+            not_saturated = gtable[:, 6] < panel.saturation_level
+            # throw away extreme etas
+            p90 = rotations.angularDifference(gtable[:, 8], cnst.piby2)
+            m90 = rotations.angularDifference(gtable[:, 8], -cnst.piby2)
+            accept_etas = np.logical_or(p90 > ext_eta_tol,
+                                        m90 > ext_eta_tol)
+            logger.info(f"panel '{det_key}', grain {grain_id}")
+            logger.info(f"{sum(valid_reflections)} of {len(gtable)} "
+                        "reflections are indexed")
+            logger.info(f"{sum(not_saturated)} of {sum(valid_reflections)}"
+                        " valid reflections are below"
+                        + f" saturation threshold of {panel.saturation_level}")
+            logger.info(f"{sum(accept_etas)} of {len(gtable)}"
+                        " reflections are more than"
+                        + f" {np.degrees(ext_eta_tol)} degrees from the rotation axis")
+
+            # valid reflections index
+            if refit_idx is None:
+                idx = np.logical_and(
+                    valid_reflections,
+                    np.logical_and(not_saturated, accept_etas)
+                )
+                idx_0[det_key].append(idx)
+            else:
+                idx = refit_idx[det_key][ig]
+                idx_0[det_key].append(idx)
+                logger.info(f"input reflections specify {sum(idx)} of "
+                            f"{len(gtable)} total valid reflections")
+
+            hkls[det_key].append(gtable[idx, 2:5])
+            meas_omes = gtable[idx, 12].reshape(sum(idx), 1)
+            xyo_det[det_key].append(np.hstack([gtable[idx, -2:], meas_omes]))
+    return hkls, xyo_det, idx_0
diff --git a/hexrd/core/fitting/calibration/powder.py b/hexrd/core/fitting/calibration/powder.py
new file mode 100644
index 000000000..dc25f32f3
--- /dev/null
+++ b/hexrd/core/fitting/calibration/powder.py
@@ -0,0 +1,380 @@
+import copy
+from typing import Optional
+
+import numpy as np
+
+from hexrd.core import matrixutil as mutil
+from hexrd.core.instrument import calc_angles_from_beam_vec, switch_xray_source
+from hexrd.core.utils.hkl import hkl_to_str, str_to_hkl
+
+from .calibrator import Calibrator
+from .lmfit_param_handling import create_material_params, update_material_from_params
+
+nfields_powder_data = 8
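+
+# A hedged sketch (hypothetical values) of one row of the internal
+# calibration data handled below; the eight fields counted by
+# nfields_powder_data are, in order,
+#     [x_meas, y_meas, tth_meas, h, k, l, dsp_ref, eta_ref]
+# so row[:2] is the picked cartesian point and row[3:6] is the hkl:
+#
+#     row = np.array([1.2, -3.4, 0.31, 1., 1., 1., 2.33, 0.78])
+#     xy_meas, tth_meas, hkl = row[:2], row[2], row[3:6]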
+
+
+class PowderCalibrator(Calibrator):
+    type = 'powder'
+
+    def __init__(self, instr, material, img_dict, default_refinements=None,
+                 tth_tol=None, eta_tol=0.25,
+                 fwhm_estimate=None, min_pk_sep=1e-3, min_ampl=0.,
+                 pktype='pvoigt', bgtype='linear',
+                 tth_distortion=None, calibration_picks=None,
+                 xray_source: Optional[str] = None):
+        assert list(instr.detectors.keys()) == list(img_dict.keys()), \
+            "instrument and image dict must have the same keys"
+
+        self.instr = instr
+        self.material = material
+        self.img_dict = img_dict
+        self.default_refinements = default_refinements
+        self.xray_source = xray_source
+
+        # for polar interpolation
+        if tth_tol is not None:
+            # This modifies the width on the plane data. Default to whatever
+            # is on the plane data, so only set it if it is not None.
+            self.tth_tol = tth_tol
+
+        self.eta_tol = eta_tol
+        self.fwhm_estimate = fwhm_estimate
+        self.min_pk_sep = min_pk_sep
+        self.min_ampl = min_ampl
+        self.pktype = pktype
+        self.bgtype = bgtype
+
+        self._tth_distortion = tth_distortion
+        self._update_tth_distortion_panels()
+
+        self.plane_data.wavelength = instr.xrs_beam_energy(xray_source)
+
+        self.param_names = []
+
+        self.data_dict = None
+        if calibration_picks is not None:
+            # container for calibration data
+            self.calibration_picks = calibration_picks
+
+    @property
+    def tth_distortion(self):
+        return self._tth_distortion
+
+    @tth_distortion.setter
+    def tth_distortion(self, v):
+        self._tth_distortion = v
+        self._update_tth_distortion_panels()
+
+    def _update_tth_distortion_panels(self):
+        # Make sure the panels in the tth distortion are the same
+        # as those on the instrument, so their beam vectors get modified
+        # accordingly.
+        if self._tth_distortion is None:
+            return
+
+        self._tth_distortion = copy.deepcopy(self._tth_distortion)
+        for det_key, obj in self._tth_distortion.items():
+            obj.panel = self.instr.detectors[det_key]
+
+    def create_lmfit_params(self, current_params):
+        # There shouldn't be more than one calibrator for a given material, so
+        # just assume we have a unique name...
+        params = create_material_params(self.material,
+                                        self.default_refinements)
+
+        # If multiple powder calibrators were used for the same material (such
+        # as in 2XRS), then don't add params again.
+        param_names = [x[0] for x in current_params]
+        params = [x for x in params if x[0] not in param_names]
+
+        self.param_names = [x[0] for x in params]
+        return params
+
+    def update_from_lmfit_params(self, params_dict):
+        if self.param_names:
+            update_material_from_params(params_dict, self.material)
+
+    @property
+    def plane_data(self):
+        return self.material.planeData
+
+    @property
+    def tth_tol(self):
+        tth_tol = self.plane_data.tThWidth
+        return np.degrees(tth_tol) if tth_tol is not None else tth_tol
+
+    @tth_tol.setter
+    def tth_tol(self, x):
+        assert np.isscalar(x), "tth_tol must be a scalar value"
+        self.plane_data.tThWidth = np.radians(x)
+
+    @property
+    def spectrum_kwargs(self):
+        return dict(pktype=self.pktype,
+                    bgtype=self.bgtype,
+                    fwhm_init=self.fwhm_estimate,
+                    min_ampl=self.min_ampl,
+                    min_pk_sep=self.min_pk_sep)
+
+    @property
+    def calibration_picks(self):
+        # Convert this from our internal data dict format
+        picks = {}
+        for det_key, data in self.data_dict.items():
+            picks[det_key] = {}
+            for ringset in data:
+                for row in ringset:
+                    # Columns 3, 4, and 5 are the hkl
+                    hkl_str = hkl_to_str(row[3:6].astype(int))
+                    picks[det_key].setdefault(hkl_str, [])
+                    # Columns 0 and 1 are the xy coordinates
+                    picks[det_key][hkl_str].append(row[:2].tolist())
+
+        return picks
+
+    @calibration_picks.setter
+    def calibration_picks(self, v):
+        # Convert this to our internal data dict format
+        data_dict = {}
+        for det_key, hkl_picks in v.items():
+            data_dict[det_key] = []
+            for hkl_str, picks in hkl_picks.items():
+                if len(picks) == 0:
+                    # Just skip over it
+                    continue
+
+                data = np.zeros((len(picks), nfields_powder_data),
+                                dtype=np.float64)
+                # Columns 0 and 1 are the xy coordinates
+                data[:, :2] = np.asarray(picks)
+                # Columns 3, 4, and 5 are the hkl
+                data[:, 3:6] = str_to_hkl(hkl_str)
+                data_dict[det_key].append(data)
+
+        self.data_dict = data_dict
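+
+    # A hedged usage sketch of the picks format the setter above
+    # consumes (detector key, hkl strings, and coordinates are made up,
+    # assuming hkl_to_str produces space-separated indices):
+    #
+    #     calibrator.calibration_picks = {
+    #         'IMAGE-PLATE-2': {
+    #             '1 1 1': [[1.23, -4.56], [2.34, -5.67]],
+    #             '2 0 0': [[0.12, 3.45]],
+    #         },
+    #     }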
+
+    def autopick_points(self, fit_tth_tol=5., int_cutoff=1e-4):
+        """
+        return the RHS for the instrument DOF and image dict
+
+        The format is a dict over detectors, each containing
+
+          [index over ring sets]
+            [index over azimuthal patch]
+              [xy_meas, tth_meas, hkl, dsp_ref, eta_ref]
+
+        FIXME: can not yet handle tth ranges with multiple peaks!
+        """
+        # If needed, change the x-ray source before proceeding.
+        # This does nothing for single x-ray sources.
+        with switch_xray_source(self.instr, self.xray_source):
+            return self._autopick_points(fit_tth_tol, int_cutoff)
+
+    def _autopick_points(self, fit_tth_tol=5., int_cutoff=1e-4):
+        # ideal tth
+        dsp_ideal = np.atleast_1d(self.plane_data.getPlaneSpacings())
+        hkls_ref = self.plane_data.hkls.T
+        dsp0 = []
+        hkls = []
+        for idx in self.plane_data.getMergedRanges()[0]:
+            if len(idx) > 1:
+                eqv, uidx = mutil.findDuplicateVectors(
+                    np.atleast_2d(dsp_ideal[idx])
+                )
+                if len(uidx) < len(idx):
+                    # if here, at least one peak is degenerate
+                    uidx = np.asarray(idx)[uidx]
+                else:
+                    uidx = np.asarray(idx)
+            else:
+                uidx = np.asarray(idx)
+            dsp0.append(dsp_ideal[uidx])
+            hkls.append(hkls_ref[uidx])
+
+        # Perform interpolation and fitting
+        fitting_kwargs = {
+            'int_cutoff': int_cutoff,
+            'fit_tth_tol': fit_tth_tol,
+            'spectrum_kwargs': self.spectrum_kwargs,
+        }
+        kwargs = {
+            'plane_data': self.plane_data,
+            'imgser_dict': self.img_dict,
+            'tth_tol': self.tth_tol,
+            'eta_tol': self.eta_tol,
+            'npdiv': 2,
+            'collapse_eta': True,
+            'collapse_tth': False,
+            'do_interpolation': True,
+            'do_fitting': True,
+            'fitting_kwargs': fitting_kwargs,
+            'tth_distortion': self.tth_distortion,
+        }
+        powder_lines = self.instr.extract_line_positions(**kwargs)
+
+        # Now loop over the ringsets and convert to the calibration format
+        rhs = {}
+        for det_key, panel in self.instr.detectors.items():
+            rhs[det_key] = []
+            for i_ring, ringset in enumerate(powder_lines[det_key]):
+                this_dsp0 = dsp0[i_ring]
+                this_hkl = hkls[i_ring]
+                npeaks = len(this_dsp0)
+
+                ret = []
+                for angs, intensities, tth_meas in ringset:
+                    if len(intensities) == 0:
+                        continue
+
+                    # We only run this on one image. Grab that one.
+                    tth_meas = tth_meas[0]
+                    if tth_meas is None:
+                        continue
+
+                    # Convert to radians
+                    tth_meas = np.radians(tth_meas)
+
+                    # reference eta
+                    eta_ref_tile = np.tile(angs[1], npeaks)
+
+                    # push back through mapping to cartesian (x, y)
+                    xy_meas = panel.angles_to_cart(
+                        np.vstack([tth_meas, eta_ref_tile]).T,
+                        tvec_s=self.instr.tvec,
+                        apply_distortion=True,
+                    )
+
+                    # cat results
+                    output = np.hstack([
+                        xy_meas,
+                        tth_meas.reshape(npeaks, 1),
+                        this_hkl,
+                        this_dsp0.reshape(npeaks, 1),
+                        eta_ref_tile.reshape(npeaks, 1),
+                    ])
+                    ret.append(output)
+
+                if not ret:
+                    ret.append(np.empty((0, nfields_powder_data)))
+
+                rhs[det_key].append(np.vstack(ret))
+
+        self.data_dict = rhs
+        return rhs
+
+    def _evaluate(self, output='residual'):
+        """
+        Evaluate the powder diffraction model.
+
+        Parameters
+        ----------
+        output : str, optional
+            Either 'residual' or 'model'. The default is 'residual'.
+
+        Raises
+        ------
+        RuntimeError
+            If `output` is not one of the recognized flags.
+
+        Returns
+        -------
+        numpy.ndarray
+            The two-theta residual vector, or the modeled cartesian
+            coordinates, depending on `output`.
+
+        """
+        # In case the beam energy was modified, ensure it is updated
+        # on the plane data as well.
+        self.plane_data.wavelength = self.instr.beam_energy
+
+        # need this for dsp
+        bmat = self.plane_data.latVecOps['B']
+        wlen = self.instr.beam_wavelength
+
+        # build residual
+        retval = np.array([], dtype=float)
+        for det_key, panel in self.instr.detectors.items():
+            if len(self.data_dict[det_key]) == 0:
+                continue
+            else:
+                # recast as array
+                pdata = np.vstack(self.data_dict[det_key])
+
+                """
+                Here is the strategy:
+                  1. remap the feature points from raw cartesian to
+                     (tth, eta) under the current mapping
+                  2. use the lattice and hkls to calculate the ideal tth0
+                  3. push the (tth0, eta) values back through the mapping to
+                     raw cartesian coordinates
+                  4. build residual on the measured and recalculated (x, y)
+                """
+                # push measured (x, y) ring points through current mapping
+                # to (tth, eta)
+                meas_xy = pdata[:, :2]
+                updated_angles, _ = panel.cart_to_angles(
+                    meas_xy,
+                    tvec_s=self.instr.tvec,
+                    apply_distortion=True
+                )
+
+                # derive ideal tth positions from additional ring point info
+                hkls = pdata[:, 3:6]
+                gvecs = np.dot(hkls, bmat.T)
+                dsp0 = 1./np.sqrt(np.sum(gvecs*gvecs, axis=1))
+
+                # updated reference Bragg angles
+                tth0 = 2.*np.arcsin(0.5*wlen/dsp0)
+
+                # !!! get eta from mapped markers rather than ref
+                # eta0 = pdata[:, -1]
+                eta0 = updated_angles[:, 1]
+
+                # apply tth distortion
+                if self.tth_distortion is not None:
+                    # !!! sd has ref to detector so is updated
+                    sd = self.tth_distortion[det_key]
+                    tmp = sd.apply(meas_xy, return_nominal=False)
+                    corr_angs = tmp + np.vstack([tth0, np.zeros_like(tth0)]).T
+                    tth0, eta0 = corr_angs.T
+
+                # map updated (tth0, eta0) back to cartesian coordinates
+                tth_eta = np.vstack([tth0, eta0]).T
+
+                # output
+                if output == 'residual':
+                    # retval = np.append(
+                    #     retval,
+                    #     meas_xy.flatten() - calc_xy.flatten()
+                    # )
+                    retval = np.append(
+                        retval,
+                        updated_angles[:, 0].flatten() - tth0.flatten()
+                    )
+                elif output == 'model':
+                    calc_xy = panel.angles_to_cart(
+                        tth_eta,
+                        tvec_s=self.instr.tvec,
+                        apply_distortion=True
+                    )
+                    retval = np.append(
+                        retval,
+                        calc_xy.flatten()
+                    )
+                else:
+                    raise RuntimeError(
+                        "unrecognized output flag '%s'"
+                        % output
+                    )
+
+        return retval
+
+    def residual(self):
+        # If needed, change the x-ray source before proceeding.
+        # This does nothing for single x-ray sources.
+        with switch_xray_source(self.instr, self.xray_source):
+            return self._evaluate(output='residual')
+
+    def model(self):
+        # If needed, change the x-ray source before proceeding.
+        # This does nothing for single x-ray sources.
+        with switch_xray_source(self.instr, self.xray_source):
+            return self._evaluate(output='model')
diff --git a/hexrd/core/fitting/calibration/structureless.py b/hexrd/core/fitting/calibration/structureless.py
new file mode 100644
index 000000000..4771f976a
--- /dev/null
+++ b/hexrd/core/fitting/calibration/structureless.py
@@ -0,0 +1,283 @@
+import copy
+from typing import Optional
+
+import lmfit
+import numpy as np
+
+from hexrd.core.instrument import switch_xray_source
+
+from .lmfit_param_handling import (
+    add_engineering_constraints,
+    create_instr_params,
+    create_tth_parameters,
+    DEFAULT_EULER_CONVENTION,
+    tth_parameter_prefixes,
+    update_instrument_from_params,
+)
+from ....core.fitting.calibration.relative_constraints import (
+    create_relative_constraints,
+    RelativeConstraints,
+    RelativeConstraintsType,
+)
+
+
+class StructurelessCalibrator:
+    """
+    This class implements the equivalent of the powder calibrator, but
+    without constraining the optimization to a structure. In this
+    implementation, the location of the constant two-theta line that a
+    set of points lie on is also an optimization parameter.
+
+    Unlike the previous implementations, this routine is based on the
+    lmfit module, to implement the more complicated constraints for
+    the TARDIS box.
+
+    If TARDIS constraints are set to True, then the following
+    additional linear constraint is added to the calibration:
+
+    22.83 mm <= |IMAGE-PLATE-2 tvec[1]| + |IMAGE-PLATE-4 tvec[1]| <= 23.43 mm
+
+    """
+    def __init__(self,
+                 instr,
+                 data,
+                 tth_distortion=None,
+                 engineering_constraints=None,
+                 relative_constraints_type=RelativeConstraintsType.none,
+                 euler_convention=DEFAULT_EULER_CONVENTION):
+
+        self._instr = instr
+        self._data = data
+        self._tth_distortion = tth_distortion
+        self._engineering_constraints = engineering_constraints
+        self._relative_constraints = create_relative_constraints(
+            relative_constraints_type, self.instr)
+        self.euler_convention = euler_convention
+        self._update_tth_distortion_panels()
+        self.make_lmfit_params()
+        self.set_minimizer()
+
+    def make_lmfit_params(self):
+        params = []
+        params += create_instr_params(
+            self.instr,
+            self.euler_convention,
+            self.relative_constraints,
+        )
+        params += create_tth_parameters(self.instr, self.meas_angles)
+
+        params_dict = lmfit.Parameters()
+        params_dict.add_many(*params)
+
+        add_engineering_constraints(params_dict, self.engineering_constraints)
+        self.params = params_dict
+        return params_dict
+
+    def calc_residual(self, params):
+        update_instrument_from_params(
+            self.instr,
+            params,
+            self.euler_convention,
+            self.relative_constraints,
+        )
+
+        # Store these in variables so they are only computed once.
+        meas_angles = self.meas_angles
+        tth_correction = self.tth_correction
+
+        residual = []
+        prefixes = tth_parameter_prefixes(self.instr)
+        for xray_source in self.data:
+            prefix = prefixes[xray_source]
+            for ii, (rng, corr_rng) in enumerate(zip(
+                meas_angles[xray_source],
+                tth_correction[xray_source]
+            )):
+                for det_name, panel in self.instr.detectors.items():
+                    if rng[det_name] is None or rng[det_name].size == 0:
+                        continue
+
+                    tth_rng = params[f'{prefix}{ii}'].value
+                    tth_updated = np.degrees(rng[det_name][:, 0])
+                    delta_tth = tth_updated - tth_rng
+                    if corr_rng[det_name] is not None:
+                        delta_tth -= np.degrees(corr_rng[det_name])
+                    residual.append(delta_tth)
+
+        return np.hstack(residual)
+
+    def set_minimizer(self):
+        self.fitter = lmfit.Minimizer(self.calc_residual,
+                                      self.params,
+                                      nan_policy='omit')
+
+    def run_calibration(self,
+                        method='least_squares',
+                        odict=None):
+        """
+        odict is the options dictionary
+        """
+        if odict is None:
+            odict = {}
+
+        if method == 'least_squares':
+            fdict = {
+                "ftol": 1e-8,
+                "xtol": 1e-8,
+                "gtol": 1e-8,
+                "verbose": 2,
+                "max_nfev": 1000,
+                "x_scale": "jac",
+                "method": "trf",
+                "jac": "3-point",
+            }
+            fdict.update(odict)
+
+            self.res = self.fitter.least_squares(self.params,
+                                                 **fdict)
+        else:
+            fdict = odict
+            self.res = self.fitter.scalar_minimize(method=method,
+                                                   params=self.params,
+                                                   max_nfev=50000,
+                                                   **fdict)
+
+        self.params = self.res.params
+        # res = self.fitter.least_squares(**fdict)
+        return self.res
+
+    @property
+    def tth_distortion(self):
+        return self._tth_distortion
+
+    @tth_distortion.setter
+    def tth_distortion(self, v):
+        self._tth_distortion = v
+        self._update_tth_distortion_panels()
+        # No need to update lmfit parameters
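+
+    # A hedged sketch (parameter names made up, not the hexrd
+    # implementation) of how a TARDIS-style box constraint can be
+    # expressed with lmfit: bound an auxiliary "gap" parameter and
+    # derive one translation from it by expression, so that
+    # |IP2 tvec[1]| + |IP4 tvec[1]| stays within [22.83, 23.43] mm.
+    #
+    #     params = lmfit.Parameters()
+    #     params.add('IMAGE_PLATE_2_tvec_y', value=-23.0)
+    #     params.add('tardis_gap', value=23.1, min=22.83, max=23.43)
+    #     params.add('IMAGE_PLATE_4_tvec_y',
+    #                expr='tardis_gap - abs(IMAGE_PLATE_2_tvec_y)')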
+    def _update_tth_distortion_panels(self):
+        # Make sure the panels in the tth distortion are the same
+        # as those on the instrument, so their beam vectors get modified
+        # accordingly.
+        if self._tth_distortion is None:
+            return
+
+        self._tth_distortion = copy.deepcopy(self._tth_distortion)
+        for det_key, obj in self.tth_distortion.items():
+            obj.panel = self.instr.detectors[det_key]
+
+    @property
+    def relative_constraints_type(self):
+        return self._relative_constraints.type
+
+    @relative_constraints_type.setter
+    def relative_constraints_type(self, v: Optional[RelativeConstraintsType]):
+        v = v if v is not None else RelativeConstraintsType.none
+
+        current = getattr(self, '_relative_constraints', None)
+        if current is None or current.type != v:
+            self.relative_constraints = create_relative_constraints(
+                v, self.instr)
+
+    @property
+    def relative_constraints(self) -> RelativeConstraints:
+        return self._relative_constraints
+
+    @relative_constraints.setter
+    def relative_constraints(self, v: RelativeConstraints):
+        self._relative_constraints = v
+        self.params = self.make_lmfit_params()
+
+    @property
+    def engineering_constraints(self):
+        return self._engineering_constraints
+
+    @engineering_constraints.setter
+    def engineering_constraints(self, v):
+        if v == self._engineering_constraints:
+            return
+
+        valid_settings = [
+            None,
+            'None',
+            'TARDIS',
+        ]
+        if v not in valid_settings:
+            valid_str = ', '.join(map(str, valid_settings))
+            msg = (
+                f'Invalid engineering constraint "{v}". Valid constraints '
+                f'are: "{valid_str}"'
+            )
+            raise Exception(msg)
+
+        self._engineering_constraints = v
+        self.make_lmfit_params()
+
+    @property
+    def instr(self):
+        return self._instr
+
+    @instr.setter
+    def instr(self, ins):
+        self._instr = ins
+        self.make_lmfit_params()
+        self._update_tth_distortion_panels()
+
+    @property
+    def data(self):
+        return self._data
+
+    @data.setter
+    def data(self, dat):
+        self._data = dat
+        self.make_lmfit_params()
+
+    @property
+    def residual(self):
+        return self.calc_residual(self.params)
+
+    @property
+    def meas_angles(self) -> dict:
+        """
+        This property returns a dictionary of angles based on the
+        current instrument parameters.
+        """
+        angles_dict = {}
+        for xray_source, rings in self.data.items():
+            with switch_xray_source(self.instr, xray_source):
+                ang_list = []
+                for rng in rings:
+                    ang_dict = dict.fromkeys(self.instr.detectors)
+                    for det_name, meas_xy in rng.items():
+                        panel = self.instr.detectors[det_name]
+                        angles, _ = panel.cart_to_angles(
+                            meas_xy,
+                            tvec_s=self.instr.tvec,
+                            apply_distortion=True)
+                        ang_dict[det_name] = angles
+                    ang_list.append(ang_dict)
+
+            angles_dict[xray_source] = ang_list
+
+        return angles_dict
+
+    @property
+    def tth_correction(self) -> dict:
+        ret = {}
+        for xray_source, rings in self.data.items():
+            with switch_xray_source(self.instr, xray_source):
+                corr_list = []
+                for rng in rings:
+                    corr_dict = dict.fromkeys(self.instr.detectors)
+                    if self.tth_distortion is not None:
+                        for det_name, meas_xy in rng.items():
+                            # !!! sd has ref to detector so is updated
+                            sd = self.tth_distortion[det_name]
+                            tth_corr = sd.apply(
+                                meas_xy,
+                                return_nominal=False,
+                            )[:, 0]
+                            corr_dict[det_name] = tth_corr
+                    corr_list.append(corr_dict)
+
+            ret[xray_source] = corr_list
+
+        return ret
+
+    @property
+    def two_XRS(self):
+        return self.instr.has_multi_beam

From 225f1f39dc330a99ea2eec51e7cfc5b129355cc4 Mon Sep 17 00:00:00 2001
From: Kevin Welsh
Date: Mon, 20 Jan 2025 15:27:19 -0500
Subject: [PATCH 07/19] Create module aliasing based on the generated file map

Forgot to add the "hexrd." when I moved this.

Replace "/" with "." in C extension path

Fix unpickling error in non-Windows env

Change to a human readable file_table instead of a pickle object.
Also addresses the unpickling issue across platforms.

mypy doesn't like the reassignment.

Hand patches for the preprocess file updates, plus a slightly more
robust module_map.

Had to update module_map to support maintaining all imports.

Put a TYPE_CHECKING guard around this, so we don't have a circular
import.

These files need to be copied across workflows as well to preserve
imports and resolve circular dependencies.

Fix the import mapping to allow for unpickling use cases.

Switch these things to absolute imports. Since we are changing the
actual depth of these modules, it can cause issues due to the
following:

* We need to register these modules with the old names as well so that
  comparisons work correctly
* Importlib seems to use the key in sys.modules to determine if a
  relative import goes "outside of the module"

Since we changed things like `hexrd.thing` -> `hexrd.core.thing`, if
you think the module is at `hexrd.thing` and you import `...other`, it
will look like you are escaping the package. (Note: the number of dots
and the effective depth depend on what `thing` actually is.)

Move this __init__ + fix the file_table a bit
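For illustration, a minimal sketch of the aliasing described above (not
the actual module_map code; module names are examples from the file
table): register a relocated module in sys.modules under its old dotted
name, so stale imports, pickled references, and identity comparisons
keep working.

    import importlib
    import sys

    # hexrd.constants moved to hexrd.core.constants; alias the old name
    # to the relocated module object.
    module = importlib.import_module('hexrd.core.constants')
    sys.modules['hexrd.constants'] = module

    # Lookups through the old name now resolve to the same object.
    assert importlib.import_module('hexrd.constants') is module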
---
 file_table.tsv                                  | 532 ++++++++++++++++++
 fix-file-map.py                                 |  23 +
 hexrd/__init__.py                               |  16 +
 hexrd/core/config/root.py                       |   4 +-
 hexrd/core/extensions/__init__.py               |   3 +
 hexrd/core/fitting/__init__.py                  |   2 +-
 hexrd/core/fitting/calibration/__init__.py      |  18 +-
 hexrd/core/fitting/calibration/instrument.py    |   2 +-
 hexrd/core/fitting/calibration/laue.py          |   4 +-
 .../calibration/lmfit_param_handling.py         |   1 +
 hexrd/core/fitting/calibration/structureless.py |   2 +-
 hexrd/hed/instrument/hedm_instrument.py         |   6 +-
 hexrd/hedm/config/__init__.py                   |   5 +
 hexrd/hedm/config/findorientations.py           |   2 +-
 hexrd/hedm/config/fitgrains.py                  |   2 +-
 hexrd/hedm/config/instrument.py                 |   2 +-
 hexrd/hedm/config/root.py                       |   4 +-
 hexrd/hedm/instrument/hedm_instrument.py        |   6 +-
 .../preprocess/argument_classes_factory.py      |   6 +-
 hexrd/laue/instrument/hedm_instrument.py        |   6 +-
 hexrd/module_map.py                             | 152 +++++
 hexrd/powder/instrument/hedm_instrument.py      |   6 +-
 setup.py                                        |   2 +-
 tests/test_preprocess.py                        |   2 +-
 24 files changed, 775 insertions(+), 33 deletions(-)
 create mode 100644 file_table.tsv
 create mode 100644 fix-file-map.py
 create mode 100644 hexrd/module_map.py

diff --git a/file_table.tsv b/file_table.tsv
new file mode 100644
index 000000000..c9da106c7
--- /dev/null
+++ b/file_table.tsv
@@ -0,0 +1,532 @@
+setup.py	setup.py
+docs/source/conf.py	docs/source/conf.py
+scripts/install/install_build_dependencies.py	scripts/install/install_build_dependencies.py
+hexrd/valunits.py	hexrd/core/valunits.py
+hexrd/fitgrains.py	hexrd/hedm/fitgrains.py
+hexrd/imageutil.py	hexrd/core/imageutil.py
+hexrd/rotations.py	hexrd/core/rotations.py
+hexrd/findorientations.py	hexrd/hedm/findorientations.py
+hexrd/deprecation.py	hexrd/core/deprecation.py
+hexrd/indexer.py	hexrd/hedm/indexer.py
+hexrd/matrixutil.py	hexrd/core/matrixutil.py
+hexrd/constants.py	hexrd/core/constants.py
+hexrd/gridutil.py	hexrd/core/gridutil.py
+hexrd/resources/__init__.py	hexrd/core/resources/__init__.py
+hexrd/resources/detector_templates/__init__.py	hexrd/core/resources/detector_templates/__init__.py
+hexrd/resources/instrument_templates/__init__.py	hexrd/core/resources/instrument_templates/__init__.py
+hexrd/cli/help.py	hexrd/hedm/cli/help.py
+hexrd/cli/find_orientations.py	hexrd/hedm/cli/find_orientations.py
+hexrd/cli/fit_grains.py
hexrd/hedm/cli/fit_grains.py +hexrd/cli/__init__.py hexrd/hedm/cli/__init__.py +hexrd/cli/main.py hexrd/hedm/cli/main.py +hexrd/cli/test.py hexrd/hedm/cli/test.py +hexrd/cli/pickle23.py hexrd/hedm/cli/pickle23.py +hexrd/cli/documentation.py hexrd/hedm/cli/documentation.py +hexrd/config/fitgrains.py hexrd/hedm/config/fitgrains.py +hexrd/config/material.py hexrd/core/config/material.py +hexrd/config/root.py hexrd/hedm/config/root.py +hexrd/config/root.py hexrd/core/config/root.py +hexrd/config/dumper.py hexrd/hedm/config/dumper.py +hexrd/config/dumper.py hexrd/core/config/dumper.py +hexrd/config/loader.py hexrd/hedm/config/loader.py +hexrd/config/loader.py hexrd/core/config/loader.py +hexrd/config/__init__.py hexrd/hedm/config/__init__.py +hexrd/config/__init__.py hexrd/core/config/__init__.py +hexrd/config/findorientations.py hexrd/hedm/config/findorientations.py +hexrd/config/config.py hexrd/core/config/config.py +hexrd/config/utils.py hexrd/hedm/config/utils.py +hexrd/config/utils.py hexrd/core/config/utils.py +hexrd/config/instrument.py hexrd/hedm/config/instrument.py +hexrd/config/instrument.py hexrd/core/config/instrument.py +hexrd/config/beam.py hexrd/core/config/beam.py +hexrd/config/imageseries.py hexrd/core/config/imageseries.py +hexrd/sampleOrientations/conversions.py hexrd/hedm/sampleOrientations/conversions.py +hexrd/sampleOrientations/rfz.py hexrd/hedm/sampleOrientations/rfz.py +hexrd/sampleOrientations/__init__.py hexrd/hedm/sampleOrientations/__init__.py +hexrd/sampleOrientations/sampleRFZ.py hexrd/hedm/sampleOrientations/sampleRFZ.py +hexrd/transforms/xf.py hexrd/core/transforms/xf.py +hexrd/transforms/xfcapi.py hexrd/core/transforms/xfcapi.py +hexrd/transforms/__init__.py hexrd/core/transforms/__init__.py +hexrd/transforms/old_xfcapi.py hexrd/core/transforms/old_xfcapi.py +hexrd/transforms/new_capi/xf_new_capi.py hexrd/core/transforms/new_capi/xf_new_capi.py +hexrd/transforms/new_capi/reference.py hexrd/core/transforms/new_capi/reference.py +hexrd/extensions/__init__.py hexrd/core/extensions/__init__.py +hexrd/imageseries/imageseriesiter.py hexrd/core/imageseries/imageseriesiter.py +hexrd/imageseries/process.py hexrd/core/imageseries/process.py +hexrd/imageseries/stats.py hexrd/core/imageseries/stats.py +hexrd/imageseries/baseclass.py hexrd/core/imageseries/baseclass.py +hexrd/imageseries/__init__.py hexrd/core/imageseries/__init__.py +hexrd/imageseries/imageseriesabc.py hexrd/core/imageseries/imageseriesabc.py +hexrd/imageseries/omega.py hexrd/core/imageseries/omega.py +hexrd/imageseries/save.py hexrd/core/imageseries/save.py +hexrd/imageseries/load/hdf5.py hexrd/core/imageseries/load/hdf5.py +hexrd/imageseries/load/eiger_stream_v1.py hexrd/core/imageseries/load/eiger_stream_v1.py +hexrd/imageseries/load/framecache.py hexrd/core/imageseries/load/framecache.py +hexrd/imageseries/load/imagefiles.py hexrd/core/imageseries/load/imagefiles.py +hexrd/imageseries/load/rawimage.py hexrd/core/imageseries/load/rawimage.py +hexrd/imageseries/load/array.py hexrd/core/imageseries/load/array.py +hexrd/imageseries/load/__init__.py hexrd/core/imageseries/load/__init__.py +hexrd/imageseries/load/function.py hexrd/core/imageseries/load/function.py +hexrd/imageseries/load/registry.py hexrd/core/imageseries/load/registry.py +hexrd/imageseries/load/trivial.py hexrd/core/imageseries/load/trivial.py +hexrd/imageseries/load/metadata.py hexrd/core/imageseries/load/metadata.py +hexrd/projections/polar.py hexrd/core/projections/polar.py +hexrd/projections/__init__.py 
hexrd/core/projections/__init__.py +hexrd/projections/spherical.py hexrd/core/projections/spherical.py +hexrd/wppf/LeBailCalibration.py hexrd/powder/wppf/LeBailCalibration.py +hexrd/wppf/spectrum.py hexrd/powder/wppf/spectrum.py +hexrd/wppf/xtal.py hexrd/powder/wppf/xtal.py +hexrd/wppf/derivatives.py hexrd/powder/wppf/derivatives.py +hexrd/wppf/WPPF.py hexrd/powder/wppf/WPPF.py +hexrd/wppf/RietveldHEDM.py hexrd/powder/wppf/RietveldHEDM.py +hexrd/wppf/__init__.py hexrd/powder/wppf/__init__.py +hexrd/wppf/wppfsupport.py hexrd/powder/wppf/wppfsupport.py +hexrd/wppf/peakfunctions.py hexrd/powder/wppf/peakfunctions.py +hexrd/wppf/texture.py hexrd/powder/wppf/texture.py +hexrd/wppf/parameters.py hexrd/powder/wppf/parameters.py +hexrd/wppf/phase.py hexrd/powder/wppf/phase.py +hexrd/material/mksupport.py hexrd/core/material/mksupport.py +hexrd/material/symmetry.py hexrd/core/material/symmetry.py +hexrd/material/material.py hexrd/core/material/material.py +hexrd/material/crystallography.py hexrd/hedm/material/crystallography.py +hexrd/material/crystallography.py hexrd/laue/material/crystallography.py +hexrd/material/crystallography.py hexrd/powder/material/crystallography.py +hexrd/material/crystallography.py hexrd/core/material/crystallography.py +hexrd/material/unitcell.py hexrd/hedm/material/unitcell.py +hexrd/material/unitcell.py hexrd/core/material/unitcell.py +hexrd/material/__init__.py hexrd/core/material/__init__.py +hexrd/material/symbols.py hexrd/core/material/symbols.py +hexrd/material/utils.py hexrd/core/material/utils.py +hexrd/material/jcpds.py hexrd/core/material/jcpds.py +hexrd/material/spacegroup.py hexrd/core/material/spacegroup.py +hexrd/utils/profiler.py hexrd/core/utils/profiler.py +hexrd/utils/hdf5.py hexrd/core/utils/hdf5.py +hexrd/utils/progressbar.py hexrd/core/utils/progressbar.py +hexrd/utils/warnings.py hexrd/core/utils/warnings.py +hexrd/utils/concurrent.py hexrd/core/utils/concurrent.py +hexrd/utils/multiprocess_generic.py hexrd/core/utils/multiprocess_generic.py +hexrd/utils/__init__.py hexrd/core/utils/__init__.py +hexrd/utils/json.py hexrd/core/utils/json.py +hexrd/utils/compatibility.py hexrd/core/utils/compatibility.py +hexrd/utils/yaml.py hexrd/core/utils/yaml.py +hexrd/utils/hkl.py hexrd/core/utils/hkl.py +hexrd/utils/decorators.py hexrd/core/utils/decorators.py +hexrd/instrument/hedm_instrument.py hexrd/hed/instrument/hedm_instrument.py +hexrd/instrument/hedm_instrument.py hexrd/hedm/instrument/hedm_instrument.py +hexrd/instrument/hedm_instrument.py hexrd/laue/instrument/hedm_instrument.py +hexrd/instrument/hedm_instrument.py hexrd/powder/instrument/hedm_instrument.py +hexrd/instrument/hedm_instrument.py hexrd/core/instrument/hedm_instrument.py +hexrd/instrument/physics_package.py hexrd/hedm/instrument/physics_package.py +hexrd/instrument/physics_package.py hexrd/core/instrument/physics_package.py +hexrd/instrument/detector.py hexrd/core/instrument/detector.py +hexrd/instrument/detector.py hexrd/hed/instrument/detector.py +hexrd/instrument/detector.py hexrd/hedm/instrument/detector.py +hexrd/instrument/detector.py hexrd/powder/instrument/detector.py +hexrd/instrument/detector.py hexrd/laue/instrument/detector.py +hexrd/instrument/cylindrical_detector.py hexrd/core/instrument/cylindrical_detector.py +hexrd/instrument/__init__.py hexrd/core/instrument/__init__.py +hexrd/instrument/planar_detector.py hexrd/core/instrument/planar_detector.py +hexrd/instrument/detector_coatings.py hexrd/core/instrument/detector_coatings.py +hexrd/instrument/constants.py 
hexrd/core/instrument/constants.py +hexrd/fitting/fitpeak.py hexrd/core/fitting/fitpeak.py +hexrd/fitting/spectrum.py hexrd/core/fitting/spectrum.py +hexrd/fitting/grains.py hexrd/hedm/fitting/grains.py +hexrd/fitting/__init__.py hexrd/core/fitting/__init__.py +hexrd/fitting/peakfunctions.py hexrd/core/fitting/peakfunctions.py +hexrd/fitting/utils.py hexrd/core/fitting/utils.py +hexrd/fitting/calibration/laue.py hexrd/laue/fitting/calibration/laue.py +hexrd/fitting/calibration/structureless.py hexrd/powder/fitting/calibration/structureless.py +hexrd/fitting/calibration/powder.py hexrd/powder/fitting/calibration/powder.py +hexrd/fitting/calibration/__init__.py hexrd/core/fitting/calibration/__init__.py +hexrd/fitting/calibration/multigrain.py hexrd/hedm/fitting/calibration/multigrain.py +hexrd/fitting/calibration/calibrator.py hexrd/powder/fitting/calibration/calibrator.py +hexrd/fitting/calibration/lmfit_param_handling.py hexrd/powder/fitting/calibration/lmfit_param_handling.py +hexrd/fitting/calibration/instrument.py hexrd/powder/fitting/calibration/instrument.py +hexrd/ipfcolor/sphere_sector.py hexrd/hedm/ipfcolor/sphere_sector.py +hexrd/ipfcolor/colorspace.py hexrd/hedm/ipfcolor/colorspace.py +hexrd/ipfcolor/__init__.py hexrd/hedm/ipfcolor/__init__.py +hexrd/xrdutil/__init__.py hexrd/hedm/xrdutil/__init__.py +hexrd/xrdutil/utils.py hexrd/hedm/xrdutil/utils.py +hexrd/xrdutil/utils.py hexrd/hed/xrdutil/utils.py +hexrd/xrdutil/utils.py hexrd/laue/xrdutil/utils.py +hexrd/xrdutil/phutil.py hexrd/hed/xrdutil/phutil.py +hexrd/distortion/ge_41rt.py hexrd/core/distortion/ge_41rt.py +hexrd/distortion/identity.py hexrd/core/distortion/identity.py +hexrd/distortion/dexela_2923.py hexrd/core/distortion/dexela_2923.py +hexrd/distortion/__init__.py hexrd/core/distortion/__init__.py +hexrd/distortion/registry.py hexrd/core/distortion/registry.py +hexrd/distortion/distortionabc.py hexrd/core/distortion/distortionabc.py +hexrd/distortion/utils.py hexrd/core/distortion/utils.py +hexrd/distortion/nyi.py hexrd/core/distortion/nyi.py +hexrd/grainmap/tomoutil.py hexrd/hedm/grainmap/tomoutil.py +hexrd/grainmap/__init__.py hexrd/hedm/grainmap/__init__.py +hexrd/grainmap/vtkutil.py hexrd/hedm/grainmap/vtkutil.py +hexrd/grainmap/nfutil.py hexrd/hedm/grainmap/nfutil.py +hexrd/convolution/__init__.py hexrd/core/convolution/__init__.py +hexrd/convolution/utils.py hexrd/core/convolution/utils.py +hexrd/convolution/convolve.py hexrd/core/convolution/convolve.py +tests/test_material.py tests/test_material.py +tests/test_graindata.py tests/test_graindata.py +tests/test_utils_json.py tests/test_utils_json.py +tests/test_utils_yaml.py tests/test_utils_yaml.py +tests/common.py tests/common.py +tests/test_rotations.py tests/test_rotations.py +tests/test_inverse_distortion.py tests/test_inverse_distortion.py +tests/conftest.py tests/conftest.py +tests/test_absorption_correction.py tests/test_absorption_correction.py +tests/test_memoize.py tests/test_memoize.py +tests/test_transforms.py tests/test_transforms.py +tests/test_matrix_utils.py tests/test_matrix_utils.py +tests/fit_grains_check.py tests/fit_grains_check.py +tests/test_fit-grains.py tests/test_fit-grains.py +tests/find_orientations_testing.py tests/find_orientations_testing.py +tests/test_concurrent.py tests/test_concurrent.py +tests/test_find_orientations.py tests/test_find_orientations.py +tests/config/test_material.py tests/config/test_material.py +tests/config/test_instrument.py tests/config/test_instrument.py +tests/config/test_fit_grains.py 
tests/config/test_fit_grains.py +tests/config/common.py tests/config/common.py +tests/config/test_root.py tests/config/test_root.py +tests/config/__init__.py tests/config/__init__.py +tests/config/test_find_orientations.py tests/config/test_find_orientations.py +tests/config/test_image_series.py tests/config/test_image_series.py +tests/transforms/test_rotate_vecs_about_axis.py tests/transforms/test_rotate_vecs_about_axis.py +tests/transforms/test_quat_distance_from_file.py tests/transforms/test_quat_distance_from_file.py +tests/transforms/test_angles_to_dvec_from_file.py tests/transforms/test_angles_to_dvec_from_file.py +tests/transforms/test_xy_to_gvec_from_file.py tests/transforms/test_xy_to_gvec_from_file.py +tests/transforms/test_make_detector_rmat_from_file.py tests/transforms/test_make_detector_rmat_from_file.py +tests/transforms/common.py tests/transforms/common.py +tests/transforms/test_validate_angle_ranges_from_file.py tests/transforms/test_validate_angle_ranges_from_file.py +tests/transforms/test_gvec_to_xy.py tests/transforms/test_gvec_to_xy.py +tests/transforms/test_make_sample_rmat_from_file.py tests/transforms/test_make_sample_rmat_from_file.py +tests/transforms/test_angles_to_gvec_from_file.py tests/transforms/test_angles_to_gvec_from_file.py +tests/transforms/test_gvec_to_xy_from_file.py tests/transforms/test_gvec_to_xy_from_file.py +tests/transforms/test_make_rmat_of_expmap_from_file.py tests/transforms/test_make_rmat_of_expmap_from_file.py +tests/transforms/test_make_beam_rmat_from_file.py tests/transforms/test_make_beam_rmat_from_file.py +tests/transforms/test_unit_vector.py tests/transforms/test_unit_vector.py +tests/transforms/test_xy_to_gvec.py tests/transforms/test_xy_to_gvec.py +tests/unitcell/test_vec_math.py tests/unitcell/test_vec_math.py +tests/planedata/test_init.py tests/planedata/test_init.py +tests/planedata/test_exclusion.py tests/planedata/test_exclusion.py +tests/planedata/test_with_data.py tests/planedata/test_with_data.py +tests/planedata/test_misc.py tests/planedata/test_misc.py +tests/matrix_util/test_strain_stress_reps.py tests/matrix_util/test_strain_stress_reps.py +tests/matrix_util/test_norms.py tests/matrix_util/test_norms.py +tests/matrix_util/test_vector_and_matrix_math.py tests/matrix_util/test_vector_and_matrix_math.py +tests/imageseries/test_stats.py tests/imageseries/test_stats.py +tests/imageseries/common.py tests/imageseries/common.py +tests/imageseries/test_formats.py tests/imageseries/test_formats.py +tests/imageseries/__init__.py tests/imageseries/__init__.py +tests/imageseries/test_omega.py tests/imageseries/test_omega.py +tests/imageseries/test_process.py tests/imageseries/test_process.py +tests/imageseries/test_properties.py tests/imageseries/test_properties.py +tests/rotations/test_eulers.py tests/rotations/test_eulers.py +tests/rotations/test_quat_math.py tests/rotations/test_quat_math.py +.codecov.yml .codecov.yml +.gitattributes .gitattributes +.github/workflows/container_build.sh .github/workflows/container_build.sh +.github/workflows/package.yml .github/workflows/package.yml +.github/workflows/test.yml .github/workflows/test.yml +.gitignore .gitignore +.readthedocs.yml .readthedocs.yml +conda.recipe/bld.bat conda.recipe/bld.bat +conda.recipe/build.sh conda.recipe/build.sh +conda.recipe/conda_build_config.yaml conda.recipe/conda_build_config.yaml +conda.recipe/meta.yaml conda.recipe/meta.yaml +docs/Makefile docs/Makefile +docs/README.md docs/README.md +docs/requirements.txt docs/requirements.txt +docs/run_apidoc.sh 
docs/run_apidoc.sh +docs/run_sphinx.sh docs/run_sphinx.sh +docs/source/.gitignore docs/source/.gitignore +docs/source/dev/.gitignore docs/source/dev/.gitignore +docs/source/users/transforms.md docs/source/users/transforms.md +docs/source/_static/transforms.pdf docs/source/_static/transforms.pdf +environment.yml environment.yml +hexrd/convolution/src/convolve.c hexrd/core/convolution/src/convolve.c +hexrd/convolution/src/convolve.h hexrd/core/convolution/src/convolve.h +hexrd/copyright.py hexrd/copyright.py +hexrd/fitting/calibration/relative_constraints.py hexrd/core/fitting/calibration/relative_constraints.py +hexrd/resources/Anomalous.h5 hexrd/core/resources/Anomalous.h5 +hexrd/resources/BBXRD_IMAGE-PLATE-BACK_bnd.txt hexrd/core/resources/BBXRD_IMAGE-PLATE-BACK_bnd.txt +hexrd/resources/BBXRD_IMAGE-PLATE-BOTTOM_bnd.txt hexrd/core/resources/BBXRD_IMAGE-PLATE-BOTTOM_bnd.txt +hexrd/resources/BBXRD_IMAGE-PLATE-LEFT_bnd.txt hexrd/core/resources/BBXRD_IMAGE-PLATE-LEFT_bnd.txt +hexrd/resources/BBXRD_IMAGE-PLATE-RIGHT_bnd.txt hexrd/core/resources/BBXRD_IMAGE-PLATE-RIGHT_bnd.txt +hexrd/resources/BBXRD_IMAGE-PLATE-TOP_bnd.txt hexrd/core/resources/BBXRD_IMAGE-PLATE-TOP_bnd.txt +hexrd/resources/characteristic_xray_energies.h5 hexrd/core/resources/characteristic_xray_energies.h5 +hexrd/resources/detector_templates/dexela-2923-detector-subpanel.yml hexrd/core/resources/detector_templates/dexela-2923-detector-subpanel.yml +hexrd/resources/detector_templates/dexela-2923-detector.yml hexrd/core/resources/detector_templates/dexela-2923-detector.yml +hexrd/resources/detector_templates/GE-detector.yml hexrd/core/resources/detector_templates/GE-detector.yml +hexrd/resources/detector_templates/Hydra_Feb19.yml hexrd/core/resources/detector_templates/Hydra_Feb19.yml +hexrd/resources/detector_templates/Pilatus3X_2M-detector.yml hexrd/core/resources/detector_templates/Pilatus3X_2M-detector.yml +hexrd/resources/detector_templates/Pixirad2-detector.yml hexrd/core/resources/detector_templates/Pixirad2-detector.yml +hexrd/resources/detector_templates/Varex_4343CT-detector.yml hexrd/core/resources/detector_templates/Varex_4343CT-detector.yml +hexrd/resources/instrument_templates/dcs.yml hexrd/core/resources/instrument_templates/dcs.yml +hexrd/resources/instrument_templates/dual_dexelas.yml hexrd/core/resources/instrument_templates/dual_dexelas.yml +hexrd/resources/instrument_templates/rigaku.yml hexrd/core/resources/instrument_templates/rigaku.yml +hexrd/resources/instrument_templates/varex.yml hexrd/core/resources/instrument_templates/varex.yml +hexrd/resources/mu_en.h5 hexrd/core/resources/mu_en.h5 +hexrd/resources/pinhole_materials.h5 hexrd/core/resources/pinhole_materials.h5 +hexrd/resources/PXRDIP_IMAGE-PLATE-B_bnd.txt hexrd/core/resources/PXRDIP_IMAGE-PLATE-B_bnd.txt +hexrd/resources/PXRDIP_IMAGE-PLATE-D_bnd.txt hexrd/core/resources/PXRDIP_IMAGE-PLATE-D_bnd.txt +hexrd/resources/PXRDIP_IMAGE-PLATE-L_bnd.txt hexrd/core/resources/PXRDIP_IMAGE-PLATE-L_bnd.txt +hexrd/resources/PXRDIP_IMAGE-PLATE-R_bnd.txt hexrd/core/resources/PXRDIP_IMAGE-PLATE-R_bnd.txt +hexrd/resources/PXRDIP_IMAGE-PLATE-U_bnd.txt hexrd/core/resources/PXRDIP_IMAGE-PLATE-U_bnd.txt +hexrd/resources/pxrdip_reference_config.yml hexrd/core/resources/pxrdip_reference_config.yml +hexrd/resources/surface_harmonics.h5 hexrd/core/resources/surface_harmonics.h5 +hexrd/resources/tardis_2xrs_reference_config.yml hexrd/core/resources/tardis_2xrs_reference_config.yml +hexrd/resources/TARDIS_IMAGE-PLATE-2_bnd.txt hexrd/core/resources/TARDIS_IMAGE-PLATE-2_bnd.txt 
+hexrd/resources/TARDIS_IMAGE-PLATE-3_bnd.txt hexrd/core/resources/TARDIS_IMAGE-PLATE-3_bnd.txt +hexrd/resources/TARDIS_IMAGE-PLATE-3_bnd_cropped.txt hexrd/core/resources/TARDIS_IMAGE-PLATE-3_bnd_cropped.txt +hexrd/resources/TARDIS_IMAGE-PLATE-4_bnd.txt hexrd/core/resources/TARDIS_IMAGE-PLATE-4_bnd.txt +hexrd/resources/tardis_reference_config.yml hexrd/core/resources/tardis_reference_config.yml +hexrd/resources/window_materials.h5 hexrd/core/resources/window_materials.h5 +hexrd/transforms/cpp_sublibrary/Makefile hexrd/core/transforms/cpp_sublibrary/Makefile +hexrd/transforms/cpp_sublibrary/src/inverse_distortion.cpp hexrd/core/transforms/cpp_sublibrary/src/inverse_distortion.cpp +hexrd/transforms/debug_helpers.h hexrd/core/transforms/debug_helpers.h +hexrd/transforms/Makefile hexrd/core/transforms/Makefile +hexrd/transforms/new_capi/angles_to_dvec.c hexrd/core/transforms/new_capi/angles_to_dvec.c +hexrd/transforms/new_capi/angles_to_gvec.c hexrd/core/transforms/new_capi/angles_to_gvec.c +hexrd/transforms/new_capi/gvec_to_xy.c hexrd/core/transforms/new_capi/gvec_to_xy.c +hexrd/transforms/new_capi/make_beam_rmat.c hexrd/core/transforms/new_capi/make_beam_rmat.c +hexrd/transforms/new_capi/make_binary_rmat.c hexrd/core/transforms/new_capi/make_binary_rmat.c +hexrd/transforms/new_capi/make_detector_rmat.c hexrd/core/transforms/new_capi/make_detector_rmat.c +hexrd/transforms/new_capi/make_rmat_of_expmap.c hexrd/core/transforms/new_capi/make_rmat_of_expmap.c +hexrd/transforms/new_capi/make_sample_rmat.c hexrd/core/transforms/new_capi/make_sample_rmat.c +hexrd/transforms/new_capi/module.c hexrd/core/transforms/new_capi/module.c +hexrd/transforms/new_capi/ndargs_helper.c hexrd/core/transforms/new_capi/ndargs_helper.c +hexrd/transforms/new_capi/ndargs_helper.h hexrd/core/transforms/new_capi/ndargs_helper.h +hexrd/transforms/new_capi/new_func.c hexrd/core/transforms/new_capi/new_func.c +hexrd/transforms/new_capi/oscill_angles_of_HKLs.c hexrd/core/transforms/new_capi/oscill_angles_of_HKLs.c +hexrd/transforms/new_capi/quat_distance.c hexrd/core/transforms/new_capi/quat_distance.c +hexrd/transforms/new_capi/README.md hexrd/core/transforms/new_capi/README.md +hexrd/transforms/new_capi/rotate_vecs_about_axis.c hexrd/core/transforms/new_capi/rotate_vecs_about_axis.c +hexrd/transforms/new_capi/transforms_prototypes.h hexrd/core/transforms/new_capi/transforms_prototypes.h +hexrd/transforms/new_capi/transforms_types.h hexrd/core/transforms/new_capi/transforms_types.h +hexrd/transforms/new_capi/transforms_utils.h hexrd/core/transforms/new_capi/transforms_utils.h +hexrd/transforms/new_capi/unit_row_vector.c hexrd/core/transforms/new_capi/unit_row_vector.c +hexrd/transforms/new_capi/validate_angle_ranges.c hexrd/core/transforms/new_capi/validate_angle_ranges.c +hexrd/transforms/new_capi/xy_to_gvec.c hexrd/core/transforms/new_capi/xy_to_gvec.c +hexrd/transforms/stdbool.h hexrd/core/transforms/stdbool.h +hexrd/transforms/transforms_CAPI.c hexrd/core/transforms/transforms_CAPI.c +hexrd/transforms/transforms_CAPI.h hexrd/core/transforms/transforms_CAPI.h +hexrd/transforms/transforms_CFUNC.c hexrd/core/transforms/transforms_CFUNC.c +hexrd/transforms/transforms_CFUNC.h hexrd/core/transforms/transforms_CFUNC.h +hexrd/__init__.py hexrd/__init__.py +LICENSE LICENSE +NOTICE NOTICE +pyproject.toml pyproject.toml +README.md README.md +tests/calibration/test_2xrs_calibration.py tests/calibration/test_2xrs_calibration.py +tests/calibration/test_calibration.py tests/calibration/test_calibration.py 
+tests/calibration/test_relative_constraints.py tests/calibration/test_relative_constraints.py +tests/data/calibration_expected.npy tests/data/calibration_expected.npy +tests/data/gvec_to_xy.json tests/data/gvec_to_xy.json +tests/data/ideal_tardis_transmissions.npy tests/data/ideal_tardis_transmissions.npy +tests/data/inverse_distortion_in_out.json tests/data/inverse_distortion_in_out.json +tests/data/materials/Ag(TeMo)6.cif tests/data/materials/Ag(TeMo)6.cif +tests/data/materials/Al2SiO5.cif tests/data/materials/Al2SiO5.cif +tests/data/materials/AlCuO2.cif tests/data/materials/AlCuO2.cif +tests/data/materials/C.cif tests/data/materials/C.cif +tests/data/materials/Cs.cif tests/data/materials/Cs.cif +tests/data/materials/Mg.cif tests/data/materials/Mg.cif +tests/data/materials/Si.cif tests/data/materials/Si.cif +tests/data/materials/U.cif tests/data/materials/U.cif +tests/data/plane_data_test.npy tests/data/plane_data_test.npy +tests/data/testmat.h5 tests/data/testmat.h5 +tests/data/test_correct_angles_to_dvec.npy tests/data/test_correct_angles_to_dvec.npy +tests/data/test_correct_angles_to_gvec.npy tests/data/test_correct_angles_to_gvec.npy +tests/data/test_correct_gvec_to_xy.npy tests/data/test_correct_gvec_to_xy.npy +tests/data/test_correct_make_beam_rmat.npy tests/data/test_correct_make_beam_rmat.npy +tests/data/test_correct_make_detector_rmat.npy tests/data/test_correct_make_detector_rmat.npy +tests/data/test_correct_make_rmat_of_expmap.npy tests/data/test_correct_make_rmat_of_expmap.npy +tests/data/test_correct_make_sample_rmat.npy tests/data/test_correct_make_sample_rmat.npy +tests/data/test_correct_quat_distance.npy tests/data/test_correct_quat_distance.npy +tests/data/test_correct_validate_angle_ranges.npy tests/data/test_correct_validate_angle_ranges.npy +tests/data/test_correct_xy_to_gvec.npy tests/data/test_correct_xy_to_gvec.npy +tests/data/test_polar_view_expected.npy tests/data/test_polar_view_expected.npy +tests/imageseries/test_pickleable.py tests/imageseries/test_pickleable.py +tests/requirements-dev.txt tests/requirements-dev.txt +tests/rotations/test_utilities.py tests/rotations/test_utilities.py +tests/test_polar_view.py tests/test_polar_view.py +file_table.tsv file_table.tsv +hexrd\core\config\__init__.py hexrd\core\config\__init__.py +hexrd\core\config\beam.py hexrd\core\config\beam.py +hexrd\core\config\config.py hexrd\core\config\config.py +hexrd\core\config\dumper.py hexrd\core\config\dumper.py +hexrd\core\config\imageseries.py hexrd\core\config\imageseries.py +hexrd\core\config\instrument.py hexrd\core\config\instrument.py +hexrd\core\config\loader.py hexrd\core\config\loader.py +hexrd\core\config\material.py hexrd\core\config\material.py +hexrd\core\config\root.py hexrd\core\config\root.py +hexrd\core\config\utils.py hexrd\core\config\utils.py +hexrd\core\constants.py hexrd\core\constants.py +hexrd\core\convolution\__init__.py hexrd\core\convolution\__init__.py +hexrd\core\convolution\convolve.py hexrd\core\convolution\convolve.py +hexrd\core\convolution\src\convolve.c hexrd\core\convolution\src\convolve.c +hexrd\core\convolution\src\convolve.h hexrd\core\convolution\src\convolve.h +hexrd\core\convolution\utils.py hexrd\core\convolution\utils.py +hexrd\core\deprecation.py hexrd\core\deprecation.py +hexrd\core\distortion\__init__.py hexrd\core\distortion\__init__.py +hexrd\core\distortion\dexela_2923.py hexrd\core\distortion\dexela_2923.py +hexrd\core\distortion\distortionabc.py hexrd\core\distortion\distortionabc.py +hexrd\core\distortion\ge_41rt.py 
hexrd\core\distortion\ge_41rt.py +hexrd\core\distortion\identity.py hexrd\core\distortion\identity.py +hexrd\core\distortion\nyi.py hexrd\core\distortion\nyi.py +hexrd\core\distortion\registry.py hexrd\core\distortion\registry.py +hexrd\core\distortion\utils.py hexrd\core\distortion\utils.py +hexrd\core\extensions\__init__.py hexrd\core\extensions\__init__.py +hexrd\core\fitting\__init__.py hexrd\core\fitting\__init__.py +hexrd\core\fitting\calibration\__init__.py hexrd\core\fitting\calibration\__init__.py +hexrd\core\fitting\calibration\relative_constraints.py hexrd\core\fitting\calibration\relative_constraints.py +hexrd\core\fitting\fitpeak.py hexrd\core\fitting\fitpeak.py +hexrd\core\fitting\peakfunctions.py hexrd\core\fitting\peakfunctions.py +hexrd\core\fitting\spectrum.py hexrd\core\fitting\spectrum.py +hexrd\core\fitting\utils.py hexrd\core\fitting\utils.py +hexrd\core\gridutil.py hexrd\core\gridutil.py +hexrd\core\imageseries\__init__.py hexrd\core\imageseries\__init__.py +hexrd\core\imageseries\baseclass.py hexrd\core\imageseries\baseclass.py +hexrd\core\imageseries\imageseriesabc.py hexrd\core\imageseries\imageseriesabc.py +hexrd\core\imageseries\imageseriesiter.py hexrd\core\imageseries\imageseriesiter.py +hexrd\core\imageseries\load\__init__.py hexrd\core\imageseries\load\__init__.py +hexrd\core\imageseries\load\array.py hexrd\core\imageseries\load\array.py +hexrd\core\imageseries\load\eiger_stream_v1.py hexrd\core\imageseries\load\eiger_stream_v1.py +hexrd\core\imageseries\load\framecache.py hexrd\core\imageseries\load\framecache.py +hexrd\core\imageseries\load\function.py hexrd\core\imageseries\load\function.py +hexrd\core\imageseries\load\hdf5.py hexrd\core\imageseries\load\hdf5.py +hexrd\core\imageseries\load\imagefiles.py hexrd\core\imageseries\load\imagefiles.py +hexrd\core\imageseries\load\metadata.py hexrd\core\imageseries\load\metadata.py +hexrd\core\imageseries\load\rawimage.py hexrd\core\imageseries\load\rawimage.py +hexrd\core\imageseries\load\registry.py hexrd\core\imageseries\load\registry.py +hexrd\core\imageseries\load\trivial.py hexrd\core\imageseries\load\trivial.py +hexrd\core\imageseries\omega.py hexrd\core\imageseries\omega.py +hexrd\core\imageseries\process.py hexrd\core\imageseries\process.py +hexrd\core\imageseries\save.py hexrd\core\imageseries\save.py +hexrd\core\imageseries\stats.py hexrd\core\imageseries\stats.py +hexrd\core\imageutil.py hexrd\core\imageutil.py +hexrd\core\instrument\__init__.py hexrd\core\instrument\__init__.py +hexrd\core\instrument\constants.py hexrd\core\instrument\constants.py +hexrd\core\instrument\cylindrical_detector.py hexrd\core\instrument\cylindrical_detector.py +hexrd\core\instrument\detector.py hexrd\core\instrument\detector.py +hexrd\core\instrument\detector_coatings.py hexrd\core\instrument\detector_coatings.py +hexrd\core\instrument\hedm_instrument.py hexrd\core\instrument\hedm_instrument.py +hexrd\core\instrument\physics_package.py hexrd\core\instrument\physics_package.py +hexrd\core\instrument\planar_detector.py hexrd\core\instrument\planar_detector.py +hexrd\core\material\__init__.py hexrd\core\material\__init__.py +hexrd\core\material\crystallography.py hexrd\core\material\crystallography.py +hexrd\core\material\jcpds.py hexrd\core\material\jcpds.py +hexrd\core\material\material.py hexrd\core\material\material.py +hexrd\core\material\mksupport.py hexrd\core\material\mksupport.py +hexrd\core\material\spacegroup.py hexrd\core\material\spacegroup.py +hexrd\core\material\symbols.py hexrd\core\material\symbols.py 
+hexrd\core\material\symmetry.py hexrd\core\material\symmetry.py +hexrd\core\material\unitcell.py hexrd\core\material\unitcell.py +hexrd\core\material\utils.py hexrd\core\material\utils.py +hexrd\core\matrixutil.py hexrd\core\matrixutil.py +hexrd\core\projections\__init__.py hexrd\core\projections\__init__.py +hexrd\core\projections\polar.py hexrd\core\projections\polar.py +hexrd\core\projections\spherical.py hexrd\core\projections\spherical.py +hexrd\core\resources\__init__.py hexrd\core\resources\__init__.py +hexrd\core\resources\Anomalous.h5 hexrd\core\resources\Anomalous.h5 +hexrd\core\resources\BBXRD_IMAGE-PLATE-BACK_bnd.txt hexrd\core\resources\BBXRD_IMAGE-PLATE-BACK_bnd.txt +hexrd\core\resources\BBXRD_IMAGE-PLATE-BOTTOM_bnd.txt hexrd\core\resources\BBXRD_IMAGE-PLATE-BOTTOM_bnd.txt +hexrd\core\resources\BBXRD_IMAGE-PLATE-LEFT_bnd.txt hexrd\core\resources\BBXRD_IMAGE-PLATE-LEFT_bnd.txt +hexrd\core\resources\BBXRD_IMAGE-PLATE-RIGHT_bnd.txt hexrd\core\resources\BBXRD_IMAGE-PLATE-RIGHT_bnd.txt +hexrd\core\resources\BBXRD_IMAGE-PLATE-TOP_bnd.txt hexrd\core\resources\BBXRD_IMAGE-PLATE-TOP_bnd.txt +hexrd\core\resources\characteristic_xray_energies.h5 hexrd\core\resources\characteristic_xray_energies.h5 +hexrd\core\resources\detector_templates\__init__.py hexrd\core\resources\detector_templates\__init__.py +hexrd\core\resources\detector_templates\dexela-2923-detector-subpanel.yml hexrd\core\resources\detector_templates\dexela-2923-detector-subpanel.yml +hexrd\core\resources\detector_templates\dexela-2923-detector.yml hexrd\core\resources\detector_templates\dexela-2923-detector.yml +hexrd\core\resources\detector_templates\GE-detector.yml hexrd\core\resources\detector_templates\GE-detector.yml +hexrd\core\resources\detector_templates\Hydra_Feb19.yml hexrd\core\resources\detector_templates\Hydra_Feb19.yml +hexrd\core\resources\detector_templates\Pilatus3X_2M-detector.yml hexrd\core\resources\detector_templates\Pilatus3X_2M-detector.yml +hexrd\core\resources\detector_templates\Pixirad2-detector.yml hexrd\core\resources\detector_templates\Pixirad2-detector.yml +hexrd\core\resources\detector_templates\Varex_4343CT-detector.yml hexrd\core\resources\detector_templates\Varex_4343CT-detector.yml +hexrd\core\resources\instrument_templates\__init__.py hexrd\core\resources\instrument_templates\__init__.py +hexrd\core\resources\instrument_templates\dcs.yml hexrd\core\resources\instrument_templates\dcs.yml +hexrd\core\resources\instrument_templates\dual_dexelas.yml hexrd\core\resources\instrument_templates\dual_dexelas.yml +hexrd\core\resources\instrument_templates\rigaku.hexrd hexrd\core\resources\instrument_templates\rigaku.hexrd +hexrd\core\resources\instrument_templates\rigaku.yml hexrd\core\resources\instrument_templates\rigaku.yml +hexrd\core\resources\instrument_templates\varex.yml hexrd\core\resources\instrument_templates\varex.yml +hexrd\core\resources\mu_en.h5 hexrd\core\resources\mu_en.h5 +hexrd\core\resources\pinhole_materials.h5 hexrd\core\resources\pinhole_materials.h5 +hexrd\core\resources\PXRDIP_IMAGE-PLATE-B_bnd.txt hexrd\core\resources\PXRDIP_IMAGE-PLATE-B_bnd.txt +hexrd\core\resources\PXRDIP_IMAGE-PLATE-D_bnd.txt hexrd\core\resources\PXRDIP_IMAGE-PLATE-D_bnd.txt +hexrd\core\resources\PXRDIP_IMAGE-PLATE-L_bnd.txt hexrd\core\resources\PXRDIP_IMAGE-PLATE-L_bnd.txt +hexrd\core\resources\PXRDIP_IMAGE-PLATE-R_bnd.txt hexrd\core\resources\PXRDIP_IMAGE-PLATE-R_bnd.txt +hexrd\core\resources\PXRDIP_IMAGE-PLATE-U_bnd.txt hexrd\core\resources\PXRDIP_IMAGE-PLATE-U_bnd.txt 
+hexrd\core\resources\pxrdip_reference_config.yml hexrd\core\resources\pxrdip_reference_config.yml +hexrd\core\resources\surface_harmonics.h5 hexrd\core\resources\surface_harmonics.h5 +hexrd\core\resources\tardis_2xrs_reference_config.yml hexrd\core\resources\tardis_2xrs_reference_config.yml +hexrd\core\resources\TARDIS_IMAGE-PLATE-2_bnd.txt hexrd\core\resources\TARDIS_IMAGE-PLATE-2_bnd.txt +hexrd\core\resources\TARDIS_IMAGE-PLATE-3_bnd.txt hexrd\core\resources\TARDIS_IMAGE-PLATE-3_bnd.txt +hexrd\core\resources\TARDIS_IMAGE-PLATE-3_bnd_cropped.txt hexrd\core\resources\TARDIS_IMAGE-PLATE-3_bnd_cropped.txt +hexrd\core\resources\TARDIS_IMAGE-PLATE-4_bnd.txt hexrd\core\resources\TARDIS_IMAGE-PLATE-4_bnd.txt +hexrd\core\resources\tardis_reference_config.yml hexrd\core\resources\tardis_reference_config.yml +hexrd\core\resources\window_materials.h5 hexrd\core\resources\window_materials.h5 +hexrd\core\rotations.py hexrd\core\rotations.py +hexrd\core\transforms\__init__.py hexrd\core\transforms\__init__.py +hexrd\core\transforms\cpp_sublibrary\Makefile hexrd\core\transforms\cpp_sublibrary\Makefile +hexrd\core\transforms\cpp_sublibrary\src\inverse_distortion.cpp hexrd\core\transforms\cpp_sublibrary\src\inverse_distortion.cpp +hexrd\core\transforms\debug_helpers.h hexrd\core\transforms\debug_helpers.h +hexrd\core\transforms\Makefile hexrd\core\transforms\Makefile +hexrd\core\transforms\new_capi\angles_to_dvec.c hexrd\core\transforms\new_capi\angles_to_dvec.c +hexrd\core\transforms\new_capi\angles_to_gvec.c hexrd\core\transforms\new_capi\angles_to_gvec.c +hexrd\core\transforms\new_capi\gvec_to_xy.c hexrd\core\transforms\new_capi\gvec_to_xy.c +hexrd\core\transforms\new_capi\make_beam_rmat.c hexrd\core\transforms\new_capi\make_beam_rmat.c +hexrd\core\transforms\new_capi\make_binary_rmat.c hexrd\core\transforms\new_capi\make_binary_rmat.c +hexrd\core\transforms\new_capi\make_detector_rmat.c hexrd\core\transforms\new_capi\make_detector_rmat.c +hexrd\core\transforms\new_capi\make_rmat_of_expmap.c hexrd\core\transforms\new_capi\make_rmat_of_expmap.c +hexrd\core\transforms\new_capi\make_sample_rmat.c hexrd\core\transforms\new_capi\make_sample_rmat.c +hexrd\core\transforms\new_capi\module.c hexrd\core\transforms\new_capi\module.c +hexrd\core\transforms\new_capi\ndargs_helper.c hexrd\core\transforms\new_capi\ndargs_helper.c +hexrd\core\transforms\new_capi\ndargs_helper.h hexrd\core\transforms\new_capi\ndargs_helper.h +hexrd\core\transforms\new_capi\new_func.c hexrd\core\transforms\new_capi\new_func.c +hexrd\core\transforms\new_capi\oscill_angles_of_HKLs.c hexrd\core\transforms\new_capi\oscill_angles_of_HKLs.c +hexrd\core\transforms\new_capi\quat_distance.c hexrd\core\transforms\new_capi\quat_distance.c +hexrd\core\transforms\new_capi\README.md hexrd\core\transforms\new_capi\README.md +hexrd\core\transforms\new_capi\reference.py hexrd\core\transforms\new_capi\reference.py +hexrd\core\transforms\new_capi\rotate_vecs_about_axis.c hexrd\core\transforms\new_capi\rotate_vecs_about_axis.c +hexrd\core\transforms\new_capi\transforms_prototypes.h hexrd\core\transforms\new_capi\transforms_prototypes.h +hexrd\core\transforms\new_capi\transforms_types.h hexrd\core\transforms\new_capi\transforms_types.h +hexrd\core\transforms\new_capi\transforms_utils.h hexrd\core\transforms\new_capi\transforms_utils.h +hexrd\core\transforms\new_capi\unit_row_vector.c hexrd\core\transforms\new_capi\unit_row_vector.c +hexrd\core\transforms\new_capi\validate_angle_ranges.c hexrd\core\transforms\new_capi\validate_angle_ranges.c 
+hexrd\core\transforms\new_capi\xf_new_capi.py	hexrd\core\transforms\new_capi\xf_new_capi.py
+hexrd\core\transforms\new_capi\xy_to_gvec.c	hexrd\core\transforms\new_capi\xy_to_gvec.c
+hexrd\core\transforms\old_xfcapi.py	hexrd\core\transforms\old_xfcapi.py
+hexrd\core\transforms\stdbool.h	hexrd\core\transforms\stdbool.h
+hexrd\core\transforms\transforms_CAPI.c	hexrd\core\transforms\transforms_CAPI.c
+hexrd\core\transforms\transforms_CAPI.h	hexrd\core\transforms\transforms_CAPI.h
+hexrd\core\transforms\transforms_CFUNC.c	hexrd\core\transforms\transforms_CFUNC.c
+hexrd\core\transforms\transforms_CFUNC.h	hexrd\core\transforms\transforms_CFUNC.h
+hexrd\core\transforms\xf.py	hexrd\core\transforms\xf.py
+hexrd\core\transforms\xfcapi.py	hexrd\core\transforms\xfcapi.py
+hexrd\core\utils\__init__.py	hexrd\core\utils\__init__.py
+hexrd\core\utils\compatibility.py	hexrd\core\utils\compatibility.py
+hexrd\core\utils\concurrent.py	hexrd\core\utils\concurrent.py
+hexrd\core\utils\decorators.py	hexrd\core\utils\decorators.py
+hexrd\core\utils\hdf5.py	hexrd\core\utils\hdf5.py
+hexrd\core\utils\hkl.py	hexrd\core\utils\hkl.py
+hexrd\core\utils\json.py	hexrd\core\utils\json.py
+hexrd\core\utils\multiprocess_generic.py	hexrd\core\utils\multiprocess_generic.py
+hexrd\core\utils\profiler.py	hexrd\core\utils\profiler.py
+hexrd\core\utils\progressbar.py	hexrd\core\utils\progressbar.py
+hexrd\core\utils\warnings.py	hexrd\core\utils\warnings.py
+hexrd\core\utils\yaml.py	hexrd\core\utils\yaml.py
+hexrd\core\valunits.py	hexrd\core\valunits.py
+hexrd\hed\instrument\hedm_instrument.py	hexrd\hed\instrument\hedm_instrument.py
+hexrd\hed\xrdutil\phutil.py	hexrd\hed\xrdutil\phutil.py
+hexrd\hed\xrdutil\utils.py	hexrd\hed\xrdutil\utils.py
+hexrd\cli\preprocess.py	hexrd\hedm\cli\preprocess.py
+hexrd\module_map.py	hexrd\module_map.py
+hexrd\preprocess\argument_classes_factory.py	hexrd\hedm\preprocess\argument_classes_factory.py
+hexrd\preprocess\preprocessors.py	hexrd\hedm\preprocess\preprocessors.py
+hexrd\preprocess\profiles.py	hexrd\hedm\preprocess\profiles.py
+hexrd\preprocess\yaml_internals.py	hexrd\hedm\preprocess\yaml_internals.py
+tests\test_preprocess.py	tests\test_preprocess.py
+tests\test_polar_view.py	tests\test_polar_view.py
+hexrd\transforms\new_capi	hexrd\core\transforms\new_capi
+hexrd\transforms\cpp_sublibrary	hexrd\core\transforms\cpp_sublibrary
+hexrd\transforms\cpp_sublibrary\src	hexrd\core\transforms\cpp_sublibrary\src
+hexrd\convolution\src	hexrd\core\convolution\src
diff --git a/fix-file-map.py b/fix-file-map.py
new file mode 100644
index 000000000..919457e80
--- /dev/null
+++ b/fix-file-map.py
@@ -0,0 +1,23 @@
+# %%
+import pickle
+
+fm = pickle.load(open('file_map.pkl', 'rb'))
+# %%
+print(fm)
+# %%
+from pathlib import Path
+
+file_table = []
+k: Path
+v: Path
+for k, vs in fm.items():
+    for v in vs:
+        file_table.append([k.as_posix(), v.as_posix()])
+
+print(file_table)
+# %%
+
+with open('file_table.tsv', 'w') as f:
+    for row in file_table:
+        f.write('\t'.join(row) + '\n')
+# %%
diff --git a/hexrd/__init__.py b/hexrd/__init__.py
index a301cb321..2658d6793 100644
--- a/hexrd/__init__.py
+++ b/hexrd/__init__.py
@@ -1,4 +1,6 @@
 import importlib
+import importlib.abc
+import importlib.machinery
 import sys
 
 from .core.material import crystallography
@@ -30,3 +32,17 @@
         raise Exception(f'"{alias}" is an alias path and should not exist')
 
     sys.modules[alias] = module
+
+
+from . import module_map
+
+
+def __getattr__(name):
+    # __getattr__ is only called if the attribute doesn't exist
+    module = module_map.get("hexrd." + name)
+    if module is not None:
+        if isinstance(module, str):
+            return importlib.import_module(module)
+        return module
+    raise AttributeError(f"Module `hexrd` has no attribute {name}")
+
\ No newline at end of file
diff --git a/hexrd/core/config/root.py b/hexrd/core/config/root.py
index a2fed0f6b..f76c9c585 100644
--- a/hexrd/core/config/root.py
+++ b/hexrd/core/config/root.py
@@ -9,8 +9,8 @@
 from .config import Config
 from .instrument import Instrument
 # TODO: Resolve extra-core-dependency
-from ...hedm.config.findorientations import FindOrientationsConfig
-from ...hedm.config.fitgrains import FitGrainsConfig
+from hexrd.hedm.config.findorientations import FindOrientationsConfig
+from hexrd.hedm.config.fitgrains import FitGrainsConfig
 from .material import MaterialConfig
 
 logger = logging.getLogger('hexrd.config')
diff --git a/hexrd/core/extensions/__init__.py b/hexrd/core/extensions/__init__.py
index e69de29bb..9f9cfc8d9 100644
--- a/hexrd/core/extensions/__init__.py
+++ b/hexrd/core/extensions/__init__.py
@@ -0,0 +1,3 @@
+from . import _new_transforms_capi
+from . import _transforms_CAPI
+from . import inverse_distortion
\ No newline at end of file
diff --git a/hexrd/core/fitting/__init__.py b/hexrd/core/fitting/__init__.py
index 56e658c12..cb35de12f 100644
--- a/hexrd/core/fitting/__init__.py
+++ b/hexrd/core/fitting/__init__.py
@@ -28,7 +28,7 @@
 Functions for peak fitting
 """
 # TODO: Resolve extra-workflow dependency
-from ...hedm.fitting import grains
+from hexrd.hedm.fitting import grains
 
 fitGrain = grains.fitGrain
 objFuncFitGrain = grains.objFuncFitGrain
diff --git a/hexrd/core/fitting/calibration/__init__.py b/hexrd/core/fitting/calibration/__init__.py
index 47f419cc3..74c112ee5 100644
--- a/hexrd/core/fitting/calibration/__init__.py
+++ b/hexrd/core/fitting/calibration/__init__.py
@@ -1,9 +1,17 @@
 # TODO: Resolve extra-core dependencies
-from ....powder.fitting.calibration.instrument import InstrumentCalibrator
-from ....laue.fitting.calibration.laue import LaueCalibrator
-from ....hedm.fitting.calibration.multigrain import calibrate_instrument_from_sx, generate_parameter_names
-from ....powder.fitting.calibration.powder import PowderCalibrator
-from ....powder.fitting.calibration.structureless import StructurelessCalibrator
+# from ....powder.fitting.calibration.instrument import InstrumentCalibrator
+# from ....laue.fitting.calibration.laue import LaueCalibrator
+# from ....hedm.fitting.calibration.multigrain import calibrate_instrument_from_sx, generate_parameter_names
+# from ....powder.fitting.calibration.powder import PowderCalibrator
+# from ....powder.fitting.calibration.structureless import StructurelessCalibrator
+
+# These were temporarily copied over from the above imports
+from .instrument import InstrumentCalibrator
+from .powder import PowderCalibrator
+from .structureless import StructurelessCalibrator
+from .multigrain import calibrate_instrument_from_sx, generate_parameter_names
+from .laue import LaueCalibrator
+
 # For backward-compatibility, since it used to be named this:
 StructureLessCalibrator = StructurelessCalibrator
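The hexrd/__init__.py hunk above leans on PEP 562: a package may define a module-level __getattr__ that runs only when normal attribute lookup fails, which makes it a natural hook for lazy aliases. A minimal, self-contained sketch of that pattern for a package __init__.py follows; the package name mypkg and the _aliases table are hypothetical stand-ins, not hexrd's real module_map:

    import importlib

    # Hypothetical alias table: old attribute name -> real import path.
    _aliases = {
        "legacy_utils": "mypkg.new.utils",
    }


    def __getattr__(name):
        # PEP 562: only called when normal module attribute lookup fails.
        target = _aliases.get(name)
        if target is not None:
            return importlib.import_module(target)
        raise AttributeError(f"module {__name__!r} has no attribute {name!r}")

After `import mypkg`, touching `mypkg.legacy_utils` would import and return mypkg.new.utils on first access, which is essentially what the hunk above does via module_map.get().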
diff --git a/hexrd/core/fitting/calibration/instrument.py b/hexrd/core/fitting/calibration/instrument.py
index 6d4b5f111..a413538d6 100644
--- a/hexrd/core/fitting/calibration/instrument.py
+++ b/hexrd/core/fitting/calibration/instrument.py
@@ -5,7 +5,7 @@
 import numpy as np
 
 from .lmfit_param_handling import add_engineering_constraints, create_instr_params, DEFAULT_EULER_CONVENTION, update_instrument_from_params, validate_params_list
-from ....core.fitting.calibration.relative_constraints import create_relative_constraints, RelativeConstraints, RelativeConstraintsType
+from hexrd.core.fitting.calibration.relative_constraints import create_relative_constraints, RelativeConstraints, RelativeConstraintsType
 
 logger = logging.getLogger()
 logger.setLevel('INFO')
diff --git a/hexrd/core/fitting/calibration/laue.py b/hexrd/core/fitting/calibration/laue.py
index d775119ba..aa5878298 100644
--- a/hexrd/core/fitting/calibration/laue.py
+++ b/hexrd/core/fitting/calibration/laue.py
@@ -17,8 +17,8 @@
 from hexrd.core.utils.hkl import hkl_to_str, str_to_hkl
 
 # TODO: Resolve extra-workflow-dependency
-from ....powder.fitting.calibration.calibrator import Calibrator
-from ....powder.fitting.calibration.lmfit_param_handling import create_grain_params, DEFAULT_EULER_CONVENTION, rename_to_avoid_collision
+from hexrd.powder.fitting.calibration.calibrator import Calibrator
+from hexrd.powder.fitting.calibration.lmfit_param_handling import create_grain_params, DEFAULT_EULER_CONVENTION, rename_to_avoid_collision
 
 
 class LaueCalibrator(Calibrator):
diff --git a/hexrd/core/fitting/calibration/lmfit_param_handling.py b/hexrd/core/fitting/calibration/lmfit_param_handling.py
index 98eeb71fc..9a3f95964 100644
--- a/hexrd/core/fitting/calibration/lmfit_param_handling.py
+++ b/hexrd/core/fitting/calibration/lmfit_param_handling.py
@@ -7,6 +7,7 @@
 from hexrd.core.rotations import angleAxisOfRotMat, expMapOfQuat, make_rmat_euler, quatOfRotMat, RotMatEuler, rotMatOfExpMap
 from hexrd.core.material.unitcell import _lpname
 from .relative_constraints import RelativeConstraints, RelativeConstraintsType
+from hexrd.core.fitting.calibration.relative_constraints import RelativeConstraints, RelativeConstraintsType
 
 
 # First is the axes_order, second is extrinsic
diff --git a/hexrd/core/fitting/calibration/structureless.py b/hexrd/core/fitting/calibration/structureless.py
index 4771f976a..2ceafeaa0 100644
--- a/hexrd/core/fitting/calibration/structureless.py
+++ b/hexrd/core/fitting/calibration/structureless.py
@@ -7,7 +7,7 @@
 from hexrd.core.instrument import switch_xray_source
 
 from .lmfit_param_handling import add_engineering_constraints, create_instr_params, create_tth_parameters, DEFAULT_EULER_CONVENTION, tth_parameter_prefixes, update_instrument_from_params
-from ....core.fitting.calibration.relative_constraints import create_relative_constraints, RelativeConstraints, RelativeConstraintsType
+from hexrd.core.fitting.calibration.relative_constraints import create_relative_constraints, RelativeConstraints, RelativeConstraintsType
 
 
 class StructurelessCalibrator:
diff --git a/hexrd/hed/instrument/hedm_instrument.py b/hexrd/hed/instrument/hedm_instrument.py
index 1c85154d1..6671d2755 100644
--- a/hexrd/hed/instrument/hedm_instrument.py
+++ b/hexrd/hed/instrument/hedm_instrument.py
@@ -73,9 +73,9 @@
 # TODO: Resolve extra-workflow-dependency
 from hexrd.powder.wppf import LeBail
 
-from ...core.instrument.cylindrical_detector import CylindricalDetector
-from ...core.instrument.detector import beam_energy_DFLT, max_workers_DFLT
-from ...core.instrument.planar_detector import PlanarDetector
+from hexrd.core.instrument.cylindrical_detector import CylindricalDetector
+from hexrd.core.instrument.detector import beam_energy_DFLT, max_workers_DFLT
+from hexrd.core.instrument.planar_detector import PlanarDetector
 
 from skimage.draw import polygon
 from skimage.util import random_noise
diff --git a/hexrd/hedm/config/__init__.py b/hexrd/hedm/config/__init__.py
index 075f33989..c46374847 100644
--- a/hexrd/hedm/config/__init__.py
+++ b/hexrd/hedm/config/__init__.py
@@ -5,6 +5,11 @@
 from . import root
 from . import utils
 
+
+# The following were moved to core
+from hexrd.core.config import config
+from hexrd.core.config import material
+
 """
 Note that we need to use the open() builtin in what was formerly the "open()"
 function. So we define the _open(), and then redefine open() to the new
diff --git a/hexrd/hedm/config/findorientations.py b/hexrd/hedm/config/findorientations.py
index 212e4d6d6..09a78a315 100644
--- a/hexrd/hedm/config/findorientations.py
+++ b/hexrd/hedm/config/findorientations.py
@@ -4,7 +4,7 @@
 
 import numpy as np
 
-from ...core.config.config import Config
+from hexrd.core.config.config import Config
 
 
 logger = logging.getLogger('hexrd.config')
diff --git a/hexrd/hedm/config/fitgrains.py b/hexrd/hedm/config/fitgrains.py
index 22b04a067..7755ec0ca 100644
--- a/hexrd/hedm/config/fitgrains.py
+++ b/hexrd/hedm/config/fitgrains.py
@@ -1,7 +1,7 @@
 import logging
 import os
 
-from ...core.config.config import Config
+from hexrd.core.config.config import Config
 from .utils import get_exclusion_parameters
 
 
diff --git a/hexrd/hedm/config/instrument.py b/hexrd/hedm/config/instrument.py
index f284fb044..651e66341 100644
--- a/hexrd/hedm/config/instrument.py
+++ b/hexrd/hedm/config/instrument.py
@@ -1,7 +1,7 @@
 import h5py
 import yaml
 
-from ...core.config.config import Config
+from hexrd.core.config.config import Config
 from .loader import NumPyIncludeLoader
 
 from hexrd.core import instrument
diff --git a/hexrd/hedm/config/root.py b/hexrd/hedm/config/root.py
index cfac10e7c..3f8c17d50 100644
--- a/hexrd/hedm/config/root.py
+++ b/hexrd/hedm/config/root.py
@@ -6,11 +6,11 @@
 from hexrd.core.constants import shared_ims_key
 from hexrd.core import imageseries
 
-from ...core.config.config import Config
+from hexrd.core.config.config import Config
 from .instrument import Instrument
 from .findorientations import FindOrientationsConfig
 from .fitgrains import FitGrainsConfig
-from ...core.config.material import MaterialConfig
+from hexrd.core.config.material import MaterialConfig
 
 logger = logging.getLogger('hexrd.config')
diff --git a/hexrd/hedm/instrument/hedm_instrument.py b/hexrd/hedm/instrument/hedm_instrument.py
index 48237211b..d2e95f7f9 100644
--- a/hexrd/hedm/instrument/hedm_instrument.py
+++ b/hexrd/hedm/instrument/hedm_instrument.py
@@ -72,9 +72,9 @@
 # TODO: Resolve extra-workflow-dependency
 from hexrd.powder.wppf import LeBail
 
-from ...core.instrument.cylindrical_detector import CylindricalDetector
-from ...core.instrument.detector import beam_energy_DFLT, max_workers_DFLT
-from ...core.instrument.planar_detector import PlanarDetector
+from hexrd.core.instrument.cylindrical_detector import CylindricalDetector
+from hexrd.core.instrument.detector import beam_energy_DFLT, max_workers_DFLT
+from hexrd.core.instrument.planar_detector import PlanarDetector
 
 from skimage.draw import polygon
 from skimage.util import random_noise
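The next hunk (hexrd/hedm/preprocess/argument_classes_factory.py) replaces a runtime import with one guarded by typing.TYPE_CHECKING, the usual way to keep annotations while avoiding the import (and any import cycle it would create) at runtime. A minimal sketch of the pattern, using hypothetical names (mypkg.profiles and Profile are stand-ins, not hexrd's API):

    # Sketch only: `mypkg.profiles` / `Profile` are hypothetical stand-ins.
    from __future__ import annotations

    from typing import TYPE_CHECKING

    if TYPE_CHECKING:
        # Seen by type checkers only; never imported at runtime,
        # so no import cycle can form through this edge.
        from mypkg.profiles import Profile


    def describe(profile: Profile) -> str:
        # With `from __future__ import annotations`, annotations are not
        # evaluated at runtime, so the guarded import is sufficient.
        return f"profile: {profile!r}"

With the guard in place, the profiles module can in turn import this one without the two files pulling each other in at load time.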
diff --git a/hexrd/hedm/preprocess/argument_classes_factory.py b/hexrd/hedm/preprocess/argument_classes_factory.py
index b188bd9f5..727d29ae8 100644
--- a/hexrd/hedm/preprocess/argument_classes_factory.py
+++ b/hexrd/hedm/preprocess/argument_classes_factory.py
@@ -1,5 +1,7 @@
-import hexrd.hedm.preprocess.profiles as profiles
-from typing import Type
+from typing import Type, TYPE_CHECKING
+
+if TYPE_CHECKING:
+    import hexrd.hedm.preprocess.profiles as profiles
 
 
 class ArgumentClassesFactory:
diff --git a/hexrd/laue/instrument/hedm_instrument.py b/hexrd/laue/instrument/hedm_instrument.py
index 5eb4dda47..cefdd4ae3 100644
--- a/hexrd/laue/instrument/hedm_instrument.py
+++ b/hexrd/laue/instrument/hedm_instrument.py
@@ -73,9 +73,9 @@
 # TODO: Resolve extra-workflow-dependency
 from hexrd.powder.wppf import LeBail
 
-from ...core.instrument.cylindrical_detector import CylindricalDetector
-from ...core.instrument.detector import beam_energy_DFLT, max_workers_DFLT
-from ...core.instrument.planar_detector import PlanarDetector
+from hexrd.core.instrument.cylindrical_detector import CylindricalDetector
+from hexrd.core.instrument.detector import beam_energy_DFLT, max_workers_DFLT
+from hexrd.core.instrument.planar_detector import PlanarDetector
 
 from skimage.draw import polygon
 from skimage.util import random_noise
diff --git a/hexrd/module_map.py b/hexrd/module_map.py
new file mode 100644
index 000000000..4f1e92b01
--- /dev/null
+++ b/hexrd/module_map.py
@@ -0,0 +1,152 @@
+# The following dynamically generates aliases for the remapped modules based
+# on the file_map
+import pickle
+import importlib
+import importlib.util
+import importlib.abc
+import importlib.machinery
+import sys
+from pathlib import Path
+from collections import defaultdict
+
+
+def path_to_module(path: Path) -> str:
+    """
+    Convert a path to a module name.
+
+    e.g.
+    * "package_remapper/remapper.py" -> "package_remapper.remapper"
+    * "package_remapper/__init__.py" -> "package_remapper"
+    """
+    if path.suffix not in (".py", ""):
+        raise ValueError(f"Expected a .py file, got {path}")
+
+    path = path.with_suffix("")
+    if path.parts[-1] == "__init__":
+        path = path.parent
+    return path.as_posix().replace("/", ".")
+
+
+HEXRD_PACKAGE_PATH = Path(__file__).parent.parent
+file_map: dict[Path, list[Path]] = defaultdict(list)
+with open(HEXRD_PACKAGE_PATH / "file_table.tsv", "r") as f:
+    for line in f:
+        if not line.strip():
+            continue
+        kv = line.strip().split()
+        if len(kv) != 2:
+            continue
+        k, v = line.strip().split()
+        file_map[Path(k)].append(Path(v))
+
+module_map: dict[str, tuple[str, Path]] = {}
+
+for old_path, new_paths in file_map.items():
+    if old_path.suffix not in ("", ".py") or "hexrd" not in old_path.parts:
+        continue
+    old_module_path = path_to_module(old_path)
+    # TODO: This just picks one. We should probably pick the right one?
+    # We should know the right one after we finish the refactor.
+    module_map[old_module_path] = (
+        path_to_module(new_paths[0]),
+        HEXRD_PACKAGE_PATH / new_paths[0],
+    )
+
+
+class ModuleAlias:
+    def __init__(self, current_path: list[str]):
+        self.current_path = current_path
+
+    def __getattr__(self, name):
+        full_path = self.current_path + [name]
+        full_name = ".".join(full_path)
+        if full_name in module_map:
+            module, _fp = module_map[full_name]
+            if isinstance(module, ModuleAlias):
+                return module
+            else:
+                return importlib.import_module(module)
+        current_module = ".".join(self.current_path)
+        raise AttributeError(
+            f"Module `{current_module}` has no attribute {name}"
+        )
+
+
+flattened_module_map: dict[str, ModuleAlias | str] = {}
+
+for key, (mapped_module, _mapped_fp) in module_map.items():
+    parts = mapped_module.split(".")
+    for i in range(len(parts) - 1):
+        module = ".".join(parts[: i + 1])
+        if module not in flattened_module_map:
+            flattened_module_map[module] = ModuleAlias(parts[:i])
+    flattened_module_map[key] = mapped_module
+
+
+def get(alias: str) -> ModuleAlias | str | None:
+    """
+    Returns the module or an alias to it if it exists.
+    """
+    if alias in flattened_module_map:
+        return flattened_module_map[alias]
+    return None
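module_map.py finishes below with ModuleAliasFinder, a sys.meta_path hook that serves old module names from their new locations. For orientation, here is a minimal, self-contained sketch of the same redirect idea using a plain alias loader instead of the custom ModuleSpecWithParent machinery; OLD_TO_NEW and mypkg are hypothetical stand-ins, not hexrd's table:

    import importlib
    import importlib.abc
    import importlib.util
    import sys

    # Hypothetical alias table: old module name -> new module name.
    OLD_TO_NEW = {"mypkg.old_name": "mypkg.new_name"}


    class _AliasLoader(importlib.abc.Loader):
        def __init__(self, new_name):
            self.new_name = new_name

        def create_module(self, spec):
            # Hand back the already-imported target so both names resolve
            # to the exact same module object (class identity stays stable).
            return importlib.import_module(self.new_name)

        def exec_module(self, module):
            pass  # the target was executed when it was first imported


    class RedirectFinder(importlib.abc.MetaPathFinder):
        def find_spec(self, fullname, path, target=None):
            new_name = OLD_TO_NEW.get(fullname)
            if new_name is None:
                return None  # not ours; fall through to the normal finders
            return importlib.util.spec_from_loader(
                fullname, _AliasLoader(new_name)
            )


    sys.meta_path.append(RedirectFinder())

The real finder below goes further: it pre-populates sys.modules itself and builds a spec whose parent points at the mapped package, so relative imports inside the moved files still resolve.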
+
+
+class ModuleSpecWithParent(importlib.machinery.ModuleSpec):
+    def __init__(self, name, loader, *, origin=None, parent=None, is_package=False):
+        super().__init__(name, loader, origin=origin, is_package=is_package)
+        self._parent = parent
+
+    @property
+    def parent(self):
+        return self._parent
+
+
+class ModuleAliasFinder(importlib.abc.MetaPathFinder):
+    def find_spec(self, fullname, path, target=None):
+        if fullname in module_map:
+            mapped_module, mapped_fp = module_map[fullname]
+
+            if mapped_fp.name != "__init__.py":
+                parent = mapped_module.rsplit(".", 1)[0]
+            else:
+                parent = mapped_module
+
+            # Need to set these to be the exact same module so that class
+            # comparison works correctly if you are comparing classes that
+            # are imported one way with classes that are imported the
+            # mapped way.
+            sys.modules[fullname] = importlib.import_module(mapped_module)
+
+            # We have to totally change the structure of the package, so we
+            # need a custom subclass of ModuleSpec.
+            # ModuleSpec.parent is used for relative imports.
+            if mapped_fp.is_file():
+                spec = ModuleSpecWithParent(
+                    mapped_module,
+                    importlib.machinery.SourceFileLoader(
+                        mapped_module, mapped_fp.as_posix()
+                    ),
+                    origin=mapped_fp.as_posix(),
+                    parent=parent,
+                    is_package=mapped_fp.name == "__init__.py",
+                )
+                # Need to set this, since ModuleSpec doesn't by default.
+                # This tells importlib to set __file__, which is used by a
+                # few things in here.
+                spec.has_location = True
+            else:
+                spec = ModuleSpecWithParent(
+                    mapped_module,
+                    importlib.machinery.NamespaceLoader(
+                        mapped_module,
+                        list(mapped_fp.parts),
+                        path_finder=importlib.machinery.PathFinder.find_spec,  # type: ignore
+                    ),
+                    parent=parent,
+                    is_package=True
+                )
+            return spec
+        return None
+
+
+sys.meta_path.append(ModuleAliasFinder())
diff --git a/hexrd/powder/instrument/hedm_instrument.py b/hexrd/powder/instrument/hedm_instrument.py
index 149fb0adc..373114e1b 100644
--- a/hexrd/powder/instrument/hedm_instrument.py
+++ b/hexrd/powder/instrument/hedm_instrument.py
@@ -72,9 +72,9 @@
 from hexrd.core.valunits import valWUnit
 from hexrd.powder.wppf import LeBail
 
-from ...core.instrument.cylindrical_detector import CylindricalDetector
-from ...core.instrument.detector import beam_energy_DFLT, max_workers_DFLT
-from ...core.instrument.planar_detector import PlanarDetector
+from hexrd.core.instrument.cylindrical_detector import CylindricalDetector
+from hexrd.core.instrument.detector import beam_energy_DFLT, max_workers_DFLT
+from hexrd.core.instrument.planar_detector import PlanarDetector
 
 from skimage.draw import polygon
 from skimage.util import random_noise
diff --git a/setup.py b/setup.py
index 23a4788c4..96722c297 100644
--- a/setup.py
+++ b/setup.py
@@ -60,7 +60,7 @@ def get_convolution_extensions():
     # Add '-Rpass-missed=.*' to ``extra_compile_args`` when compiling with
     # clang to report missed optimizations
     _convolve_ext = Extension(
-        name='hexrd.core/convolution._convolve',
+        name='hexrd.core.convolution._convolve',
         sources=src_files,
         extra_compile_args=extra_compile_args,
         include_dirs=[numpy.get_include()],
diff --git a/tests/test_preprocess.py b/tests/test_preprocess.py
index 0f01f27e8..a03ad5e6d 100644
--- a/tests/test_preprocess.py
+++ b/tests/test_preprocess.py
@@ -1,4 +1,4 @@
-from hexrd.preprocess.profiles import (
+from hexrd.hedm.preprocess.profiles import (
     Eiger_Arguments,
     Dexelas_Arguments,
     HexrdPPScript_Arguments,

From c5fd696e48791bdbe69c2231a699ed6957504cdd Mon Sep 17 00:00:00 2001
From: Kevin Welsh
Date: Wed, 12 Feb 2025 13:00:32 -0500
Subject: [PATCH 08/19] black reformatting

Also copied over the detector.py

---
 file_table.tsv                                 |    1 +
 hexrd/__init__.py                              |    1 -
 hexrd/copyright.py                             |   24 +-
 hexrd/core/config/config.py                    |   14 +-
 hexrd/core/config/dumper.py                    |    8 +-
 hexrd/core/config/imageseries.py               |    2 +-
 hexrd/core/config/instrument.py                |    4 +-
 hexrd/core/config/material.py                  |    8 +-
 hexrd/core/config/root.py                      |   30 +-
 hexrd/core/config/utils.py                     |   39 +-
 hexrd/core/constants.py                        | 4499 ++++++++++++++---
 hexrd/core/convolution/__init__.py             |    1 -
 hexrd/core/convolution/convolve.py             |  232 +-
 hexrd/core/convolution/utils.py                |    3 +-
 hexrd/core/deprecation.py                      |    1 +
 hexrd/core/distortion/__init__.py              |    1 +
 hexrd/core/distortion/dexela_2923.py           |   21 +-
 hexrd/core/distortion/ge_41rt.py               |    1 +
 hexrd/core/distortion/identity.py              |    1 +
 hexrd/core/distortion/nyi.py                   |    1 +
 hexrd/core/distortion/registry.py              |    2 +
 hexrd/core/extensions/__init__.py              |    2 +-
 .../fitting/calibration/abstract_grain.py      |   25 +-
 hexrd/core/fitting/calibration/instrument.py   |   52 +-
 hexrd/core/fitting/calibration/laue.py         |  276 +-
 .../calibration/lmfit_param_handling.py        |  230 +-
 hexrd/core/fitting/calibration/multigrain.py   |  206 +-
 hexrd/core/fitting/calibration/powder.py       |   93 +-
 .../calibration/relative_constraints.py        |    7 +-
 .../core/fitting/calibration/structureless.py  |   72 +-
 hexrd/core/fitting/fitpeak.py                  |  254 +-
 hexrd/core/fitting/peakfunctions.py            |  246 +-
 hexrd/core/fitting/spectrum.py                 |  182 +-
 hexrd/core/fitting/utils.py                    |
160 +- hexrd/core/gridutil.py | 68 +- hexrd/core/imageseries/__init__.py | 3 + hexrd/core/imageseries/baseclass.py | 4 +- hexrd/core/imageseries/imageseriesabc.py | 1 + hexrd/core/imageseries/imageseriesiter.py | 1 + hexrd/core/imageseries/load/__init__.py | 24 +- hexrd/core/imageseries/load/array.py | 11 +- .../core/imageseries/load/eiger_stream_v1.py | 9 +- hexrd/core/imageseries/load/framecache.py | 55 +- hexrd/core/imageseries/load/function.py | 2 + hexrd/core/imageseries/load/hdf5.py | 10 +- hexrd/core/imageseries/load/imagefiles.py | 57 +- hexrd/core/imageseries/load/metadata.py | 24 +- hexrd/core/imageseries/load/rawimage.py | 9 +- hexrd/core/imageseries/load/registry.py | 6 +- hexrd/core/imageseries/load/trivial.py | 2 + hexrd/core/imageseries/omega.py | 30 +- hexrd/core/imageseries/process.py | 13 +- hexrd/core/imageseries/stats.py | 20 +- hexrd/core/imageutil.py | 75 +- hexrd/core/instrument/__init__.py | 13 +- hexrd/core/instrument/constants.py | 41 +- hexrd/core/instrument/cylindrical_detector.py | 260 +- hexrd/core/instrument/detector.py | 280 +- hexrd/core/instrument/detector_coatings.py | 46 +- hexrd/core/instrument/hedm_instrument.py | 1027 ++-- hexrd/core/instrument/physics_package.py | 55 +- hexrd/core/instrument/planar_detector.py | 208 +- hexrd/core/material/__init__.py | 8 +- hexrd/core/material/crystallography.py | 19 +- hexrd/core/material/jcpds.py | 85 +- hexrd/core/material/material.py | 50 +- hexrd/core/material/mksupport.py | 249 +- hexrd/core/material/spacegroup.py | 529 +- hexrd/core/material/symbols.py | 104 +- hexrd/core/material/symmetry.py | 99 +- hexrd/core/material/unitcell.py | 592 ++- hexrd/core/material/utils.py | 109 +- hexrd/core/matrixutil.py | 140 +- hexrd/core/projections/spherical.py | 113 +- hexrd/core/rotations.py | 72 +- hexrd/core/transforms/__init__.py | 25 +- hexrd/core/transforms/new_capi/reference.py | 3 +- hexrd/core/transforms/old_xfcapi.py | 128 +- hexrd/core/transforms/xf.py | 7 +- hexrd/core/transforms/xfcapi.py | 39 +- hexrd/core/utils/decorators.py | 3 +- hexrd/core/utils/hdf5.py | 8 +- hexrd/core/utils/json.py | 4 +- hexrd/core/utils/multiprocess_generic.py | 23 +- hexrd/core/utils/panel_buffer.py | 16 +- hexrd/core/utils/profiler.py | 22 +- hexrd/core/utils/progressbar.py | 3 +- hexrd/core/utils/yaml.py | 1 + hexrd/core/valunits.py | 83 +- hexrd/hed/instrument/__init__.py | 13 + hexrd/hed/instrument/detector.py | 280 +- hexrd/hed/instrument/hedm_instrument.py | 1032 ++-- hexrd/hed/xrdutil/phutil.py | 333 +- hexrd/hed/xrdutil/utils.py | 28 +- hexrd/hedm/cli/__init__.py | 1 - hexrd/hedm/cli/documentation.py | 1 + hexrd/hedm/cli/find_orientations.py | 79 +- hexrd/hedm/cli/fit_grains.py | 123 +- hexrd/hedm/cli/help.py | 17 +- hexrd/hedm/cli/pickle23.py | 14 +- hexrd/hedm/cli/test.py | 13 +- hexrd/hedm/config/dumper.py | 8 +- hexrd/hedm/config/findorientations.py | 93 +- hexrd/hedm/config/fitgrains.py | 9 +- hexrd/hedm/config/instrument.py | 4 +- hexrd/hedm/config/root.py | 29 +- hexrd/hedm/config/utils.py | 39 +- hexrd/hedm/findorientations.py | 264 +- hexrd/hedm/fitgrains.py | 176 +- hexrd/hedm/fitting/calibration/grain.py | 74 +- hexrd/hedm/fitting/calibration/multigrain.py | 206 +- hexrd/hedm/fitting/grains.py | 154 +- hexrd/hedm/grainmap/__init__.py | 25 +- hexrd/hedm/grainmap/nfutil.py | 1081 ++-- hexrd/hedm/grainmap/tomoutil.py | 341 +- hexrd/hedm/grainmap/vtkutil.py | 220 +- hexrd/hedm/instrument/__init__.py | 13 + hexrd/hedm/instrument/detector.py | 287 +- hexrd/hedm/instrument/hedm_instrument.py | 1043 ++-- 
hexrd/hedm/instrument/physics_package.py | 55 +- hexrd/hedm/ipfcolor/colorspace.py | 56 +- hexrd/hedm/ipfcolor/sphere_sector.py | 206 +- hexrd/hedm/material/crystallography.py | 19 +- hexrd/hedm/material/unitcell.py | 591 ++- hexrd/hedm/preprocess/preprocessors.py | 6 +- hexrd/hedm/preprocess/profiles.py | 5 +- hexrd/hedm/sampleOrientations/conversions.py | 48 +- hexrd/hedm/sampleOrientations/rfz.py | 61 +- hexrd/hedm/sampleOrientations/sampleRFZ.py | 55 +- hexrd/hedm/xrdutil/utils.py | 27 +- hexrd/laue/fitting/calibration/laue.py | 278 +- hexrd/laue/instrument/__init__.py | 13 + hexrd/laue/instrument/detector.py | 285 +- hexrd/laue/instrument/hedm_instrument.py | 1031 ++-- hexrd/laue/material/crystallography.py | 19 +- hexrd/laue/xrdutil/utils.py | 27 +- hexrd/module_map.py | 18 +- .../powder/fitting/calibration/instrument.py | 50 +- hexrd/powder/fitting/calibration/powder.py | 93 +- .../fitting/calibration/structureless.py | 72 +- hexrd/powder/instrument/__init__.py | 13 + hexrd/powder/instrument/detector.py | 284 +- hexrd/powder/instrument/hedm_instrument.py | 1027 ++-- hexrd/powder/material/crystallography.py | 19 +- hexrd/powder/wppf/LeBailCalibration.py | 738 +-- hexrd/powder/wppf/WPPF.py | 212 +- hexrd/powder/wppf/parameters.py | 128 +- hexrd/powder/wppf/phase.py | 784 +-- hexrd/powder/wppf/spectrum.py | 90 +- hexrd/powder/wppf/texture.py | 608 ++- hexrd/powder/wppf/wppfsupport.py | 583 +-- hexrd/powder/wppf/xtal.py | 38 +- scripts/install/install_build_dependencies.py | 27 +- setup.py | 2 +- tests/calibration/test_2xrs_calibration.py | 8 +- tests/calibration/test_calibration.py | 18 +- .../test_group_relative_constraints.py | 4 +- tests/calibration/test_hedm_calibration.py | 15 +- .../test_instrument_relative_constraints.py | 9 +- tests/calibration/test_powder_auto_pick.py | 6 +- tests/config/common.py | 6 +- tests/config/test_find_orientations.py | 248 +- tests/config/test_fit_grains.py | 74 +- tests/config/test_image_series.py | 15 +- tests/config/test_instrument.py | 83 +- tests/config/test_material.py | 21 +- tests/config/test_root.py | 30 +- tests/find_orientations_testing.py | 206 +- tests/fit_grains_check.py | 80 +- tests/imageseries/common.py | 50 +- tests/imageseries/test_formats.py | 78 +- tests/imageseries/test_omega.py | 33 +- tests/imageseries/test_process.py | 14 +- tests/imageseries/test_stats.py | 24 +- tests/planedata/test_init.py | 12 +- tests/planedata/test_with_data.py | 10 +- tests/test_absorption_correction.py | 3 +- tests/test_find_orientations.py | 36 +- tests/test_fit-grains.py | 30 +- tests/test_graindata.py | 17 +- tests/test_material.py | 34 +- tests/test_matrix_utils.py | 18 +- tests/test_polar_view.py | 18 +- tests/test_powder.py | 7 +- tests/test_rotations.py | 18 +- tests/test_snip.py | 3 +- tests/test_utils_json.py | 18 +- tests/test_utils_yaml.py | 26 +- tests/test_wppf.py | 5 +- tests/transforms/common.py | 20 +- .../test_angles_to_dvec_from_file.py | 6 +- .../test_angles_to_gvec_from_file.py | 6 +- tests/transforms/test_gvec_to_xy_from_file.py | 22 +- .../test_make_beam_rmat_from_file.py | 9 +- tests/transforms/test_make_binary_rmat.py | 8 +- .../test_make_detector_rmat_from_file.py | 7 +- .../test_make_rmat_of_expmap_from_file.py | 7 +- .../test_make_sample_rmat_from_file.py | 9 +- .../test_quat_distance_from_file.py | 11 +- tests/transforms/test_xy_to_gvec_from_file.py | 6 +- 200 files changed, 16895 insertions(+), 9648 deletions(-) create mode 100644 hexrd/hed/instrument/__init__.py create mode 100644 hexrd/hedm/instrument/__init__.py create 
mode 100644 hexrd/laue/instrument/__init__.py create mode 100644 hexrd/powder/instrument/__init__.py diff --git a/file_table.tsv b/file_table.tsv index c9da106c7..9e1e39e4f 100644 --- a/file_table.tsv +++ b/file_table.tsv @@ -524,6 +524,7 @@ hexrd\preprocess\argument_classes_factory.py hexrd\hedm\preprocess\argument_clas hexrd\preprocess\preprocessors.py hexrd\hedm\preprocess\preprocessors.py hexrd\preprocess\profiles.py hexrd\hedm\preprocess\profiles.py hexrd\preprocess\yaml_internals.py hexrd\hedm\preprocess\yaml_internals.py +hexrd\preprocess\__init__.py hexrd\hedm\preprocess\__init__.py tests\test_preprocess.py tests\test_preprocess.py tests\test_polar_view.py tests\test_polar_view.py hexrd\transforms\new_capi hexrd\core\transforms\new_capi diff --git a/hexrd/__init__.py b/hexrd/__init__.py index 2658d6793..e8812182f 100644 --- a/hexrd/__init__.py +++ b/hexrd/__init__.py @@ -45,4 +45,3 @@ def __getattr__(name): return importlib.import_module(module) return module raise AttributeError(f"Module `hexrd` has no attribute {name}") - \ No newline at end of file diff --git a/hexrd/copyright.py b/hexrd/copyright.py index b61b3640b..cdf8910d0 100644 --- a/hexrd/copyright.py +++ b/hexrd/copyright.py @@ -1,24 +1,24 @@ # ============================================================ -# Copyright (c) 2012, Lawrence Livermore National Security, LLC. -# Produced at the Lawrence Livermore National Laboratory. -# Written by Joel Bernier and others. -# LLNL-CODE-529294. +# Copyright (c) 2012, Lawrence Livermore National Security, LLC. +# Produced at the Lawrence Livermore National Laboratory. +# Written by Joel Bernier and others. +# LLNL-CODE-529294. # All rights reserved. -# +# # This file is part of HEXRD. For details on dowloading the source, # see the file COPYING. -# +# # Please also see the file LICENSE. -# +# # This program is free software; you can redistribute it and/or modify it under the # terms of the GNU Lesser General Public License (as published by the Free Software # Foundation) version 2.1 dated February 1999. -# +# # This program is distributed in the hope that it will be useful, but -# WITHOUT ANY WARRANTY; without even the IMPLIED WARRANTY OF MERCHANTABILITY -# or FITNESS FOR A PARTICULAR PURPOSE. See the terms and conditions of the +# WITHOUT ANY WARRANTY; without even the IMPLIED WARRANTY OF MERCHANTABILITY +# or FITNESS FOR A PARTICULAR PURPOSE. See the terms and conditions of the # GNU General Public License for more details. 
-# +# # You should have received a copy of the GNU Lesser General Public # License along with this program (see file LICENSE); if not, write to # the Free Software Foundation, Inc., 59 Temple Place, Suite 330, @@ -36,4 +36,4 @@ COPYRIGHT_FILE = 'COPYING' with open(os.path.join(os.path.dirname(__file__), COPYRIGHT_FILE), 'r') as f: - COPYRIGHT_TEXT = f.read() + COPYRIGHT_TEXT = f.read() diff --git a/hexrd/core/config/config.py b/hexrd/core/config/config.py index 1b5a3683a..4645d5c5e 100644 --- a/hexrd/core/config/config.py +++ b/hexrd/core/config/config.py @@ -7,6 +7,7 @@ logger = logging.getLogger('hexrd.config') + class Config(object): """Access a level of the YAML configuration file @@ -15,6 +16,7 @@ class Config(object): cfg: Config instance or a (pyyaml) YAMLObject config representings a level of the YAML input """ + _dirty = False def __init__(self, cfg): @@ -41,14 +43,12 @@ def get(self, key, default=null): res = temp[item] except KeyError: if default is not null: - logger.info( - '%s not specified, defaulting to %s', key, default - ) + logger.info('%s not specified, defaulting to %s', key, default) res = temp.get(item, default) else: raise RuntimeError( '%s must be specified in configuration file' % key - ) + ) return res def set(self, key, val): @@ -80,12 +80,10 @@ def check_filename(fname, wdir): If fname is an absolute path, use that; otherwise take it as a path relative to the working directory. -""" + """ temp = fname if not os.path.isabs(fname): temp = os.path.join(wdir, temp) if os.path.exists(temp): return temp - raise IOError( - 'file: "%s" not found' % temp - ) + raise IOError('file: "%s" not found' % temp) diff --git a/hexrd/core/config/dumper.py b/hexrd/core/config/dumper.py index 92d3596da..4067ed93c 100644 --- a/hexrd/core/config/dumper.py +++ b/hexrd/core/config/dumper.py @@ -8,7 +8,7 @@ def _dict_path_by_id(d, value, path=()): return path elif isinstance(d, dict): for k, v in d.items(): - p = _dict_path_by_id(v, value, path + (k, )) + p = _dict_path_by_id(v, value, path + (k,)) if p is not None: return p elif isinstance(d, list): @@ -32,6 +32,7 @@ class NumPyIncludeDumper(yaml.Dumper): The ndarray would be saved in foo/bar.npy. 
""" + def __init__(self, stream, **kwargs): super().__init__(stream, **kwargs) @@ -58,5 +59,6 @@ def represent(self, data): return super().represent(data) -NumPyIncludeDumper.add_representer(np.ndarray, - NumPyIncludeDumper.ndarray_representer) +NumPyIncludeDumper.add_representer( + np.ndarray, NumPyIncludeDumper.ndarray_representer +) diff --git a/hexrd/core/config/imageseries.py b/hexrd/core/config/imageseries.py index 0a133399c..eadae8210 100644 --- a/hexrd/core/config/imageseries.py +++ b/hexrd/core/config/imageseries.py @@ -34,7 +34,7 @@ def imageseries(self): panel = '_'.join(panel) elif panel is None: panel = shared_ims_key - except(KeyError): + except KeyError: panel = shared_ims_key self._image_dict[panel] = oms diff --git a/hexrd/core/config/instrument.py b/hexrd/core/config/instrument.py index 31dbbb310..f284641df 100644 --- a/hexrd/core/config/instrument.py +++ b/hexrd/core/config/instrument.py @@ -31,7 +31,7 @@ def hedm(self): try: icfg = h5py.File(self.configuration, 'r') - except(OSError): + except OSError: with open(self.configuration, 'r') as f: icfg = yaml.load(f, Loader=NumPyIncludeLoader) @@ -47,7 +47,7 @@ def hedm(self, icfg_fname): """Set the HEDMInstrument class.""" try: icfg = h5py.File(icfg_fname, 'r') - except(OSError): + except OSError: with open(icfg_fname, 'r') as f: icfg = yaml.load(f, Loader=NumPyIncludeLoader) diff --git a/hexrd/core/config/material.py b/hexrd/core/config/material.py index 100bc515f..bc84bd724 100644 --- a/hexrd/core/config/material.py +++ b/hexrd/core/config/material.py @@ -10,8 +10,8 @@ from .utils import get_exclusion_parameters -DMIN_DFLT = 0.5 # angstrom -TTHW_DFLT = 0.25 # degrees +DMIN_DFLT = 0.5 # angstrom +TTHW_DFLT = 0.25 # degrees class MaterialConfig(Config): @@ -30,9 +30,7 @@ def definitions(self): temp = os.path.join(self._cfg.working_dir, temp) if os.path.exists(temp): return temp - raise IOError( - f'"material:definitions": "{temp}" does not exist' - ) + raise IOError(f'"material:definitions": "{temp}" does not exist') @property def active(self): diff --git a/hexrd/core/config/root.py b/hexrd/core/config/root.py index f76c9c585..ab75fb3b2 100644 --- a/hexrd/core/config/root.py +++ b/hexrd/core/config/root.py @@ -8,6 +8,7 @@ from .config import Config from .instrument import Instrument + # TODO: Resolve extra-core-dependency from hexrd.hedm.config.findorientations import FindOrientationsConfig from hexrd.hedm.config.fitgrains import FitGrainsConfig @@ -68,8 +69,10 @@ def analysis_dir(self): @property def analysis_id(self): return '_'.join( - [self.analysis_name.strip().replace(' ', '-'), - self.material.active.strip().replace(' ', '-')] + [ + self.analysis_name.strip().replace(' ', '-'), + self.material.active.strip().replace(' ', '-'), + ] ) @property @@ -135,8 +138,9 @@ def multiprocessing(self): if multiproc > ncpus: logger.warning( 'Resuested %s processes, %d available', - multiproc, ncpus - ) + multiproc, + ncpus, + ) res = ncpus else: res = multiproc if multiproc else 1 @@ -145,17 +149,15 @@ def multiprocessing(self): if temp < 1: logger.warning( 'Cannot use less than 1 process, requested %d of %d', - temp, ncpus - ) + temp, + ncpus, + ) res = 1 else: res = temp else: temp = ncpus - 1 - logger.warning( - "Invalid value %s for multiprocessing", - multiproc - ) + logger.warning("Invalid value %s for multiprocessing", multiproc) res = temp return res @@ -164,13 +166,13 @@ def multiprocessing(self, val): isint = isinstance(val, int) if val in ('half', 'all', -1): self.set('multiprocessing', val) - elif (isint and val >= 0 
and val <= mp.cpu_count()): + elif isint and val >= 0 and val <= mp.cpu_count(): self.set('multiprocessing', int(val)) else: raise RuntimeError( '"multiprocessing": must be 1:%d, got %s' % (mp.cpu_count(), val) - ) + ) @property def image_series(self): @@ -190,10 +192,10 @@ def image_series(self): panel = '_'.join(panel) elif panel is None: panel = shared_ims_key - except(KeyError): + except KeyError: try: panel = oms.metadata['panel'] - except(KeyError): + except KeyError: panel = shared_ims_key self._image_dict[panel] = oms diff --git a/hexrd/core/config/utils.py b/hexrd/core/config/utils.py index e31322f1b..4733a6efe 100644 --- a/hexrd/core/config/utils.py +++ b/hexrd/core/config/utils.py @@ -4,12 +4,21 @@ ExclusionParameters = namedtuple( - 'ExclusionParameters', ["dmin", "dmax", "tthmin", "tthmax", - "sfacmin", "sfacmax", "pintmin", "pintmax"] + 'ExclusionParameters', + [ + "dmin", + "dmax", + "tthmin", + "tthmax", + "sfacmin", + "sfacmax", + "pintmin", + "pintmax", + ], ) -class Null(): +class Null: pass @@ -52,22 +61,22 @@ def get_exclusion_parameters(cfg, prefix): if sfmin_dflt is not None: warnings.warn( '"min_sfac_ratio" is deprecated, use "sfacmin" instead', - DeprecationWarning + DeprecationWarning, ) # Default for reset_exclusions is True so that old config files will # produce the same behavior. - reset_exclusions= cfg.get(yaml_key("reset_exclusions"), True) + reset_exclusions = cfg.get(yaml_key("reset_exclusions"), True) - return( + return ( reset_exclusions, ExclusionParameters( - dmin = cfg.get(yaml_key("dmin"), None), - dmax = cfg.get(yaml_key("dmax"), None), - tthmin = cfg.get(yaml_key("tthmin"), None), - tthmax = cfg.get(yaml_key("tthmax"), None), - sfacmin = cfg.get(yaml_key("sfacmin"), sfmin_dflt), - sfacmax = cfg.get(yaml_key("sfacmax"), None), - pintmin = cfg.get(yaml_key("pintmin"), None), - pintmax = cfg.get(yaml_key("pintmax"), None), - ) + dmin=cfg.get(yaml_key("dmin"), None), + dmax=cfg.get(yaml_key("dmax"), None), + tthmin=cfg.get(yaml_key("tthmin"), None), + tthmax=cfg.get(yaml_key("tthmax"), None), + sfacmin=cfg.get(yaml_key("sfacmin"), sfmin_dflt), + sfacmax=cfg.get(yaml_key("sfacmax"), None), + pintmin=cfg.get(yaml_key("pintmin"), None), + pintmax=cfg.get(yaml_key("pintmax"), None), + ), ) diff --git a/hexrd/core/constants.py b/hexrd/core/constants.py index 608959be7..0f8bb13fa 100644 --- a/hexrd/core/constants.py +++ b/hexrd/core/constants.py @@ -25,6 +25,8 @@ # the Free Software Foundation, Inc., 59 Temple Place, Suite 330, # Boston, MA 02111-1307 USA or visit . # ============================================================================= + +# fmt: off -- Don't try to format this, it has a lot of special formatting. from importlib.metadata import version, PackageNotFoundError import multiprocessing as mp import os @@ -45,38 +47,38 @@ # pi related pi = np.pi piby2 = 0.5 * pi -piby3 = pi / 3. +piby3 = pi / 3.0 piby4 = 0.25 * pi -piby6 = pi / 6. +piby6 = pi / 6.0 # misc radicals -sqrt2 = np.sqrt(2.) -sqrt3 = np.sqrt(3.) +sqrt2 = np.sqrt(2.0) +sqrt3 = np.sqrt(3.0) sqrt3by2 = 0.5 * sqrt3 # fwhm -sigma_to_fwhm = 2.*np.sqrt(2.*np.log(2.)) -fwhm_to_sigma = 1. 
/ sigma_to_fwhm +sigma_to_fwhm = 2.0 * np.sqrt(2.0 * np.log(2.0)) +fwhm_to_sigma = 1.0 / sigma_to_fwhm # tolerancing -epsf = np.finfo(float).eps # ~2.2e-16 -ten_epsf = 10 * epsf # ~2.2e-15 -sqrt_epsf = np.sqrt(epsf) # ~1.5e-8 +epsf = np.finfo(float).eps # ~2.2e-16 +ten_epsf = 10 * epsf # ~2.2e-15 +sqrt_epsf = np.sqrt(epsf) # ~1.5e-8 # for angles -period_dict = {'degrees': 360.0, 'radians': 2*pi} +period_dict = {'degrees': 360.0, 'radians': 2 * pi} angular_units = 'radians' # module-level angle units -d2r = pi / 180. -r2d = 180. / pi +d2r = pi / 180.0 +r2d = 180.0 / pi # identity arrays identity_3x3 = np.eye(3) # (3, 3) identity -identity_6x1 = np.r_[1., 1., 1., 0., 0., 0.] +identity_6x1 = np.r_[1.0, 1.0, 1.0, 0.0, 0.0, 0.0] # basis vectors -lab_x = np.r_[1., 0., 0.] # X in the lab frame -lab_y = np.r_[0., 1., 0.] # Y in the lab frame -lab_z = np.r_[0., 0., 1.] # Z in the lab frame +lab_x = np.r_[1.0, 0.0, 0.0] # X in the lab frame +lab_y = np.r_[0.0, 1.0, 0.0] # Y in the lab frame +lab_z = np.r_[0.0, 0.0, 1.0] # Z in the lab frame zeros_3 = np.zeros(3) zeros_3x1 = np.zeros((3, 1)) @@ -97,16 +99,16 @@ # !!!: if using Midas/Fable orientations, be aware that the crystal frame # is different as well! See hexrd.crystallography.latticeVectors. fable_to_hexrd_cob_rmat = np.array( - [[ 0., -1., 0.], - [ 0., 0., 1.], - [-1., 0., 0.]] + [[0.0, -1.0, 0.0], [0.0, 0.0, 1.0], [-1.0, 0.0, 0.0]] ) fable_to_hexrd_cob_qpm = np.array( - [[ 0.5, 0.5, -0.5, -0.5], - [-0.5, 0.5, -0.5, 0.5], - [ 0.5, 0.5, 0.5, 0.5], - [ 0.5, -0.5, -0.5, 0.5]] + [ + [0.5, 0.5, -0.5, -0.5], + [-0.5, 0.5, -0.5, 0.5], + [0.5, 0.5, 0.5, 0.5], + [0.5, -0.5, -0.5, 0.5], + ] ) # shared key for imageseries shared by multiple detectors (ROI's) @@ -124,43 +126,59 @@ approximations, vol 2 (1969) Elsevier """ -c_erf = np.array([0.254829592, --0.284496736, - 1.421413741, --1.453152027, - 1.061405429, - 0.3275911]).astype(np.float64) - -c_coeff_exp1exp = np.array([0.999999584, --0.249992399, -0.055514994, --0.010315766, -0.001535370, --0.000142164]).astype(np.complex128) - -cnum_exp1exp = np.array([1., -99., -3952., -82544., -979524., -6712860., -25815840., -51369120., -44339040., -10628640., -0.]).astype(np.complex128) - -cden_exp1exp = np.array([1., -100., -4050., -86400., -1058400., -7620480., -31752000., -72576000., -81648000., -36288000., -3628800.]).astype(np.complex128) +c_erf = np.array( + [ + 0.254829592, + -0.284496736, + 1.421413741, + -1.453152027, + 1.061405429, + 0.3275911, + ] +).astype(np.float64) + +c_coeff_exp1exp = np.array( + [ + 0.999999584, + -0.249992399, + 0.055514994, + -0.010315766, + 0.001535370, + -0.000142164, + ] +).astype(np.complex128) + +cnum_exp1exp = np.array( + [ + 1.0, + 99.0, + 3952.0, + 82544.0, + 979524.0, + 6712860.0, + 25815840.0, + 51369120.0, + 44339040.0, + 10628640.0, + 0.0, + ] +).astype(np.complex128) + +cden_exp1exp = np.array( + [ + 1.0, + 100.0, + 4050.0, + 86400.0, + 1058400.0, + 7620480.0, + 31752000.0, + 72576000.0, + 81648000.0, + 36288000.0, + 3628800.0, + ] +).astype(np.complex128) """ >> @AUTHOR: Saransh Singh, @@ -169,14 +187,78 @@ >> @DATE: 11/28/2022 SS 1.0 original >> @DETAILS: constants for rodrigues FZ """ -FZtypeArray = np.array([ -0,0,1,1,1,2,2,2,1,1,1,2,2,2,2,1,1,2, -2,2,1,1,1,2,2,2,2,3,3,4,3,4] +FZtypeArray = np.array( + [ + 0, + 0, + 1, + 1, + 1, + 2, + 2, + 2, + 1, + 1, + 1, + 2, + 2, + 2, + 2, + 1, + 1, + 2, + 2, + 2, + 1, + 1, + 1, + 2, + 2, + 2, + 2, + 3, + 3, + 4, + 3, + 4, + ] ) -FZorderArray = np.array([ -0,0,2,2,2,2,2,2,4,4,4,4,4,4,4,3,3,3, 
-3,3,6,6,6,6,6,6,6,0,0,0,0,0] +FZorderArray = np.array( + [ + 0, + 0, + 2, + 2, + 2, + 2, + 2, + 2, + 4, + 4, + 4, + 4, + 4, + 4, + 4, + 3, + 3, + 3, + 3, + 3, + 6, + 6, + 6, + 6, + 6, + 6, + 6, + 0, + 0, + 0, + 0, + 0, + ] ) ''' @@ -187,41 +269,67 @@ >> @DETAILS: constants for sphere sectors used for IPF coloring ''' # radius of homochoric sphere -hoR = (np.pi * 3. / 4.)**(1./3.) +hoR = (np.pi * 3.0 / 4.0) ** (1.0 / 3.0) # radius of homochoric sphere squared -hoR2 = (np.pi * 3. / 4.)**(2./3.) +hoR2 = (np.pi * 3.0 / 4.0) ** (2.0 / 3.0) # edge of cubochoric cube -cuA = np.pi**(2./3.) +cuA = np.pi ** (2.0 / 3.0) # semi-edge of cubochoric cube -cuA_2 = 0.5 * np.pi**(2./3.) +cuA_2 = 0.5 * np.pi ** (2.0 / 3.0) -Avol = (np.pi**(5./6.))/(6**(1./6.)) +Avol = (np.pi ** (5.0 / 6.0)) / (6 ** (1.0 / 6.0)) sc = Avol / cuA prek = 1.6434564029725040 -pref = np.sqrt(6.0/np.pi) - -tfit = np.array([ 0.9999999999999968E0, -0.49999999999986866E0, --0.025000000000632055E0, - 0.003928571496460683E0, --0.0008164666077062752E0, - 0.00019411896443261646E0, --0.00004985822229871769E0, - 0.000014164962366386031E0, --1.9000248160936107E-6, - 5.72184549898506E-6, -7.772149920658778E-6, - 0.00001053483452909705E0, -9.528014229335313E-6, - 5.660288876265125E-6, -1.2844901692764126E-6, 1.1255185726258763E-6, --1.3834391419956455E-6, 7.513691751164847E-7, --2.401996891720091E-7, 4.386887017466388E-8, --3.5917775353564864E-9 ]) - -BP = np.array([ -0., 1., 0.577350269189626, 0.414213562373095, -0., 0.267949192431123, 0., 0.198912367379658, -0., 0.158384440324536, 0., 0.131652497587396]) +pref = np.sqrt(6.0 / np.pi) + +tfit = np.array( + [ + 0.9999999999999968e0, + -0.49999999999986866e0, + -0.025000000000632055e0, + -0.003928571496460683e0, + -0.0008164666077062752e0, + -0.00019411896443261646e0, + -0.00004985822229871769e0, + -0.000014164962366386031e0, + -1.9000248160936107e-6, + -5.72184549898506e-6, + 7.772149920658778e-6, + -0.00001053483452909705e0, + 9.528014229335313e-6, + -5.660288876265125e-6, + 1.2844901692764126e-6, + 1.1255185726258763e-6, + -1.3834391419956455e-6, + 7.513691751164847e-7, + -2.401996891720091e-7, + 4.386887017466388e-8, + -3.5917775353564864e-9, + ] +) + +BP = np.array( + [ + 0.0, + 1.0, + 0.577350269189626, + 0.414213562373095, + 0.0, + 0.267949192431123, + 0.0, + 0.198912367379658, + 0.0, + 0.158384440324536, + 0.0, + 0.131652497587396, + ] +) # this is a constant which defines the sign of the # cross-product in the quaternion multiplication rule @@ -229,16 +337,16 @@ pjik = 1 # sqrt 2 - 1 -tp_8 = np.sqrt(2.) - 1. +tp_8 = np.sqrt(2.0) - 1.0 # 2 - sqrt(3) -tp_12 = 2. - np.sqrt(3.) 
+tp_12 = 2.0 - np.sqrt(3.0) # for energy/wavelength conversions def keVToAngstrom(x): - return (1e7*scipyc.c*scipyc.h/scipyc.e) / np.array(x, dtype=float) + return (1e7 * scipyc.c * scipyc.h / scipyc.e) / np.array(x, dtype=float) def _readenv(name, ctor, default): @@ -251,11 +359,15 @@ def _readenv(name, ctor, default): return ctor(res) except: import warnings - warnings.warn("environ %s defined but failed to parse '%s'" % - (name, res), RuntimeWarning) + + warnings.warn( + "environ %s defined but failed to parse '%s'" % (name, res), + RuntimeWarning, + ) del warnings return default + del _readenv @@ -297,11 +409,13 @@ def is_writable_file(path): return import appdirs + value = appdirs.user_data_dir('HEXRD') os.environ[key] = value # Must reload numba config from numba.core.config import reload_config + reload_config() @@ -310,18 +424,20 @@ def is_writable_file(path): # some physical constants -cAvogadro = 6.02214076E23 # Avogadro's constant Na -cBoltzmann = 1.380649E-23 # Boltzmann's constant, K -cCharge = 1.602176634E-19 # charge of electron -cJ2eV = 1.602176565E-19 # joule to ev, JperkeV*1e-3 -cLight = 299792458.0 # speed of light, same as c above but name is more descriptive -cMoment = 9.2740100707E-24 # magnetic moment of electron -cPermea = 1.2566370616E-6 # permeability of free space -cPermit = 8.8541878163E-12 # permittivity of free space -cPlanck = 6.62607015E-34 # same as h above but name is more descriptive -cRestmass = 9.1093837090E-31 # rest mass of electron +cAvogadro = 6.02214076e23 # Avogadro's constant Na +cBoltzmann = 1.380649e-23 # Boltzmann's constant, K +cCharge = 1.602176634e-19 # charge of electron +cJ2eV = 1.602176565e-19 # joule to ev, JperkeV*1e-3 +cLight = ( + 299792458.0 # speed of light, same as c above but name is more descriptive +) +cMoment = 9.2740100707e-24 # magnetic moment of electron +cPermea = 1.2566370616e-6 # permeability of free space +cPermit = 8.8541878163e-12 # permittivity of free space +cPlanck = 6.62607015e-34 # same as h above but name is more descriptive +cRestmass = 9.1093837090e-31 # rest mass of electron cClassicalelectronRad = 2.8179403e-6 # classical electron radius in nm -cRestmasskeV = 510.99895069 # rest mass of electron in keV +cRestmasskeV = 510.99895069 # rest mass of electron in keV ''' adding another parametrization of the @@ -334,218 +450,2750 @@ def is_writable_file(path): Acta Cryst. (1995). 
A51,416-431 ''' scatfac = { - 'H': [0.413048, 0.294953, 0.187491, 0.080701, 0.023736, 4.9e-05, 15.569946, 32.398468, 5.711404, 61.889874, 1.334118], - 'H1-': [0.70226, 0.763666, 0.248678, 0.261323, 0.023017, 0.000425, 23.945604, 74.897919, 6.773289, 233.58345, 1.337531], - 'He': [0.732354, 0.753896, 0.283819, 0.190003, 0.039139, 0.000487, 11.553918, 4.595831, 1.546299, 26.463964, 0.377523], - 'Li': [0.974637, 0.158472, 0.811855, 0.262416, 0.790108, 0.002542, 4.334946, 0.342451, 97.102966, 201.363831, 1.409234], - 'Li1+': [0.432724, 0.549257, 0.376575, -0.336481, 0.97606, 0.001764, 0.260367, 1.042836, 7.885294, 0.260368, 3.042539], - 'Be': [1.533712, 0.638283, 0.601052, 0.106139, 1.118414, 0.002511, 42.662079, 0.59542, 99.106499, 0.15134, 1.843093], - 'Be2+': [3.05543, -2.372617, 1.044914, 0.544233, 0.381737, -0.653773, 0.001226, 0.001227, 1.542106, 0.456279, 4.047479], - 'B': [2.085185, 1.06458, 1.062788, 0.140515, 0.641784, 0.003823, 23.494068, 1.137894, 61.238976, 0.114886, 0.399036], - 'C': [2.657506, 1.078079, 1.490909, -4.24107, 0.713791, 4.297983, 14.780758, 0.776775, 42.086842, -0.000294, 0.239535], - 'Cval': [1.258489, 0.728215, 1.119856, 2.168133, 0.705239, 0.019722, 10.683769, 0.208177, 0.836097, 24.603704, 58.954273], - 'N': [11.89378, 3.277479, 1.858092, 0.858927, 0.912985, -11.804902, 0.000158, 10.232723, 30.34469, 0.656065, 0.217287], - 'O': [2.960427, 2.508818, 0.637853, 0.722838, 1.142756, 0.027014, 14.182259, 5.936858, 0.112726, 34.958481, 0.39024], - 'O1-': [3.106934, 3.235142, 1.148886, 0.783981, 0.676953, 0.046136, 19.86808, 6.960252, 0.170043, 65.693512, 0.630757], - 'O2-': [3.990247, 2.300563, 0.6072, 1.907882, 1.16708, 0.025429, 16.639956, 5.636819, 0.108493, 47.299709, 0.379984], - 'F': [3.511943, 2.772244, 0.678385, 0.915159, 1.089261, 0.032557, 10.687859, 4.380466, 0.093982, 27.255203, 0.313066], - 'F1-': [0.457649, 3.841561, 1.432771, 0.801876, 3.395041, 0.069525, 0.917243, 5.507803, 0.164955, 51.076206, 15.821679], - 'Ne': [4.183749, 2.905726, 0.520513, 1.135641, 1.228065, 0.025576, 8.175457, 3.252536, 0.063295, 21.81391, 0.224952], - 'Na': [4.910127, 3.081783, 1.262067, 1.098938, 0.560991, 0.079712, 3.281434, 9.119178, 0.102763, 132.013947, 0.405878], - 'Na1+': [3.14869, 4.073989, 0.767888, 0.995612, 0.968249, 0.0453, 2.594987, 6.046925, 0.070139, 14.122657, 0.217037], - 'Mg': [4.708971, 1.194814, 1.558157, 1.170413, 3.239403, 0.126842, 4.875207, 108.506081, 0.111516, 48.292408, 1.928171], - 'Mg2+': [3.062918, 4.135106, 0.853742, 1.036792, 0.85252, 0.058851, 2.015803, 4.417941, 0.065307, 9.66971, 0.187818], - 'Al': [4.730796, 2.313951, 1.54198, 1.117564, 3.154754, 0.139509, 3.628931, 43.051167, 0.09596, 108.932388, 1.555918], - 'Al3+': [4.132015, 0.912049, 1.102425, 0.614876, 3.219136, 0.019397, 3.528641, 7.378344, 0.133708, 0.039065, 1.644728], - 'Si': [5.275329, 3.191038, 1.511514, 1.356849, 2.519114, 0.145073, 2.631338, 33.730728, 0.081119, 86.288643, 1.170087], - 'Siva': [2.879033, 3.07296, 1.515981, 1.39003, 4.995051, 0.14603, 1.239713, 38.706276, 0.081481, 93.616333, 2.770293], - 'Si4+': [3.676722, 3.828496, 1.258033, 0.419024, 0.720421, 0.097266, 1.446851, 3.013144, 0.064397, 0.206254, 5.970222], - 'P': [1.950541, 4.14693, 1.49456, 1.522042, 5.729711, 0.155233, 0.908139, 27.044952, 0.07128, 67.520187, 1.981173], - 'S': [6.372157, 5.154568, 1.473732, 1.635073, 1.209372, 0.154722, 1.514347, 22.092527, 0.061373, 55.445175, 0.646925], - 'Cl': [1.446071, 6.870609, 6.151801, 1.750347, 0.634168, 0.146773, 0.052357, 1.193165, 18.343416, 46.398396, 0.401005], - 
'Cl1-': [1.061802, 7.139886, 6.524271, 2.355626, 35.829403, -34.916603, 0.144727, 1.171795, 19.467655, 60.320301, 0.000436], - 'Ar': [7.188004, 6.638454, 0.45418, 1.929593, 1.523654, 0.265954, 0.956221, 15.339877, 15.339862, 39.043823, 0.062409], - 'K': [8.163991, 7.146945, 1.07014, 0.877316, 1.486434, 0.253614, 12.816323, 0.808945, 210.327011, 39.597652, 0.052821], - 'K1+': [-17.609339, 1.494873, 7.150305, 10.899569, 15.808228, 0.257164, 18.840979, 0.053453, 0.81294, 22.264105, 14.351593], - 'Ca': [8.593655, 1.477324, 1.436254, 1.182839, 7.113258, 0.196255, 10.460644, 0.041891, 81.390381, 169.847839, 0.688098], - 'Ca2+': [8.501441, 12.880483, 9.765095, 7.156669, 0.71116, -21.013187, 10.525848, -0.004033, 0.010692, 0.684443, 27.231771], - 'Sc': [1.476566, 1.487278, 1.600187, 9.177463, 7.09975, 0.157765, 53.131023, 0.035325, 137.319489, 9.098031, 0.602102], - 'Sc3+': [7.104348, 1.511488, -53.669773, 38.404816, 24.53224, 0.118642, 0.601957, 0.033386, 12.572138, 10.859736, 14.12523], - 'Ti': [9.818524, 1.522646, 1.703101, 1.768774, 7.082555, 0.102473, 8.001879, 0.029763, 39.885422, 120.157997, 0.532405], - 'Ti2+': [7.040119, 1.496285, 9.657304, 0.006534, 1.649561, 0.150362, 0.537072, 0.031914, 8.009958, 201.800293, 24.039482], - 'Ti3+': [36.587933, 7.230255, -9.086077, 2.084594, 17.294008, -35.111282, 0.000681, 0.522262, 5.262317, 15.881716, 6.149805], - 'Ti4+': [45.355537, 7.0929, 7.483858, -43.498817, 1.678915, -0.110628, 9.252186, 0.523046, 13.082852, 10.193876, 0.023064], - 'V': [10.473575, 1.547881, 1.986381, 1.865616, 7.05625, 0.067744, 7.08194, 0.02604, 31.909672, 108.022842, 0.474882], - 'V2+': [7.754356, 2.0641, 2.576998, 2.011404, 7.126177, -0.533379, 7.066315, 0.014993, 7.066308, 22.055786, 0.467568], - 'V3+': [9.95848, 1.59635, 1.483442, -10.846044, 17.332867, 0.474921, 6.763041, 0.056895, 17.750029, 0.328826, 0.388013], - 'V5+': [15.575018, 8.448095, 1.61204, -9.721855, 1.534029, 0.552676, 0.682708, 5.56664, 10.527077, 0.907961, 0.066667], - 'Cr': [11.007069, 1.555477, 2.985293, 1.347855, 7.034779, 0.06551, 6.366281, 0.023987, 23.244839, 105.774498, 0.429369], - 'Cr2+': [10.598877, 1.565858, 2.72828, 0.098064, 6.959321, 0.04987, 6.151846, 0.023519, 17.432816, 54.002388, 0.426301], - 'Cr3+': [7.98931, 1.765079, 2.627125, 1.82938, 6.980908, -0.192123, 6.068867, 0.018342, 6.068887, 16.309284, 0.420864], - 'Mn': [11.709542, 1.733414, 2.673141, 2.023368, 7.00318, -0.147293, 5.59712, 0.0178, 21.78842, 89.517914, 0.383054], - 'Mn2+': [11.287712, 26.042414, 3.058096, 0.090258, 7.088306, -24.566132, 5.506225, 0.000774, 16.158575, 54.766354, 0.37558], - 'Mn3+': [6.926972, 2.081342, 11.128379, 2.375107, -0.419287, -0.093713, 0.378315, 0.015054, 5.379957, 14.429586, 0.004939], - 'Mn4+': [12.409131, 7.466993, 1.809947, -12.138477, 10.780248, 0.672146, 0.3004, 0.112814, 12.520756, 0.168653, 5.173237], - 'Fe': [12.311098, 1.876623, 3.066177, 2.070451, 6.975185, -0.304931, 5.009415, 0.014461, 18.74304, 82.767876, 0.346506], - 'Fe2+': [11.776765, 11.165097, 3.533495, 0.165345, 7.036932, -9.676919, 4.912232, 0.001748, 14.166556, 42.381958, 0.341324], - 'Fe3+': [9.721638, 63.403847, 2.141347, 2.629274, 7.033846, -61.930725, 4.869297, 0.000293, 4.867602, 13.539076, 0.33852], - 'Co': [12.91451, 2.481908, 3.466894, 2.106351, 6.960892, -0.936572, 4.507138, 0.009126, 16.438129, 76.98732, 0.314418], - 'Co2+': [6.99384, 26.285812, 12.254289, 0.246114, 4.017407, -24.796852, 0.310779, 0.000684, 4.400528, 35.741447, 12.536393], - 'Co3+': [6.861739, 2.67857, 12.281889, 3.501741, -0.179384, -1.147345, 
0.309794, 0.008142, 4.331703, 11.914167, 11.914167], - 'Ni': [13.521865, 6.947285, 3.866028, 2.1359, 4.284731, -2.762697, 4.077277, 0.286763, 14.622634, 71.96608, 0.004437], - 'Ni2+': [12.519017, 37.832058, 4.387257, 0.661552, 6.949072, -36.344471, 3.933053, 0.000442, 10.449184, 23.860998, 0.283723], - 'Ni3+': [13.579366, 1.902844, 12.859268, 3.811005, -6.838595, -0.317618, 0.31314, 0.012621, 3.906407, 10.894311, 0.344379], - 'Cu': [14.014192, 4.784577, 5.056806, 1.457971, 6.932996, -3.254477, 3.73828, 0.003744, 13.034982, 72.554794, 0.265666], - 'Cu1+': [12.960763, 16.34215, 1.110102, 5.520682, 6.915452, -14.84932, 3.57601, 0.000975, 29.523218, 10.114283, 0.261326], - 'Cu2+': [11.895569, 16.344978, 5.799817, 1.048804, 6.789088, -14.878383, 3.378519, 0.000924, 8.133653, 20.526524, 0.254741], - 'Zn': [14.741002, 6.907748, 4.642337, 2.191766, 38.424042, -36.915829, 3.388232, 0.243315, 11.903689, 63.31213, 0.000397], - 'Zn2+': [13.340772, 10.428857, 5.544489, 0.762295, 6.869172, -8.945248, 3.215913, 0.001413, 8.54268, 21.891756, 0.239215], - 'Ga': [15.758946, 6.841123, 4.121016, 2.714681, 2.395246, -0.847395, 3.121754, 0.226057, 12.482196, 66.203621, 0.007238], - 'Ga3+': [13.123875, 35.288189, 6.126979, 0.611551, 6.724807, -33.875122, 2.80996, 0.000323, 6.831534, 16.784311, 0.212002], - 'Ge': [16.540613, 1.5679, 3.727829, 3.345098, 6.785079, 0.018726, 2.866618, 0.012198, 13.432163, 58.866047, 0.210974], - 'Ge4+': [6.876636, 6.779091, 9.969591, 3.135857, 0.152389, 1.086542, 2.025174, 0.17665, 3.573822, 7.685848, 16.677574], - 'As': [17.025642, 4.503441, 3.715904, 3.9372, 6.790175, -2.984117, 2.597739, 0.003012, 14.272119, 50.437996, 0.193015], - 'Se': [17.354071, 4.653248, 4.259489, 4.136455, 6.749163, -3.160982, 2.349787, 0.00255, 15.57946, 45.181202, 0.177432], - 'Br': [17.55057, 5.411882, 3.93718, 3.880645, 6.707793, -2.492088, 2.119226, 16.557184, 0.002481, 42.164009, 0.162121], - 'Br1-': [17.71431, 6.466926, 6.947385, 4.402674, -0.697279, 1.152674, 2.122554, 19.050768, 0.152708, 58.690361, 58.690372], - 'Kr': [17.655279, 6.848105, 4.171004, 3.44676, 6.6852, -2.810592, 1.908231, 16.606236, 0.001598, 39.917473, 0.146896], - 'Rb': [8.123134, 2.138042, 6.761702, 1.156051, 17.679546, 1.139548, 15.142385, 33.542667, 0.129372, 224.132507, 1.713368], - 'Rb1+': [17.68432, 7.761588, 6.680874, 2.668883, 0.070974, 1.133263, 1.710209, 14.919863, 0.128542, 31.654478, 0.128543], - 'Sr': [17.730219, 9.795867, 6.099763, 2.620025, 0.600053, 1.140251, 1.56306, 14.310868, 0.120574, 135.771317, 0.120574], - 'Sr2+': [17.694973, 1.275762, 6.154252, 9.234786, 0.515995, 1.125309, 1.550888, 30.133041, 0.118774, 13.821799, 0.118774], - 'Y': [17.79204, 10.253252, 5.714949, 3.170516, 0.918251, 1.131787, 1.429691, 13.132816, 0.112173, 108.197029, 0.112173], - 'Zr': [17.859772, 10.911038, 5.821115, 3.512513, 0.746965, 1.124859, 1.310692, 12.319285, 0.104353, 91.777542, 0.104353], - 'Zr4+': [6.802956, 17.699253, 10.650647, -0.248108, 0.250338, 0.827902, 0.096228, 1.296127, 11.240715, -0.219259, -0.219021], - 'Nb': [17.958399, 12.063054, 5.007015, 3.287667, 1.531019, 1.123452, 1.21159, 12.246687, 0.098615, 75.011948, 0.098615], - 'Nb3+': [17.714323, 1.675213, 7.483963, 8.322464, 11.143573, -8.339573, 1.172419, 30.102791, 0.080255, -0.002983, 10.456687], - 'Nb5+': [17.580206, 7.633277, 10.793497, 0.180884, 67.837921, -68.02478, 1.165852, 0.078558, 9.507652, 31.621656, -0.000438], - 'Mo': [6.236218, 17.987711, 12.973127, 3.451426, 0.210899, 1.10877, 0.09078, 1.10831, 11.46872, 66.684151, 0.09078], - 'Mo3+': [7.44705, 
17.778122, 11.886068, 1.997905, 1.789626, -1.898764, 0.072, 1.073145, 9.83472, 28.221746, -0.011674], - 'Mo5+': [7.929879, 17.667669, 11.515987, 0.500402, 77.444084, -78.056595, 0.068856, 1.068064, 9.046229, 26.558945, -0.000473], - 'Mo6+': [34.757683, 9.653037, 6.584769, -18.628115, 2.490594, 1.141916, 1.30177, 7.123843, 0.094097, 1.617443, 12.335434], - 'Tc': [17.840963, 3.428236, 1.373012, 12.947364, 6.335469, 1.074784, 1.005729, 41.901382, 119.320541, 9.781542, 0.083391], - 'Ru': [6.271624, 17.906738, 14.123269, 3.746008, 0.908235, 1.043992, 0.07704, 0.928222, 9.555345, 35.86068, 123.552246], - 'Ru3+': [17.894758, 13.579529, 10.729251, 2.474095, 48.227997, -51.905243, 0.902827, 8.740579, 0.045125, 24.764954, -0.001699], - 'Ru4+': [17.845776, 13.455084, 10.229087, 1.653524, 14.059795, -17.241762, 0.90107, 8.482392, 0.045972, 23.015272, -0.004889], - 'Rh': [6.216648, 17.919739, 3.854252, 0.840326, 15.173498, 0.995452, 0.070789, 0.856121, 33.889484, 121.686691, 9.029517], - 'Rh3+': [17.758621, 14.569813, 5.29832, 2.533579, 0.879753, 0.960843, 0.841779, 8.319533, 0.06905, 23.709131, 0.06905], - 'Rh4+': [17.716188, 14.446654, 5.185801, 1.703448, 0.989992, 0.959941, 0.840572, 8.100647, 0.068995, 22.357307, 0.068995], - 'Pd': [6.121511, 4.784063, 16.631683, 4.318258, 13.246773, 0.883099, 0.062549, 0.784031, 8.751391, 34.489983, 0.784031], - 'Pd2+': [6.122282, 15.651012, 3.513508, 9.06079, 8.771199, 0.879336, 0.062424, 8.018296, 24.784275, 0.776457, 0.776457], - 'Pd4+': [6.152421, -96.069023, 31.622141, 81.578255, 17.801403, 0.915874, 0.063951, 11.090354, 13.466152, 9.758302, 0.783014], - 'Ag': [6.073874, 17.155437, 4.173344, 0.852238, 17.988686, 0.756603, 0.055333, 7.896512, 28.443739, 110.376106, 0.716809], - 'Ag1+': [6.091192, 4.019526, 16.948174, 4.258638, 13.889437, 0.785127, 0.056305, 0.71934, 7.758938, 27.368349, 0.71934], - 'Ag2+': [6.401808, 48.699802, 4.799859, -32.332523, 16.35671, 1.068247, 0.068167, 0.94227, 20.639496, 1.100365, 6.883131], - 'Cd': [6.080986, 18.019468, 4.018197, 1.30351, 17.974669, 0.603504, 0.04899, 7.273646, 29.119284, 95.831207, 0.661231], - 'Cd2+': [6.093711, 43.909691, 17.041306, -39.675117, 17.958918, 0.664795, 0.050624, 8.654143, 15.621396, 11.082067, 0.667591], - 'In': [6.196477, 18.816183, 4.050479, 1.638929, 17.962912, 0.333097, 0.042072, 6.695665, 31.00979, 103.284348, 0.610714], - 'In3+': [6.206277, 18.497746, 3.078131, 10.524613, 7.401234, 0.293677, 0.041357, 6.605563, 18.79225, 0.608082, 0.608082], - 'Sn': [19.325171, 6.281571, 4.498866, 1.856934, 17.917318, 0.119024, 6.118104, 0.036915, 32.529045, 95.037186, 0.565651], - 'Sn2+': [6.353672, 4.770377, 14.672025, 4.235959, 18.002131, -0.042519, 0.03472, 6.167891, 6.167879, 29.006456, 0.561774], - 'Sn4+': [15.445732, 6.420892, 4.56298, 1.713385, 18.033537, -0.172219, 6.280898, 0.033144, 6.280899, 17.983601, 0.55798], - 'Sb': [5.394956, 6.54957, 19.650681, 1.82782, 17.867832, -0.290506, 33.326523, 0.030974, 5.564929, 87.130966, 0.523992], - 'Sb3+': [10.189171, 57.461918, 19.356573, 4.862206, -45.394096, 1.516108, 0.089485, 0.375256, 5.357987, 22.153736, 0.297768], - 'Sb5+': [17.920622, 6.647932, 12.724075, 1.555545, 7.600591, -0.445371, 0.522315, 0.029487, 5.71821, 16.433775, 5.718204], - 'Te': [6.660302, 6.940756, 19.847015, 1.557175, 17.802427, -0.806668, 33.031654, 0.02575, 5.065547, 84.101616, 0.48766], - 'I': [19.884502, 6.736593, 8.110516, 1.170953, 17.548716, -0.448811, 4.628591, 0.027754, 31.849096, 84.406387, 0.46355], - 'I1-': [20.01033, 17.835524, 8.10413, 2.231118, 9.158548, -3.341004, 
4.565931, 0.444266, 32.430672, 95.14904, 0.014906], - 'Xe': [19.97892, 11.774945, 9.332182, 1.244749, 17.737501, -6.065902, 4.143356, 0.010142, 28.7962, 75.280685, 0.413616], - 'Cs': [17.418674, 8.314444, 10.323193, 1.383834, 19.876251, -2.322802, 0.399828, 0.016872, 25.605827, 233.339676, 3.826915], - 'Cs1+': [19.939056, 24.967621, 10.375884, 0.454243, 17.660248, -19.394306, 3.770511, 0.00404, 25.311275, 76.537766, 0.38473], - 'Ba': [19.747343, 17.368477, 10.465718, 2.592602, 11.003653, -5.183497, 3.481823, 0.371224, 21.226641, 173.834274, 0.010719], - 'Ba2+': [19.7502, 17.513683, 10.884892, 0.321585, 65.149834, -59.618172, 3.430748, 0.36159, 21.358307, 70.309402, 0.001418], - 'La': [19.966019, 27.329655, 11.018425, 3.086696, 17.335455, -21.745489, 3.197408, 0.003446, 19.955492, 141.381973, 0.341817], - 'La3+': [19.688887, 17.345703, 11.356296, 0.099418, 82.358124, -76.846909, 3.146211, 0.339586, 18.753832, 90.345459, 0.001072], - 'Ce': [17.355122, 43.988499, 20.54665, 3.13067, 11.353665, -38.386017, 0.328369, 0.002047, 3.088196, 134.907654, 18.83296], - 'Ce3+': [26.593231, 85.866432, -6.677695, 12.111847, 17.401903, -80.313423, 3.280381, 0.001012, 4.313575, 17.868504, 0.326962], - 'Ce4+': [17.457533, 25.659941, 11.691037, 19.695251, -16.994749, -3.515096, 0.311812, -0.003793, 16.568687, 2.886395, -0.008931], - 'Pr': [21.551311, 17.16173, 11.903859, 2.679103, 9.564197, -3.871068, 2.995675, 0.312491, 17.716705, 152.192825, 0.010468], - 'Pr3+': [20.879841, 36.035797, 12.135341, 0.283103, 17.167803, -30.500784, 2.870897, 0.002364, 16.615236, 53.909359, 0.306993], - 'Pr4+': [17.496082, 21.538509, 20.403114, 12.062211, -7.492043, -9.016722, 0.294457, -0.002742, 2.772886, 15.804613, -0.013556], - 'Nd': [17.331244, 62.783924, 12.160097, 2.663483, 22.23995, -57.189842, 0.300269, 0.00132, 17.026001, 148.748993, 2.910268], - 'Nd3+': [17.120077, 56.038139, 21.468307, 10.000671, 2.905866, -50.541992, 0.291295, 0.001421, 2.743681, 14.581367, 22.485098], - 'Pm': [17.286388, 51.560162, 12.478557, 2.675515, 22.960947, -45.973682, 0.28662, 0.00155, 16.223755, 143.984512, 2.79648], - 'Pm3+': [22.221066, 17.068142, 12.805423, 0.435687, 52.23877, -46.767181, 2.635767, 0.277039, 14.927315, 45.768017, 0.001455], - 'Sm': [23.700363, 23.072214, 12.777782, 2.684217, 17.204367, -17.452166, 2.689539, 0.003491, 15.495437, 139.862473, 0.274536], - 'Sm3+': [15.618565, 19.538092, 13.398946, -4.358811, 24.490461, -9.714854, 0.006001, 0.306379, 14.979594, 0.748825, 2.454492], - 'Eu': [17.186195, 37.156837, 13.103387, 2.707246, 24.419271, -31.586687, 0.261678, 0.001995, 14.78736, 134.816299, 2.581883], - 'Eu2+': [23.899035, 31.657497, 12.955752, 1.700576, 16.992199, -26.204315, 2.467332, 0.00223, 13.625002, 35.089481, 0.253136], - 'Eu3+': [17.758327, 33.498665, 24.067188, 13.436883, -9.019134, -19.768026, 0.244474, -0.003901, 2.487526, 14.568011, -0.015628], - 'Gd': [24.898117, 17.104952, 13.222581, 3.266152, 48.995213, -43.505684, 2.435028, 0.246961, 13.996325, 110.863091, 0.001383], - 'Gd3+': [24.344999, 16.945311, 13.866931, 0.481674, 93.506378, -88.147179, 2.333971, 0.239215, 12.982995, 43.876347, 0.000673], - 'Tb': [25.910013, 32.344139, 13.765117, 2.751404, 17.064405, -26.851971, 2.373912, 0.002034, 13.481969, 125.83651, 0.236916], - 'Tb3+': [24.878252, 16.856016, 13.663937, 1.279671, 39.271294, -33.950317, 2.223301, 0.22729, 11.812528, 29.910065, 0.001527], - 'Dy': [26.671785, 88.687576, 14.065445, 2.768497, 17.067781, -83.279831, 2.282593, 0.000665, 12.92023, 121.937187, 0.225531], - 'Dy3+': [16.864344, 90.383461, 
13.675473, 1.687078, 25.540651, -85.15065, 0.216275, 0.000593, 11.121207, 26.250975, 2.13593], - 'Ho': [27.15019, 16.999819, 14.059334, 3.386979, 46.546471, -41.165253, 2.16966, 0.215414, 12.213148, 100.506783, 0.001211], - 'Ho3+': [16.837524, 63.221336, 13.703766, 2.061602, 26.202621, -58.026505, 0.206873, 0.000796, 10.500283, 24.031883, 2.05506], - 'Er': [28.174887, 82.493271, 14.624002, 2.802756, 17.018515, -77.135223, 2.120995, 0.00064, 11.915256, 114.529938, 0.207519], - 'Er3+': [16.810127, 22.681061, 13.864114, 2.294506, 26.864477, -17.51346, 0.198293, 0.002126, 9.973341, 22.836388, 1.979442], - 'Tm': [28.925894, 76.173798, 14.904704, 2.814812, 16.998117, -70.839813, 2.046203, 0.000656, 11.465375, 111.41198, 0.199376], - 'Tm3+': [16.7875, 15.350905, 14.182357, 2.299111, 27.573771, -10.192087, 0.190852, 0.003036, 9.602934, 22.52688, 1.912862], - 'Yb': [29.67676, 65.624069, 15.160854, 2.830288, 16.99785, -60.313812, 1.97763, 0.00072, 11.044622, 108.139153, 0.19211], - 'Yb2+': [28.443794, 16.849527, 14.165081, 3.445311, 28.308853, -23.214935, 1.863896, 0.183811, 9.225469, 23.691355, 0.001463], - 'Yb3+': [28.191629, 16.828087, 14.167848, 2.744962, 23.171774, -18.103676, 1.842889, 0.182788, 9.045957, 20.799847, 0.001759], - 'Lu': [30.122866, 15.099346, 56.314899, 3.54098, 16.943729, -51.049416, 1.88309, 10.342764, 0.00078, 89.55925, 0.183849], - 'Lu3+': [28.828693, 16.823227, 14.247617, 3.079559, 25.647667, -20.626528, 1.776641, 0.17556, 8.575531, 19.693701, 0.001453], - 'Hf': [30.617033, 15.145351, 54.933548, 4.096253, 16.896156, -49.719837, 1.795613, 9.934469, 0.000739, 76.189705, 0.175914], - 'Hf4+': [29.267378, 16.792543, 14.78531, 2.184128, 23.791996, -18.820383, 1.697911, 0.168313, 8.190025, 18.277578, 0.001431], - 'Ta': [31.066359, 15.341823, 49.278297, 4.577665, 16.828321, -44.119026, 1.708732, 9.618455, 0.00076, 66.346199, 0.168002], - 'Ta5+': [29.539469, 16.741854, 15.18207, 1.642916, 16.437447, -11.542459, 1.612934, 0.16046, 7.654408, 17.070732, 0.001858], - 'W': [31.5079, 15.682498, 37.960129, 4.885509, 16.792112, -32.864574, 1.629485, 9.446448, 0.000898, 59.980675, 0.160798], - 'W6+': [29.729357, 17.247808, 15.184488, 1.154652, 0.739335, 3.945157, 1.501648, 0.140803, 6.880573, 14.299601, 14.299618], - 'Re': [31.888456, 16.117104, 42.390297, 5.211669, 16.767591, -37.412682, 1.549238, 9.233474, 0.000689, 54.516373, 0.152815], - 'Os': [32.210297, 16.67844, 48.559906, 5.455839, 16.735533, -43.677956, 1.473531, 9.049695, 0.000519, 50.210201, 0.145771], - 'Os4+': [17.113485, 15.79237, 23.342392, 4.090271, 7.671292, 3.98839, 0.13185, 7.288542, 1.389307, 19.629425, 1.389307], - 'Ir': [32.004436, 1.975454, 17.070105, 15.939454, 5.990003, 4.018893, 1.353767, 81.014175, 0.128093, 7.661196, 26.659403], - 'Ir3+': [31.537575, 16.363338, 15.597141, 5.051404, 1.436935, 4.009459, 1.334144, 7.451918, 0.127514, 21.705648, 0.127515], - 'Ir4+': [30.391249, 16.146996, 17.019068, 4.458904, 0.975372, 4.006865, 1.328519, 7.181766, 0.127337, 19.060146, 1.328519], - 'Pt': [31.273891, 18.44544, 17.063745, 5.555933, 1.57527, 4.050394, 1.316992, 8.797154, 0.124741, 40.177994, 1.316997], - 'Pt2+': [31.986849, 17.249048, 15.269374, 5.760234, 1.694079, 4.032512, 1.281143, 7.625512, 0.123571, 24.190826, 0.123571], - 'Pt4+': [41.932713, 16.339224, 17.653894, 6.01242, -12.036877, 4.094551, 1.111409, 6.466086, 0.128917, 16.954155, 0.778721], - 'Au': [16.77739, 19.317156, 32.979683, 5.595453, 10.576854, -6.279078, 0.122737, 8.62157, 1.256902, 38.00882, 0.000601], - 'Au1+': [32.124306, 16.716476, 16.8141, 
7.311565, 0.993064, 4.040792, 1.216073, 7.165378, 0.118715, 20.442486, 53.095985], - 'Au3+': [31.704271, 17.545767, 16.819551, 5.52264, 0.361725, 4.042679, 1.215561, 7.220506, 0.118812, 20.05097, 1.215562], - 'Hg': [16.83989, 20.023823, 28.428564, 5.881564, 4.714706, 4.076478, 0.115905, 8.256927, 1.19525, 39.247227, 1.19525], - 'Hg1+': [28.866837, 19.27754, 16.776051, 6.281459, 3.710289, 4.06843, 1.173967, 7.583842, 0.115351, 29.055994, 1.173968], - 'Hg2+': [32.411079, 18.690371, 16.711773, 9.974835, -3.847611, 4.052869, 1.16298, 7.329806, 0.114518, 22.009489, 22.009493], - 'Tl': [16.630795, 19.386616, 32.808571, 1.747191, 6.356862, 4.066939, 0.110704, 7.181401, 1.11973, 90.660263, 26.014978], - 'Tl1+': [32.295044, 16.570049, 17.991013, 1.535355, 7.554591, 4.05403, 1.101544, 0.11002, 6.528559, 52.495068, 20.338634], - 'Tl3+': [32.525639, 19.139185, 17.100321, 5.891115, 12.599463, -9.256075, 1.094966, 6.900992, 0.103667, 18.489614, -0.001401], - 'Pb': [16.419567, 32.73859, 6.530247, 2.342742, 19.916475, 4.049824, 0.105499, 1.055049, 25.02589, 80.906593, 6.664449], - 'Pb2+': [27.392647, 16.496822, 19.984501, 6.813923, 5.23391, 4.065623, 1.058874, 0.106305, 6.708123, 24.395554, 1.058874], - 'Pb4+': [32.505657, 20.01424, 14.645661, 5.029499, 1.760138, 4.044678, 1.047035, 6.670321, 0.105279, 16.52504, 0.105279], - 'Bi': [16.282274, 32.725136, 6.678302, 2.69475, 20.576559, 4.040914, 0.10118, 1.002287, 25.714146, 77.057549, 6.291882], - 'Bi3+': [32.461437, 19.438683, 16.302486, 7.322662, 0.431704, 4.043703, 0.99793, 6.038867, 0.101338, 18.371586, 46.361046], - 'Bi5+': [16.734028, 20.580494, 9.452623, 61.155834, -34.041023, 4.113663, 0.105076, 4.773282, 11.762162, 1.211775, 1.619408], - 'Po': [16.289164, 32.807171, 21.095163, 2.505901, 7.254589, 4.046556, 0.098121, 0.966265, 6.046622, 76.598068, 28.096128], - 'At': [16.011461, 32.615547, 8.113899, 2.884082, 21.377867, 3.995684, 0.092639, 0.904416, 26.543257, 68.372963, 5.499512], - 'Rn': [16.070229, 32.641106, 21.489658, 2.299218, 9.480184, 4.020977, 0.090437, 0.876409, 5.239687, 69.188477, 27.632641], - 'Fr': [16.007385, 32.66383, 21.594351, 1.598497, 11.121192, 4.003472, 0.087031, 0.840187, 4.954467, 199.805801, 26.905106], - 'Ra': [32.56369, 21.396671, 11.298093, 2.834688, 15.914965, 3.981773, 0.80198, 4.590666, 22.758972, 160.404388, 0.083544], - 'Ra2+': [4.986228, 32.474945, 21.947443, 11.800013, 10.807292, 3.956572, 0.082597, 0.791468, 4.608034, 24.792431, 0.082597], - 'Ac': [15.914053, 32.535042, 21.553976, 11.433394, 3.612409, 3.939212, 0.080511, 0.770669, 4.352206, 21.381622, 130.500748], - 'Ac3+': [15.584983, 32.022125, 21.456327, 0.757593, 12.341252, 3.838984, 0.077438, 0.739963, 4.040735, 47.525002, 19.406845], - 'Th': [15.784024, 32.454899, 21.849222, 4.239077, 11.736191, 3.922533, 0.077067, 0.735137, 4.097976, 109.464111, 20.512138], - 'Th4+': [15.515445, 32.090691, 13.996399, 12.918157, 7.635514, 3.831122, 0.074499, 0.711663, 3.871044, 18.596891, 3.871044], - 'Pa': [32.740208, 21.973675, 12.957398, 3.683832, 15.744058, 3.886066, 0.709545, 4.050881, 19.231543, 117.255005, 0.07404], - 'U': [15.679275, 32.824306, 13.660459, 3.687261, 22.279434, 3.854444, 0.071206, 0.681177, 18.236156, 112.500038, 3.930325], - 'U3+': [15.360309, 32.395657, 21.96129, 1.325894, 14.251453, 3.706622, 0.067815, 0.654643, 3.643409, 39.604965, 16.33057], - 'U4+': [15.355091, 32.235306, 0.557745, 14.396367, 21.751173, 3.705863, 0.067789, 0.652613, 42.354237, 15.908239, 3.553231], - 'U6+': [15.333844, 31.770849, 21.274414, 13.872636, 0.048519, 3.700591, 
0.067644, 0.646384, 3.317894, 14.65025, 75.339699], - 'Np': [32.999901, 22.638077, 14.219973, 3.67295, 15.683245, 3.769391, 0.657086, 3.854918, 17.435474, 109.464485, 0.068033], - 'Np3+': [15.378152, 32.572132, 22.206125, 1.413295, 14.828381, 3.60337, 0.064613, 0.63142, 3.561936, 37.875511, 15.546129], - 'Np4+': [15.373926, 32.423019, 21.969994, 0.662078, 14.96935, 3.603039, 0.064597, 0.629658, 3.476389, 39.438942, 15.135764], - 'Np6+': [15.359986, 31.992825, 21.412458, 0.066574, 14.568174, 3.600942, 0.064528, 0.624505, 3.253441, 67.658318, 13.980832], - 'Pu': [33.281178, 23.148544, 15.153755, 3.031492, 15.704215, 3.6642, 0.634999, 3.856168, 16.849735, 121.292038, 0.064857], - 'Pu3+': [15.356004, 32.769127, 22.68021, 1.351055, 15.416232, 3.428895, 0.06059, 0.604663, 3.491509, 37.260635, 14.981921], - 'Pu4+': [15.416219, 32.610569, 22.256662, 0.719495, 15.518152, 3.480408, 0.061456, 0.607938, 3.411848, 37.628792, 14.46436], - 'Pu6+': [15.436506, 32.289719, 14.726737, 15.012391, 7.024677, 3.502325, 0.061815, 0.606541, 3.245363, 13.616438, 3.245364], - 'Am': [33.435162, 23.657259, 15.576339, 3.027023, 15.7461, 3.54116, 0.612785, 3.792942, 16.195778, 117.757004, 0.061755], - 'Cm': [15.804837, 33.480801, 24.150198, 3.655563, 15.499866, 3.39084, 0.058619, 0.59016, 3.67472, 100.736191, 15.408296], - 'Bk': [15.889072, 33.625286, 24.710381, 3.707139, 15.839268, 3.213169, 0.055503, 0.569571, 3.615472, 97.694786, 14.754303], - 'Cf': [33.794075, 25.467693, 16.048487, 3.657525, 16.008982, 3.005326, 0.550447, 3.581973, 14.357388, 96.064972, 0.05245] - } + 'H': [ + 0.413048, + 0.294953, + 0.187491, + 0.080701, + 0.023736, + 4.9e-05, + 15.569946, + 32.398468, + 5.711404, + 61.889874, + 1.334118, + ], + 'H1-': [ + 0.70226, + 0.763666, + 0.248678, + 0.261323, + 0.023017, + 0.000425, + 23.945604, + 74.897919, + 6.773289, + 233.58345, + 1.337531, + ], + 'He': [ + 0.732354, + 0.753896, + 0.283819, + 0.190003, + 0.039139, + 0.000487, + 11.553918, + 4.595831, + 1.546299, + 26.463964, + 0.377523, + ], + 'Li': [ + 0.974637, + 0.158472, + 0.811855, + 0.262416, + 0.790108, + 0.002542, + 4.334946, + 0.342451, + 97.102966, + 201.363831, + 1.409234, + ], + 'Li1+': [ + 0.432724, + 0.549257, + 0.376575, + -0.336481, + 0.97606, + 0.001764, + 0.260367, + 1.042836, + 7.885294, + 0.260368, + 3.042539, + ], + 'Be': [ + 1.533712, + 0.638283, + 0.601052, + 0.106139, + 1.118414, + 0.002511, + 42.662079, + 0.59542, + 99.106499, + 0.15134, + 1.843093, + ], + 'Be2+': [ + 3.05543, + -2.372617, + 1.044914, + 0.544233, + 0.381737, + -0.653773, + 0.001226, + 0.001227, + 1.542106, + 0.456279, + 4.047479, + ], + 'B': [ + 2.085185, + 1.06458, + 1.062788, + 0.140515, + 0.641784, + 0.003823, + 23.494068, + 1.137894, + 61.238976, + 0.114886, + 0.399036, + ], + 'C': [ + 2.657506, + 1.078079, + 1.490909, + -4.24107, + 0.713791, + 4.297983, + 14.780758, + 0.776775, + 42.086842, + -0.000294, + 0.239535, + ], + 'Cval': [ + 1.258489, + 0.728215, + 1.119856, + 2.168133, + 0.705239, + 0.019722, + 10.683769, + 0.208177, + 0.836097, + 24.603704, + 58.954273, + ], + 'N': [ + 11.89378, + 3.277479, + 1.858092, + 0.858927, + 0.912985, + -11.804902, + 0.000158, + 10.232723, + 30.34469, + 0.656065, + 0.217287, + ], + 'O': [ + 2.960427, + 2.508818, + 0.637853, + 0.722838, + 1.142756, + 0.027014, + 14.182259, + 5.936858, + 0.112726, + 34.958481, + 0.39024, + ], + 'O1-': [ + 3.106934, + 3.235142, + 1.148886, + 0.783981, + 0.676953, + 0.046136, + 19.86808, + 6.960252, + 0.170043, + 65.693512, + 0.630757, + ], + 'O2-': [ + 3.990247, + 2.300563, + 0.6072, + 
1.907882, + 1.16708, + 0.025429, + 16.639956, + 5.636819, + 0.108493, + 47.299709, + 0.379984, + ], + 'F': [ + 3.511943, + 2.772244, + 0.678385, + 0.915159, + 1.089261, + 0.032557, + 10.687859, + 4.380466, + 0.093982, + 27.255203, + 0.313066, + ], + 'F1-': [ + 0.457649, + 3.841561, + 1.432771, + 0.801876, + 3.395041, + 0.069525, + 0.917243, + 5.507803, + 0.164955, + 51.076206, + 15.821679, + ], + 'Ne': [ + 4.183749, + 2.905726, + 0.520513, + 1.135641, + 1.228065, + 0.025576, + 8.175457, + 3.252536, + 0.063295, + 21.81391, + 0.224952, + ], + 'Na': [ + 4.910127, + 3.081783, + 1.262067, + 1.098938, + 0.560991, + 0.079712, + 3.281434, + 9.119178, + 0.102763, + 132.013947, + 0.405878, + ], + 'Na1+': [ + 3.14869, + 4.073989, + 0.767888, + 0.995612, + 0.968249, + 0.0453, + 2.594987, + 6.046925, + 0.070139, + 14.122657, + 0.217037, + ], + 'Mg': [ + 4.708971, + 1.194814, + 1.558157, + 1.170413, + 3.239403, + 0.126842, + 4.875207, + 108.506081, + 0.111516, + 48.292408, + 1.928171, + ], + 'Mg2+': [ + 3.062918, + 4.135106, + 0.853742, + 1.036792, + 0.85252, + 0.058851, + 2.015803, + 4.417941, + 0.065307, + 9.66971, + 0.187818, + ], + 'Al': [ + 4.730796, + 2.313951, + 1.54198, + 1.117564, + 3.154754, + 0.139509, + 3.628931, + 43.051167, + 0.09596, + 108.932388, + 1.555918, + ], + 'Al3+': [ + 4.132015, + 0.912049, + 1.102425, + 0.614876, + 3.219136, + 0.019397, + 3.528641, + 7.378344, + 0.133708, + 0.039065, + 1.644728, + ], + 'Si': [ + 5.275329, + 3.191038, + 1.511514, + 1.356849, + 2.519114, + 0.145073, + 2.631338, + 33.730728, + 0.081119, + 86.288643, + 1.170087, + ], + 'Siva': [ + 2.879033, + 3.07296, + 1.515981, + 1.39003, + 4.995051, + 0.14603, + 1.239713, + 38.706276, + 0.081481, + 93.616333, + 2.770293, + ], + 'Si4+': [ + 3.676722, + 3.828496, + 1.258033, + 0.419024, + 0.720421, + 0.097266, + 1.446851, + 3.013144, + 0.064397, + 0.206254, + 5.970222, + ], + 'P': [ + 1.950541, + 4.14693, + 1.49456, + 1.522042, + 5.729711, + 0.155233, + 0.908139, + 27.044952, + 0.07128, + 67.520187, + 1.981173, + ], + 'S': [ + 6.372157, + 5.154568, + 1.473732, + 1.635073, + 1.209372, + 0.154722, + 1.514347, + 22.092527, + 0.061373, + 55.445175, + 0.646925, + ], + 'Cl': [ + 1.446071, + 6.870609, + 6.151801, + 1.750347, + 0.634168, + 0.146773, + 0.052357, + 1.193165, + 18.343416, + 46.398396, + 0.401005, + ], + 'Cl1-': [ + 1.061802, + 7.139886, + 6.524271, + 2.355626, + 35.829403, + -34.916603, + 0.144727, + 1.171795, + 19.467655, + 60.320301, + 0.000436, + ], + 'Ar': [ + 7.188004, + 6.638454, + 0.45418, + 1.929593, + 1.523654, + 0.265954, + 0.956221, + 15.339877, + 15.339862, + 39.043823, + 0.062409, + ], + 'K': [ + 8.163991, + 7.146945, + 1.07014, + 0.877316, + 1.486434, + 0.253614, + 12.816323, + 0.808945, + 210.327011, + 39.597652, + 0.052821, + ], + 'K1+': [ + -17.609339, + 1.494873, + 7.150305, + 10.899569, + 15.808228, + 0.257164, + 18.840979, + 0.053453, + 0.81294, + 22.264105, + 14.351593, + ], + 'Ca': [ + 8.593655, + 1.477324, + 1.436254, + 1.182839, + 7.113258, + 0.196255, + 10.460644, + 0.041891, + 81.390381, + 169.847839, + 0.688098, + ], + 'Ca2+': [ + 8.501441, + 12.880483, + 9.765095, + 7.156669, + 0.71116, + -21.013187, + 10.525848, + -0.004033, + 0.010692, + 0.684443, + 27.231771, + ], + 'Sc': [ + 1.476566, + 1.487278, + 1.600187, + 9.177463, + 7.09975, + 0.157765, + 53.131023, + 0.035325, + 137.319489, + 9.098031, + 0.602102, + ], + 'Sc3+': [ + 7.104348, + 1.511488, + -53.669773, + 38.404816, + 24.53224, + 0.118642, + 0.601957, + 0.033386, + 12.572138, + 10.859736, + 14.12523, + ], + 'Ti': [ + 
9.818524, + 1.522646, + 1.703101, + 1.768774, + 7.082555, + 0.102473, + 8.001879, + 0.029763, + 39.885422, + 120.157997, + 0.532405, + ], + 'Ti2+': [ + 7.040119, + 1.496285, + 9.657304, + 0.006534, + 1.649561, + 0.150362, + 0.537072, + 0.031914, + 8.009958, + 201.800293, + 24.039482, + ], + 'Ti3+': [ + 36.587933, + 7.230255, + -9.086077, + 2.084594, + 17.294008, + -35.111282, + 0.000681, + 0.522262, + 5.262317, + 15.881716, + 6.149805, + ], + 'Ti4+': [ + 45.355537, + 7.0929, + 7.483858, + -43.498817, + 1.678915, + -0.110628, + 9.252186, + 0.523046, + 13.082852, + 10.193876, + 0.023064, + ], + 'V': [ + 10.473575, + 1.547881, + 1.986381, + 1.865616, + 7.05625, + 0.067744, + 7.08194, + 0.02604, + 31.909672, + 108.022842, + 0.474882, + ], + 'V2+': [ + 7.754356, + 2.0641, + 2.576998, + 2.011404, + 7.126177, + -0.533379, + 7.066315, + 0.014993, + 7.066308, + 22.055786, + 0.467568, + ], + 'V3+': [ + 9.95848, + 1.59635, + 1.483442, + -10.846044, + 17.332867, + 0.474921, + 6.763041, + 0.056895, + 17.750029, + 0.328826, + 0.388013, + ], + 'V5+': [ + 15.575018, + 8.448095, + 1.61204, + -9.721855, + 1.534029, + 0.552676, + 0.682708, + 5.56664, + 10.527077, + 0.907961, + 0.066667, + ], + 'Cr': [ + 11.007069, + 1.555477, + 2.985293, + 1.347855, + 7.034779, + 0.06551, + 6.366281, + 0.023987, + 23.244839, + 105.774498, + 0.429369, + ], + 'Cr2+': [ + 10.598877, + 1.565858, + 2.72828, + 0.098064, + 6.959321, + 0.04987, + 6.151846, + 0.023519, + 17.432816, + 54.002388, + 0.426301, + ], + 'Cr3+': [ + 7.98931, + 1.765079, + 2.627125, + 1.82938, + 6.980908, + -0.192123, + 6.068867, + 0.018342, + 6.068887, + 16.309284, + 0.420864, + ], + 'Mn': [ + 11.709542, + 1.733414, + 2.673141, + 2.023368, + 7.00318, + -0.147293, + 5.59712, + 0.0178, + 21.78842, + 89.517914, + 0.383054, + ], + 'Mn2+': [ + 11.287712, + 26.042414, + 3.058096, + 0.090258, + 7.088306, + -24.566132, + 5.506225, + 0.000774, + 16.158575, + 54.766354, + 0.37558, + ], + 'Mn3+': [ + 6.926972, + 2.081342, + 11.128379, + 2.375107, + -0.419287, + -0.093713, + 0.378315, + 0.015054, + 5.379957, + 14.429586, + 0.004939, + ], + 'Mn4+': [ + 12.409131, + 7.466993, + 1.809947, + -12.138477, + 10.780248, + 0.672146, + 0.3004, + 0.112814, + 12.520756, + 0.168653, + 5.173237, + ], + 'Fe': [ + 12.311098, + 1.876623, + 3.066177, + 2.070451, + 6.975185, + -0.304931, + 5.009415, + 0.014461, + 18.74304, + 82.767876, + 0.346506, + ], + 'Fe2+': [ + 11.776765, + 11.165097, + 3.533495, + 0.165345, + 7.036932, + -9.676919, + 4.912232, + 0.001748, + 14.166556, + 42.381958, + 0.341324, + ], + 'Fe3+': [ + 9.721638, + 63.403847, + 2.141347, + 2.629274, + 7.033846, + -61.930725, + 4.869297, + 0.000293, + 4.867602, + 13.539076, + 0.33852, + ], + 'Co': [ + 12.91451, + 2.481908, + 3.466894, + 2.106351, + 6.960892, + -0.936572, + 4.507138, + 0.009126, + 16.438129, + 76.98732, + 0.314418, + ], + 'Co2+': [ + 6.99384, + 26.285812, + 12.254289, + 0.246114, + 4.017407, + -24.796852, + 0.310779, + 0.000684, + 4.400528, + 35.741447, + 12.536393, + ], + 'Co3+': [ + 6.861739, + 2.67857, + 12.281889, + 3.501741, + -0.179384, + -1.147345, + 0.309794, + 0.008142, + 4.331703, + 11.914167, + 11.914167, + ], + 'Ni': [ + 13.521865, + 6.947285, + 3.866028, + 2.1359, + 4.284731, + -2.762697, + 4.077277, + 0.286763, + 14.622634, + 71.96608, + 0.004437, + ], + 'Ni2+': [ + 12.519017, + 37.832058, + 4.387257, + 0.661552, + 6.949072, + -36.344471, + 3.933053, + 0.000442, + 10.449184, + 23.860998, + 0.283723, + ], + 'Ni3+': [ + 13.579366, + 1.902844, + 12.859268, + 3.811005, + -6.838595, + -0.317618, + 
0.31314, + 0.012621, + 3.906407, + 10.894311, + 0.344379, + ], + 'Cu': [ + 14.014192, + 4.784577, + 5.056806, + 1.457971, + 6.932996, + -3.254477, + 3.73828, + 0.003744, + 13.034982, + 72.554794, + 0.265666, + ], + 'Cu1+': [ + 12.960763, + 16.34215, + 1.110102, + 5.520682, + 6.915452, + -14.84932, + 3.57601, + 0.000975, + 29.523218, + 10.114283, + 0.261326, + ], + 'Cu2+': [ + 11.895569, + 16.344978, + 5.799817, + 1.048804, + 6.789088, + -14.878383, + 3.378519, + 0.000924, + 8.133653, + 20.526524, + 0.254741, + ], + 'Zn': [ + 14.741002, + 6.907748, + 4.642337, + 2.191766, + 38.424042, + -36.915829, + 3.388232, + 0.243315, + 11.903689, + 63.31213, + 0.000397, + ], + 'Zn2+': [ + 13.340772, + 10.428857, + 5.544489, + 0.762295, + 6.869172, + -8.945248, + 3.215913, + 0.001413, + 8.54268, + 21.891756, + 0.239215, + ], + 'Ga': [ + 15.758946, + 6.841123, + 4.121016, + 2.714681, + 2.395246, + -0.847395, + 3.121754, + 0.226057, + 12.482196, + 66.203621, + 0.007238, + ], + 'Ga3+': [ + 13.123875, + 35.288189, + 6.126979, + 0.611551, + 6.724807, + -33.875122, + 2.80996, + 0.000323, + 6.831534, + 16.784311, + 0.212002, + ], + 'Ge': [ + 16.540613, + 1.5679, + 3.727829, + 3.345098, + 6.785079, + 0.018726, + 2.866618, + 0.012198, + 13.432163, + 58.866047, + 0.210974, + ], + 'Ge4+': [ + 6.876636, + 6.779091, + 9.969591, + 3.135857, + 0.152389, + 1.086542, + 2.025174, + 0.17665, + 3.573822, + 7.685848, + 16.677574, + ], + 'As': [ + 17.025642, + 4.503441, + 3.715904, + 3.9372, + 6.790175, + -2.984117, + 2.597739, + 0.003012, + 14.272119, + 50.437996, + 0.193015, + ], + 'Se': [ + 17.354071, + 4.653248, + 4.259489, + 4.136455, + 6.749163, + -3.160982, + 2.349787, + 0.00255, + 15.57946, + 45.181202, + 0.177432, + ], + 'Br': [ + 17.55057, + 5.411882, + 3.93718, + 3.880645, + 6.707793, + -2.492088, + 2.119226, + 16.557184, + 0.002481, + 42.164009, + 0.162121, + ], + 'Br1-': [ + 17.71431, + 6.466926, + 6.947385, + 4.402674, + -0.697279, + 1.152674, + 2.122554, + 19.050768, + 0.152708, + 58.690361, + 58.690372, + ], + 'Kr': [ + 17.655279, + 6.848105, + 4.171004, + 3.44676, + 6.6852, + -2.810592, + 1.908231, + 16.606236, + 0.001598, + 39.917473, + 0.146896, + ], + 'Rb': [ + 8.123134, + 2.138042, + 6.761702, + 1.156051, + 17.679546, + 1.139548, + 15.142385, + 33.542667, + 0.129372, + 224.132507, + 1.713368, + ], + 'Rb1+': [ + 17.68432, + 7.761588, + 6.680874, + 2.668883, + 0.070974, + 1.133263, + 1.710209, + 14.919863, + 0.128542, + 31.654478, + 0.128543, + ], + 'Sr': [ + 17.730219, + 9.795867, + 6.099763, + 2.620025, + 0.600053, + 1.140251, + 1.56306, + 14.310868, + 0.120574, + 135.771317, + 0.120574, + ], + 'Sr2+': [ + 17.694973, + 1.275762, + 6.154252, + 9.234786, + 0.515995, + 1.125309, + 1.550888, + 30.133041, + 0.118774, + 13.821799, + 0.118774, + ], + 'Y': [ + 17.79204, + 10.253252, + 5.714949, + 3.170516, + 0.918251, + 1.131787, + 1.429691, + 13.132816, + 0.112173, + 108.197029, + 0.112173, + ], + 'Zr': [ + 17.859772, + 10.911038, + 5.821115, + 3.512513, + 0.746965, + 1.124859, + 1.310692, + 12.319285, + 0.104353, + 91.777542, + 0.104353, + ], + 'Zr4+': [ + 6.802956, + 17.699253, + 10.650647, + -0.248108, + 0.250338, + 0.827902, + 0.096228, + 1.296127, + 11.240715, + -0.219259, + -0.219021, + ], + 'Nb': [ + 17.958399, + 12.063054, + 5.007015, + 3.287667, + 1.531019, + 1.123452, + 1.21159, + 12.246687, + 0.098615, + 75.011948, + 0.098615, + ], + 'Nb3+': [ + 17.714323, + 1.675213, + 7.483963, + 8.322464, + 11.143573, + -8.339573, + 1.172419, + 30.102791, + 0.080255, + -0.002983, + 10.456687, + ], + 'Nb5+': [ + 
17.580206, + 7.633277, + 10.793497, + 0.180884, + 67.837921, + -68.02478, + 1.165852, + 0.078558, + 9.507652, + 31.621656, + -0.000438, + ], + 'Mo': [ + 6.236218, + 17.987711, + 12.973127, + 3.451426, + 0.210899, + 1.10877, + 0.09078, + 1.10831, + 11.46872, + 66.684151, + 0.09078, + ], + 'Mo3+': [ + 7.44705, + 17.778122, + 11.886068, + 1.997905, + 1.789626, + -1.898764, + 0.072, + 1.073145, + 9.83472, + 28.221746, + -0.011674, + ], + 'Mo5+': [ + 7.929879, + 17.667669, + 11.515987, + 0.500402, + 77.444084, + -78.056595, + 0.068856, + 1.068064, + 9.046229, + 26.558945, + -0.000473, + ], + 'Mo6+': [ + 34.757683, + 9.653037, + 6.584769, + -18.628115, + 2.490594, + 1.141916, + 1.30177, + 7.123843, + 0.094097, + 1.617443, + 12.335434, + ], + 'Tc': [ + 17.840963, + 3.428236, + 1.373012, + 12.947364, + 6.335469, + 1.074784, + 1.005729, + 41.901382, + 119.320541, + 9.781542, + 0.083391, + ], + 'Ru': [ + 6.271624, + 17.906738, + 14.123269, + 3.746008, + 0.908235, + 1.043992, + 0.07704, + 0.928222, + 9.555345, + 35.86068, + 123.552246, + ], + 'Ru3+': [ + 17.894758, + 13.579529, + 10.729251, + 2.474095, + 48.227997, + -51.905243, + 0.902827, + 8.740579, + 0.045125, + 24.764954, + -0.001699, + ], + 'Ru4+': [ + 17.845776, + 13.455084, + 10.229087, + 1.653524, + 14.059795, + -17.241762, + 0.90107, + 8.482392, + 0.045972, + 23.015272, + -0.004889, + ], + 'Rh': [ + 6.216648, + 17.919739, + 3.854252, + 0.840326, + 15.173498, + 0.995452, + 0.070789, + 0.856121, + 33.889484, + 121.686691, + 9.029517, + ], + 'Rh3+': [ + 17.758621, + 14.569813, + 5.29832, + 2.533579, + 0.879753, + 0.960843, + 0.841779, + 8.319533, + 0.06905, + 23.709131, + 0.06905, + ], + 'Rh4+': [ + 17.716188, + 14.446654, + 5.185801, + 1.703448, + 0.989992, + 0.959941, + 0.840572, + 8.100647, + 0.068995, + 22.357307, + 0.068995, + ], + 'Pd': [ + 6.121511, + 4.784063, + 16.631683, + 4.318258, + 13.246773, + 0.883099, + 0.062549, + 0.784031, + 8.751391, + 34.489983, + 0.784031, + ], + 'Pd2+': [ + 6.122282, + 15.651012, + 3.513508, + 9.06079, + 8.771199, + 0.879336, + 0.062424, + 8.018296, + 24.784275, + 0.776457, + 0.776457, + ], + 'Pd4+': [ + 6.152421, + -96.069023, + 31.622141, + 81.578255, + 17.801403, + 0.915874, + 0.063951, + 11.090354, + 13.466152, + 9.758302, + 0.783014, + ], + 'Ag': [ + 6.073874, + 17.155437, + 4.173344, + 0.852238, + 17.988686, + 0.756603, + 0.055333, + 7.896512, + 28.443739, + 110.376106, + 0.716809, + ], + 'Ag1+': [ + 6.091192, + 4.019526, + 16.948174, + 4.258638, + 13.889437, + 0.785127, + 0.056305, + 0.71934, + 7.758938, + 27.368349, + 0.71934, + ], + 'Ag2+': [ + 6.401808, + 48.699802, + 4.799859, + -32.332523, + 16.35671, + 1.068247, + 0.068167, + 0.94227, + 20.639496, + 1.100365, + 6.883131, + ], + 'Cd': [ + 6.080986, + 18.019468, + 4.018197, + 1.30351, + 17.974669, + 0.603504, + 0.04899, + 7.273646, + 29.119284, + 95.831207, + 0.661231, + ], + 'Cd2+': [ + 6.093711, + 43.909691, + 17.041306, + -39.675117, + 17.958918, + 0.664795, + 0.050624, + 8.654143, + 15.621396, + 11.082067, + 0.667591, + ], + 'In': [ + 6.196477, + 18.816183, + 4.050479, + 1.638929, + 17.962912, + 0.333097, + 0.042072, + 6.695665, + 31.00979, + 103.284348, + 0.610714, + ], + 'In3+': [ + 6.206277, + 18.497746, + 3.078131, + 10.524613, + 7.401234, + 0.293677, + 0.041357, + 6.605563, + 18.79225, + 0.608082, + 0.608082, + ], + 'Sn': [ + 19.325171, + 6.281571, + 4.498866, + 1.856934, + 17.917318, + 0.119024, + 6.118104, + 0.036915, + 32.529045, + 95.037186, + 0.565651, + ], + 'Sn2+': [ + 6.353672, + 4.770377, + 14.672025, + 4.235959, + 18.002131, 
+ -0.042519, + 0.03472, + 6.167891, + 6.167879, + 29.006456, + 0.561774, + ], + 'Sn4+': [ + 15.445732, + 6.420892, + 4.56298, + 1.713385, + 18.033537, + -0.172219, + 6.280898, + 0.033144, + 6.280899, + 17.983601, + 0.55798, + ], + 'Sb': [ + 5.394956, + 6.54957, + 19.650681, + 1.82782, + 17.867832, + -0.290506, + 33.326523, + 0.030974, + 5.564929, + 87.130966, + 0.523992, + ], + 'Sb3+': [ + 10.189171, + 57.461918, + 19.356573, + 4.862206, + -45.394096, + 1.516108, + 0.089485, + 0.375256, + 5.357987, + 22.153736, + 0.297768, + ], + 'Sb5+': [ + 17.920622, + 6.647932, + 12.724075, + 1.555545, + 7.600591, + -0.445371, + 0.522315, + 0.029487, + 5.71821, + 16.433775, + 5.718204, + ], + 'Te': [ + 6.660302, + 6.940756, + 19.847015, + 1.557175, + 17.802427, + -0.806668, + 33.031654, + 0.02575, + 5.065547, + 84.101616, + 0.48766, + ], + 'I': [ + 19.884502, + 6.736593, + 8.110516, + 1.170953, + 17.548716, + -0.448811, + 4.628591, + 0.027754, + 31.849096, + 84.406387, + 0.46355, + ], + 'I1-': [ + 20.01033, + 17.835524, + 8.10413, + 2.231118, + 9.158548, + -3.341004, + 4.565931, + 0.444266, + 32.430672, + 95.14904, + 0.014906, + ], + 'Xe': [ + 19.97892, + 11.774945, + 9.332182, + 1.244749, + 17.737501, + -6.065902, + 4.143356, + 0.010142, + 28.7962, + 75.280685, + 0.413616, + ], + 'Cs': [ + 17.418674, + 8.314444, + 10.323193, + 1.383834, + 19.876251, + -2.322802, + 0.399828, + 0.016872, + 25.605827, + 233.339676, + 3.826915, + ], + 'Cs1+': [ + 19.939056, + 24.967621, + 10.375884, + 0.454243, + 17.660248, + -19.394306, + 3.770511, + 0.00404, + 25.311275, + 76.537766, + 0.38473, + ], + 'Ba': [ + 19.747343, + 17.368477, + 10.465718, + 2.592602, + 11.003653, + -5.183497, + 3.481823, + 0.371224, + 21.226641, + 173.834274, + 0.010719, + ], + 'Ba2+': [ + 19.7502, + 17.513683, + 10.884892, + 0.321585, + 65.149834, + -59.618172, + 3.430748, + 0.36159, + 21.358307, + 70.309402, + 0.001418, + ], + 'La': [ + 19.966019, + 27.329655, + 11.018425, + 3.086696, + 17.335455, + -21.745489, + 3.197408, + 0.003446, + 19.955492, + 141.381973, + 0.341817, + ], + 'La3+': [ + 19.688887, + 17.345703, + 11.356296, + 0.099418, + 82.358124, + -76.846909, + 3.146211, + 0.339586, + 18.753832, + 90.345459, + 0.001072, + ], + 'Ce': [ + 17.355122, + 43.988499, + 20.54665, + 3.13067, + 11.353665, + -38.386017, + 0.328369, + 0.002047, + 3.088196, + 134.907654, + 18.83296, + ], + 'Ce3+': [ + 26.593231, + 85.866432, + -6.677695, + 12.111847, + 17.401903, + -80.313423, + 3.280381, + 0.001012, + 4.313575, + 17.868504, + 0.326962, + ], + 'Ce4+': [ + 17.457533, + 25.659941, + 11.691037, + 19.695251, + -16.994749, + -3.515096, + 0.311812, + -0.003793, + 16.568687, + 2.886395, + -0.008931, + ], + 'Pr': [ + 21.551311, + 17.16173, + 11.903859, + 2.679103, + 9.564197, + -3.871068, + 2.995675, + 0.312491, + 17.716705, + 152.192825, + 0.010468, + ], + 'Pr3+': [ + 20.879841, + 36.035797, + 12.135341, + 0.283103, + 17.167803, + -30.500784, + 2.870897, + 0.002364, + 16.615236, + 53.909359, + 0.306993, + ], + 'Pr4+': [ + 17.496082, + 21.538509, + 20.403114, + 12.062211, + -7.492043, + -9.016722, + 0.294457, + -0.002742, + 2.772886, + 15.804613, + -0.013556, + ], + 'Nd': [ + 17.331244, + 62.783924, + 12.160097, + 2.663483, + 22.23995, + -57.189842, + 0.300269, + 0.00132, + 17.026001, + 148.748993, + 2.910268, + ], + 'Nd3+': [ + 17.120077, + 56.038139, + 21.468307, + 10.000671, + 2.905866, + -50.541992, + 0.291295, + 0.001421, + 2.743681, + 14.581367, + 22.485098, + ], + 'Pm': [ + 17.286388, + 51.560162, + 12.478557, + 2.675515, + 22.960947, + -45.973682, + 
0.28662, + 0.00155, + 16.223755, + 143.984512, + 2.79648, + ], + 'Pm3+': [ + 22.221066, + 17.068142, + 12.805423, + 0.435687, + 52.23877, + -46.767181, + 2.635767, + 0.277039, + 14.927315, + 45.768017, + 0.001455, + ], + 'Sm': [ + 23.700363, + 23.072214, + 12.777782, + 2.684217, + 17.204367, + -17.452166, + 2.689539, + 0.003491, + 15.495437, + 139.862473, + 0.274536, + ], + 'Sm3+': [ + 15.618565, + 19.538092, + 13.398946, + -4.358811, + 24.490461, + -9.714854, + 0.006001, + 0.306379, + 14.979594, + 0.748825, + 2.454492, + ], + 'Eu': [ + 17.186195, + 37.156837, + 13.103387, + 2.707246, + 24.419271, + -31.586687, + 0.261678, + 0.001995, + 14.78736, + 134.816299, + 2.581883, + ], + 'Eu2+': [ + 23.899035, + 31.657497, + 12.955752, + 1.700576, + 16.992199, + -26.204315, + 2.467332, + 0.00223, + 13.625002, + 35.089481, + 0.253136, + ], + 'Eu3+': [ + 17.758327, + 33.498665, + 24.067188, + 13.436883, + -9.019134, + -19.768026, + 0.244474, + -0.003901, + 2.487526, + 14.568011, + -0.015628, + ], + 'Gd': [ + 24.898117, + 17.104952, + 13.222581, + 3.266152, + 48.995213, + -43.505684, + 2.435028, + 0.246961, + 13.996325, + 110.863091, + 0.001383, + ], + 'Gd3+': [ + 24.344999, + 16.945311, + 13.866931, + 0.481674, + 93.506378, + -88.147179, + 2.333971, + 0.239215, + 12.982995, + 43.876347, + 0.000673, + ], + 'Tb': [ + 25.910013, + 32.344139, + 13.765117, + 2.751404, + 17.064405, + -26.851971, + 2.373912, + 0.002034, + 13.481969, + 125.83651, + 0.236916, + ], + 'Tb3+': [ + 24.878252, + 16.856016, + 13.663937, + 1.279671, + 39.271294, + -33.950317, + 2.223301, + 0.22729, + 11.812528, + 29.910065, + 0.001527, + ], + 'Dy': [ + 26.671785, + 88.687576, + 14.065445, + 2.768497, + 17.067781, + -83.279831, + 2.282593, + 0.000665, + 12.92023, + 121.937187, + 0.225531, + ], + 'Dy3+': [ + 16.864344, + 90.383461, + 13.675473, + 1.687078, + 25.540651, + -85.15065, + 0.216275, + 0.000593, + 11.121207, + 26.250975, + 2.13593, + ], + 'Ho': [ + 27.15019, + 16.999819, + 14.059334, + 3.386979, + 46.546471, + -41.165253, + 2.16966, + 0.215414, + 12.213148, + 100.506783, + 0.001211, + ], + 'Ho3+': [ + 16.837524, + 63.221336, + 13.703766, + 2.061602, + 26.202621, + -58.026505, + 0.206873, + 0.000796, + 10.500283, + 24.031883, + 2.05506, + ], + 'Er': [ + 28.174887, + 82.493271, + 14.624002, + 2.802756, + 17.018515, + -77.135223, + 2.120995, + 0.00064, + 11.915256, + 114.529938, + 0.207519, + ], + 'Er3+': [ + 16.810127, + 22.681061, + 13.864114, + 2.294506, + 26.864477, + -17.51346, + 0.198293, + 0.002126, + 9.973341, + 22.836388, + 1.979442, + ], + 'Tm': [ + 28.925894, + 76.173798, + 14.904704, + 2.814812, + 16.998117, + -70.839813, + 2.046203, + 0.000656, + 11.465375, + 111.41198, + 0.199376, + ], + 'Tm3+': [ + 16.7875, + 15.350905, + 14.182357, + 2.299111, + 27.573771, + -10.192087, + 0.190852, + 0.003036, + 9.602934, + 22.52688, + 1.912862, + ], + 'Yb': [ + 29.67676, + 65.624069, + 15.160854, + 2.830288, + 16.99785, + -60.313812, + 1.97763, + 0.00072, + 11.044622, + 108.139153, + 0.19211, + ], + 'Yb2+': [ + 28.443794, + 16.849527, + 14.165081, + 3.445311, + 28.308853, + -23.214935, + 1.863896, + 0.183811, + 9.225469, + 23.691355, + 0.001463, + ], + 'Yb3+': [ + 28.191629, + 16.828087, + 14.167848, + 2.744962, + 23.171774, + -18.103676, + 1.842889, + 0.182788, + 9.045957, + 20.799847, + 0.001759, + ], + 'Lu': [ + 30.122866, + 15.099346, + 56.314899, + 3.54098, + 16.943729, + -51.049416, + 1.88309, + 10.342764, + 0.00078, + 89.55925, + 0.183849, + ], + 'Lu3+': [ + 28.828693, + 16.823227, + 14.247617, + 3.079559, + 25.647667, + 
-20.626528, + 1.776641, + 0.17556, + 8.575531, + 19.693701, + 0.001453, + ], + 'Hf': [ + 30.617033, + 15.145351, + 54.933548, + 4.096253, + 16.896156, + -49.719837, + 1.795613, + 9.934469, + 0.000739, + 76.189705, + 0.175914, + ], + 'Hf4+': [ + 29.267378, + 16.792543, + 14.78531, + 2.184128, + 23.791996, + -18.820383, + 1.697911, + 0.168313, + 8.190025, + 18.277578, + 0.001431, + ], + 'Ta': [ + 31.066359, + 15.341823, + 49.278297, + 4.577665, + 16.828321, + -44.119026, + 1.708732, + 9.618455, + 0.00076, + 66.346199, + 0.168002, + ], + 'Ta5+': [ + 29.539469, + 16.741854, + 15.18207, + 1.642916, + 16.437447, + -11.542459, + 1.612934, + 0.16046, + 7.654408, + 17.070732, + 0.001858, + ], + 'W': [ + 31.5079, + 15.682498, + 37.960129, + 4.885509, + 16.792112, + -32.864574, + 1.629485, + 9.446448, + 0.000898, + 59.980675, + 0.160798, + ], + 'W6+': [ + 29.729357, + 17.247808, + 15.184488, + 1.154652, + 0.739335, + 3.945157, + 1.501648, + 0.140803, + 6.880573, + 14.299601, + 14.299618, + ], + 'Re': [ + 31.888456, + 16.117104, + 42.390297, + 5.211669, + 16.767591, + -37.412682, + 1.549238, + 9.233474, + 0.000689, + 54.516373, + 0.152815, + ], + 'Os': [ + 32.210297, + 16.67844, + 48.559906, + 5.455839, + 16.735533, + -43.677956, + 1.473531, + 9.049695, + 0.000519, + 50.210201, + 0.145771, + ], + 'Os4+': [ + 17.113485, + 15.79237, + 23.342392, + 4.090271, + 7.671292, + 3.98839, + 0.13185, + 7.288542, + 1.389307, + 19.629425, + 1.389307, + ], + 'Ir': [ + 32.004436, + 1.975454, + 17.070105, + 15.939454, + 5.990003, + 4.018893, + 1.353767, + 81.014175, + 0.128093, + 7.661196, + 26.659403, + ], + 'Ir3+': [ + 31.537575, + 16.363338, + 15.597141, + 5.051404, + 1.436935, + 4.009459, + 1.334144, + 7.451918, + 0.127514, + 21.705648, + 0.127515, + ], + 'Ir4+': [ + 30.391249, + 16.146996, + 17.019068, + 4.458904, + 0.975372, + 4.006865, + 1.328519, + 7.181766, + 0.127337, + 19.060146, + 1.328519, + ], + 'Pt': [ + 31.273891, + 18.44544, + 17.063745, + 5.555933, + 1.57527, + 4.050394, + 1.316992, + 8.797154, + 0.124741, + 40.177994, + 1.316997, + ], + 'Pt2+': [ + 31.986849, + 17.249048, + 15.269374, + 5.760234, + 1.694079, + 4.032512, + 1.281143, + 7.625512, + 0.123571, + 24.190826, + 0.123571, + ], + 'Pt4+': [ + 41.932713, + 16.339224, + 17.653894, + 6.01242, + -12.036877, + 4.094551, + 1.111409, + 6.466086, + 0.128917, + 16.954155, + 0.778721, + ], + 'Au': [ + 16.77739, + 19.317156, + 32.979683, + 5.595453, + 10.576854, + -6.279078, + 0.122737, + 8.62157, + 1.256902, + 38.00882, + 0.000601, + ], + 'Au1+': [ + 32.124306, + 16.716476, + 16.8141, + 7.311565, + 0.993064, + 4.040792, + 1.216073, + 7.165378, + 0.118715, + 20.442486, + 53.095985, + ], + 'Au3+': [ + 31.704271, + 17.545767, + 16.819551, + 5.52264, + 0.361725, + 4.042679, + 1.215561, + 7.220506, + 0.118812, + 20.05097, + 1.215562, + ], + 'Hg': [ + 16.83989, + 20.023823, + 28.428564, + 5.881564, + 4.714706, + 4.076478, + 0.115905, + 8.256927, + 1.19525, + 39.247227, + 1.19525, + ], + 'Hg1+': [ + 28.866837, + 19.27754, + 16.776051, + 6.281459, + 3.710289, + 4.06843, + 1.173967, + 7.583842, + 0.115351, + 29.055994, + 1.173968, + ], + 'Hg2+': [ + 32.411079, + 18.690371, + 16.711773, + 9.974835, + -3.847611, + 4.052869, + 1.16298, + 7.329806, + 0.114518, + 22.009489, + 22.009493, + ], + 'Tl': [ + 16.630795, + 19.386616, + 32.808571, + 1.747191, + 6.356862, + 4.066939, + 0.110704, + 7.181401, + 1.11973, + 90.660263, + 26.014978, + ], + 'Tl1+': [ + 32.295044, + 16.570049, + 17.991013, + 1.535355, + 7.554591, + 4.05403, + 1.101544, + 0.11002, + 6.528559, + 
52.495068, + 20.338634, + ], + 'Tl3+': [ + 32.525639, + 19.139185, + 17.100321, + 5.891115, + 12.599463, + -9.256075, + 1.094966, + 6.900992, + 0.103667, + 18.489614, + -0.001401, + ], + 'Pb': [ + 16.419567, + 32.73859, + 6.530247, + 2.342742, + 19.916475, + 4.049824, + 0.105499, + 1.055049, + 25.02589, + 80.906593, + 6.664449, + ], + 'Pb2+': [ + 27.392647, + 16.496822, + 19.984501, + 6.813923, + 5.23391, + 4.065623, + 1.058874, + 0.106305, + 6.708123, + 24.395554, + 1.058874, + ], + 'Pb4+': [ + 32.505657, + 20.01424, + 14.645661, + 5.029499, + 1.760138, + 4.044678, + 1.047035, + 6.670321, + 0.105279, + 16.52504, + 0.105279, + ], + 'Bi': [ + 16.282274, + 32.725136, + 6.678302, + 2.69475, + 20.576559, + 4.040914, + 0.10118, + 1.002287, + 25.714146, + 77.057549, + 6.291882, + ], + 'Bi3+': [ + 32.461437, + 19.438683, + 16.302486, + 7.322662, + 0.431704, + 4.043703, + 0.99793, + 6.038867, + 0.101338, + 18.371586, + 46.361046, + ], + 'Bi5+': [ + 16.734028, + 20.580494, + 9.452623, + 61.155834, + -34.041023, + 4.113663, + 0.105076, + 4.773282, + 11.762162, + 1.211775, + 1.619408, + ], + 'Po': [ + 16.289164, + 32.807171, + 21.095163, + 2.505901, + 7.254589, + 4.046556, + 0.098121, + 0.966265, + 6.046622, + 76.598068, + 28.096128, + ], + 'At': [ + 16.011461, + 32.615547, + 8.113899, + 2.884082, + 21.377867, + 3.995684, + 0.092639, + 0.904416, + 26.543257, + 68.372963, + 5.499512, + ], + 'Rn': [ + 16.070229, + 32.641106, + 21.489658, + 2.299218, + 9.480184, + 4.020977, + 0.090437, + 0.876409, + 5.239687, + 69.188477, + 27.632641, + ], + 'Fr': [ + 16.007385, + 32.66383, + 21.594351, + 1.598497, + 11.121192, + 4.003472, + 0.087031, + 0.840187, + 4.954467, + 199.805801, + 26.905106, + ], + 'Ra': [ + 32.56369, + 21.396671, + 11.298093, + 2.834688, + 15.914965, + 3.981773, + 0.80198, + 4.590666, + 22.758972, + 160.404388, + 0.083544, + ], + 'Ra2+': [ + 4.986228, + 32.474945, + 21.947443, + 11.800013, + 10.807292, + 3.956572, + 0.082597, + 0.791468, + 4.608034, + 24.792431, + 0.082597, + ], + 'Ac': [ + 15.914053, + 32.535042, + 21.553976, + 11.433394, + 3.612409, + 3.939212, + 0.080511, + 0.770669, + 4.352206, + 21.381622, + 130.500748, + ], + 'Ac3+': [ + 15.584983, + 32.022125, + 21.456327, + 0.757593, + 12.341252, + 3.838984, + 0.077438, + 0.739963, + 4.040735, + 47.525002, + 19.406845, + ], + 'Th': [ + 15.784024, + 32.454899, + 21.849222, + 4.239077, + 11.736191, + 3.922533, + 0.077067, + 0.735137, + 4.097976, + 109.464111, + 20.512138, + ], + 'Th4+': [ + 15.515445, + 32.090691, + 13.996399, + 12.918157, + 7.635514, + 3.831122, + 0.074499, + 0.711663, + 3.871044, + 18.596891, + 3.871044, + ], + 'Pa': [ + 32.740208, + 21.973675, + 12.957398, + 3.683832, + 15.744058, + 3.886066, + 0.709545, + 4.050881, + 19.231543, + 117.255005, + 0.07404, + ], + 'U': [ + 15.679275, + 32.824306, + 13.660459, + 3.687261, + 22.279434, + 3.854444, + 0.071206, + 0.681177, + 18.236156, + 112.500038, + 3.930325, + ], + 'U3+': [ + 15.360309, + 32.395657, + 21.96129, + 1.325894, + 14.251453, + 3.706622, + 0.067815, + 0.654643, + 3.643409, + 39.604965, + 16.33057, + ], + 'U4+': [ + 15.355091, + 32.235306, + 0.557745, + 14.396367, + 21.751173, + 3.705863, + 0.067789, + 0.652613, + 42.354237, + 15.908239, + 3.553231, + ], + 'U6+': [ + 15.333844, + 31.770849, + 21.274414, + 13.872636, + 0.048519, + 3.700591, + 0.067644, + 0.646384, + 3.317894, + 14.65025, + 75.339699, + ], + 'Np': [ + 32.999901, + 22.638077, + 14.219973, + 3.67295, + 15.683245, + 3.769391, + 0.657086, + 3.854918, + 17.435474, + 109.464485, + 0.068033, + ], + 
'Np3+': [ + 15.378152, + 32.572132, + 22.206125, + 1.413295, + 14.828381, + 3.60337, + 0.064613, + 0.63142, + 3.561936, + 37.875511, + 15.546129, + ], + 'Np4+': [ + 15.373926, + 32.423019, + 21.969994, + 0.662078, + 14.96935, + 3.603039, + 0.064597, + 0.629658, + 3.476389, + 39.438942, + 15.135764, + ], + 'Np6+': [ + 15.359986, + 31.992825, + 21.412458, + 0.066574, + 14.568174, + 3.600942, + 0.064528, + 0.624505, + 3.253441, + 67.658318, + 13.980832, + ], + 'Pu': [ + 33.281178, + 23.148544, + 15.153755, + 3.031492, + 15.704215, + 3.6642, + 0.634999, + 3.856168, + 16.849735, + 121.292038, + 0.064857, + ], + 'Pu3+': [ + 15.356004, + 32.769127, + 22.68021, + 1.351055, + 15.416232, + 3.428895, + 0.06059, + 0.604663, + 3.491509, + 37.260635, + 14.981921, + ], + 'Pu4+': [ + 15.416219, + 32.610569, + 22.256662, + 0.719495, + 15.518152, + 3.480408, + 0.061456, + 0.607938, + 3.411848, + 37.628792, + 14.46436, + ], + 'Pu6+': [ + 15.436506, + 32.289719, + 14.726737, + 15.012391, + 7.024677, + 3.502325, + 0.061815, + 0.606541, + 3.245363, + 13.616438, + 3.245364, + ], + 'Am': [ + 33.435162, + 23.657259, + 15.576339, + 3.027023, + 15.7461, + 3.54116, + 0.612785, + 3.792942, + 16.195778, + 117.757004, + 0.061755, + ], + 'Cm': [ + 15.804837, + 33.480801, + 24.150198, + 3.655563, + 15.499866, + 3.39084, + 0.058619, + 0.59016, + 3.67472, + 100.736191, + 15.408296, + ], + 'Bk': [ + 15.889072, + 33.625286, + 24.710381, + 3.707139, + 15.839268, + 3.213169, + 0.055503, + 0.569571, + 3.615472, + 97.694786, + 14.754303, + ], + 'Cf': [ + 33.794075, + 25.467693, + 16.048487, + 3.657525, + 16.008982, + 3.005326, + 0.550447, + 3.581973, + 14.357388, + 96.064972, + 0.05245, + ], +} chargestate = { 'H': ['0', '1-'], @@ -645,7 +3293,7 @@ def is_writable_file(path): 'Am': ['0'], 'Cm': ['0'], 'Bk': ['0'], - 'Cf': ['0'] + 'Cf': ['0'], } ''' this dictionary tabulates the small @@ -653,127 +3301,513 @@ def is_writable_file(path): elements up to Z=92 ''' fNT = { -'H':-0.00054423,'He':-0.00054817,'Li':-0.00071131,'Be':-0.00097394,'B':-0.0012687,'C':-0.0016442,'N':-0.0019191,'O':-0.0021944, -'F':-0.0023389,'Ne':-0.0027186,'Na':-0.0028873,'Mg':-0.0032502,'Al':-0.0034361,'Si':-0.0038284,'P':-0.003985,'S':-0.0043804, -'Cl':-0.0044718,'Ar':-0.0044493,'K':-0.0050651,'Ca':-0.0054748,'Sc':-0.0053814,'Ti':-0.0055454,'V':-0.0056967,'Cr':-0.006077, -'Mn':-0.0062409,'Fe':-0.0066403,'Co':-0.0067859,'Ni':-0.0073281,'Cu':-0.0072602,'Zn':-0.0075516,'Ga':-0.0075615,'Ge':-0.0077386, -'As':-0.0079737,'Se':-0.0080314,'Br':-0.0084102,'Kr':-0.008484,'Rb':-0.008787,'Sr':-0.0090407,'Y':-0.0093851,'Zr':-0.0096221, -'Nb':-0.0099257,'Mo':-0.010086,'Tc':-0.01035,'Ru':-0.010508,'Rh':-0.010795,'Pd':-0.010908,'Ag':-0.011234,'Cd':-0.011244, -'In':-0.011471,'Sn':-0.011555,'Sb':-0.01172,'Te':-0.011625,'I':-0.012143,'Xe':-0.012184,'Cs':-0.012486,'Ba':-0.012527, -'La':-0.012831,'Ce':-0.01317,'Pr':-0.013552,'Nd':-0.013692,'Pm':-0.014078,'Sm':-0.014025,'Eu':-0.014328,'Gd':-0.014289, -'Tb':-0.014584,'Dy':-0.014705,'Ho':-0.014931,'Er':-0.015166,'Tm':-0.01546,'Yb':-0.015534,'Lu':-0.015805,'Hf':-0.015933, -'Ta':-0.016156,'W':-0.01634,'Re':-0.016572,'Os':-0.016659,'Ir':-0.016921,'Pt':-0.017109,'Au':-0.017382,'Hg':-0.017503, -'Tl':-0.01761,'Pb':-0.017802,'Bi':-0.018084,'Po':-0.01852,'At':-0.018874,'Rn':-0.018276,'Fr':-0.01862,'Ra':-0.018795, -'Ac':-0.01914,'Th':-0.01915,'Pa':-0.019663,'U':-0.019507 + 'H': -0.00054423, + 'He': -0.00054817, + 'Li': -0.00071131, + 'Be': -0.00097394, + 'B': -0.0012687, + 'C': -0.0016442, + 'N': -0.0019191, + 'O': -0.0021944, + 'F': 
-0.0023389, + 'Ne': -0.0027186, + 'Na': -0.0028873, + 'Mg': -0.0032502, + 'Al': -0.0034361, + 'Si': -0.0038284, + 'P': -0.003985, + 'S': -0.0043804, + 'Cl': -0.0044718, + 'Ar': -0.0044493, + 'K': -0.0050651, + 'Ca': -0.0054748, + 'Sc': -0.0053814, + 'Ti': -0.0055454, + 'V': -0.0056967, + 'Cr': -0.006077, + 'Mn': -0.0062409, + 'Fe': -0.0066403, + 'Co': -0.0067859, + 'Ni': -0.0073281, + 'Cu': -0.0072602, + 'Zn': -0.0075516, + 'Ga': -0.0075615, + 'Ge': -0.0077386, + 'As': -0.0079737, + 'Se': -0.0080314, + 'Br': -0.0084102, + 'Kr': -0.008484, + 'Rb': -0.008787, + 'Sr': -0.0090407, + 'Y': -0.0093851, + 'Zr': -0.0096221, + 'Nb': -0.0099257, + 'Mo': -0.010086, + 'Tc': -0.01035, + 'Ru': -0.010508, + 'Rh': -0.010795, + 'Pd': -0.010908, + 'Ag': -0.011234, + 'Cd': -0.011244, + 'In': -0.011471, + 'Sn': -0.011555, + 'Sb': -0.01172, + 'Te': -0.011625, + 'I': -0.012143, + 'Xe': -0.012184, + 'Cs': -0.012486, + 'Ba': -0.012527, + 'La': -0.012831, + 'Ce': -0.01317, + 'Pr': -0.013552, + 'Nd': -0.013692, + 'Pm': -0.014078, + 'Sm': -0.014025, + 'Eu': -0.014328, + 'Gd': -0.014289, + 'Tb': -0.014584, + 'Dy': -0.014705, + 'Ho': -0.014931, + 'Er': -0.015166, + 'Tm': -0.01546, + 'Yb': -0.015534, + 'Lu': -0.015805, + 'Hf': -0.015933, + 'Ta': -0.016156, + 'W': -0.01634, + 'Re': -0.016572, + 'Os': -0.016659, + 'Ir': -0.016921, + 'Pt': -0.017109, + 'Au': -0.017382, + 'Hg': -0.017503, + 'Tl': -0.01761, + 'Pb': -0.017802, + 'Bi': -0.018084, + 'Po': -0.01852, + 'At': -0.018874, + 'Rn': -0.018276, + 'Fr': -0.01862, + 'Ra': -0.018795, + 'Ac': -0.01914, + 'Th': -0.01915, + 'Pa': -0.019663, + 'U': -0.019507, } ''' relativistic correction factor for in anomalous scattering for all elements upto Z=92 ''' frel = { -'H':0.0,'He':0.0,'Li':-0.0006,'Be':-0.0006,'B':-0.0012,'C':-0.0018,'N':-0.003,'O':-0.0042, -'F':-0.0054,'Ne':-0.0066,'Na':-0.0084,'Mg':-0.0108,'Al':-0.0126,'Si':-0.0156,'P':-0.018,'S':-0.021, -'Cl':-0.0246,'Ar':-0.0282,'K':-0.0318,'Ca':-0.036,'Sc':-0.0408,'Ti':-0.045,'V':-0.0504,'Cr':-0.0558, -'Mn':-0.0612,'Fe':-0.0678,'Co':-0.0738,'Ni':-0.081,'Cu':-0.0876,'Zn':-0.0954,'Ga':-0.1032,'Ge':-0.1116, -'As':-0.12,'Se':-0.129,'Br':-0.1386,'Kr':-0.1482,'Rb':-0.1584,'Sr':-0.1692,'Y':-0.18,'Zr':-0.1914, -'Nb':-0.2028,'Mo':-0.2154,'Tc':-0.228,'Ru':-0.2406,'Rh':-0.2544,'Pd':-0.2682,'Ag':-0.2826,'Cd':-0.2976, -'In':-0.3126,'Sn':-0.3282,'Sb':-0.345,'Te':-0.3612,'I':-0.3786,'Xe':-0.396,'Cs':-0.414,'Ba':-0.4326, -'La':-0.4518,'Ce':-0.4716,'Pr':-0.4914,'Nd':-0.5124,'Pm':-0.5334,'Sm':-0.555,'Eu':-0.5772,'Gd':-0.6, -'Tb':-0.6234,'Dy':-0.6474,'Ho':-0.6714,'Er':-0.6966,'Tm':-0.7224,'Yb':-0.7488,'Lu':-0.7758,'Hf':-0.8028, -'Ta':-0.831,'W':-0.8598,'Re':-0.8892,'Os':-0.9192,'Ir':-0.9498,'Pt':-0.9816,'Au':-1.0134,'Hg':-1.0458, -'Tl':-1.0794,'Pb':-1.1136,'Bi':-1.1484,'Po':-1.1838,'At':-1.2198,'Rn':-1.257,'Fr':-1.2942,'Ra':-1.3326, -'Ac':-1.3722,'Th':-1.4118,'Pa':-1.4526,'U':-1.494} + 'H': 0.0, + 'He': 0.0, + 'Li': -0.0006, + 'Be': -0.0006, + 'B': -0.0012, + 'C': -0.0018, + 'N': -0.003, + 'O': -0.0042, + 'F': -0.0054, + 'Ne': -0.0066, + 'Na': -0.0084, + 'Mg': -0.0108, + 'Al': -0.0126, + 'Si': -0.0156, + 'P': -0.018, + 'S': -0.021, + 'Cl': -0.0246, + 'Ar': -0.0282, + 'K': -0.0318, + 'Ca': -0.036, + 'Sc': -0.0408, + 'Ti': -0.045, + 'V': -0.0504, + 'Cr': -0.0558, + 'Mn': -0.0612, + 'Fe': -0.0678, + 'Co': -0.0738, + 'Ni': -0.081, + 'Cu': -0.0876, + 'Zn': -0.0954, + 'Ga': -0.1032, + 'Ge': -0.1116, + 'As': -0.12, + 'Se': -0.129, + 'Br': -0.1386, + 'Kr': -0.1482, + 'Rb': -0.1584, + 'Sr': -0.1692, + 'Y': -0.18, + 'Zr': -0.1914, + 'Nb': -0.2028, + 'Mo': 
-0.2154, + 'Tc': -0.228, + 'Ru': -0.2406, + 'Rh': -0.2544, + 'Pd': -0.2682, + 'Ag': -0.2826, + 'Cd': -0.2976, + 'In': -0.3126, + 'Sn': -0.3282, + 'Sb': -0.345, + 'Te': -0.3612, + 'I': -0.3786, + 'Xe': -0.396, + 'Cs': -0.414, + 'Ba': -0.4326, + 'La': -0.4518, + 'Ce': -0.4716, + 'Pr': -0.4914, + 'Nd': -0.5124, + 'Pm': -0.5334, + 'Sm': -0.555, + 'Eu': -0.5772, + 'Gd': -0.6, + 'Tb': -0.6234, + 'Dy': -0.6474, + 'Ho': -0.6714, + 'Er': -0.6966, + 'Tm': -0.7224, + 'Yb': -0.7488, + 'Lu': -0.7758, + 'Hf': -0.8028, + 'Ta': -0.831, + 'W': -0.8598, + 'Re': -0.8892, + 'Os': -0.9192, + 'Ir': -0.9498, + 'Pt': -0.9816, + 'Au': -1.0134, + 'Hg': -1.0458, + 'Tl': -1.0794, + 'Pb': -1.1136, + 'Bi': -1.1484, + 'Po': -1.1838, + 'At': -1.2198, + 'Rn': -1.257, + 'Fr': -1.2942, + 'Ra': -1.3326, + 'Ac': -1.3722, + 'Th': -1.4118, + 'Pa': -1.4526, + 'U': -1.494, +} ''' atomic weights for things like density computations (from NIST elemental data base) ''' -atom_weights = np.array([1.00794, 4.002602, 6.941, 9.012182, 10.811, - 12.0107, 14.0067, 15.9994, 18.9984032, 20.1797, - 22.98976928, 24.3050, 26.9815386, 28.0855, 30.973762, - 32.065, 35.453, 39.948, 39.0983, 40.078, - 44.955912, 47.867, 50.9415, 51.9961, 54.938045, - 55.845, 58.933195, 58.6934, 63.546, 65.38, - 69.723, 72.64, 74.92160, 78.96, 79.904, - 83.798, 85.4678, 87.62, 88.90585, 91.224, - 92.90638, 95.96, 98.9062, 101.07, 102.90550, - 106.42, 107.8682, 112.411, 114.818, 118.710, - 121.760, 127.60, 126.90447, 131.293, 132.9054519, - 137.327, 138.90547, 140.116, 140.90765, 144.242, - 145.0, 150.36, 151.964, 157.25, 158.92535, - 162.500, 164.93032, 167.259, 168.93421, 173.054, - 174.9668, 178.49, 180.94788, 183.84, 186.207, - 190.23, 192.217, 195.084, 196.966569, 200.59, - 204.3833, 207.2, 208.98040, 209.0, 210.0, - 222.0, 223.0, 226.0, 227.0, 232.03806, - 231.03588, 238.02891, 237.0, 244.0, 243.0, - 247.0, 247.0, 251.0]) +atom_weights = np.array( + [ + 1.00794, + 4.002602, + 6.941, + 9.012182, + 10.811, + 12.0107, + 14.0067, + 15.9994, + 18.9984032, + 20.1797, + 22.98976928, + 24.3050, + 26.9815386, + 28.0855, + 30.973762, + 32.065, + 35.453, + 39.948, + 39.0983, + 40.078, + 44.955912, + 47.867, + 50.9415, + 51.9961, + 54.938045, + 55.845, + 58.933195, + 58.6934, + 63.546, + 65.38, + 69.723, + 72.64, + 74.92160, + 78.96, + 79.904, + 83.798, + 85.4678, + 87.62, + 88.90585, + 91.224, + 92.90638, + 95.96, + 98.9062, + 101.07, + 102.90550, + 106.42, + 107.8682, + 112.411, + 114.818, + 118.710, + 121.760, + 127.60, + 126.90447, + 131.293, + 132.9054519, + 137.327, + 138.90547, + 140.116, + 140.90765, + 144.242, + 145.0, + 150.36, + 151.964, + 157.25, + 158.92535, + 162.500, + 164.93032, + 167.259, + 168.93421, + 173.054, + 174.9668, + 178.49, + 180.94788, + 183.84, + 186.207, + 190.23, + 192.217, + 195.084, + 196.966569, + 200.59, + 204.3833, + 207.2, + 208.98040, + 209.0, + 210.0, + 222.0, + 223.0, + 226.0, + 227.0, + 232.03806, + 231.03588, + 238.02891, + 237.0, + 244.0, + 243.0, + 247.0, + 247.0, + 251.0, + ] +) """ dictionary of atomic weights """ -ATOM_WEIGHTS_DICT = {'H': 1.00794,'He': 4.002602,'Li': 6.941, - 'Be': 9.012182,'B': 10.811,'C': 12.0107, - 'N': 14.0067,'O': 15.9994,'F': 18.9984032, - 'Ne': 20.1797,'Na': 22.98976928,'Mg': 24.305, - 'Al': 26.9815386,'Si': 28.0855,'P': 30.973762, - 'S': 32.065,'Cl': 35.453,'Ar': 39.948, - 'K': 39.0983,'Ca': 40.078,'Sc': 44.955912, - 'Ti': 47.867,'V': 50.9415,'Cr': 51.9961, - 'Mn': 54.938045,'Fe': 55.845,'Co': 58.933195, - 'Ni': 58.6934,'Cu': 63.546,'Zn': 65.38, - 'Ga': 69.723,'Ge': 72.64,'As': 74.9216, - 'Se': 
78.96,'Br': 79.904,'Kr': 83.798, - 'Rb': 85.4678,'Sr': 87.62,'Y': 88.90585, - 'Zr': 91.224,'Nb': 92.90638,'Mo': 95.96, - 'Tc': 98.9062,'Ru': 101.07,'Rh': 102.9055, - 'Pd': 106.42,'Ag': 107.8682,'Cd': 112.411, - 'In': 114.818,'Sn': 118.71,'Sb': 121.76, - 'Te': 127.6,'I': 126.90447,'Xe': 131.293, - 'Cs': 132.9054519,'Ba': 137.327,'La': 138.90547, - 'Ce': 140.116,'Pr': 140.90765,'Nd': 144.242, - 'Pm': 145.0,'Sm': 150.36,'Eu': 151.964, - 'Gd': 157.25,'Tb': 158.92535,'Dy': 162.5, - 'Ho': 164.93032,'Er': 167.259,'Tm': 168.93421, - 'Yb': 173.054,'Lu': 174.9668,'Hf': 178.49, - 'Ta': 180.94788,'W': 183.84,'Re': 186.207, - 'Os': 190.23,'Ir': 192.217,'Pt': 195.084, - 'Au': 196.966569,'Hg': 200.59,'Tl': 204.3833, - 'Pb': 207.2,'Bi': 208.9804,'Po': 209.0, - 'At': 210.0,'Rn': 222.0,'Fr': 223.0,'Ra': 226.0, - 'Ac': 227.0,'Th': 232.0377,'Pa': 231.03588, - 'U': 238.02891,'Np': 237.0,'Pu': 244.0, - 'Am': 243.0,'Cm': 247.0,'Bk': 247.0, - 'Cf': 251.0} +ATOM_WEIGHTS_DICT = { + 'H': 1.00794, + 'He': 4.002602, + 'Li': 6.941, + 'Be': 9.012182, + 'B': 10.811, + 'C': 12.0107, + 'N': 14.0067, + 'O': 15.9994, + 'F': 18.9984032, + 'Ne': 20.1797, + 'Na': 22.98976928, + 'Mg': 24.305, + 'Al': 26.9815386, + 'Si': 28.0855, + 'P': 30.973762, + 'S': 32.065, + 'Cl': 35.453, + 'Ar': 39.948, + 'K': 39.0983, + 'Ca': 40.078, + 'Sc': 44.955912, + 'Ti': 47.867, + 'V': 50.9415, + 'Cr': 51.9961, + 'Mn': 54.938045, + 'Fe': 55.845, + 'Co': 58.933195, + 'Ni': 58.6934, + 'Cu': 63.546, + 'Zn': 65.38, + 'Ga': 69.723, + 'Ge': 72.64, + 'As': 74.9216, + 'Se': 78.96, + 'Br': 79.904, + 'Kr': 83.798, + 'Rb': 85.4678, + 'Sr': 87.62, + 'Y': 88.90585, + 'Zr': 91.224, + 'Nb': 92.90638, + 'Mo': 95.96, + 'Tc': 98.9062, + 'Ru': 101.07, + 'Rh': 102.9055, + 'Pd': 106.42, + 'Ag': 107.8682, + 'Cd': 112.411, + 'In': 114.818, + 'Sn': 118.71, + 'Sb': 121.76, + 'Te': 127.6, + 'I': 126.90447, + 'Xe': 131.293, + 'Cs': 132.9054519, + 'Ba': 137.327, + 'La': 138.90547, + 'Ce': 140.116, + 'Pr': 140.90765, + 'Nd': 144.242, + 'Pm': 145.0, + 'Sm': 150.36, + 'Eu': 151.964, + 'Gd': 157.25, + 'Tb': 158.92535, + 'Dy': 162.5, + 'Ho': 164.93032, + 'Er': 167.259, + 'Tm': 168.93421, + 'Yb': 173.054, + 'Lu': 174.9668, + 'Hf': 178.49, + 'Ta': 180.94788, + 'W': 183.84, + 'Re': 186.207, + 'Os': 190.23, + 'Ir': 192.217, + 'Pt': 195.084, + 'Au': 196.966569, + 'Hg': 200.59, + 'Tl': 204.3833, + 'Pb': 207.2, + 'Bi': 208.9804, + 'Po': 209.0, + 'At': 210.0, + 'Rn': 222.0, + 'Fr': 223.0, + 'Ra': 226.0, + 'Ac': 227.0, + 'Th': 232.0377, + 'Pa': 231.03588, + 'U': 238.02891, + 'Np': 237.0, + 'Pu': 244.0, + 'Am': 243.0, + 'Cm': 247.0, + 'Bk': 247.0, + 'Cf': 251.0, +} """ densities of elements in g/cc """ -DENSITY = {'H': 8.99e-05,'He': 0.0001785,'Li': 0.535,'Be': 1.848, - 'B': 2.46,'C': 2.26,'N': 0.001251,'O': 0.001429, - 'F': 0.001696,'Ne': 0.0009,'Na': 0.968,'Mg': 1.738, - 'Al': 2.7,'Si': 2.33,'P': 1.823,'S': 1.96,'Cl': 0.003214, - 'Ar': 0.001784,'K': 0.856,'Ca': 1.55,'Sc': 2.985, - 'Ti': 4.507,'V': 6.11,'Cr': 7.14,'Mn': 7.47,'Fe': 7.874, - 'Co': 8.9,'Ni': 8.908,'Cu': 8.92,'Zn': 7.14,'Ga': 5.904, - 'Ge': 5.323,'As': 5.727,'Se': 4.819,'Br': 3.12,'Kr': 0.00375, - 'Rb': 1.532,'Sr': 2.63,'Y': 4.472,'Zr': 6.511, - 'Nb': 8.57,'Mo': 10.28,'Tc': 11.5,'Ru': 12.37, - 'Rh': 12.45,'Pd': 12.023,'Ag': 10.49,'Cd': 8.65, - 'In': 7.31,'Sn': 7.31,'Sb': 6.697,'Te': 6.24, - 'I': 4.94,'Xe': 0.0059,'Cs': 1.879,'Ba': 3.51, - 'La': 6.146,'Ce': 6.689,'Pr': 6.64,'Nd': 7.01, - 'Pm': 7.264,'Sm': 7.353,'Eu': 5.244,'Gd': 7.901, - 'Tb': 8.219,'Dy': 8.551,'Ho': 8.795,'Er': 9.066, - 'Tm': 9.321,'Yb': 6.57,'Lu': 9.841,'Hf': 
13.31, - 'Ta': 16.65,'W': 19.25,'Re': 21.02,'Os': 22.59, - 'Ir': 22.56,'Pt': 21.09,'Au': 19.3,'Hg': 13.534, - 'Tl': 11.85,'Pb': 11.34,'Bi': 9.78,'Po': 9.196, - 'At': None,'Rn': 0.00973,'Fr': None,'Ra': 5.0, - 'Ac': 10.07,'Th': 11.724,'Pa': 15.37,'U': 19.05, - 'Np': 20.45,'Pu': 19.816,'Am': 13.67,'Cm': 13.51, - 'Bk': 14.78,'Cf': 15.1} +DENSITY = { + 'H': 8.99e-05, + 'He': 0.0001785, + 'Li': 0.535, + 'Be': 1.848, + 'B': 2.46, + 'C': 2.26, + 'N': 0.001251, + 'O': 0.001429, + 'F': 0.001696, + 'Ne': 0.0009, + 'Na': 0.968, + 'Mg': 1.738, + 'Al': 2.7, + 'Si': 2.33, + 'P': 1.823, + 'S': 1.96, + 'Cl': 0.003214, + 'Ar': 0.001784, + 'K': 0.856, + 'Ca': 1.55, + 'Sc': 2.985, + 'Ti': 4.507, + 'V': 6.11, + 'Cr': 7.14, + 'Mn': 7.47, + 'Fe': 7.874, + 'Co': 8.9, + 'Ni': 8.908, + 'Cu': 8.92, + 'Zn': 7.14, + 'Ga': 5.904, + 'Ge': 5.323, + 'As': 5.727, + 'Se': 4.819, + 'Br': 3.12, + 'Kr': 0.00375, + 'Rb': 1.532, + 'Sr': 2.63, + 'Y': 4.472, + 'Zr': 6.511, + 'Nb': 8.57, + 'Mo': 10.28, + 'Tc': 11.5, + 'Ru': 12.37, + 'Rh': 12.45, + 'Pd': 12.023, + 'Ag': 10.49, + 'Cd': 8.65, + 'In': 7.31, + 'Sn': 7.31, + 'Sb': 6.697, + 'Te': 6.24, + 'I': 4.94, + 'Xe': 0.0059, + 'Cs': 1.879, + 'Ba': 3.51, + 'La': 6.146, + 'Ce': 6.689, + 'Pr': 6.64, + 'Nd': 7.01, + 'Pm': 7.264, + 'Sm': 7.353, + 'Eu': 5.244, + 'Gd': 7.901, + 'Tb': 8.219, + 'Dy': 8.551, + 'Ho': 8.795, + 'Er': 9.066, + 'Tm': 9.321, + 'Yb': 6.57, + 'Lu': 9.841, + 'Hf': 13.31, + 'Ta': 16.65, + 'W': 19.25, + 'Re': 21.02, + 'Os': 22.59, + 'Ir': 22.56, + 'Pt': 21.09, + 'Au': 19.3, + 'Hg': 13.534, + 'Tl': 11.85, + 'Pb': 11.34, + 'Bi': 9.78, + 'Po': 9.196, + 'At': None, + 'Rn': 0.00973, + 'Fr': None, + 'Ra': 5.0, + 'Ac': 10.07, + 'Th': 11.724, + 'Pa': 15.37, + 'U': 19.05, + 'Np': 20.45, + 'Pu': 19.816, + 'Am': 13.67, + 'Cm': 13.51, + 'Bk': 14.78, + 'Cf': 15.1, +} # some polymer densities commonly used in hexrd DENSITY_COMPOUNDS = { @@ -789,22 +3823,115 @@ def is_writable_file(path): used in I/O from cif file ''' ptable = { - 'H': 1, 'He': 2, 'Li': 3, 'Be': 4, 'B': 5, 'C': 6, 'N': 7, 'O': 8, - 'F': 9, 'Ne': 10, 'Na': 11, 'Mg': 12, 'Al': 13, 'Si': 14, 'P': 15, - 'S': 16, 'Cl': 17, 'Ar': 18, 'K': 19, 'Ca': 20, 'Sc': 21, 'Ti': 22, - 'V': 23, 'Cr': 24, 'Mn': 25, 'Fe': 26, 'Co': 27, 'Ni': 28, 'Cu': 29, - 'Zn': 30, 'Ga': 31, 'Ge': 32, 'As': 33, 'Se': 34, 'Br': 35, 'Kr': 36, - 'Rb': 37, 'Sr': 38, 'Y': 39, 'Zr': 40, 'Nb': 41, 'Mo': 42, 'Tc': 43, - 'Ru': 44, 'Rh': 45, 'Pd': 46, 'Ag': 47, 'Cd': 48, 'In': 49, 'Sn': 50, - 'Sb': 51, 'Te': 52, 'I': 53, 'Xe': 54, 'Cs': 55, 'Ba': 56, 'La': 57, - 'Ce': 58, 'Pr': 59, 'Nd': 60, 'Pm': 61, 'Sm': 62, 'Eu': 63, 'Gd': 64, - 'Tb': 65, 'Dy': 66, 'Ho': 67, 'Er': 68, 'Tm': 69, 'Yb': 70, 'Lu': 71, - 'Hf': 72, 'Ta': 73, 'W': 74, 'Re': 75, 'Os': 76, 'Ir': 77, 'Pt': 78, - 'Au': 79, 'Hg': 80, 'Tl': 81, 'Pb': 82, 'Bi': 83, 'Po': 84, 'At': 85, - 'Rn': 86, 'Fr': 87, 'Ra': 88, 'Ac': 89, 'Th': 90, 'Pa': 91, 'U': 92, - 'Np': 93, 'Pu': 94, 'Am': 95, 'Cm': 96, 'Bk': 97, 'Cf': 98, 'Es': 99, - 'Fm': 100, 'Md': 101, 'No': 102, 'Lr': 103, 'Rf': 104, 'Db': 105, - 'Sg': 106, 'Bh': 107, 'Hs': 108, 'Mt': 109 + 'H': 1, + 'He': 2, + 'Li': 3, + 'Be': 4, + 'B': 5, + 'C': 6, + 'N': 7, + 'O': 8, + 'F': 9, + 'Ne': 10, + 'Na': 11, + 'Mg': 12, + 'Al': 13, + 'Si': 14, + 'P': 15, + 'S': 16, + 'Cl': 17, + 'Ar': 18, + 'K': 19, + 'Ca': 20, + 'Sc': 21, + 'Ti': 22, + 'V': 23, + 'Cr': 24, + 'Mn': 25, + 'Fe': 26, + 'Co': 27, + 'Ni': 28, + 'Cu': 29, + 'Zn': 30, + 'Ga': 31, + 'Ge': 32, + 'As': 33, + 'Se': 34, + 'Br': 35, + 'Kr': 36, + 'Rb': 37, + 'Sr': 38, + 'Y': 39, + 'Zr': 40, + 'Nb': 41, + 
'Mo': 42, + 'Tc': 43, + 'Ru': 44, + 'Rh': 45, + 'Pd': 46, + 'Ag': 47, + 'Cd': 48, + 'In': 49, + 'Sn': 50, + 'Sb': 51, + 'Te': 52, + 'I': 53, + 'Xe': 54, + 'Cs': 55, + 'Ba': 56, + 'La': 57, + 'Ce': 58, + 'Pr': 59, + 'Nd': 60, + 'Pm': 61, + 'Sm': 62, + 'Eu': 63, + 'Gd': 64, + 'Tb': 65, + 'Dy': 66, + 'Ho': 67, + 'Er': 68, + 'Tm': 69, + 'Yb': 70, + 'Lu': 71, + 'Hf': 72, + 'Ta': 73, + 'W': 74, + 'Re': 75, + 'Os': 76, + 'Ir': 77, + 'Pt': 78, + 'Au': 79, + 'Hg': 80, + 'Tl': 81, + 'Pb': 82, + 'Bi': 83, + 'Po': 84, + 'At': 85, + 'Rn': 86, + 'Fr': 87, + 'Ra': 88, + 'Ac': 89, + 'Th': 90, + 'Pa': 91, + 'U': 92, + 'Np': 93, + 'Pu': 94, + 'Am': 95, + 'Cm': 96, + 'Bk': 97, + 'Cf': 98, + 'Es': 99, + 'Fm': 100, + 'Md': 101, + 'No': 102, + 'Lr': 103, + 'Rf': 104, + 'Db': 105, + 'Sg': 106, + 'Bh': 107, + 'Hs': 108, + 'Mt': 109, } ptableinverse = dict.fromkeys(ptable.values()) @@ -814,14 +3941,83 @@ def is_writable_file(path): ''' listing the symmorphic space groups ''' -sgnum_symmorphic = np.array([ - 1, 2, 3, 5, 6, 8, 10, 12, 16, 21, 22, 23, 25, 35, 38, 42, 44, 47, - 65, 69, 71, 75, 79, 81, 82, 83, 87, 89, 97, 99, 107, 111, 115, - 119, 121, 123, 139, 143, 146, 147, 148, 149, 150, 155, 156, 157, - 160, 162, 164, 166, 168, 174, 175, 177, 183, 187, 189, 191, 195, - 196, 197, 200, 202, 204, 207, 209, 211, 215, 216, 217, 221, 225, - 229 -]) +sgnum_symmorphic = np.array( + [ + 1, + 2, + 3, + 5, + 6, + 8, + 10, + 12, + 16, + 21, + 22, + 23, + 25, + 35, + 38, + 42, + 44, + 47, + 65, + 69, + 71, + 75, + 79, + 81, + 82, + 83, + 87, + 89, + 97, + 99, + 107, + 111, + 115, + 119, + 121, + 123, + 139, + 143, + 146, + 147, + 148, + 149, + 150, + 155, + 156, + 157, + 160, + 162, + 164, + 166, + 168, + 174, + 175, + 177, + 183, + 187, + 189, + 191, + 195, + 196, + 197, + 200, + 202, + 204, + 207, + 209, + 211, + 215, + 216, + 217, + 221, + 225, + 229, + ] +) ''' this variable encodes all the generators (including translations) for all 230 space groups @@ -829,85 +4025,244 @@ def is_writable_file(path): operators ''' SYM_GL = [ - "000 ", "100 ", "01cOOO0 ", - "01cODO0 ", "02aDDOcOOO0 ", "01jOOO0 ", - "01jOOD0 ", "02aDDOjOOO0 ", "02aDDOjOOD0 ", - "11cOOO0 ", "11cODO0 ", "12aDDOcOOO0 ", - "11cOOD0 ", "11cODD0 ", "12aDDOcOOD0 ", - "02bOOOcOOO0 ", "02bOODcOOD0 ", "02bOOOcDDO0 ", - "02bDODcODD0 ", "03aDDObOODcOOD0 ", "03aDDObOOOcOOO0 ", - "04aODDaDODbOOOcOOO0 ", "03aDDDbOOOcOOO0 ", "03aDDDbDODcODD0 ", - "02bOOOjOOO0 ", "02bOODjOOD0 ", "02bOOOjOOD0 ", - "02bOOOjDOO0 ", "02bOODjDOO0 ", "02bOOOjODD0 ", - "02bDODjDOD0 ", "02bOOOjDDO0 ", "02bOODjDDO0 ", - "02bOOOjDDD0 ", "03aDDObOOOjOOO0 ", "03aDDObOODjOOD0 ", - "03aDDObOOOjOOD0 ", "03aODDbOOOjOOO0 ", "03aODDbOOOjODO0 ", - "03aODDbOOOjDOO0 ", "03aODDbOOOjDDO0 ", "04aODDaDODbOOOjOOO0 ", - "04aODDaDODbOOOjBBB0 ", "03aDDDbOOOjOOO0 ", "03aDDDbOOOjDDO0 ", - "03aDDDbOOOjDOO0 ", "12bOOOcOOO0 ", "03bOOOcOOOhDDD1BBB ", - "12bOOOcOOD0 ", "03bOOOcOOOhDDO1BBO ", "12bDOOcOOO0 ", - "12bDOOcDDD0 ", "12bDODcDOD0 ", "12bDOOcOOD0 ", - "12bOOOcDDO0 ", "12bDDOcODD0 ", "12bOODcODD0 ", - "12bOOOcDDD0 ", "03bOOOcDDOhDDO1BBO ", "12bDDDcOOD0 ", - "12bDODcODD0 ", "12bDODcODO0 ", "13aDDObOODcOOD0 ", - "13aDDObODDcODD0 ", "13aDDObOOOcOOO0 ", "13aDDObOOOcOOD0 ", - "13aDDObODOcODO0 ", "04aDDObDDOcOOOhODD1OBB ", "14aODDaDODbOOOcOOO0 ", - "05aODDaDODbOOOcOOOhBBB1ZZZ ", "13aDDDbOOOcOOO0 ", "13aDDDbOOOcDDO0 ", - "13aDDDbDODcODD0 ", "13aDDDbODOcODO0 ", "02bOOOgOOO0 ", - "02bOODgOOB0 ", "02bOOOgOOD0 ", "02bOODgOOF0 ", - "03aDDDbOOOgOOO0 ", "03aDDDbDDDgODB0 ", "02bOOOmOOO0 ", - "03aDDDbOOOmOOO0 ", "12bOOOgOOO0 ", 
"12bOOOgOOD0 ", - "03bOOOgDDOhDDO1YBO ", "03bOOOgDDDhDDD1YYY ", "13aDDDbOOOgOOO0 ", - "04aDDDbDDDgODBhODB1OYZ ", "03bOOOgOOOcOOO0 ", "03bOOOgDDOcDDO0 ", - "03bOODgOOBcOOO0 ", "03bOODgDDBcDDB0 ", "03bOOOgOODcOOO0 ", - "03bOOOgDDDcDDD0 ", "03bOODgOOFcOOO0 ", "03bOODgDDFcDDF0 ", - "04aDDDbOOOgOOOcOOO0 ", "04aDDDbDDDgODBcDOF0 ", "03bOOOgOOOjOOO0 ", - "03bOOOgOOOjDDO0 ", "03bOOOgOODjOOD0 ", "03bOOOgDDDjDDD0 ", - "03bOOOgOOOjOOD0 ", "03bOOOgOOOjDDD0 ", "03bOOOgOODjOOO0 ", - "03bOOOgOODjDDO0 ", "04aDDDbOOOgOOOjOOO0 ", "04aDDDbOOOgOOOjOOD0 ", - "04aDDDbDDDgODBjOOO0 ", "04aDDDbDDDgODBjOOD0 ", "03bOOOmOOOcOOO0 ", - "03bOOOmOOOcOOD0 ", "03bOOOmOOOcDDO0 ", "03bOOOmOOOcDDD0 ", - "03bOOOmOOOjOOO0 ", "03bOOOmOOOjOOD0 ", "03bOOOmOOOjDDO0 ", - "03bOOOmOOOjDDD0 ", "04aDDDbOOOmOOOjOOO0 ", "04aDDDbOOOmOOOjOOD0 ", - "04aDDDbOOOmOOOcOOO0 ", "04aDDDbOOOmOOOcDOF0 ", "13bOOOgOOOcOOO0 ", - "13bOOOgOOOcOOD0 ", "04bOOOgOOOcOOOhDDO1YYO ", "04bOOOgOOOcOOOhDDD1YYY ", - "13bOOOgOOOcDDO0 ", "13bOOOgOOOcDDD0 ", "04bOOOgDDOcDDOhDDO1YBO ", - "04bOOOgDDOcDDDhDDO1YBO ", "13bOOOgOODcOOO0 ", "13bOOOgOODcOOD0 ", - "04bOOOgDDDcOODhDDD1YBY ", "04bOOOgDDDcOOOhDDD1YBY ", "13bOOOgOODcDDO0 ", - "13bOOOgDDDcDDD0 ", "04bOOOgDDDcDDDhDDD1YBY ", "04bOOOgDDDcDDOhDDD1YBY ", - "14aDDDbOOOgOOOcOOO0 ", "14aDDDbOOOgOOOcOOD0 ", "05aDDDbDDDgODBcDOFhODB1OBZ ", - "05aDDDbDDDgODBcDOBhODB1OBZ ", "01nOOO0 ", "01nOOC0 ", - "01nOOE0 ", "02aECCnOOO0 ", "11nOOO0 ", - "12aECCnOOO0 ", "02nOOOfOOO0 ", "02nOOOeOOO0 ", - "02nOOCfOOE0 ", "02nOOCeOOO0 ", "02nOOEfOOC0 ", - "02nOOEeOOO0 ", "03aECCnOOOeOOO0 ", "02nOOOkOOO0 ", - "02nOOOlOOO0 ", "02nOOOkOOD0 ", "02nOOOlOOD0 ", - "03aECCnOOOkOOO0 ", "03aECCnOOOkOOD0 ", "12nOOOfOOO0 ", - "12nOOOfOOD0 ", "12nOOOeOOO0 ", "12nOOOeOOD0 ", - "13aECCnOOOeOOO0 ", "13aECCnOOOeOOD0 ", "02nOOObOOO0 ", - "02nOOCbOOD0 ", "02nOOEbOOD0 ", "02nOOEbOOO0 ", - "02nOOCbOOO0 ", "02nOOObOOD0 ", "02nOOOiOOO0 ", - "12nOOObOOO0 ", "12nOOObOOD0 ", "03nOOObOOOeOOO0 ", - "03nOOCbOODeOOC0 ", "03nOOEbOODeOOE0 ", "03nOOEbOOOeOOE0 ", - "03nOOCbOOOeOOC0 ", "03nOOObOODeOOO0 ", "03nOOObOOOkOOO0 ", - "03nOOObOOOkOOD0 ", "03nOOObOODkOOD0 ", "03nOOObOODkOOO0 ", - "03nOOOiOOOkOOO0 ", "03nOOOiOODkOOD0 ", "03nOOOiOOOeOOO0 ", - "03nOOOiOODeOOO0 ", "13nOOObOOOeOOO0 ", "13nOOObOOOeOOD0 ", - "13nOOObOODeOOD0 ", "13nOOObOODeOOO0 ", "03bOOOcOOOdOOO0 ", - "05aODDaDODbOOOcOOOdOOO0 ", "04aDDDbOOOcOOOdOOO0 ", "03bDODcODDdOOO0 ", - "04aDDDbDODcODDdOOO0 ", "13bOOOcOOOdOOO0 ", "04bOOOcOOOdOOOhDDD1YYY ", - "15aODDaDODbOOOcOOOdOOO0 ", "06aODDaDODbOOOcOOOdOOOhBBB1ZZZ ", "14aDDDbOOOcOOOdOOO0 ", - "13bDODcODDdOOO0 ", "14aDDDbDODcODDdOOO0 ", "04bOOOcOOOdOOOeOOO0 ", - "04bOOOcOOOdOOOeDDD0 ", "06aODDaDODbOOOcOOOdOOOeOOO0 ", "06aODDaDODbODDcDDOdOOOeFBF0 ", - "05aDDDbOOOcOOOdOOOeOOO0 ", "04bDODcODDdOOOeBFF0 ", "04bDODcODDdOOOeFBB0 ", - "05aDDDbDODcODDdOOOeFBB0 ", "04bOOOcOOOdOOOlOOO0 ", "06aODDaDODbOOOcOOOdOOOlOOO0 ", - "05aDDDbOOOcOOOdOOOlOOO0 ", "04bOOOcOOOdOOOlDDD0 ", "06aODDaDODbOOOcOOOdOOOlDDD0 ", - "05aDDDbDODcODDdOOOlBBB0 ", "14bOOOcOOOdOOOeOOO0 ", "05bOOOcOOOdOOOeOOOhDDD1YYY ", - "14bOOOcOOOdOOOeDDD0 ", "05bOOOcOOOdOOOeDDDhDDD1YYY ", "16aODDaDODbOOOcOOOdOOOeOOO0 ", - "16aODDaDODbOOOcOOOdOOOeDDD0 ", "07aODDaDODbODDcDDOdOOOeFBFhBBB1ZZZ ", "07aODDaDODbODDcDDOdOOOeFBFhFFF1XXX ", - "15aDDDbOOOcOOOdOOOeOOO0 ", "15aDDDbDODcODDdOOOeFBB0 ", "01dOOO0 ", - "11dOOO0 ", "02dOOOfOOO0 ", "02dOOOlOOO0 ", - "02dOOOlDDD0 ", "12dOOOfOOO0 ", "12dOOOfDDD0 "] + "000 ", + "100 ", + "01cOOO0 ", + "01cODO0 ", + "02aDDOcOOO0 ", + "01jOOO0 ", + "01jOOD0 ", + "02aDDOjOOO0 ", + "02aDDOjOOD0 ", + 
"11cOOO0 ", + "11cODO0 ", + "12aDDOcOOO0 ", + "11cOOD0 ", + "11cODD0 ", + "12aDDOcOOD0 ", + "02bOOOcOOO0 ", + "02bOODcOOD0 ", + "02bOOOcDDO0 ", + "02bDODcODD0 ", + "03aDDObOODcOOD0 ", + "03aDDObOOOcOOO0 ", + "04aODDaDODbOOOcOOO0 ", + "03aDDDbOOOcOOO0 ", + "03aDDDbDODcODD0 ", + "02bOOOjOOO0 ", + "02bOODjOOD0 ", + "02bOOOjOOD0 ", + "02bOOOjDOO0 ", + "02bOODjDOO0 ", + "02bOOOjODD0 ", + "02bDODjDOD0 ", + "02bOOOjDDO0 ", + "02bOODjDDO0 ", + "02bOOOjDDD0 ", + "03aDDObOOOjOOO0 ", + "03aDDObOODjOOD0 ", + "03aDDObOOOjOOD0 ", + "03aODDbOOOjOOO0 ", + "03aODDbOOOjODO0 ", + "03aODDbOOOjDOO0 ", + "03aODDbOOOjDDO0 ", + "04aODDaDODbOOOjOOO0 ", + "04aODDaDODbOOOjBBB0 ", + "03aDDDbOOOjOOO0 ", + "03aDDDbOOOjDDO0 ", + "03aDDDbOOOjDOO0 ", + "12bOOOcOOO0 ", + "03bOOOcOOOhDDD1BBB ", + "12bOOOcOOD0 ", + "03bOOOcOOOhDDO1BBO ", + "12bDOOcOOO0 ", + "12bDOOcDDD0 ", + "12bDODcDOD0 ", + "12bDOOcOOD0 ", + "12bOOOcDDO0 ", + "12bDDOcODD0 ", + "12bOODcODD0 ", + "12bOOOcDDD0 ", + "03bOOOcDDOhDDO1BBO ", + "12bDDDcOOD0 ", + "12bDODcODD0 ", + "12bDODcODO0 ", + "13aDDObOODcOOD0 ", + "13aDDObODDcODD0 ", + "13aDDObOOOcOOO0 ", + "13aDDObOOOcOOD0 ", + "13aDDObODOcODO0 ", + "04aDDObDDOcOOOhODD1OBB ", + "14aODDaDODbOOOcOOO0 ", + "05aODDaDODbOOOcOOOhBBB1ZZZ ", + "13aDDDbOOOcOOO0 ", + "13aDDDbOOOcDDO0 ", + "13aDDDbDODcODD0 ", + "13aDDDbODOcODO0 ", + "02bOOOgOOO0 ", + "02bOODgOOB0 ", + "02bOOOgOOD0 ", + "02bOODgOOF0 ", + "03aDDDbOOOgOOO0 ", + "03aDDDbDDDgODB0 ", + "02bOOOmOOO0 ", + "03aDDDbOOOmOOO0 ", + "12bOOOgOOO0 ", + "12bOOOgOOD0 ", + "03bOOOgDDOhDDO1YBO ", + "03bOOOgDDDhDDD1YYY ", + "13aDDDbOOOgOOO0 ", + "04aDDDbDDDgODBhODB1OYZ ", + "03bOOOgOOOcOOO0 ", + "03bOOOgDDOcDDO0 ", + "03bOODgOOBcOOO0 ", + "03bOODgDDBcDDB0 ", + "03bOOOgOODcOOO0 ", + "03bOOOgDDDcDDD0 ", + "03bOODgOOFcOOO0 ", + "03bOODgDDFcDDF0 ", + "04aDDDbOOOgOOOcOOO0 ", + "04aDDDbDDDgODBcDOF0 ", + "03bOOOgOOOjOOO0 ", + "03bOOOgOOOjDDO0 ", + "03bOOOgOODjOOD0 ", + "03bOOOgDDDjDDD0 ", + "03bOOOgOOOjOOD0 ", + "03bOOOgOOOjDDD0 ", + "03bOOOgOODjOOO0 ", + "03bOOOgOODjDDO0 ", + "04aDDDbOOOgOOOjOOO0 ", + "04aDDDbOOOgOOOjOOD0 ", + "04aDDDbDDDgODBjOOO0 ", + "04aDDDbDDDgODBjOOD0 ", + "03bOOOmOOOcOOO0 ", + "03bOOOmOOOcOOD0 ", + "03bOOOmOOOcDDO0 ", + "03bOOOmOOOcDDD0 ", + "03bOOOmOOOjOOO0 ", + "03bOOOmOOOjOOD0 ", + "03bOOOmOOOjDDO0 ", + "03bOOOmOOOjDDD0 ", + "04aDDDbOOOmOOOjOOO0 ", + "04aDDDbOOOmOOOjOOD0 ", + "04aDDDbOOOmOOOcOOO0 ", + "04aDDDbOOOmOOOcDOF0 ", + "13bOOOgOOOcOOO0 ", + "13bOOOgOOOcOOD0 ", + "04bOOOgOOOcOOOhDDO1YYO ", + "04bOOOgOOOcOOOhDDD1YYY ", + "13bOOOgOOOcDDO0 ", + "13bOOOgOOOcDDD0 ", + "04bOOOgDDOcDDOhDDO1YBO ", + "04bOOOgDDOcDDDhDDO1YBO ", + "13bOOOgOODcOOO0 ", + "13bOOOgOODcOOD0 ", + "04bOOOgDDDcOODhDDD1YBY ", + "04bOOOgDDDcOOOhDDD1YBY ", + "13bOOOgOODcDDO0 ", + "13bOOOgDDDcDDD0 ", + "04bOOOgDDDcDDDhDDD1YBY ", + "04bOOOgDDDcDDOhDDD1YBY ", + "14aDDDbOOOgOOOcOOO0 ", + "14aDDDbOOOgOOOcOOD0 ", + "05aDDDbDDDgODBcDOFhODB1OBZ ", + "05aDDDbDDDgODBcDOBhODB1OBZ ", + "01nOOO0 ", + "01nOOC0 ", + "01nOOE0 ", + "02aECCnOOO0 ", + "11nOOO0 ", + "12aECCnOOO0 ", + "02nOOOfOOO0 ", + "02nOOOeOOO0 ", + "02nOOCfOOE0 ", + "02nOOCeOOO0 ", + "02nOOEfOOC0 ", + "02nOOEeOOO0 ", + "03aECCnOOOeOOO0 ", + "02nOOOkOOO0 ", + "02nOOOlOOO0 ", + "02nOOOkOOD0 ", + "02nOOOlOOD0 ", + "03aECCnOOOkOOO0 ", + "03aECCnOOOkOOD0 ", + "12nOOOfOOO0 ", + "12nOOOfOOD0 ", + "12nOOOeOOO0 ", + "12nOOOeOOD0 ", + "13aECCnOOOeOOO0 ", + "13aECCnOOOeOOD0 ", + "02nOOObOOO0 ", + "02nOOCbOOD0 ", + "02nOOEbOOD0 ", + "02nOOEbOOO0 ", + "02nOOCbOOO0 ", + "02nOOObOOD0 ", + "02nOOOiOOO0 ", + "12nOOObOOO0 ", + "12nOOObOOD0 ", + 
"03nOOObOOOeOOO0 ", + "03nOOCbOODeOOC0 ", + "03nOOEbOODeOOE0 ", + "03nOOEbOOOeOOE0 ", + "03nOOCbOOOeOOC0 ", + "03nOOObOODeOOO0 ", + "03nOOObOOOkOOO0 ", + "03nOOObOOOkOOD0 ", + "03nOOObOODkOOD0 ", + "03nOOObOODkOOO0 ", + "03nOOOiOOOkOOO0 ", + "03nOOOiOODkOOD0 ", + "03nOOOiOOOeOOO0 ", + "03nOOOiOODeOOO0 ", + "13nOOObOOOeOOO0 ", + "13nOOObOOOeOOD0 ", + "13nOOObOODeOOD0 ", + "13nOOObOODeOOO0 ", + "03bOOOcOOOdOOO0 ", + "05aODDaDODbOOOcOOOdOOO0 ", + "04aDDDbOOOcOOOdOOO0 ", + "03bDODcODDdOOO0 ", + "04aDDDbDODcODDdOOO0 ", + "13bOOOcOOOdOOO0 ", + "04bOOOcOOOdOOOhDDD1YYY ", + "15aODDaDODbOOOcOOOdOOO0 ", + "06aODDaDODbOOOcOOOdOOOhBBB1ZZZ ", + "14aDDDbOOOcOOOdOOO0 ", + "13bDODcODDdOOO0 ", + "14aDDDbDODcODDdOOO0 ", + "04bOOOcOOOdOOOeOOO0 ", + "04bOOOcOOOdOOOeDDD0 ", + "06aODDaDODbOOOcOOOdOOOeOOO0 ", + "06aODDaDODbODDcDDOdOOOeFBF0 ", + "05aDDDbOOOcOOOdOOOeOOO0 ", + "04bDODcODDdOOOeBFF0 ", + "04bDODcODDdOOOeFBB0 ", + "05aDDDbDODcODDdOOOeFBB0 ", + "04bOOOcOOOdOOOlOOO0 ", + "06aODDaDODbOOOcOOOdOOOlOOO0 ", + "05aDDDbOOOcOOOdOOOlOOO0 ", + "04bOOOcOOOdOOOlDDD0 ", + "06aODDaDODbOOOcOOOdOOOlDDD0 ", + "05aDDDbDODcODDdOOOlBBB0 ", + "14bOOOcOOOdOOOeOOO0 ", + "05bOOOcOOOdOOOeOOOhDDD1YYY ", + "14bOOOcOOOdOOOeDDD0 ", + "05bOOOcOOOdOOOeDDDhDDD1YYY ", + "16aODDaDODbOOOcOOOdOOOeOOO0 ", + "16aODDaDODbOOOcOOOdOOOeDDD0 ", + "07aODDaDODbODDcDDOdOOOeFBFhBBB1ZZZ ", + "07aODDaDODbODDcDDOdOOOeFBFhFFF1XXX ", + "15aDDDbOOOcOOOdOOOeOOO0 ", + "15aDDDbDODcODDdOOOeFBB0 ", + "01dOOO0 ", + "11dOOO0 ", + "02dOOOfOOO0 ", + "02dOOOlOOO0 ", + "02dOOOlDDD0 ", + "12dOOOfOOO0 ", + "12dOOOfDDD0 ", +] ''' this table contains the screw axis and glide planes @@ -1080,7 +4435,7 @@ def is_writable_file(path): 226: [['', '', 'c'], ['', '', '']], 227: [['d', '', ''], ['4_1', '', '']], 228: [['d', '', 'c'], ['4_1', '', '']], - 230: [['a', '', 'd'], ['4_1', '', '']] + 230: [['a', '', 'd'], ['4_1', '', '']], } ''' @@ -1102,93 +4457,93 @@ def is_writable_file(path): # 180@c SYM_GENERATORS['b'] = np.zeros([3, 3]) -SYM_GENERATORS['b'][0, 0] = -1. -SYM_GENERATORS['b'][1, 1] = -1. -SYM_GENERATORS['b'][2, 2] = 1. +SYM_GENERATORS['b'][0, 0] = -1.0 +SYM_GENERATORS['b'][1, 1] = -1.0 +SYM_GENERATORS['b'][2, 2] = 1.0 # 180@b SYM_GENERATORS['c'] = np.zeros([3, 3]) -SYM_GENERATORS['c'][0, 0] = -1. -SYM_GENERATORS['c'][1, 1] = 1. -SYM_GENERATORS['c'][2, 2] = -1. +SYM_GENERATORS['c'][0, 0] = -1.0 +SYM_GENERATORS['c'][1, 1] = 1.0 +SYM_GENERATORS['c'][2, 2] = -1.0 # 120@[111] SYM_GENERATORS['d'] = np.zeros([3, 3]) -SYM_GENERATORS['d'][0, 2] = 1. -SYM_GENERATORS['d'][1, 0] = 1. -SYM_GENERATORS['d'][2, 1] = 1. +SYM_GENERATORS['d'][0, 2] = 1.0 +SYM_GENERATORS['d'][1, 0] = 1.0 +SYM_GENERATORS['d'][2, 1] = 1.0 # 180@[110] SYM_GENERATORS['e'] = np.zeros([3, 3]) -SYM_GENERATORS['e'][0, 1] = 1. -SYM_GENERATORS['e'][1, 0] = 1. -SYM_GENERATORS['e'][2, 2] = -1. +SYM_GENERATORS['e'][0, 1] = 1.0 +SYM_GENERATORS['e'][1, 0] = 1.0 +SYM_GENERATORS['e'][2, 2] = -1.0 # SYM_GENERATORS['f'] = np.zeros([3, 3]) -SYM_GENERATORS['f'][0, 1] = -1. -SYM_GENERATORS['f'][1, 0] = -1. -SYM_GENERATORS['f'][2, 2] = -1. +SYM_GENERATORS['f'][0, 1] = -1.0 +SYM_GENERATORS['f'][1, 0] = -1.0 +SYM_GENERATORS['f'][2, 2] = -1.0 # SYM_GENERATORS['g'] = np.zeros([3, 3]) -SYM_GENERATORS['g'][0, 1] = -1. -SYM_GENERATORS['g'][1, 0] = 1. -SYM_GENERATORS['g'][2, 2] = 1. +SYM_GENERATORS['g'][0, 1] = -1.0 +SYM_GENERATORS['g'][1, 0] = 1.0 +SYM_GENERATORS['g'][2, 2] = 1.0 # inversion SYM_GENERATORS['h'] = -np.eye(3) # c-mirror SYM_GENERATORS['i'] = np.zeros([3, 3]) -SYM_GENERATORS['i'][0, 0] = 1. 
-SYM_GENERATORS['i'][1, 1] = 1.
-SYM_GENERATORS['i'][2, 2] = -1.
+SYM_GENERATORS['i'][0, 0] = 1.0
+SYM_GENERATORS['i'][1, 1] = 1.0
+SYM_GENERATORS['i'][2, 2] = -1.0
 # b-mirror
 SYM_GENERATORS['j'] = np.zeros([3, 3])
-SYM_GENERATORS['j'][0, 0] = 1.
-SYM_GENERATORS['j'][1, 1] = -1.
-SYM_GENERATORS['j'][2, 2] = 1.
+SYM_GENERATORS['j'][0, 0] = 1.0
+SYM_GENERATORS['j'][1, 1] = -1.0
+SYM_GENERATORS['j'][2, 2] = 1.0
 # 90@[001]
 SYM_GENERATORS['k'] = np.zeros([3, 3])
-SYM_GENERATORS['k'][0, 1] = -1.
-SYM_GENERATORS['k'][1, 0] = -1.
-SYM_GENERATORS['k'][2, 2] = 1.
+SYM_GENERATORS['k'][0, 1] = -1.0
+SYM_GENERATORS['k'][1, 0] = -1.0
+SYM_GENERATORS['k'][2, 2] = 1.0
 #
 SYM_GENERATORS['l'] = np.zeros([3, 3])
-SYM_GENERATORS['l'][0, 1] = 1.
-SYM_GENERATORS['l'][1, 0] = 1.
-SYM_GENERATORS['l'][2, 2] = 1.
+SYM_GENERATORS['l'][0, 1] = 1.0
+SYM_GENERATORS['l'][1, 0] = 1.0
+SYM_GENERATORS['l'][2, 2] = 1.0
 #
 SYM_GENERATORS['m'] = np.zeros([3, 3])
-SYM_GENERATORS['m'][0, 1] = 1.
-SYM_GENERATORS['m'][1, 0] = -1.
-SYM_GENERATORS['m'][2, 2] = -1.
+SYM_GENERATORS['m'][0, 1] = 1.0
+SYM_GENERATORS['m'][1, 0] = -1.0
+SYM_GENERATORS['m'][2, 2] = -1.0
 #
 SYM_GENERATORS['n'] = np.zeros([3, 3])
-SYM_GENERATORS['n'][0, 1] = -1.
-SYM_GENERATORS['n'][1, 0] = 1.
-SYM_GENERATORS['n'][1, 1] = -1.
-SYM_GENERATORS['n'][2, 2] = 1.
+SYM_GENERATORS['n'][0, 1] = -1.0
+SYM_GENERATORS['n'][1, 0] = 1.0
+SYM_GENERATORS['n'][1, 1] = -1.0
+SYM_GENERATORS['n'][2, 2] = 1.0
 '''
 translation components
 '''
-SYM_GENERATORS['A'] = 1./6.
-SYM_GENERATORS['B'] = 1./4.
-SYM_GENERATORS['C'] = 1./3.
-SYM_GENERATORS['D'] = 1./2.
-SYM_GENERATORS['E'] = 2./3.
-SYM_GENERATORS['F'] = 3./4.
-SYM_GENERATORS['G'] = 5./6.
-SYM_GENERATORS['O'] = 0.
-SYM_GENERATORS['X'] = -3./8.
-SYM_GENERATORS['Y'] = -1./4.
-SYM_GENERATORS['Z'] = -1./8.
+SYM_GENERATORS['A'] = 1.0 / 6.0
+SYM_GENERATORS['B'] = 1.0 / 4.0
+SYM_GENERATORS['C'] = 1.0 / 3.0
+SYM_GENERATORS['D'] = 1.0 / 2.0
+SYM_GENERATORS['E'] = 2.0 / 3.0
+SYM_GENERATORS['F'] = 3.0 / 4.0
+SYM_GENERATORS['G'] = 5.0 / 6.0
+SYM_GENERATORS['O'] = 0.0
+SYM_GENERATORS['X'] = -3.0 / 8.0
+SYM_GENERATORS['Y'] = -1.0 / 4.0
+SYM_GENERATORS['Z'] = -1.0 / 8.0
 '''
 @AUTHOR Saransh Singh,
@@ -1238,7 +4593,7 @@ def is_writable_file(path):
     'th': '3cdh',
     'o': '2dg',
     'td': '2dm',
-    'oh': '3dgh'
+    'oh': '3dgh',
 }
 # The above dict must be in the correct order for this to work
 SYM_PG_to_PGNUM = {pg: i + 1 for i, pg in enumerate(SYM_GL_PG)}
diff --git a/hexrd/core/convolution/__init__.py b/hexrd/core/convolution/__init__.py
index 617a37458..f07eaedf6 100644
--- a/hexrd/core/convolution/__init__.py
+++ b/hexrd/core/convolution/__init__.py
@@ -3,4 +3,3 @@
 from .convolve import convolve
-
diff --git a/hexrd/core/convolution/convolve.py b/hexrd/core/convolution/convolve.py
index ebc0b3e35..e33716e9c 100644
--- a/hexrd/core/convolution/convolve.py
+++ b/hexrd/core/convolution/convolve.py
@@ -17,7 +17,9 @@
 try:
     _convolve = load_library("_convolve", LIBRARY_PATH)
 except Exception:
-    raise ImportError("Convolution C extension is missing. Try re-building astropy.")
+    raise ImportError(
+        "Convolution C extension is missing. Try re-building astropy."
+ ) # The GIL is automatically released by default when calling functions imported # from libraries loaded by ctypes.cdll.LoadLibrary() @@ -26,19 +28,23 @@ # Boundary None _convolveNd_c = _convolve.convolveNd_c _convolveNd_c.restype = None -_convolveNd_c.argtypes = [ndpointer(ctypes.c_double, flags={"C_CONTIGUOUS", "WRITEABLE"}), # return array - ndpointer(ctypes.c_double, flags="C_CONTIGUOUS"), # input array - ctypes.c_uint, # N dim - # size array for input and result unless - # embed_result_within_padded_region is False, - # in which case the result array is assumed to be - # input.shape - 2*(kernel.shape//2). Note: integer division. - ndpointer(ctypes.c_size_t, flags="C_CONTIGUOUS"), - ndpointer(ctypes.c_double, flags="C_CONTIGUOUS"), # kernel array - ndpointer(ctypes.c_size_t, flags="C_CONTIGUOUS"), # size array for kernel - ctypes.c_bool, # nan_interpolate - ctypes.c_bool, # embed_result_within_padded_region - ctypes.c_uint] # n_threads +_convolveNd_c.argtypes = [ + ndpointer( + ctypes.c_double, flags={"C_CONTIGUOUS", "WRITEABLE"} + ), # return array + ndpointer(ctypes.c_double, flags="C_CONTIGUOUS"), # input array + ctypes.c_uint, # N dim + # size array for input and result unless + # embed_result_within_padded_region is False, + # in which case the result array is assumed to be + # input.shape - 2*(kernel.shape//2). Note: integer division. + ndpointer(ctypes.c_size_t, flags="C_CONTIGUOUS"), + ndpointer(ctypes.c_double, flags="C_CONTIGUOUS"), # kernel array + ndpointer(ctypes.c_size_t, flags="C_CONTIGUOUS"), # size array for kernel + ctypes.c_bool, # nan_interpolate + ctypes.c_bool, # embed_result_within_padded_region + ctypes.c_uint, +] # n_threads # Disabling all doctests in this module until a better way of handling warnings # in doctests can be determined @@ -48,8 +54,15 @@ MAX_NORMALIZATION = 100 -def _copy_input_if_needed(input, dtype=float, order='C', nan_treatment=None, - mask=None, fill_value=None): + +def _copy_input_if_needed( + input, + dtype=float, + order='C', + nan_treatment=None, + mask=None, + fill_value=None, +): # strip quantity attributes if hasattr(input, 'unit'): input = input.value @@ -59,7 +72,11 @@ def _copy_input_if_needed(input, dtype=float, order='C', nan_treatment=None, # Anything that's masked must be turned into NaNs for the interpolation. # This requires copying. A copy is also needed for nan_treatment == 'fill' # A copy prevents possible function side-effects of the input array. - if nan_treatment == 'fill' or np.ma.is_masked(input) or mask is not None: + if ( + nan_treatment == 'fill' + or np.ma.is_masked(input) + or mask is not None + ): if np.ma.is_masked(input): # ``np.ma.maskedarray.filled()`` returns a copy, however there # is no way to specify the return type or order etc. In addition @@ -68,12 +85,16 @@ def _copy_input_if_needed(input, dtype=float, order='C', nan_treatment=None, # ``float`` masked arrays. ``subok=True`` is needed to retain # ``np.ma.maskedarray.filled()``. ``copy=False`` allows the fill # to act as the copy if type and order are already correct. - output = np.array(input, dtype=dtype, copy=False, order=order, subok=True) + output = np.array( + input, dtype=dtype, copy=False, order=order, subok=True + ) output = output.filled(fill_value) else: # Since we're making a copy, we might as well use `subok=False` to save, # what is probably, a negligible amount of memory. 
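
# Aside: a minimal standalone sketch of the masked-array branch handled
# above. MaskedArray.filled() always returns a copy, which is why the fill
# can double as the dtype/order-normalizing copy (the array values here are
# illustrative):
import numpy as np

_masked = np.ma.masked_array([1.0, 2.0, 3.0], mask=[False, True, False])
_out = np.asanyarray(_masked, dtype=float)  # keeps the MaskedArray subclass
_out = _out.filled(np.nan)  # masked entry becomes NaN for interpolation
assert np.isnan(_out[1]) and _out[0] == 1.0
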
- output = np.array(input, dtype=dtype, copy=True, order=order, subok=False) + output = np.array( + input, dtype=dtype, copy=True, order=order, subok=False + ) if mask is not None: # mask != 0 yields a bool mask for all ints/floats/bool @@ -83,16 +104,29 @@ def _copy_input_if_needed(input, dtype=float, order='C', nan_treatment=None, # The advantage of `subok=True` is that it won't copy when array is an ndarray subclass. If it # is and `subok=False` (default), then it will copy even if `copy=False`. This uses less memory # when ndarray subclasses are passed in. - output = np.array(input, dtype=dtype, copy=False, order=order, subok=True) + output = np.array( + input, dtype=dtype, copy=False, order=order, subok=True + ) except (TypeError, ValueError) as e: - raise TypeError('input should be a Numpy array or something ' - 'convertible into a float array', e) + raise TypeError( + 'input should be a Numpy array or something ' + 'convertible into a float array', + e, + ) return output -def convolve(array, kernel, boundary='fill', fill_value=0., - nan_treatment='interpolate', normalize_kernel=True, mask=None, - preserve_nan=False, normalization_zero_tol=1e-8): +def convolve( + array, + kernel, + boundary='fill', + fill_value=0.0, + nan_treatment='interpolate', + normalize_kernel=True, + mask=None, + preserve_nan=False, + normalization_zero_tol=1e-8, +): """ Convolve an array with a kernel. @@ -167,8 +201,11 @@ def convolve(array, kernel, boundary='fill', fill_value=0., """ if boundary not in BOUNDARY_OPTIONS: - raise ValueError("Invalid boundary option: must be one of {}" - .format(BOUNDARY_OPTIONS)) + raise ValueError( + "Invalid boundary option: must be one of {}".format( + BOUNDARY_OPTIONS + ) + ) if nan_treatment not in ('interpolate', 'fill'): raise ValueError("nan_treatment must be one of 'interpolate','fill'") @@ -195,14 +232,24 @@ def convolve(array, kernel, boundary='fill', fill_value=0., # Convert kernel to ndarray if not already # Copy or alias array to array_internal - array_internal = _copy_input_if_needed(passed_array, dtype=float, order='C', - nan_treatment=nan_treatment, mask=mask, - fill_value=np.nan) + array_internal = _copy_input_if_needed( + passed_array, + dtype=float, + order='C', + nan_treatment=nan_treatment, + mask=mask, + fill_value=np.nan, + ) array_dtype = getattr(passed_array, 'dtype', array_internal.dtype) # Copy or alias kernel to kernel_internal - kernel_internal = _copy_input_if_needed(passed_kernel, dtype=float, order='C', - nan_treatment=None, mask=None, - fill_value=fill_value) + kernel_internal = _copy_input_if_needed( + passed_kernel, + dtype=float, + order='C', + nan_treatment=None, + mask=None, + fill_value=fill_value, + ) # Make sure kernel has all odd axes if has_even_axis(kernel_internal): @@ -219,15 +266,18 @@ def convolve(array, kernel, boundary='fill', fill_value=0., if array_internal.ndim == 0: raise Exception("cannot convolve 0-dimensional arrays") elif array_internal.ndim > 3: - raise NotImplementedError('convolve only supports 1, 2, and 3-dimensional ' - 'arrays at this time') + raise NotImplementedError( + 'convolve only supports 1, 2, and 3-dimensional ' + 'arrays at this time' + ) elif array_internal.ndim != kernel_internal.ndim: - raise Exception('array and kernel have differing number of ' - 'dimensions.') + raise Exception( + 'array and kernel have differing number of ' 'dimensions.' 
+ ) array_shape = np.array(array_internal.shape) kernel_shape = np.array(kernel_internal.shape) - pad_width = kernel_shape//2 + pad_width = kernel_shape // 2 # For boundary=None only the center space is convolved. All array indices within a # distance kernel.shape//2 from the edge are completely ignored (zeroed). @@ -239,25 +289,34 @@ def convolve(array, kernel, boundary='fill', fill_value=0., # For odd kernels it is: # array_shape >= kernel_shape OR array_shape > kernel_shape-1 OR array_shape > 2*(kernel_shape//2). # Since the latter is equal to the former two for even lengths, the latter condition is complete. - if boundary is None and not np.all(array_shape > 2*pad_width): - raise KernelSizeError("for boundary=None all kernel axes must be smaller than array's - " - "use boundary in ['fill', 'extend', 'wrap'] instead.") + if boundary is None and not np.all(array_shape > 2 * pad_width): + raise KernelSizeError( + "for boundary=None all kernel axes must be smaller than array's - " + "use boundary in ['fill', 'extend', 'wrap'] instead." + ) # NaN interpolation significantly slows down the C convolution # computation. Since nan_treatment = 'interpolate', is the default # check whether it is even needed, if not, don't interpolate. # NB: np.isnan(array_internal.sum()) is faster than np.isnan(array_internal).any() - nan_interpolate = (nan_treatment == 'interpolate') and np.isnan(array_internal.sum()) + nan_interpolate = (nan_treatment == 'interpolate') and np.isnan( + array_internal.sum() + ) # Check if kernel is normalizable if normalize_kernel or nan_interpolate: kernel_sum = kernel_internal.sum() - kernel_sums_to_zero = np.isclose(kernel_sum, 0, atol=normalization_zero_tol) - - if kernel_sum < 1. / MAX_NORMALIZATION or kernel_sums_to_zero: - raise ValueError("The kernel can't be normalized, because its sum is " - "close to zero. The sum of the given kernel is < {}" - .format(1. / MAX_NORMALIZATION)) + kernel_sums_to_zero = np.isclose( + kernel_sum, 0, atol=normalization_zero_tol + ) + + if kernel_sum < 1.0 / MAX_NORMALIZATION or kernel_sums_to_zero: + raise ValueError( + "The kernel can't be normalized, because its sum is " + "close to zero. The sum of the given kernel is < {}".format( + 1.0 / MAX_NORMALIZATION + ) + ) # Mark the NaN values so we can replace them later if interpolate_nan is # not set @@ -276,20 +335,35 @@ def convolve(array, kernel, boundary='fill', fill_value=0., embed_result_within_padded_region = False if boundary == 'fill': # This method is faster than using numpy.pad(..., mode='constant') - array_to_convolve = np.full(array_shape + 2*pad_width, fill_value=fill_value, dtype=float, order='C') + array_to_convolve = np.full( + array_shape + 2 * pad_width, + fill_value=fill_value, + dtype=float, + order='C', + ) # Use bounds [pad_width[0]:array_shape[0]+pad_width[0]] instead of [pad_width[0]:-pad_width[0]] # to account for when the kernel has size of 1 making pad_width = 0. 
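
# Aside (before the dimension-specific embedding below): the hand-rolled
# 'fill' padding is equivalent to np.pad(..., mode='constant'), just without
# np.pad's temporary. A standalone 2-D sketch with illustrative shapes:
import numpy as np

_arr = np.arange(9.0).reshape(3, 3)
_pad = np.array([3, 3]) // 2  # kernel_shape // 2, i.e. integer division
_full = np.full(tuple(np.array(_arr.shape) + 2 * _pad), 0.0)
_full[_pad[0]:_arr.shape[0] + _pad[0], _pad[1]:_arr.shape[1] + _pad[1]] = _arr
assert np.array_equal(_full, np.pad(_arr, [(p, p) for p in _pad], mode='constant'))
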
if array_internal.ndim == 1: - array_to_convolve[pad_width[0]:array_shape[0]+pad_width[0]] = array_internal + array_to_convolve[ + pad_width[0] : array_shape[0] + pad_width[0] + ] = array_internal elif array_internal.ndim == 2: - array_to_convolve[pad_width[0]:array_shape[0]+pad_width[0], - pad_width[1]:array_shape[1]+pad_width[1]] = array_internal + array_to_convolve[ + pad_width[0] : array_shape[0] + pad_width[0], + pad_width[1] : array_shape[1] + pad_width[1], + ] = array_internal else: - array_to_convolve[pad_width[0]:array_shape[0]+pad_width[0], - pad_width[1]:array_shape[1]+pad_width[1], - pad_width[2]:array_shape[2]+pad_width[2]] = array_internal + array_to_convolve[ + pad_width[0] : array_shape[0] + pad_width[0], + pad_width[1] : array_shape[1] + pad_width[1], + pad_width[2] : array_shape[2] + pad_width[2], + ] = array_internal else: - np_pad_mode_dict = {'fill': 'constant', 'extend': 'edge', 'wrap': 'wrap'} + np_pad_mode_dict = { + 'fill': 'constant', + 'extend': 'edge', + 'wrap': 'wrap', + } np_pad_mode = np_pad_mode_dict[boundary] pad_width = kernel_shape // 2 @@ -298,18 +372,27 @@ def convolve(array, kernel, boundary='fill', fill_value=0., elif array_internal.ndim == 2: np_pad_width = ((pad_width[0],), (pad_width[1],)) else: - np_pad_width = ((pad_width[0],), (pad_width[1],), (pad_width[2],)) - - array_to_convolve = np.pad(array_internal, pad_width=np_pad_width, - mode=np_pad_mode) - - _convolveNd_c(result, array_to_convolve, - array_to_convolve.ndim, - np.array(array_to_convolve.shape, dtype=ctypes.c_size_t, order='C'), - kernel_internal, - np.array(kernel_shape, dtype=ctypes.c_size_t, order='C'), - nan_interpolate, embed_result_within_padded_region, - n_threads) + np_pad_width = ( + (pad_width[0],), + (pad_width[1],), + (pad_width[2],), + ) + + array_to_convolve = np.pad( + array_internal, pad_width=np_pad_width, mode=np_pad_mode + ) + + _convolveNd_c( + result, + array_to_convolve, + array_to_convolve.ndim, + np.array(array_to_convolve.shape, dtype=ctypes.c_size_t, order='C'), + kernel_internal, + np.array(kernel_shape, dtype=ctypes.c_size_t, order='C'), + nan_interpolate, + embed_result_within_padded_region, + n_threads, + ) # So far, normalization has only occured for nan_treatment == 'interpolate' # because this had to happen within the C extension so as to ignore @@ -321,10 +404,12 @@ def convolve(array, kernel, boundary='fill', fill_value=0., result *= kernel_sum if nan_interpolate and not preserve_nan and np.isnan(result.sum()): - warnings.warn("nan_treatment='interpolate', however, NaN values detected " - "post convolution. A contiguous region of NaN values, larger " - "than the kernel size, are present in the input array. " - "Increase the kernel size to avoid this.") + warnings.warn( + "nan_treatment='interpolate', however, NaN values detected " + "post convolution. A contiguous region of NaN values, larger " + "than the kernel size, are present in the input array. " + "Increase the kernel size to avoid this." + ) if preserve_nan: result[initially_nan] = np.nan @@ -339,6 +424,3 @@ def convolve(array, kernel, boundary='fill', fill_value=0., return result.astype(array_dtype) else: return result - - - diff --git a/hexrd/core/convolution/utils.py b/hexrd/core/convolution/utils.py index 4283c3985..485ad80df 100644 --- a/hexrd/core/convolution/utils.py +++ b/hexrd/core/convolution/utils.py @@ -3,6 +3,7 @@ import ctypes import numpy as np + class DiscretizationError(Exception): """ Called when discretization of models goes wrong. 
@@ -23,4 +24,4 @@ def has_even_axis(array): def raise_even_kernel_exception(): - raise KernelSizeError("Kernel size must be odd in all axes.") \ No newline at end of file + raise KernelSizeError("Kernel size must be odd in all axes.") diff --git a/hexrd/core/deprecation.py b/hexrd/core/deprecation.py index 0ac51b271..ed9f9fc94 100644 --- a/hexrd/core/deprecation.py +++ b/hexrd/core/deprecation.py @@ -4,6 +4,7 @@ class DeprecatedFunctionError(Exception): """Custom exception for deprecated functions.""" + pass diff --git a/hexrd/core/distortion/__init__.py b/hexrd/core/distortion/__init__.py index 5931e0e7e..d01fff19f 100644 --- a/hexrd/core/distortion/__init__.py +++ b/hexrd/core/distortion/__init__.py @@ -1,4 +1,5 @@ """Distortion package (python 3)""" + import abc import pkgutil from importlib import import_module diff --git a/hexrd/core/distortion/dexela_2923.py b/hexrd/core/distortion/dexela_2923.py index e4cb92dfe..a26407c5d 100644 --- a/hexrd/core/distortion/dexela_2923.py +++ b/hexrd/core/distortion/dexela_2923.py @@ -38,9 +38,7 @@ def apply(self, xy_in): return xy_in else: xy_out = np.empty_like(xy_in) - _dexela_2923_distortion( - xy_out, xy_in, np.asarray(self.params) - ) + _dexela_2923_distortion(xy_out, xy_in, np.asarray(self.params)) return xy_out def apply_inverse(self, xy_in): @@ -56,9 +54,9 @@ def apply_inverse(self, xy_in): def _find_quadrant(xy_in): quad_label = np.zeros(len(xy_in), dtype=int) - in_2_or_3 = xy_in[:, 0] < 0. + in_2_or_3 = xy_in[:, 0] < 0.0 in_1_or_4 = ~in_2_or_3 - in_3_or_4 = xy_in[:, 1] < 0. + in_3_or_4 = xy_in[:, 1] < 0.0 in_1_or_2 = ~in_3_or_4 quad_label[np.logical_and(in_1_or_4, in_1_or_2)] = 1 quad_label[np.logical_and(in_2_or_3, in_1_or_2)] = 2 @@ -71,15 +69,15 @@ def _find_quadrant(xy_in): def _dexela_2923_distortion(out_, in_, params): for el in range(len(in_)): xi, yi = in_[el, :] - if xi < 0.: - if yi < 0.: + if xi < 0.0: + if yi < 0.0: # 3rd quadrant out_[el, :] = in_[el, :] + params[4:6] else: # 2nd quadrant out_[el, :] = in_[el, :] + params[2:4] else: - if yi < 0.: + if yi < 0.0: # 4th quadrant out_[el, :] = in_[el, :] + params[6:8] else: @@ -91,18 +89,17 @@ def _dexela_2923_distortion(out_, in_, params): def _dexela_2923_inverse_distortion(out_, in_, params): for el in range(len(in_)): xi, yi = in_[el, :] - if xi < 0.: - if yi < 0.: + if xi < 0.0: + if yi < 0.0: # 3rd quadrant out_[el, :] = in_[el, :] - params[4:6] else: # 2nd quadrant out_[el, :] = in_[el, :] - params[2:4] else: - if yi < 0.: + if yi < 0.0: # 4th quadrant out_[el, :] = in_[el, :] - params[6:8] else: # 1st quadrant out_[el, :] = in_[el, :] - params[0:2] - diff --git a/hexrd/core/distortion/ge_41rt.py b/hexrd/core/distortion/ge_41rt.py index 241e87d40..681d2ac66 100644 --- a/hexrd/core/distortion/ge_41rt.py +++ b/hexrd/core/distortion/ge_41rt.py @@ -1,4 +1,5 @@ """GE41RT Detector Distortion""" + from typing import List import numpy as np diff --git a/hexrd/core/distortion/identity.py b/hexrd/core/distortion/identity.py index 801387b82..c6bcc8c13 100644 --- a/hexrd/core/distortion/identity.py +++ b/hexrd/core/distortion/identity.py @@ -2,6 +2,7 @@ Simple class that returns it's input. 
""" + from .distortionabc import DistortionABC from .registry import _RegisterDistortionClass diff --git a/hexrd/core/distortion/nyi.py b/hexrd/core/distortion/nyi.py index bc1421217..4105852cc 100644 --- a/hexrd/core/distortion/nyi.py +++ b/hexrd/core/distortion/nyi.py @@ -2,6 +2,7 @@ To illustrate error when abstract method is not implemented """ + from .distortionabc import DistortionABC from .registry import _RegisterDistortionClass diff --git a/hexrd/core/distortion/registry.py b/hexrd/core/distortion/registry.py index a36f2f7d0..28dc3a49e 100644 --- a/hexrd/core/distortion/registry.py +++ b/hexrd/core/distortion/registry.py @@ -1,4 +1,5 @@ """Distortion package (python 3)""" + import abc __all__ = ['maptypes', 'get_mapping'] @@ -13,6 +14,7 @@ def __init__(cls, name, bases, attrs): class Registry(object): """Registry for imageseries adapters""" + distortion_registry = dict() @classmethod diff --git a/hexrd/core/extensions/__init__.py b/hexrd/core/extensions/__init__.py index 9f9cfc8d9..424934712 100644 --- a/hexrd/core/extensions/__init__.py +++ b/hexrd/core/extensions/__init__.py @@ -1,3 +1,3 @@ from . import _new_transforms_capi from . import _transforms_CAPI -from . import inverse_distortion \ No newline at end of file +from . import inverse_distortion diff --git a/hexrd/core/fitting/calibration/abstract_grain.py b/hexrd/core/fitting/calibration/abstract_grain.py index 143b8b6ae..5ee41ed1b 100644 --- a/hexrd/core/fitting/calibration/abstract_grain.py +++ b/hexrd/core/fitting/calibration/abstract_grain.py @@ -10,15 +10,25 @@ from hexrd.core.utils.hkl import hkl_to_str, str_to_hkl from .calibrator import Calibrator -from .lmfit_param_handling import create_grain_params, DEFAULT_EULER_CONVENTION, rename_to_avoid_collision +from .lmfit_param_handling import ( + create_grain_params, + DEFAULT_EULER_CONVENTION, + rename_to_avoid_collision, +) logger = logging.getLogger(__name__) class AbstractGrainCalibrator(Calibrator): - def __init__(self, instr, material, grain_params, - default_refinements=None, calibration_picks=None, - euler_convention=DEFAULT_EULER_CONVENTION): + def __init__( + self, + instr, + material, + grain_params, + default_refinements=None, + calibration_picks=None, + euler_convention=DEFAULT_EULER_CONVENTION, + ): self.instr = instr self.material = material self.grain_params = grain_params @@ -106,7 +116,12 @@ def grain_params_euler(self, v): # Grain parameters with orientation set using Euler angle convention grain_params = v.copy() if self.euler_convention is not None: - rme = RotMatEuler(np.zeros(3,), **self.euler_convention) + rme = RotMatEuler( + np.zeros( + 3, + ), + **self.euler_convention + ) rme.angles = np.radians(grain_params[:3]) phi, n = angleAxisOfRotMat(rme.rmat) grain_params[:3] = phi * n.flatten() diff --git a/hexrd/core/fitting/calibration/instrument.py b/hexrd/core/fitting/calibration/instrument.py index a413538d6..ee3f88297 100644 --- a/hexrd/core/fitting/calibration/instrument.py +++ b/hexrd/core/fitting/calibration/instrument.py @@ -4,8 +4,18 @@ import lmfit import numpy as np -from .lmfit_param_handling import add_engineering_constraints, create_instr_params, DEFAULT_EULER_CONVENTION, update_instrument_from_params, validate_params_list -from hexrd.core.fitting.calibration.relative_constraints import create_relative_constraints, RelativeConstraints, RelativeConstraintsType +from .lmfit_param_handling import ( + add_engineering_constraints, + create_instr_params, + DEFAULT_EULER_CONVENTION, + update_instrument_from_params, + validate_params_list, +) 
+from hexrd.core.fitting.calibration.relative_constraints import ( + create_relative_constraints, + RelativeConstraints, + RelativeConstraintsType, +) logger = logging.getLogger() logger.setLevel('INFO') @@ -16,10 +26,14 @@ def _normalized_ssqr(resd): class InstrumentCalibrator: - def __init__(self, *args, engineering_constraints=None, - set_refinements_from_instrument_flags=True, - euler_convention=DEFAULT_EULER_CONVENTION, - relative_constraints_type=RelativeConstraintsType.none): + def __init__( + self, + *args, + engineering_constraints=None, + set_refinements_from_instrument_flags=True, + euler_convention=DEFAULT_EULER_CONVENTION, + relative_constraints_type=RelativeConstraintsType.none, + ): """ Model for instrument calibration class as a function of @@ -39,20 +53,22 @@ def __init__(self, *args, engineering_constraints=None, assert len(args) > 0, "must have at least one calibrator" self.calibrators = args for calib in self.calibrators: - assert calib.instr is self.instr, \ - "all calibrators must refer to the same instrument" + assert ( + calib.instr is self.instr + ), "all calibrators must refer to the same instrument" self._engineering_constraints = engineering_constraints self._relative_constraints = create_relative_constraints( - relative_constraints_type, self.instr) + relative_constraints_type, self.instr + ) self.euler_convention = euler_convention self.params = self.make_lmfit_params() if set_refinements_from_instrument_flags: self.instr.set_calibration_flags_to_lmfit_params(self.params) - self.fitter = lmfit.Minimizer(self.minimizer_function, - self.params, - nan_policy='omit') + self.fitter = lmfit.Minimizer( + self.minimizer_function, self.params, nan_policy='omit' + ) def make_lmfit_params(self): params = create_instr_params( @@ -128,10 +144,9 @@ def minimize(self, method='least_squares', odict=None): result = self.fitter.least_squares(self.params, **odict) else: - result = self.fitter.scalar_minimize(method=method, - params=self.params, - max_nfev=50000, - **odict) + result = self.fitter.scalar_minimize( + method=method, params=self.params, max_nfev=50000, **odict + ) return result @@ -171,7 +186,8 @@ def relative_constraints_type(self, v: Optional[RelativeConstraintsType]): current = getattr(self, '_relative_constraints', None) if current is None or current.type != v: self.relative_constraints = create_relative_constraints( - v, self.instr) + v, self.instr + ) @property def relative_constraints(self) -> RelativeConstraints: @@ -199,7 +215,7 @@ def run_calibration(self, odict): nrm_ssr_1 = _normalized_ssqr(resd1) - delta_r = 1. 
- nrm_ssr_1/nrm_ssr_0 + delta_r = 1.0 - nrm_ssr_1 / nrm_ssr_0 if delta_r > 0: logger.info('OPTIMIZATION SUCCESSFUL') diff --git a/hexrd/core/fitting/calibration/laue.py b/hexrd/core/fitting/calibration/laue.py index aa5878298..68eac602c 100644 --- a/hexrd/core/fitting/calibration/laue.py +++ b/hexrd/core/fitting/calibration/laue.py @@ -18,17 +18,29 @@ # TODO: Resolve extra-workflow-dependency from hexrd.powder.fitting.calibration.calibrator import Calibrator -from hexrd.powder.fitting.calibration.lmfit_param_handling import create_grain_params, DEFAULT_EULER_CONVENTION, rename_to_avoid_collision +from hexrd.powder.fitting.calibration.lmfit_param_handling import ( + create_grain_params, + DEFAULT_EULER_CONVENTION, + rename_to_avoid_collision, +) class LaueCalibrator(Calibrator): type = 'laue' - def __init__(self, instr, material, grain_params, default_refinements=None, - min_energy=5, max_energy=25, tth_distortion=None, - calibration_picks=None, - euler_convention=DEFAULT_EULER_CONVENTION, - xray_source: Optional[str] = None): + def __init__( + self, + instr, + material, + grain_params, + default_refinements=None, + min_energy=5, + max_energy=25, + tth_distortion=None, + calibration_picks=None, + euler_convention=DEFAULT_EULER_CONVENTION, + xray_source: Optional[str] = None, + ): self.instr = instr self.material = material self.grain_params = grain_params @@ -103,7 +115,12 @@ def grain_params_euler(self, v): # Grain parameters with orientation set using Euler angle convention grain_params = v.copy() if self.euler_convention is not None: - rme = RotMatEuler(np.zeros(3,), **self.euler_convention) + rme = RotMatEuler( + np.zeros( + 3, + ), + **self.euler_convention + ) rme.angles = np.radians(grain_params[:3]) phi, n = angleAxisOfRotMat(rme.rmat) grain_params[:3] = phi * n.flatten() @@ -159,10 +176,20 @@ def calibration_picks(self, v): self.data_dict = data_dict - def autopick_points(self, raw_img_dict, tth_tol=5., eta_tol=5., - npdiv=2, do_smoothing=True, smoothing_sigma=2, - use_blob_detection=True, blob_threshold=0.25, - fit_peaks=True, min_peak_int=1., fit_tth_tol=0.1): + def autopick_points( + self, + raw_img_dict, + tth_tol=5.0, + eta_tol=5.0, + npdiv=2, + do_smoothing=True, + smoothing_sigma=2, + use_blob_detection=True, + blob_threshold=0.25, + fit_peaks=True, + min_peak_int=1.0, + fit_tth_tol=0.1, + ): """ Parameters ---------- @@ -206,13 +233,23 @@ def autopick_points(self, raw_img_dict, tth_tol=5., eta_tol=5., fit_tth_tol=fit_tth_tol, ) - def _autopick_points(self, raw_img_dict, tth_tol=5., eta_tol=5., - npdiv=2, do_smoothing=True, smoothing_sigma=2, - use_blob_detection=True, blob_threshold=0.25, - fit_peaks=True, min_peak_int=1., fit_tth_tol=0.1): + def _autopick_points( + self, + raw_img_dict, + tth_tol=5.0, + eta_tol=5.0, + npdiv=2, + do_smoothing=True, + smoothing_sigma=2, + use_blob_detection=True, + blob_threshold=0.25, + fit_peaks=True, + min_peak_int=1.0, + fit_tth_tol=0.1, + ): labelStructure = ndimage.generate_binary_structure(2, 1) rmat_s = np.eye(3) # !!! forcing to identity - omega = 0. # !!! same ^^^ + omega = 0.0 # !!! 
same ^^^ rmat_c = xfcapi.make_rmat_of_expmap(self.grain_params[:3]) tvec_c = self.grain_params[3:6] @@ -224,7 +261,8 @@ def _autopick_points(self, raw_img_dict, tth_tol=5., eta_tol=5., self.plane_data, minEnergy=self.energy_cutoffs[0], maxEnergy=self.energy_cutoffs[1], - rmat_s=None, grain_params=np.atleast_2d(self.grain_params), + rmat_s=None, + grain_params=np.atleast_2d(self.grain_params), ) # loop over detectors for results @@ -233,7 +271,7 @@ def _autopick_points(self, raw_img_dict, tth_tol=5., eta_tol=5., det_config = det.config_dict( chi=self.instr.chi, tvec=self.instr.tvec, - beam_vector=self.instr.beam_vector + beam_vector=self.instr.beam_vector, ) xy_det, hkls, angles, dspacing, energy = laue_sim[det_key] @@ -256,57 +294,66 @@ def _autopick_points(self, raw_img_dict, tth_tol=5., eta_tol=5., # make patches refl_patches = xrdutil.make_reflection_patches( det_config, - valid_angs, det.angularPixelSize(valid_xy), - rmat_c=rmat_c, tvec_c=tvec_c, - tth_tol=tth_tol, eta_tol=eta_tol, - npdiv=npdiv, quiet=True) + valid_angs, + det.angularPixelSize(valid_xy), + rmat_c=rmat_c, + tvec_c=tvec_c, + tth_tol=tth_tol, + eta_tol=eta_tol, + npdiv=npdiv, + quiet=True, + ) reflInfoList = [] img = raw_img_dict[det_key] native_area = det.pixel_area num_patches = len(valid_angs) - meas_xy = np.nan*np.ones((num_patches, 2)) - meas_angs = np.nan*np.ones((num_patches, 2)) + meas_xy = np.nan * np.ones((num_patches, 2)) + meas_angs = np.nan * np.ones((num_patches, 2)) for iRefl, patch in enumerate(refl_patches): # check for overrun irow = patch[-1][0] jcol = patch[-1][1] - if np.any([irow < 0, irow >= det.rows, - jcol < 0, jcol >= det.cols]): + if np.any( + [irow < 0, irow >= det.rows, jcol < 0, jcol >= det.cols] + ): continue if not np.all( - det.clip_to_panel( - np.vstack([patch[1][0].flatten(), - patch[1][1].flatten()]).T - )[1] - ): + det.clip_to_panel( + np.vstack( + [patch[1][0].flatten(), patch[1][1].flatten()] + ).T + )[1] + ): continue # use nearest interpolation spot_data = img[irow, jcol] * patch[3] * npdiv**2 / native_area spot_data -= np.amin(spot_data) patch_size = spot_data.shape - sigmax = 0.25*np.min(spot_data.shape) * fwhm_to_sigma + sigmax = 0.25 * np.min(spot_data.shape) * fwhm_to_sigma # optional gaussian smoothing if do_smoothing: spot_data = filters.gaussian(spot_data, smoothing_sigma) if use_blob_detection: - spot_data_scl = 2.*spot_data/np.max(spot_data) - 1. + spot_data_scl = 2.0 * spot_data / np.max(spot_data) - 1.0 # Compute radii in the 3rd column. 
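
# Aside (before the blob_log call below): a standalone sketch of this
# blob-detection step on a synthetic Gaussian spot. The keyword values
# mirror the call below; real data may need tuning:
import numpy as np
from skimage.feature import blob_log

_yy, _xx = np.mgrid[0:64, 0:64]
_spot = np.exp(-((_xx - 40.0) ** 2 + (_yy - 25.0) ** 2) / (2.0 * 3.0**2))
_spot_scl = 2.0 * _spot / np.max(_spot) - 1.0  # same [-1, 1] rescaling as above
_blobs = blob_log(_spot_scl, min_sigma=2, max_sigma=20, num_sigma=10,
                  threshold=0.25, overlap=0.1)
# each row of _blobs is (row, col, sigma); blob radius ~ sigma * sqrt(2) in 2-D
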
- blobs_log = blob_log(spot_data_scl, - min_sigma=2, - max_sigma=min(sigmax, 20), - num_sigma=10, - threshold=blob_threshold, - overlap=0.1) + blobs_log = blob_log( + spot_data_scl, + min_sigma=2, + max_sigma=min(sigmax, 20), + num_sigma=10, + threshold=blob_threshold, + overlap=0.1, + ) numPeaks = len(blobs_log) else: labels, numPeaks = ndimage.label( spot_data > np.percentile(spot_data, 99), - structure=labelStructure + structure=labelStructure, ) slabels = np.arange(1, numPeaks + 1) tth_edges = patch[0][0][0, :] @@ -321,11 +368,11 @@ def _autopick_points(self, raw_img_dict, tth_tol=5., eta_tol=5., coms = np.array( ndimage.center_of_mass( spot_data, labels=labels, index=slabels - ) ) + ) if numPeaks > 1: # - center = np.r_[spot_data.shape]*0.5 + center = np.r_[spot_data.shape] * 0.5 com_diff = coms - np.tile(center, (numPeaks, 1)) closest_peak_idx = np.argmin( np.sum(com_diff**2, axis=1) @@ -337,20 +384,28 @@ def _autopick_points(self, raw_img_dict, tth_tol=5., eta_tol=5., coms = coms[closest_peak_idx] # if fit_peaks: - sigm = 0.2*np.min(spot_data.shape) + sigm = 0.2 * np.min(spot_data.shape) if use_blob_detection: sigm = min(blobs_log[closest_peak_idx, 2], sigm) y0, x0 = coms.flatten() ampl = float(spot_data[int(y0), int(x0)]) # y0, x0 = 0.5*np.array(spot_data.shape) # ampl = np.max(spot_data) - a_par = c_par = 0.5/float(sigm**2) - b_par = 0. - bgx = bgy = 0. + a_par = c_par = 0.5 / float(sigm**2) + b_par = 0.0 + bgx = bgy = 0.0 bkg = np.min(spot_data) - params = [ampl, - a_par, b_par, c_par, - x0, y0, bgx, bgy, bkg] + params = [ + ampl, + a_par, + b_par, + c_par, + x0, + y0, + bgx, + bgy, + bkg, + ] # result = leastsq(gaussian_2d, params, args=(spot_data)) # @@ -365,24 +420,29 @@ def _autopick_points(self, raw_img_dict, tth_tol=5., eta_tol=5., row_cen = fit_tth_tol * patch_size[0] col_cen = fit_tth_tol * patch_size[1] if np.any( - [coms[0] < row_cen, - coms[0] >= patch_size[0] - row_cen, - coms[1] < col_cen, - coms[1] >= patch_size[1] - col_cen] + [ + coms[0] < row_cen, + coms[0] >= patch_size[0] - row_cen, + coms[1] < col_cen, + coms[1] >= patch_size[1] - col_cen, + ] ): continue - if (fit_par[0] < min_peak_int): + if fit_par[0] < min_peak_int: continue # intensities spot_intensity, int_err = nquad( gaussian_2d_int, - [[0., 2.*y0], [0., 2.*x0]], - args=fit_par) - com_angs = np.hstack([ - tth_edges[0] + (0.5 + coms[1])*delta_tth, - eta_edges[0] + (0.5 + coms[0])*delta_eta - ]) + [[0.0, 2.0 * y0], [0.0, 2.0 * x0]], + args=fit_par, + ) + com_angs = np.hstack( + [ + tth_edges[0] + (0.5 + coms[1]) * delta_tth, + eta_edges[0] + (0.5 + coms[0]) * delta_eta, + ] + ) # grab intensities if not fit_peaks: @@ -405,12 +465,18 @@ def _autopick_points(self, raw_img_dict, tth_tol=5., eta_tol=5., cmv, chi=self.instr.chi, rmat_c=rmat_c, - beam_vec=self.instr.beam_vector) + beam_vec=self.instr.beam_vector, + ) new_xy = xfcapi.gvec_to_xy( gvec_c, - det.rmat, rmat_s, rmat_c, - det.tvec, self.instr.tvec, tvec_c, - beam_vec=self.instr.beam_vector) + det.rmat, + rmat_s, + rmat_c, + det.tvec, + self.instr.tvec, + tvec_c, + beam_vec=self.instr.beam_vector, + ) meas_xy[iRefl, :] = new_xy if det.distortion is not None: meas_xy[iRefl, :] = det.distortion.apply_inverse( @@ -422,15 +488,20 @@ def _autopick_points(self, raw_img_dict, tth_tol=5., eta_tol=5., # spot_intensity = np.nan max_intensity = np.nan - reflInfoList.append([peakId, valid_hkls[:, iRefl], - (spot_intensity, max_intensity), - valid_energy[iRefl], - valid_angs[iRefl, :], - meas_angs[iRefl, :], - meas_xy[iRefl, :]]) + reflInfoList.append( + [ + 
peakId, + valid_hkls[:, iRefl], + (spot_intensity, max_intensity), + valid_energy[iRefl], + valid_angs[iRefl, :], + meas_angs[iRefl, :], + meas_xy[iRefl, :], + ] + ) reflInfo = np.array( - [tuple(i) for i in reflInfoList], - dtype=reflInfo_dtype) + [tuple(i) for i in reflInfoList], dtype=reflInfo_dtype + ) refl_dict[det_key] = reflInfo # Convert to our data_dict format @@ -481,8 +552,12 @@ def _residual(self): energy_cutoffs = np.r_[0.5, 1.5] * np.asarray(self.energy_cutoffs) return sxcal_obj_func( - [self.grain_params], self.instr, pick_xys_dict, pick_hkls_dict, - self.bmatx, energy_cutoffs + [self.grain_params], + self.instr, + pick_xys_dict, + pick_hkls_dict, + self.bmatx, + energy_cutoffs, ) def model(self): @@ -494,14 +569,26 @@ def _model(self): pick_hkls_dict, pick_xys_dict = self._evaluate() return sxcal_obj_func( - [self.grain_params], self.instr, pick_xys_dict, pick_hkls_dict, - self.bmatx, self.energy_cutoffs, sim_only=True + [self.grain_params], + self.instr, + pick_xys_dict, + pick_hkls_dict, + self.bmatx, + self.energy_cutoffs, + sim_only=True, ) # Objective function for Laue fitting -def sxcal_obj_func(grain_params, instr, meas_xy, hkls_idx, - bmat, energy_cutoffs, sim_only=False): +def sxcal_obj_func( + grain_params, + instr, + meas_xy, + hkls_idx, + bmat, + energy_cutoffs, + sim_only=False, +): """ Objective function for Laue-based fitting. @@ -518,9 +605,10 @@ def sxcal_obj_func(grain_params, instr, meas_xy, hkls_idx, # returns xy_det, hkls_in, angles, dspacing, energy sim_results = panel.simulate_laue_pattern( [hkls_idx[det_key], bmat], - minEnergy=energy_cutoffs[0], maxEnergy=energy_cutoffs[1], + minEnergy=energy_cutoffs[0], + maxEnergy=energy_cutoffs[1], grain_params=grain_params, - beam_vec=instr.beam_vector + beam_vec=instr.beam_vector, ) calc_xy_tmp = sim_results[0][0] @@ -548,20 +636,30 @@ def sxcal_obj_func(grain_params, instr, meas_xy, hkls_idx, def gaussian_2d(p, data): shape = data.shape x, y = np.meshgrid(range(shape[1]), range(shape[0])) - func = p[0]*np.exp( - -(p[1]*(x-p[4])*(x-p[4]) - + p[2]*(x-p[4])*(y-p[5]) - + p[3]*(y-p[5])*(y-p[5])) - ) + p[6]*(x-p[4]) + p[7]*(y-p[5]) + p[8] + func = ( + p[0] + * np.exp( + -( + p[1] * (x - p[4]) * (x - p[4]) + + p[2] * (x - p[4]) * (y - p[5]) + + p[3] * (y - p[5]) * (y - p[5]) + ) + ) + + p[6] * (x - p[4]) + + p[7] * (y - p[5]) + + p[8] + ) return func.flatten() - data.flatten() def gaussian_2d_int(y, x, *p): - func = p[0]*np.exp( - -(p[1]*(x-p[4])*(x-p[4]) - + p[2]*(x-p[4])*(y-p[5]) - + p[3]*(y-p[5])*(y-p[5])) + func = p[0] * np.exp( + -( + p[1] * (x - p[4]) * (x - p[4]) + + p[2] * (x - p[4]) * (y - p[5]) + + p[3] * (y - p[5]) * (y - p[5]) ) + ) return func.flatten() diff --git a/hexrd/core/fitting/calibration/lmfit_param_handling.py b/hexrd/core/fitting/calibration/lmfit_param_handling.py index 9a3f95964..3ba79993d 100644 --- a/hexrd/core/fitting/calibration/lmfit_param_handling.py +++ b/hexrd/core/fitting/calibration/lmfit_param_handling.py @@ -3,11 +3,26 @@ import lmfit import numpy as np -from hexrd.core.instrument import calc_angles_from_beam_vec, calc_beam_vec, HEDMInstrument -from hexrd.core.rotations import angleAxisOfRotMat, expMapOfQuat, make_rmat_euler, quatOfRotMat, RotMatEuler, rotMatOfExpMap +from hexrd.core.instrument import ( + calc_angles_from_beam_vec, + calc_beam_vec, + Detector, + HEDMInstrument, +) +from hexrd.core.rotations import ( + angleAxisOfRotMat, + expMapOfQuat, + make_rmat_euler, + quatOfRotMat, + RotMatEuler, + rotMatOfExpMap, +) from hexrd.core.material.unitcell import _lpname from 
.relative_constraints import RelativeConstraints, RelativeConstraintsType -from hexrd.core.fitting.calibration.relative_constraints import RelativeConstraints, RelativeConstraintsType +from hexrd.core.fitting.calibration.relative_constraints import ( + RelativeConstraints, + RelativeConstraintsType, +) # First is the axes_order, second is extrinsic @@ -16,8 +31,9 @@ EULER_CONVENTION_TYPES = dict | tuple | None -def create_instr_params(instr, euler_convention=DEFAULT_EULER_CONVENTION, - relative_constraints=None): +def create_instr_params( + instr, euler_convention=DEFAULT_EULER_CONVENTION, relative_constraints=None +): # add with tuples: (NAME VALUE VARY MIN MAX EXPR BRUTE_STEP) parms_list = [] @@ -28,26 +44,30 @@ def create_instr_params(instr, euler_convention=DEFAULT_EULER_CONVENTION, energy = beam['energy'] names = beam_param_names[beam_name] - parms_list.append(( - names['beam_polar'], pol, False, pol - 1, pol + 1 - )) - parms_list.append(( - names['beam_azimuth'], azim, False, azim - 1, azim + 1 - )) - parms_list.append(( - names['beam_energy'], energy, False, energy - 0.2, energy + 0.2 - )) - - parms_list.append(('instr_chi', np.degrees(instr.chi), - False, np.degrees(instr.chi)-1, - np.degrees(instr.chi)+1)) + parms_list.append((names['beam_polar'], pol, False, pol - 1, pol + 1)) + parms_list.append( + (names['beam_azimuth'], azim, False, azim - 1, azim + 1) + ) + parms_list.append( + (names['beam_energy'], energy, False, energy - 0.2, energy + 0.2) + ) + + parms_list.append( + ( + 'instr_chi', + np.degrees(instr.chi), + False, + np.degrees(instr.chi) - 1, + np.degrees(instr.chi) + 1, + ) + ) parms_list.append(('instr_tvec_x', instr.tvec[0], False, -np.inf, np.inf)) parms_list.append(('instr_tvec_y', instr.tvec[1], False, -np.inf, np.inf)) parms_list.append(('instr_tvec_z', instr.tvec[2], False, -np.inf, np.inf)) if ( - relative_constraints is None or - relative_constraints.type == RelativeConstraintsType.none + relative_constraints is None + or relative_constraints.type == RelativeConstraintsType.none ): add_unconstrained_detector_parameters( instr, @@ -82,35 +102,51 @@ def add_unconstrained_detector_parameters(instr, euler_convention, parms_list): angle_names = param_names_euler_convention(det, euler_convention) for name, angle in zip(angle_names, angles): - parms_list.append((name, - angle, - False, - angle - 2, - angle + 2)) - - parms_list.append((f'{det}_tvec_x', - panel.tvec[0], - True, - panel.tvec[0]-1, - panel.tvec[0]+1)) - parms_list.append((f'{det}_tvec_y', - panel.tvec[1], - True, - panel.tvec[1]-0.5, - panel.tvec[1]+0.5)) - parms_list.append((f'{det}_tvec_z', - panel.tvec[2], - True, - panel.tvec[2]-1, - panel.tvec[2]+1)) + parms_list.append((name, angle, False, angle - 2, angle + 2)) + + parms_list.append( + ( + f'{det}_tvec_x', + panel.tvec[0], + True, + panel.tvec[0] - 1, + panel.tvec[0] + 1, + ) + ) + parms_list.append( + ( + f'{det}_tvec_y', + panel.tvec[1], + True, + panel.tvec[1] - 0.5, + panel.tvec[1] + 0.5, + ) + ) + parms_list.append( + ( + f'{det}_tvec_z', + panel.tvec[2], + True, + panel.tvec[2] - 1, + panel.tvec[2] + 1, + ) + ) if panel.distortion is not None: p = panel.distortion.params for ii, pp in enumerate(p): - parms_list.append((f'{det}_distortion_param_{ii}', pp, - False, -np.inf, np.inf)) + parms_list.append( + ( + f'{det}_distortion_param_{ii}', + pp, + False, + -np.inf, + np.inf, + ) + ) if panel.detector_type.lower() == 'cylindrical': - parms_list.append((f'{det}_radius', panel.radius, False, - -np.inf, np.inf)) + parms_list.append( + 
(f'{det}_radius', panel.radius, False, -np.inf, np.inf) + ) def _add_constrained_detector_parameters( @@ -126,7 +162,9 @@ def _add_constrained_detector_parameters( # Convert the tilt to the specified Euler convention normalized = normalize_euler_convention(euler_convention) rme = RotMatEuler( - np.zeros(3,), + np.zeros( + 3, + ), axes_order=normalized[0], extrinsic=normalized[1], ) @@ -225,9 +263,11 @@ def fix_detector_y( def update_instrument_from_params( - instr, params, - euler_convention=DEFAULT_EULER_CONVENTION, - relative_constraints: Optional[RelativeConstraints] = None): + instr, + params, + euler_convention=DEFAULT_EULER_CONVENTION, + relative_constraints: Optional[RelativeConstraints] = None, +): """ this function updates the instrument from the lmfit parameter list. we don't have to keep track @@ -237,8 +277,9 @@ def update_instrument_from_params( implemented. """ if not isinstance(params, lmfit.Parameters): - msg = ('Only lmfit.Parameters is acceptable input. ' - f'Received: {params}') + msg = ( + 'Only lmfit.Parameters is acceptable input. ' f'Received: {params}' + ) raise NotImplementedError(msg) # This supports single XRS or multi XRS @@ -257,14 +298,16 @@ def update_instrument_from_params( chi = np.radians(params['instr_chi'].value) instr.chi = chi - instr_tvec = [params['instr_tvec_x'].value, - params['instr_tvec_y'].value, - params['instr_tvec_z'].value] + instr_tvec = [ + params['instr_tvec_x'].value, + params['instr_tvec_y'].value, + params['instr_tvec_z'].value, + ] instr.tvec = np.r_[instr_tvec] if ( - relative_constraints is None or - relative_constraints.type == RelativeConstraintsType.none + relative_constraints is None + or relative_constraints.type == RelativeConstraintsType.none ): update_unconstrained_detector_parameters( instr, @@ -294,9 +337,11 @@ def update_unconstrained_detector_parameters(instr, params, euler_convention): det = det_name.replace('-', '_') set_detector_angles_euler(detector, det, params, euler_convention) - tvec = np.r_[params[f'{det}_tvec_x'].value, - params[f'{det}_tvec_y'].value, - params[f'{det}_tvec_z'].value] + tvec = np.r_[ + params[f'{det}_tvec_x'].value, + params[f'{det}_tvec_y'].value, + params[f'{det}_tvec_z'].value, + ] detector.tvec = tvec if detector.detector_type.lower() == 'cylindrical': rad = params[f'{det}_radius'].value @@ -326,7 +371,6 @@ def _update_constrained_detector_parameters( euler_convention: EULER_CONVENTION_TYPES, prefix: str, constraint_params: dict, - ): tvec = constraint_params['translation'] tilt = constraint_params['tilt'] @@ -424,8 +468,9 @@ def update_group_constrained_detector_parameters( ) -def _tilt_to_rmat(tilt: np.ndarray, - euler_convention: dict | tuple) -> np.ndarray: +def _tilt_to_rmat( + tilt: np.ndarray, euler_convention: dict | tuple +) -> np.ndarray: # Convert the tilt to exponential map parameters, and then # to the rotation matrix, and return. 
if euler_convention is None: @@ -464,11 +509,9 @@ def create_tth_parameters( val = np.degrees(np.mean(np.hstack(ds_ang))) - parms_list.append((f'{prefix}{ii}', - val, - True, - val-5., - val+5.)) + parms_list.append( + (f'{prefix}{ii}', val, True, val - 5.0, val + 5.0) + ) return parms_list @@ -505,13 +548,15 @@ def create_material_params(material, refinements=None): refine = True if refinements is None else refinements[refine_idx] val = material.lparms[i] * multiplier - parms_list.append(( - f'{material.name}_{lp_name}', - val, - refine, - val - diff, - val + diff, - )) + parms_list.append( + ( + f'{material.name}_{lp_name}', + val, + refine, + val - diff, + val + diff, + ) + ) refine_idx += 1 @@ -551,13 +596,15 @@ def create_grain_params(base_name, grain, refinements=None): parms_list = [] for i, name in enumerate(param_names): - parms_list.append(( - name, - grain[i], - refinements[i], - grain[i] - 2, - grain[i] + 2, - )) + parms_list.append( + ( + name, + grain[i], + refinements[i], + grain[i] - 2, + grain[i] + 2, + ) + ) return parms_list @@ -585,8 +632,9 @@ def add_engineering_constraints(params, engineering_constraints): if engineering_constraints == 'TARDIS': # Since these plates always have opposite signs in y, we can add # their absolute values to get the difference. - dist_plates = (np.abs(params['IMAGE_PLATE_2_tvec_y']) + - np.abs(params['IMAGE_PLATE_4_tvec_y'])) + dist_plates = np.abs(params['IMAGE_PLATE_2_tvec_y']) + np.abs( + params['IMAGE_PLATE_4_tvec_y'] + ) min_dist = 22.83 max_dist = 23.43 @@ -611,11 +659,13 @@ def add_engineering_constraints(params, engineering_constraints): params['IMAGE_PLATE_4_tvec_y'].value + 0.5 * delta ) - params.add('tardis_distance_between_plates', - value=dist_plates, - min=min_dist, - max=max_dist, - vary=True) + params.add( + 'tardis_distance_between_plates', + value=dist_plates, + min=min_dist, + max=max_dist, + vary=True, + ) expr = 'tardis_distance_between_plates - abs(IMAGE_PLATE_2_tvec_y)' params['IMAGE_PLATE_4_tvec_y'].expr = expr @@ -628,7 +678,7 @@ def validate_params_list(params_list): # Make sure there are no duplicate names duplicate_names = [] for i, x in enumerate(params_list): - for y in params_list[i + 1:]: + for y in params_list[i + 1 :]: if x[0] == y[0]: duplicate_names.append(x[0]) @@ -667,7 +717,7 @@ def detector_angles_euler(panel, euler_convention): normalized = normalize_euler_convention(euler_convention) rmat = panel.rmat rme = RotMatEuler( - np.zeros(3,), + np.zeros(3), axes_order=normalized[0], extrinsic=normalized[1], ) diff --git a/hexrd/core/fitting/calibration/multigrain.py b/hexrd/core/fitting/calibration/multigrain.py index afa1ef198..9e7bf59a2 100644 --- a/hexrd/core/fitting/calibration/multigrain.py +++ b/hexrd/core/fitting/calibration/multigrain.py @@ -15,22 +15,33 @@ logger.setLevel('INFO') # grains +# fmt: off grain_flags_DFLT = np.array( [1, 1, 1, 1, 0, 1, 0, 0, 0, 0, 0, 0], dtype=bool ) +# fmt: on -ext_eta_tol = np.radians(5.) 
# for HEDM cal, may make this a user param +ext_eta_tol = np.radians(5.0) # for HEDM cal, may make this a user param def calibrate_instrument_from_sx( - instr, grain_params, bmat, xyo_det, hkls_idx, - param_flags=None, grain_flags=None, - ome_period=None, - xtol=cnst.sqrt_epsf, ftol=cnst.sqrt_epsf, - factor=10., sim_only=False, use_robust_lsq=False): + instr, + grain_params, + bmat, + xyo_det, + hkls_idx, + param_flags=None, + grain_flags=None, + ome_period=None, + xtol=cnst.sqrt_epsf, + ftol=cnst.sqrt_epsf, + factor=10.0, + sim_only=False, + use_robust_lsq=False, +): """ arguments xyo_det, hkls_idx are DICTs over panels @@ -51,8 +62,7 @@ def calibrate_instrument_from_sx( for det_key in instr.detectors: for ig in range(ngrains): xyo_det[det_key][ig][:, 2] = rotations.mapAngle( - xyo_det[det_key][ig][:, 2], - ome_period + xyo_det[det_key][ig][:, 2], ome_period ) # first grab the instrument parameters @@ -66,25 +76,35 @@ def calibrate_instrument_from_sx( if grain_flags is None: grain_flags = np.tile(grain_flags_DFLT, ngrains) - plist_full = np.concatenate( - [plist_full, np.hstack(grain_params)] - ) + plist_full = np.concatenate([plist_full, np.hstack(grain_params)]) plf_copy = np.copy(plist_full) # concatenate refinement flags refine_flags = np.hstack([param_flags, grain_flags]) plist_fit = plist_full[refine_flags] - fit_args = (plist_full, - param_flags, grain_flags, - instr, xyo_det, hkls_idx, - bmat, ome_period) + fit_args = ( + plist_full, + param_flags, + grain_flags, + instr, + xyo_det, + hkls_idx, + bmat, + ome_period, + ) if sim_only: return sxcal_obj_func( - plist_fit, plist_full, - param_flags, grain_flags, - instr, xyo_det, hkls_idx, - bmat, ome_period, - sim_only=True) + plist_fit, + plist_full, + param_flags, + grain_flags, + instr, + xyo_det, + hkls_idx, + bmat, + ome_period, + sim_only=True, + ) else: logger.info("Set up to refine:") for i in np.where(refine_flags)[0]: @@ -93,9 +113,13 @@ def calibrate_instrument_from_sx( # run optimization if use_robust_lsq: result = least_squares( - sxcal_obj_func, plist_fit, args=fit_args, - xtol=xtol, ftol=ftol, - loss='soft_l1', method='trf' + sxcal_obj_func, + plist_fit, + args=fit_args, + xtol=xtol, + ftol=ftol, + loss='soft_l1', + method='trf', ) x = result.x resd = result.fun @@ -104,9 +128,13 @@ def calibrate_instrument_from_sx( else: # do least squares problem x, cov_x, infodict, mesg, ierr = leastsq( - sxcal_obj_func, plist_fit, args=fit_args, - factor=factor, xtol=xtol, ftol=ftol, - full_output=1 + sxcal_obj_func, + plist_fit, + args=fit_args, + factor=factor, + xtol=xtol, + ftol=ftol, + full_output=1, ) resd = infodict['fvec'] if ierr not in [1, 2, 3, 4]: @@ -121,11 +149,17 @@ def calibrate_instrument_from_sx( # run simulation with optimized results sim_final = sxcal_obj_func( - x, plist_full, - param_flags, grain_flags, - instr, xyo_det, hkls_idx, - bmat, ome_period, - sim_only=True) + x, + plist_full, + param_flags, + grain_flags, + instr, + xyo_det, + hkls_idx, + bmat, + ome_period, + sim_only=True, + ) # ??? reset instrument here? 
instr.update_from_parameter_list(fit_params) @@ -133,8 +167,10 @@ def calibrate_instrument_from_sx( # report final logger.info("Optimization Reults:") for i in np.where(refine_flags)[0]: - logger.info("\t%s = %1.7e --> %1.7e" - % (pnames[i], plf_copy[i], fit_params[i])) + logger.info( + "\t%s = %1.7e --> %1.7e" + % (pnames[i], plf_copy[i], fit_params[i]) + ) return fit_params, resd, sim_final @@ -162,9 +198,7 @@ def generate_parameter_names(instr, grain_params): # now add distortion if there if panel.distortion is not None: for j in range(len(panel.distortion.params)): - pnames.append( - '{:>24s}'.format('%s dparam[%d]' % (det_key, j)) - ) + pnames.append('{:>24s}'.format('%s dparam[%d]' % (det_key, j))) grain_params = np.atleast_2d(grain_params) for ig, grain in enumerate(grain_params): @@ -180,26 +214,31 @@ def generate_parameter_names(instr, grain_params): '{:>24s}'.format('grain %d vinv_s[2]' % ig), '{:>24s}'.format('grain %d vinv_s[3]' % ig), '{:>24s}'.format('grain %d vinv_s[4]' % ig), - '{:>24s}'.format('grain %d vinv_s[5]' % ig) + '{:>24s}'.format('grain %d vinv_s[5]' % ig), ] return pnames -def sxcal_obj_func(plist_fit, plist_full, - param_flags, grain_flags, - instr, xyo_det, hkls_idx, - bmat, ome_period, - sim_only=False, return_value_flag=None): - """ - """ +def sxcal_obj_func( + plist_fit, + plist_full, + param_flags, + grain_flags, + instr, + xyo_det, + hkls_idx, + bmat, + ome_period, + sim_only=False, + return_value_flag=None, +): + """ """ npi = len(instr.calibration_parameters) NP_GRN = 12 # stack flags and force bool repr - refine_flags = np.array( - np.hstack([param_flags, grain_flags]), - dtype=bool) + refine_flags = np.array(np.hstack([param_flags, grain_flags]), dtype=bool) # fill out full parameter list # !!! no scaling for now @@ -247,7 +286,7 @@ def sxcal_obj_func(plist_fit, plist_full, xy_unwarped[det_key].append(xyo[:, :2]) meas_omes[det_key].append(xyo[:, 2]) - if panel.distortion is not None: # do unwarping + if panel.distortion is not None: # do unwarping xy_unwarped[det_key][ig] = panel.distortion.apply( xy_unwarped[det_key][ig] ) @@ -267,22 +306,28 @@ def sxcal_obj_func(plist_fit, plist_full, ghat_c = np.dot(rmat_c.T, ghat_s) match_omes, calc_omes_tmp = grainutil.matchOmegas( - xyo, ghkls.T, - chi, rmat_c, bmat, wavelength, + xyo, + ghkls.T, + chi, + rmat_c, + bmat, + wavelength, vInv=vinv_s, beamVec=bvec, - omePeriod=ome_period) + omePeriod=ome_period, + ) rmat_s_arr = xfcapi.make_sample_rmat( chi, np.ascontiguousarray(calc_omes_tmp) ) calc_xy_tmp = xfcapi.gvec_to_xy( - ghat_c.T, rmat_d, rmat_s_arr, rmat_c, - tvec_d, tvec_s, tvec_c + ghat_c.T, rmat_d, rmat_s_arr, rmat_c, tvec_d, tvec_s, tvec_c ) if np.any(np.isnan(calc_xy_tmp)): - logger.warning("infeasible parameters: may want to scale back " - "finite difference step size") + logger.warning( + "infeasible parameters: may want to scale back " + "finite difference step size" + ) calc_omes[det_key].append(calc_omes_tmp) calc_xy[det_key].append(calc_xy_tmp) @@ -317,18 +362,17 @@ def sxcal_obj_func(plist_fit, plist_full, diff_vecs_xy = calc_xy_all - meas_xy_all diff_ome = rotations.angularDifference(calc_omes_all, meas_omes_all) retval = np.hstack( - [diff_vecs_xy, - diff_ome.reshape(npts_tot, 1)] + [diff_vecs_xy, diff_ome.reshape(npts_tot, 1)] ).flatten() if return_value_flag == 1: retval = sum(abs(retval)) elif return_value_flag == 2: - denom = npts_tot - len(plist_fit) - 1. + denom = npts_tot - len(plist_fit) - 1.0 if denom != 0: - nu_fac = 1. / denom + nu_fac = 1.0 / denom else: - nu_fac = 1. 
-            nu_fac = 1 / (npts_tot - len(plist_fit) - 1.)
+            nu_fac = 1.0
+        nu_fac = 1 / (npts_tot - len(plist_fit) - 1.0)
         retval = nu_fac * sum(retval**2)
     return retval
@@ -346,15 +390,14 @@ def parse_reflection_tables(cfg, instr, grain_ids, refit_idx=None):
         idx_0[det_key] = []
         for ig, grain_id in enumerate(grain_ids):
             spots_filename = os.path.join(
-                cfg.analysis_dir, os.path.join(
-                    det_key, 'spots_%05d.out' % grain_id
-                )
+                cfg.analysis_dir,
+                os.path.join(det_key, 'spots_%05d.out' % grain_id),
             )

             # load pull_spots output table
             gtable = np.loadtxt(spots_filename, ndmin=2)
             if len(gtable) == 0:
-                gtable = np.nan*np.ones((1, 17))
+                gtable = np.nan * np.ones((1, 17))

             # apply conditions for accepting valid data
             valid_reflections = gtable[:, 0] >= 0  # is indexed
@@ -362,30 +405,37 @@ def parse_reflection_tables(cfg, instr, grain_ids, refit_idx=None):
             # throw away extremem etas
             p90 = rotations.angularDifference(gtable[:, 8], cnst.piby2)
             m90 = rotations.angularDifference(gtable[:, 8], -cnst.piby2)
-            accept_etas = np.logical_or(p90 > ext_eta_tol,
-                                        m90 > ext_eta_tol)
+            accept_etas = np.logical_or(p90 > ext_eta_tol, m90 > ext_eta_tol)

             logger.info(f"panel '{det_key}', grain {grain_id}")
-            logger.info(f"{sum(valid_reflections)} of {len(gtable)} "
-                        "reflections are indexed")
-            logger.info(f"{sum(not_saturated)} of {sum(valid_reflections)}"
-                        " valid reflections be are below" +
-                        f" saturation threshold of {panel.saturation_level}")
-            logger.info(f"{sum(accept_etas)} of {len(gtable)}"
-                        " reflections be are greater than " +
-                        f" {np.degrees(ext_eta_tol)} from the rotation axis")
+            logger.info(
+                f"{sum(valid_reflections)} of {len(gtable)} "
+                "reflections are indexed"
+            )
+            logger.info(
+                f"{sum(not_saturated)} of {sum(valid_reflections)}"
+                " valid reflections are below"
+                + f" saturation threshold of {panel.saturation_level}"
+            )
+            logger.info(
+                f"{sum(accept_etas)} of {len(gtable)}"
+                " reflections are greater than "
+                + f" {np.degrees(ext_eta_tol)} from the rotation axis"
+            )

             # valid reflections index
             if refit_idx is None:
                 idx = np.logical_and(
                     valid_reflections,
-                    np.logical_and(not_saturated, accept_etas)
+                    np.logical_and(not_saturated, accept_etas),
                 )
                 idx_0[det_key].append(idx)
             else:
                 idx = refit_idx[det_key][ig]
                 idx_0[det_key].append(idx)
-                logger.info(f"input reflection specify {sum(idx)} of "
-                            f"{len(gtable)} total valid reflections")
+                logger.info(
+                    f"input reflections specify {sum(idx)} of "
+                    f"{len(gtable)} total valid reflections"
+                )

             hkls[det_key].append(gtable[idx, 2:5])
             meas_omes = gtable[idx, 12].reshape(sum(idx), 1)
diff --git a/hexrd/core/fitting/calibration/powder.py b/hexrd/core/fitting/calibration/powder.py
index dc25f32f3..bdb94f53e 100644
--- a/hexrd/core/fitting/calibration/powder.py
+++ b/hexrd/core/fitting/calibration/powder.py
@@ -8,7 +8,10 @@
 from hexrd.core.utils.hkl import hkl_to_str, str_to_hkl

 from .calibrator import Calibrator
-from .lmfit_param_handling import create_material_params, update_material_from_params
+from .lmfit_param_handling import (
+    create_material_params,
+    update_material_from_params,
+)

 nfields_powder_data = 8

@@ -16,14 +19,26 @@
 class PowderCalibrator(Calibrator):
     type = 'powder'

-    def __init__(self, instr, material, img_dict, default_refinements=None,
-                 tth_tol=None, eta_tol=0.25,
-                 fwhm_estimate=None, min_pk_sep=1e-3, min_ampl=0.,
-                 pktype='pvoigt', bgtype='linear',
-                 tth_distortion=None, calibration_picks=None,
-                 xray_source: Optional[str] = None):
-        assert list(instr.detectors.keys()) == list(img_dict.keys()), \
-            "instrument and image dict
must have the same keys" + def __init__( + self, + instr, + material, + img_dict, + default_refinements=None, + tth_tol=None, + eta_tol=0.25, + fwhm_estimate=None, + min_pk_sep=1e-3, + min_ampl=0.0, + pktype='pvoigt', + bgtype='linear', + tth_distortion=None, + calibration_picks=None, + xray_source: Optional[str] = None, + ): + assert list(instr.detectors.keys()) == list( + img_dict.keys() + ), "instrument and image dict must have the same keys" self.instr = instr self.material = material @@ -79,8 +94,9 @@ def _update_tth_distortion_panels(self): def create_lmfit_params(self, current_params): # There shouldn't be more than one calibrator for a given material, so # just assume we have a unique name... - params = create_material_params(self.material, - self.default_refinements) + params = create_material_params( + self.material, self.default_refinements + ) # If multiple powder calibrators were used for the same material (such # as in 2XRS), then don't add params again. @@ -110,11 +126,13 @@ def tth_tol(self, x): @property def spectrum_kwargs(self): - return dict(pktype=self.pktype, - bgtype=self.bgtype, - fwhm_init=self.fwhm_estimate, - min_ampl=self.min_ampl, - min_pk_sep=self.min_pk_sep) + return dict( + pktype=self.pktype, + bgtype=self.bgtype, + fwhm_init=self.fwhm_estimate, + min_ampl=self.min_ampl, + min_pk_sep=self.min_pk_sep, + ) @property def calibration_picks(self): @@ -152,7 +170,7 @@ def calibration_picks(self, v): self.data_dict = data_dict - def autopick_points(self, fit_tth_tol=5., int_cutoff=1e-4): + def autopick_points(self, fit_tth_tol=5.0, int_cutoff=1e-4): """ return the RHS for the instrument DOF and image dict @@ -169,7 +187,7 @@ def autopick_points(self, fit_tth_tol=5., int_cutoff=1e-4): with switch_xray_source(self.instr, self.xray_source): return self._autopick_points(fit_tth_tol, int_cutoff) - def _autopick_points(self, fit_tth_tol=5., int_cutoff=1e-4): + def _autopick_points(self, fit_tth_tol=5.0, int_cutoff=1e-4): # ideal tth dsp_ideal = np.atleast_1d(self.plane_data.getPlaneSpacings()) hkls_ref = self.plane_data.hkls.T @@ -244,13 +262,15 @@ def _autopick_points(self, fit_tth_tol=5., int_cutoff=1e-4): ) # cat results - output = np.hstack([ - xy_meas, - tth_meas.reshape(npeaks, 1), - this_hkl, - this_dsp0.reshape(npeaks, 1), - eta_ref_tile.reshape(npeaks, 1), - ]) + output = np.hstack( + [ + xy_meas, + tth_meas.reshape(npeaks, 1), + this_hkl, + this_dsp0.reshape(npeaks, 1), + eta_ref_tile.reshape(npeaks, 1), + ] + ) ret.append(output) if not ret: @@ -311,18 +331,16 @@ def _evaluate(self, output='residual'): # to (tth, eta) meas_xy = pdata[:, :2] updated_angles, _ = panel.cart_to_angles( - meas_xy, - tvec_s=self.instr.tvec, - apply_distortion=True + meas_xy, tvec_s=self.instr.tvec, apply_distortion=True ) # derive ideal tth positions from additional ring point info hkls = pdata[:, 3:6] gvecs = np.dot(hkls, bmat.T) - dsp0 = 1./np.sqrt(np.sum(gvecs*gvecs, axis=1)) + dsp0 = 1.0 / np.sqrt(np.sum(gvecs * gvecs, axis=1)) # updated reference Bragg angles - tth0 = 2.*np.arcsin(0.5*wlen/dsp0) + tth0 = 2.0 * np.arcsin(0.5 * wlen / dsp0) # !!! 
get eta from mapped markers rather than ref # eta0 = pdata[:, -1] @@ -346,23 +364,16 @@ def _evaluate(self, output='residual'): # meas_xy.flatten() - calc_xy.flatten() # ) retval = np.append( - retval, - updated_angles[:, 0].flatten() - tth0.flatten() + retval, updated_angles[:, 0].flatten() - tth0.flatten() ) elif output == 'model': calc_xy = panel.angles_to_cart( - tth_eta, - tvec_s=self.instr.tvec, - apply_distortion=True - ) - retval = np.append( - retval, - calc_xy.flatten() + tth_eta, tvec_s=self.instr.tvec, apply_distortion=True ) + retval = np.append(retval, calc_xy.flatten()) else: raise RuntimeError( - "unrecognized output flag '%s'" - % output + "unrecognized output flag '%s'" % output ) return retval diff --git a/hexrd/core/fitting/calibration/relative_constraints.py b/hexrd/core/fitting/calibration/relative_constraints.py index 547fdd29b..762719187 100644 --- a/hexrd/core/fitting/calibration/relative_constraints.py +++ b/hexrd/core/fitting/calibration/relative_constraints.py @@ -8,6 +8,7 @@ class RelativeConstraintsType(Enum): """These are relative constraints between the detectors""" + # 'none' means no relative constraints none = 'None' # 'group' means constrain tilts/translations within a group @@ -18,6 +19,7 @@ class RelativeConstraintsType(Enum): class RotationCenter(Enum): """These are different centers for relative constraint rotations""" + # Rotate about the mean center of all the detectors instrument_mean_center = 'InstrumentMeanCenter' @@ -164,8 +166,9 @@ def center_of_rotation(self, instr: HEDMInstrument) -> np.ndarray: raise NotImplementedError(self.rotation_center) -def create_relative_constraints(type: RelativeConstraintsType, - instr: HEDMInstrument): +def create_relative_constraints( + type: RelativeConstraintsType, instr: HEDMInstrument +): types = { 'None': RelativeConstraintsNone, 'Group': RelativeConstraintsGroup, diff --git a/hexrd/core/fitting/calibration/structureless.py b/hexrd/core/fitting/calibration/structureless.py index 2ceafeaa0..5704b88a0 100644 --- a/hexrd/core/fitting/calibration/structureless.py +++ b/hexrd/core/fitting/calibration/structureless.py @@ -6,8 +6,19 @@ from hexrd.core.instrument import switch_xray_source -from .lmfit_param_handling import add_engineering_constraints, create_instr_params, create_tth_parameters, DEFAULT_EULER_CONVENTION, tth_parameter_prefixes, update_instrument_from_params -from hexrd.core.fitting.calibration.relative_constraints import create_relative_constraints, RelativeConstraints, RelativeConstraintsType +from .lmfit_param_handling import ( + add_engineering_constraints, + create_instr_params, + create_tth_parameters, + DEFAULT_EULER_CONVENTION, + tth_parameter_prefixes, + update_instrument_from_params, +) +from hexrd.core.fitting.calibration.relative_constraints import ( + create_relative_constraints, + RelativeConstraints, + RelativeConstraintsType, +) class StructurelessCalibrator: @@ -29,20 +40,24 @@ class StructurelessCalibrator: 22.83 mm <= |IMAGE-PLATE-2 tvec[1]| + |IMAGE-PLATE-2 tvec[1]| <= 23.43 mm """ - def __init__(self, - instr, - data, - tth_distortion=None, - engineering_constraints=None, - relative_constraints_type=RelativeConstraintsType.none, - euler_convention=DEFAULT_EULER_CONVENTION): + + def __init__( + self, + instr, + data, + tth_distortion=None, + engineering_constraints=None, + relative_constraints_type=RelativeConstraintsType.none, + euler_convention=DEFAULT_EULER_CONVENTION, + ): self._instr = instr self._data = data self._tth_distortion = tth_distortion self._engineering_constraints 
= engineering_constraints self._relative_constraints = create_relative_constraints( - relative_constraints_type, self.instr) + relative_constraints_type, self.instr + ) self.euler_convention = euler_convention self._update_tth_distortion_panels() self.make_lmfit_params() @@ -80,10 +95,9 @@ def calc_residual(self, params): prefixes = tth_parameter_prefixes(self.instr) for xray_source in self.data: prefix = prefixes[xray_source] - for ii, (rng, corr_rng) in enumerate(zip( - meas_angles[xray_source], - tth_correction[xray_source] - )): + for ii, (rng, corr_rng) in enumerate( + zip(meas_angles[xray_source], tth_correction[xray_source]) + ): for det_name, panel in self.instr.detectors.items(): if rng[det_name] is None or rng[det_name].size == 0: continue @@ -98,13 +112,11 @@ def calc_residual(self, params): return np.hstack(residual) def set_minimizer(self): - self.fitter = lmfit.Minimizer(self.calc_residual, - self.params, - nan_policy='omit') + self.fitter = lmfit.Minimizer( + self.calc_residual, self.params, nan_policy='omit' + ) - def run_calibration(self, - method='least_squares', - odict=None): + def run_calibration(self, method='least_squares', odict=None): """ odict is the options dictionary """ @@ -124,14 +136,12 @@ def run_calibration(self, } fdict.update(odict) - self.res = self.fitter.least_squares(self.params, - **fdict) + self.res = self.fitter.least_squares(self.params, **fdict) else: fdict = odict - self.res = self.fitter.scalar_minimize(method=method, - params=self.params, - max_nfev=50000, - **fdict) + self.res = self.fitter.scalar_minimize( + method=method, params=self.params, max_nfev=50000, **fdict + ) self.params = self.res.params # res = self.fitter.least_squares(**fdict) @@ -169,7 +179,8 @@ def relative_constraints_type(self, v: Optional[RelativeConstraintsType]): current = getattr(self, '_relative_constraints', None) if current is None or current.type != v: self.relative_constraints = create_relative_constraints( - v, self.instr) + v, self.instr + ) @property def relative_constraints(self) -> RelativeConstraints: @@ -245,9 +256,10 @@ def meas_angles(self) -> dict: panel = self.instr.detectors[det_name] angles, _ = panel.cart_to_angles( - meas_xy, - tvec_s=self.instr.tvec, - apply_distortion=True) + meas_xy, + tvec_s=self.instr.tvec, + apply_distortion=True, + ) ang_dict[det_name] = angles ang_list.append(ang_dict) diff --git a/hexrd/core/fitting/fitpeak.py b/hexrd/core/fitting/fitpeak.py index 77215f38b..3d175821b 100644 --- a/hexrd/core/fitting/fitpeak.py +++ b/hexrd/core/fitting/fitpeak.py @@ -26,6 +26,7 @@ # ============================================================ import numpy as np + # from numpy.polynomial import chebyshev from scipy import integrate @@ -51,11 +52,11 @@ # dcs param values # !!! 
converted from deg^-1 in Von Dreele's paper -alpha0, alpha1, beta0, beta1 = np.r_[14.4, 0., 3.016, -7.94] +alpha0, alpha1, beta0, beta1 = np.r_[14.4, 0.0, 3.016, -7.94] def cnst_fit_obj(x, b): - return np.ones_like(x)*b + return np.ones_like(x) * b def cnst_fit_jac(x, b): @@ -63,7 +64,7 @@ def cnst_fit_jac(x, b): def lin_fit_obj(x, m, b): - return m*np.asarray(x) + b + return m * np.asarray(x) + b def lin_fit_jac(x, m, b): @@ -72,19 +73,19 @@ def lin_fit_jac(x, m, b): def quad_fit_obj(x, a, b, c): x = np.asarray(x) - return a*x**2 + b*x + c + return a * x**2 + b * x + c def quad_fit_jac(x, a, b, c): x = np.asarray(x) - return a*x**2 + b*x + c + return a * x**2 + b * x + c return np.vstack([x**2, x, np.ones_like(x)]).T def _amplitude_guess(x, x0, y, fwhm): - pt_l = np.argmin(np.abs(x - (x0 - 0.5*fwhm))) - pt_h = np.argmin(np.abs(x - (x0 + 0.5*fwhm))) - return np.max(y[pt_l:pt_h + 1]) + pt_l = np.argmin(np.abs(x - (x0 - 0.5 * fwhm))) + pt_h = np.argmin(np.abs(x - (x0 + 0.5 * fwhm))) + return np.max(y[pt_l : pt_h + 1]) # ============================================================================= @@ -119,7 +120,9 @@ def estimate_pk_parms_1d(x, f, pktype='pvoigt'): # handle background # ??? make kernel width a kwarg? - bkg = snip1d(np.atleast_2d(f), w=int(2*npts/3.), max_workers=1).flatten() + bkg = snip1d( + np.atleast_2d(f), w=int(2 * npts / 3.0), max_workers=1 + ).flatten() # fit linear bg and grab params bp, _ = optimize.curve_fit(lin_fit_obj, x, bkg, jac=lin_fit_jac) @@ -134,23 +137,23 @@ def estimate_pk_parms_1d(x, f, pktype='pvoigt'): # fix center index if cen_index > 0 and cen_index < npts - 1: - left_hm = np.argmin(abs(pint[:cen_index] - 0.5*A)) - right_hm = np.argmin(abs(pint[cen_index:] - 0.5*A)) + left_hm = np.argmin(abs(pint[:cen_index] - 0.5 * A)) + right_hm = np.argmin(abs(pint[cen_index:] - 0.5 * A)) elif cen_index == 0: - right_hm = np.argmin(abs(pint[cen_index:] - 0.5*A)) + right_hm = np.argmin(abs(pint[cen_index:] - 0.5 * A)) left_hm = right_hm elif cen_index == npts - 1: - left_hm = np.argmin(abs(pint[:cen_index] - 0.5*A)) + left_hm = np.argmin(abs(pint[:cen_index] - 0.5 * A)) right_hm = left_hm # FWHM estimation try: FWHM = x[cen_index + right_hm] - x[left_hm] - except(IndexError): + except IndexError: FWHM = 0 - if FWHM <= 0 or FWHM > 0.75*npts: + if FWHM <= 0 or FWHM > 0.75 * npts: # something is weird, so punt... - FWHM = 0.25*(x[-1] - x[0]) + FWHM = 0.25 * (x[-1] - x[0]) # set params if pktype in ['gaussian', 'lorentzian']: @@ -195,62 +198,69 @@ def fit_pk_parms_1d(p0, x, f, pktype='pvoigt'): peak type """ - weight = np.max(f)*10. 
# hard coded should be changed + weight = np.max(f) * 10.0 # hard coded should be changed fitArgs = (x, f, pktype) if pktype == 'gaussian': p, outflag = optimize.leastsq( - fit_pk_obj_1d, p0, - args=fitArgs, Dfun=eval_pk_deriv_1d, - ftol=ftol, xtol=xtol + fit_pk_obj_1d, + p0, + args=fitArgs, + Dfun=eval_pk_deriv_1d, + ftol=ftol, + xtol=xtol, ) elif pktype == 'lorentzian': p, outflag = optimize.leastsq( - fit_pk_obj_1d, p0, - args=fitArgs, Dfun=eval_pk_deriv_1d, - ftol=ftol, xtol=xtol + fit_pk_obj_1d, + p0, + args=fitArgs, + Dfun=eval_pk_deriv_1d, + ftol=ftol, + xtol=xtol, ) elif pktype == 'pvoigt': - lb = [p0[0]*0.5, np.min(x), 0., 0., 0., None] - ub = [p0[0]*2.0, np.max(x), 4.*p0[2], 1., 2.*p0[4], None] + lb = [p0[0] * 0.5, np.min(x), 0.0, 0.0, 0.0, None] + ub = [p0[0] * 2.0, np.max(x), 4.0 * p0[2], 1.0, 2.0 * p0[4], None] fitArgs = (x, f, pktype, weight, lb, ub) p, outflag = optimize.leastsq( - fit_pk_obj_1d_bnded, p0, - args=fitArgs, - ftol=ftol, xtol=xtol + fit_pk_obj_1d_bnded, p0, args=fitArgs, ftol=ftol, xtol=xtol ) elif pktype == 'split_pvoigt': + # fmt: off lb = [p0[0]*0.5, np.min(x), 0., 0., 0., 0., 0., None] ub = [p0[0]*2.0, np.max(x), 4.*p0[2], 4.*p0[2], 1., 1., 2.*p0[4], None] + # fmt: on fitArgs = (x, f, pktype, weight, lb, ub) p, outflag = optimize.leastsq( - fit_pk_obj_1d_bnded, p0, - args=fitArgs, - ftol=ftol, xtol=xtol + fit_pk_obj_1d_bnded, p0, args=fitArgs, ftol=ftol, xtol=xtol ) elif pktype == 'tanh_stepdown': p, outflag = optimize.leastsq( - fit_pk_obj_1d, p0, - args=fitArgs, - ftol=ftol, xtol=xtol) + fit_pk_obj_1d, p0, args=fitArgs, ftol=ftol, xtol=xtol + ) elif pktype == 'dcs_pinkbeam': # !!!: for some reason the 'trf' method was not behaving well, # so switched to 'lm' + # fmt: off lb = np.array([0.0, x.min(), -100., -100., -100., -100., 0., 0., -np.inf, -np.inf, -np.inf]) ub = np.array([np.inf, x.max(), 100., 100., 100., 100., 10., 10., np.inf, np.inf, np.inf]) + # fmt: on res = optimize.least_squares( - fit_pk_obj_1d, p0, + fit_pk_obj_1d, + p0, jac='2-point', # bounds=(), # (lb, ub), method='lm', args=fitArgs, ftol=ftol, - xtol=xtol) + xtol=xtol, + ) p = res['x'] # outflag = res['success'] else: @@ -264,10 +274,7 @@ def fit_pk_parms_1d(p0, x, f, pktype='pvoigt'): return p -def fit_mpk_parms_1d( - p0, x, f0, pktype, num_pks, - bgtype=None, bnds=None - ): +def fit_mpk_parms_1d(p0, x, f0, pktype, num_pks, bgtype=None, bnds=None): """ Fit MULTIPLE 1d analytic functions to diffraction data. @@ -303,23 +310,26 @@ def fit_mpk_parms_1d( if bnds is None: p = optimize.least_squares( - fit_mpk_obj_1d, p0, - args=fitArgs, ftol=ftol, xtol=xtol + fit_mpk_obj_1d, p0, args=fitArgs, ftol=ftol, xtol=xtol ) else: p = optimize.least_squares( - fit_mpk_obj_1d, p0, - bounds=bnds, args=fitArgs, ftol=ftol, xtol=xtol + fit_mpk_obj_1d, p0, bounds=bnds, args=fitArgs, ftol=ftol, xtol=xtol ) return p.x def estimate_mpk_parms_1d( - pk_pos_0, x, f, - pktype='pvoigt', bgtype='linear', - fwhm_guess=None, center_bnd=0.02, - amp_lim_mult=[0.1, 10.], fwhm_lim_mult=[0.5, 2.] - ): + pk_pos_0, + x, + f, + pktype='pvoigt', + bgtype='linear', + fwhm_guess=None, + center_bnd=0.02, + amp_lim_mult=[0.1, 10.0], + fwhm_lim_mult=[0.5, 2.0], +): """ Generate function-specific estimate for multi-peak parameters. 
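A minimal usage sketch of the multi-peak pair reformatted in the surrounding
hunks: estimate_mpk_parms_1d builds an initial parameter vector plus bounds
from rough peak positions, and fit_mpk_parms_1d refines them with bounded
least squares. The synthetic spectrum below is made up for illustration, and
the (p0, bnds) return convention of estimate_mpk_parms_1d is an assumption
inferred from fit_mpk_parms_1d's bnds argument; it is not shown in these
hunks.

    import numpy as np
    from hexrd.core.fitting.fitpeak import (
        estimate_mpk_parms_1d,
        fit_mpk_parms_1d,
    )

    # synthetic two-peak spectrum on a linear background (made-up data)
    x = np.linspace(5.0, 15.0, 1000)
    f = (
        100.0 * np.exp(-0.5 * ((x - 8.0) / 0.1) ** 2)
        + 40.0 * np.exp(-0.5 * ((x - 12.0) / 0.1) ** 2)
        + 2.0
        + 0.1 * x
    )

    # rough two-theta positions of the peaks to fit
    pk_pos_0 = np.r_[8.0, 12.0]

    # initial parameters and (lower, upper) bounds estimated from the data;
    # the (p0, bnds) unpacking is assumed, as noted above
    p0, bnds = estimate_mpk_parms_1d(
        pk_pos_0, x, f, pktype='pvoigt', bgtype='linear'
    )

    # bounded least-squares refinement; returns the optimized parameters
    p = fit_mpk_parms_1d(
        p0, x, f, 'pvoigt', len(pk_pos_0), bgtype='linear', bnds=bnds
    )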
@@ -360,21 +370,21 @@ def estimate_mpk_parms_1d( num_pks = len(pk_pos_0) center_bnd = np.atleast_1d(center_bnd) - if(len(center_bnd) < 2): - center_bnd = center_bnd*np.ones(num_pks) + if len(center_bnd) < 2: + center_bnd = center_bnd * np.ones(num_pks) if fwhm_guess is None: - fwhm_guess = (np.max(x) - np.min(x))/(20.*num_pks) + fwhm_guess = (np.max(x) - np.min(x)) / (20.0 * num_pks) fwhm_guess = np.atleast_1d(fwhm_guess) - if(len(fwhm_guess) < 2): - fwhm_guess = fwhm_guess*np.ones(num_pks) + if len(fwhm_guess) < 2: + fwhm_guess = fwhm_guess * np.ones(num_pks) min_val = np.min(f) # estimate background with SNIP1d bkg = snip1d( np.atleast_2d(f), - w=int(np.floor(0.25*len(f))), + w=int(np.floor(0.25 * len(f))), max_workers=1, ).flatten() @@ -414,20 +424,16 @@ def estimate_mpk_parms_1d( amp_guess = _amplitude_guess( x, pk_pos_0[ii], fsubtr, fwhm_guess[ii] ) - p0tmp[ii, :] = [ - amp_guess, - pk_pos_0[ii], - fwhm_guess[ii] - ] + p0tmp[ii, :] = [amp_guess, pk_pos_0[ii], fwhm_guess[ii]] p0tmp_lb[ii, :] = [ - amp_guess*amp_lim_mult[0], + amp_guess * amp_lim_mult[0], pk_pos_0[ii] - center_bnd[ii], - fwhm_guess[ii]*fwhm_lim_mult[0] + fwhm_guess[ii] * fwhm_lim_mult[0], ] p0tmp_ub[ii, :] = [ - amp_guess*amp_lim_mult[1], + amp_guess * amp_lim_mult[1], pk_pos_0[ii] + center_bnd[ii], - fwhm_guess[ii]*fwhm_lim_mult[1] + fwhm_guess[ii] * fwhm_lim_mult[1], ] elif pktype == 'pvoigt': # x is just 2theta values @@ -436,23 +442,18 @@ def estimate_mpk_parms_1d( amp_guess = _amplitude_guess( x, pk_pos_0[ii], fsubtr, fwhm_guess[ii] ) - p0tmp[ii, :] = [ - amp_guess, - pk_pos_0[ii], - fwhm_guess[ii], - 0.5 - ] + p0tmp[ii, :] = [amp_guess, pk_pos_0[ii], fwhm_guess[ii], 0.5] p0tmp_lb[ii, :] = [ - amp_guess*amp_lim_mult[0], + amp_guess * amp_lim_mult[0], pk_pos_0[ii] - center_bnd[ii], - fwhm_guess[ii]*fwhm_lim_mult[0], - 0.0 + fwhm_guess[ii] * fwhm_lim_mult[0], + 0.0, ] p0tmp_ub[ii, :] = [ - (amp_guess - min_val + 1.)*amp_lim_mult[1], + (amp_guess - min_val + 1.0) * amp_lim_mult[1], pk_pos_0[ii] + center_bnd[ii], - fwhm_guess[ii]*fwhm_lim_mult[1], - 1.0 + fwhm_guess[ii] * fwhm_lim_mult[1], + 1.0, ] elif pktype == 'split_pvoigt': # x is just 2theta values @@ -467,23 +468,23 @@ def estimate_mpk_parms_1d( fwhm_guess[ii], fwhm_guess[ii], 0.5, - 0.5 + 0.5, ] p0tmp_lb[ii, :] = [ - amp_guess*amp_lim_mult[0], + amp_guess * amp_lim_mult[0], pk_pos_0[ii] - center_bnd[ii], - fwhm_guess[ii]*fwhm_lim_mult[0], - fwhm_guess[ii]*fwhm_lim_mult[0], + fwhm_guess[ii] * fwhm_lim_mult[0], + fwhm_guess[ii] * fwhm_lim_mult[0], + 0.0, 0.0, - 0.0 ] p0tmp_ub[ii, :] = [ - amp_guess*amp_lim_mult[1], + amp_guess * amp_lim_mult[1], pk_pos_0[ii] + center_bnd[ii], - fwhm_guess[ii]*fwhm_lim_mult[1], - fwhm_guess[ii]*fwhm_lim_mult[1], + fwhm_guess[ii] * fwhm_lim_mult[1], + fwhm_guess[ii] * fwhm_lim_mult[1], + 1.0, 1.0, - 1.0 ] elif pktype == 'pink_beam_dcs': # x is just 2theta values @@ -503,24 +504,24 @@ def estimate_mpk_parms_1d( fwhm_guess[ii], ] p0tmp_lb[ii, :] = [ - amp_guess*amp_lim_mult[0], + amp_guess * amp_lim_mult[0], pk_pos_0[ii] - center_bnd[ii], -1e5, -1e5, -1e5, -1e5, - fwhm_guess[ii]*fwhm_lim_mult[0], - fwhm_guess[ii]*fwhm_lim_mult[0], + fwhm_guess[ii] * fwhm_lim_mult[0], + fwhm_guess[ii] * fwhm_lim_mult[0], ] p0tmp_ub[ii, :] = [ - amp_guess*amp_lim_mult[1], + amp_guess * amp_lim_mult[1], pk_pos_0[ii] + center_bnd[ii], 1e5, 1e5, 1e5, 1e5, - fwhm_guess[ii]*fwhm_lim_mult[1], - fwhm_guess[ii]*fwhm_lim_mult[1], + fwhm_guess[ii] * fwhm_lim_mult[1], + fwhm_guess[ii] * fwhm_lim_mult[1], ] num_pk_parms = len(p0tmp.ravel()) @@ -627,10 +628,10 @@ 
def fit_pk_obj_1d(p, x, f0, pktype):
         f = pkfuncs.tanh_stepdown_nobg(p, x)
     elif pktype == 'dcs_pinkbeam':
         f = pkfuncs.pink_beam_dcs(p, x)
-    ww = 1./np.sqrt(f0)
+    ww = 1.0 / np.sqrt(f0)
     ww[np.isnan(ww)] = 0.0
-    resd = (f - f0)*ww
+    resd = (f - f0) * ww
     return resd
@@ -645,7 +646,7 @@ def fit_pk_obj_1d_bnded(p, x, f0, pktype, weight, lb, ub):
         f = pkfuncs.split_pvoigt1d(p, x)
     elif pktype == 'dcs_pinkbeam':
         f = pkfuncs.pink_beam_dcs(p, x)
-    ww = 1./np.sqrt(f0)
+    ww = 1.0 / np.sqrt(f0)
     ww[np.isnan(ww)] = 0.0

     num_data = len(f)
@@ -656,8 +657,9 @@ def fit_pk_obj_1d_bnded(p, x, f0, pktype, weight, lb, ub):
     resd[:num_data] = f - f0
     for ii in range(num_parm):
         if lb[ii] is not None:
-            resd[num_data + ii] = \
-                weight*np.max([-(p[ii] - lb[ii]), 0., (p[ii] - ub[ii])])
+            resd[num_data + ii] = weight * np.max(
+                [-(p[ii] - lb[ii]), 0.0, (p[ii] - ub[ii])]
+            )

     return resd
@@ -700,14 +702,16 @@ def estimate_pk_parms_2d(x, y, f, pktype):
     """
     bg0 = np.mean([f[0, 0], f[-1, 0], f[-1, -1], f[0, -1]])
-    bg1x = (np.mean([f[-1, -1], f[0, -1]]) - np.mean([f[0, 0], f[-1, 0]])) \
-        / (x[0, -1] - x[0, 0])
-    bg1y = (np.mean([f[-1, -1], f[-1, 0]]) - np.mean([f[0, 0], f[0, -1]])) \
-        / (y[-1, 0] - y[0, 0])
+    bg1x = (np.mean([f[-1, -1], f[0, -1]]) - np.mean([f[0, 0], f[-1, 0]])) / (
+        x[0, -1] - x[0, 0]
+    )
+    bg1y = (np.mean([f[-1, -1], f[-1, 0]]) - np.mean([f[0, 0], f[0, -1]])) / (
+        y[-1, 0] - y[0, 0]
+    )

     fnobg = f - (bg0 + bg1x * x + bg1y * y)

-    labels, numlabels = imgproc.label(fnobg > 0.5*np.max(fnobg))
+    labels, numlabels = imgproc.label(fnobg > 0.5 * np.max(fnobg))

     # looks for the largest peak
     areas = np.zeros(numlabels)
@@ -727,10 +731,12 @@ def estimate_pk_parms_2d(x, y, f, pktype):
     if pktype == 'gaussian':
         p = [A, x0, y0, FWHMx, FWHMy, bg0, bg1x, bg1y]
     elif pktype == 'gaussian_rot':
-        p = [A, x0, y0, FWHMx, FWHMy, 0., bg0, bg1x, bg1y]
+        p = [A, x0, y0, FWHMx, FWHMy, 0.0, bg0, bg1x, bg1y]
     elif pktype == 'split_pvoigt_rot':
+        # fmt: off
         p = [A, x0, y0, FWHMx, FWHMx, FWHMy, FWHMy,
              0.5, 0.5, 0.5, 0.5, 0., bg0, bg1x, bg1y]
+        # fmt: on

     p = np.array(p)
     return p
@@ -831,15 +837,15 @@ def goodness_of_fit(f, f0):
     """

-    R = np.sum((f - f0)**2) / np.sum(f0**2)
-    Rw = np.sum(np.abs(f0 * (f - f0)**2)) / np.sum(np.abs(f0**3))
+    R = np.sum((f - f0) ** 2) / np.sum(f0**2)
+    Rw = np.sum(np.abs(f0 * (f - f0) ** 2)) / np.sum(np.abs(f0**3))

     return R, Rw


-def direct_pk_analysis(x, f,
-                       remove_bg=True, low_int=1.,
-                       edge_pts=3, pts_per_meas=100):
+def direct_pk_analysis(
+    x, f, remove_bg=True, low_int=1.0, edge_pts=3, pts_per_meas=100
+):
     """
     Analyze a single peak that is not well matched to any analytic functions

@@ -873,12 +879,12 @@ def direct_pk_analysis(x, f,

     # subtract background, assumed linear
     if remove_bg:
-        bg_data = np.hstack((f[:(edge_pts+1)], f[-edge_pts:]))
-        bg_pts = np.hstack((x[:(edge_pts+1)], x[-edge_pts:]))
+        bg_data = np.hstack((f[: (edge_pts + 1)], f[-edge_pts:]))
+        bg_pts = np.hstack((x[: (edge_pts + 1)], x[-edge_pts:]))

         bg_parm = np.polyfit(bg_pts, bg_data, 1)

-        f = f - (bg_parm[0]*x + bg_parm[1])  # pull out high background
+        f = f - (bg_parm[0] * x + bg_parm[1])  # pull out high background

     f = f - np.min(f)  # set the minimum to 0

@@ -886,7 +892,7 @@
         plt.plot(x, f, 'r')

     # make a fine grid of points
-    spacing = np.diff(x)[0]/pts_per_meas
+    spacing = np.diff(x)[0] / pts_per_meas
     xfine = np.arange(np.min(x), np.max(x) + spacing, spacing)
     ffine = np.interp(xfine, x, f)

@@ -901,16 +907,16 @@

     # center of mass calculation
     # !!!
this cutoff value is arbitrary, maybe set higher? - if(total_int < low_int): + if total_int < low_int: com = float('NaN') FWHM = float('NaN') total_int = total_int print('Analysis Failed... Intensity too low') else: - com = np.sum(xfine*ffine)/np.sum(ffine) + com = np.sum(xfine * ffine) / np.sum(ffine) - a = np.abs(ffine[cen_index+1:]-A/2.) - b = np.abs(ffine[:cen_index]-A/2.) + a = np.abs(ffine[cen_index + 1 :] - A / 2.0) + b = np.abs(ffine[:cen_index] - A / 2.0) # this is a check to see if the peak is falling out of the bnds if a.size == 0 or b.size == 0: @@ -962,32 +968,28 @@ def calc_pk_integrated_intensities(p, x, pktype, num_pks): ints = np.zeros(num_pks) if pktype == 'gaussian' or pktype == 'lorentzian': - p_fit = np.reshape(p[:3*num_pks], [num_pks, 3]) + p_fit = np.reshape(p[: 3 * num_pks], [num_pks, 3]) elif pktype == 'pvoigt': - p_fit = np.reshape(p[:4*num_pks], [num_pks, 4]) + p_fit = np.reshape(p[: 4 * num_pks], [num_pks, 4]) elif pktype == 'split_pvoigt': - p_fit = np.reshape(p[:6*num_pks], [num_pks, 6]) + p_fit = np.reshape(p[: 6 * num_pks], [num_pks, 6]) for ii in np.arange(num_pks): if pktype == 'gaussian': ints[ii] = integrate.simpson( - pkfuncs._gaussian1d_no_bg(p_fit[ii], x), - x + pkfuncs._gaussian1d_no_bg(p_fit[ii], x), x ) elif pktype == 'lorentzian': ints[ii] = integrate.simpson( - pkfuncs._lorentzian1d_no_bg(p_fit[ii], x), - x + pkfuncs._lorentzian1d_no_bg(p_fit[ii], x), x ) elif pktype == 'pvoigt': ints[ii] = integrate.simpson( - pkfuncs._pvoigt1d_no_bg(p_fit[ii], x), - x + pkfuncs._pvoigt1d_no_bg(p_fit[ii], x), x ) elif pktype == 'split_pvoigt': ints[ii] = integrate.simpson( - pkfuncs._split_pvoigt1d_no_bg(p_fit[ii], x), - x + pkfuncs._split_pvoigt1d_no_bg(p_fit[ii], x), x ) return ints diff --git a/hexrd/core/fitting/peakfunctions.py b/hexrd/core/fitting/peakfunctions.py index 8ae64ca31..08e657029 100644 --- a/hexrd/core/fitting/peakfunctions.py +++ b/hexrd/core/fitting/peakfunctions.py @@ -29,10 +29,15 @@ from numba import njit import copy from hexrd.core import constants -from hexrd.core.constants import c_erf, cnum_exp1exp, cden_exp1exp, c_coeff_exp1exp +from hexrd.core.constants import ( + c_erf, + cnum_exp1exp, + cden_exp1exp, + c_coeff_exp1exp, +) gauss_width_fact = constants.sigma_to_fwhm -lorentz_width_fact = 2. +lorentz_width_fact = 2.0 # FIXME: we need this for the time being to be able to parse multipeak fitting # results; need to wrap all this up in a class in the future! @@ -41,7 +46,7 @@ 'lorentzian': 3, 'pvoigt': 4, 'split_pvoigt': 6, - 'pink_beam_dcs': 8 + 'pink_beam_dcs': 8, } """ @@ -65,10 +70,12 @@ def erfc(x): a1, a2, a3, a4, a5, p = c_erf # A&S formula 7.1.26 - t = 1.0/(1.0 + p*x) - y = 1. - (((((a5*t + a4)*t) + a3)*t + a2)*t + a1)*t*np.exp(-x*x) - erf = sign*y # erf(-x) = -erf(x) - return 1. 
- erf + t = 1.0 / (1.0 + p * x) + y = 1.0 - (((((a5 * t + a4) * t) + a3) * t + a2) * t + a1) * t * np.exp( + -x * x + ) + erf = sign * y # erf(-x) = -erf(x) + return 1.0 - erf """ @@ -83,10 +90,10 @@ def erfc(x): def exp1exp_under1(x): f = np.zeros(x.shape).astype(np.complex128) for i in range(6): - xx = x**(i+1) - f += c_coeff_exp1exp[i]*xx + xx = x ** (i + 1) + f += c_coeff_exp1exp[i] * xx - return (f - np.log(x) - np.euler_gamma)*np.exp(x) + return (f - np.log(x) - np.euler_gamma) * np.exp(x) """ @@ -104,21 +111,21 @@ def exp1exp_over1(x): den = np.zeros(x.shape).astype(np.complex128) for i in range(11): - p = 10-i + p = 10 - i if p != 0: xx = x**p - num += cnum_exp1exp[i]*xx - den += cden_exp1exp[i]*xx + num += cnum_exp1exp[i] * xx + den += cden_exp1exp[i] * xx else: num += cnum_exp1exp[i] den += cden_exp1exp[i] - return (num/den)*(1./x) + return (num / den) * (1.0 / x) @njit(cache=True, nogil=True) def exp1exp(x): - mask = np.sign(x.real)*np.abs(x) > 1. + mask = np.sign(x.real) * np.abs(x) > 1.0 f = np.zeros(x.shape).astype(np.complex128) f[mask] = exp1exp_over1(x[mask]) @@ -126,6 +133,7 @@ def exp1exp(x): return f + # ============================================================================= # 1-D Gaussian Functions # ============================================================================= @@ -144,9 +152,9 @@ def _unit_gaussian(p, x): x0 = p[0] FWHM = p[1] - sigma = FWHM/gauss_width_fact + sigma = FWHM / gauss_width_fact - f = np.exp(-(x - x0)**2/(2.*sigma**2.)) + f = np.exp(-((x - x0) ** 2) / (2.0 * sigma**2.0)) return f @@ -161,7 +169,7 @@ def _gaussian1d_no_bg(p, x): """ A = p[0] - f = A*_unit_gaussian(p[[1, 2]], x) + f = A * _unit_gaussian(p[[1, 2]], x) return f @@ -178,7 +186,7 @@ def gaussian1d(p, x): bg0 = p[3] bg1 = p[4] - f = _gaussian1d_no_bg(p[:3], x) + bg0 + bg1*x + f = _gaussian1d_no_bg(p[:3], x) + bg0 + bg1 * x return f @@ -196,12 +204,15 @@ def _gaussian1d_no_bg_deriv(p, x): x0 = p[1] FWHM = p[2] - sigma = FWHM/gauss_width_fact + sigma = FWHM / gauss_width_fact - dydx0 = _gaussian1d_no_bg(p, x)*((x - x0)/(sigma**2.)) + dydx0 = _gaussian1d_no_bg(p, x) * ((x - x0) / (sigma**2.0)) dydA = _unit_gaussian(p[[1, 2]], x) - dydFWHM = _gaussian1d_no_bg(p, x) \ - * ((x - x0)**2./(sigma**3.))/gauss_width_fact + dydFWHM = ( + _gaussian1d_no_bg(p, x) + * ((x - x0) ** 2.0 / (sigma**3.0)) + / gauss_width_fact + ) d_mat = np.zeros((len(p), len(x))) @@ -224,7 +235,7 @@ def gaussian1d_deriv(p, x): d_mat = np.zeros((len(p), len(x))) d_mat[0:3, :] = _gaussian1d_no_bg_deriv(p[0:3], x) - d_mat[3, :] = 1. 
+ d_mat[3, :] = 1.0 d_mat[4, :] = x return d_mat @@ -246,9 +257,9 @@ def _unit_lorentzian(p, x): x0 = p[0] FWHM = p[1] - gamma = FWHM/lorentz_width_fact + gamma = FWHM / lorentz_width_fact - f = gamma**2 / ((x-x0)**2 + gamma**2) + f = gamma**2 / ((x - x0) ** 2 + gamma**2) return f @@ -263,7 +274,7 @@ def _lorentzian1d_no_bg(p, x): """ A = p[0] - f = A*_unit_lorentzian(p[[1, 2]], x) + f = A * _unit_lorentzian(p[[1, 2]], x) return f @@ -281,7 +292,7 @@ def lorentzian1d(p, x): bg0 = p[3] bg1 = p[4] - f = _lorentzian1d_no_bg(p[:3], x)+bg0+bg1*x + f = _lorentzian1d_no_bg(p[:3], x) + bg0 + bg1 * x return f @@ -299,12 +310,17 @@ def _lorentzian1d_no_bg_deriv(p, x): x0 = p[1] FWHM = p[2] - gamma = FWHM/lorentz_width_fact + gamma = FWHM / lorentz_width_fact - dydx0 = _lorentzian1d_no_bg(p, x)*((2.*(x-x0))/((x-x0)**2 + gamma**2)) + dydx0 = _lorentzian1d_no_bg(p, x) * ( + (2.0 * (x - x0)) / ((x - x0) ** 2 + gamma**2) + ) dydA = _unit_lorentzian(p[[1, 2]], x) - dydFWHM = _lorentzian1d_no_bg(p, x) \ - * ((2.*(x-x0)**2.)/(gamma*((x-x0)**2 + gamma**2)))/lorentz_width_fact + dydFWHM = ( + _lorentzian1d_no_bg(p, x) + * ((2.0 * (x - x0) ** 2.0) / (gamma * ((x - x0) ** 2 + gamma**2))) + / lorentz_width_fact + ) d_mat = np.zeros((len(p), len(x))) d_mat[0, :] = dydA @@ -326,7 +342,7 @@ def lorentzian1d_deriv(p, x): d_mat = np.zeros((len(p), len(x))) d_mat[0:3, :] = _lorentzian1d_no_bg_deriv(p[0:3], x) - d_mat[3, :] = 1. + d_mat[3, :] = 1.0 d_mat[4, :] = x return d_mat @@ -336,6 +352,7 @@ def lorentzian1d_deriv(p, x): # 1-D Psuedo Voigt Functions # ============================================================================= + # Split the unit function so this can be called for 2d and 3d functions def _unit_pvoigt1d(p, x): """ @@ -349,7 +366,7 @@ def _unit_pvoigt1d(p, x): n = p[2] - f = (n*_unit_gaussian(p[:2], x)+(1.-n)*_unit_lorentzian(p[:2], x)) + f = n * _unit_gaussian(p[:2], x) + (1.0 - n) * _unit_lorentzian(p[:2], x) return f @@ -364,7 +381,7 @@ def _pvoigt1d_no_bg(p, x): """ A = p[0] - f = A*_unit_pvoigt1d(p[[1, 2, 3]], x) + f = A * _unit_pvoigt1d(p[[1, 2, 3]], x) return f @@ -381,7 +398,7 @@ def pvoigt1d(p, x): bg0 = p[4] bg1 = p[5] - f = _pvoigt1d_no_bg(p[:4], x) + bg0 + bg1*x + f = _pvoigt1d_no_bg(p[:4], x) + bg0 + bg1 * x return f @@ -390,6 +407,7 @@ def pvoigt1d(p, x): # 1-D Split Psuedo Voigt Functions # ============================================================================= + def _split_pvoigt1d_no_bg(p, x): """ Required Arguments: @@ -412,11 +430,11 @@ def _split_pvoigt1d_no_bg(p, x): # + right = np.where(xr)[0] - f[right] = A*_unit_pvoigt1d(p[[1, 3, 5]], x[right]) + f[right] = A * _unit_pvoigt1d(p[[1, 3, 5]], x[right]) # - left = np.where(xl)[0] - f[left] = A*_unit_pvoigt1d(p[[1, 2, 4]], x[left]) + f[left] = A * _unit_pvoigt1d(p[[1, 2, 4]], x[left]) return f @@ -434,7 +452,7 @@ def split_pvoigt1d(p, x): bg0 = p[6] bg1 = p[7] - f = _split_pvoigt1d_no_bg(p[:6], x) + bg0 + bg1*x + f = _split_pvoigt1d_no_bg(p[:6], x) + bg0 + bg1 * x return f @@ -459,13 +477,13 @@ def split_pvoigt1d(p, x): @njit(cache=True, nogil=True) def _calc_alpha(alpha, x0): a0, a1 = alpha - return (a0 + a1*np.tan(np.radians(0.5*x0))) + return a0 + a1 * np.tan(np.radians(0.5 * x0)) @njit(cache=True, nogil=True) def _calc_beta(beta, x0): b0, b1 = beta - return b0 + b1*np.tan(np.radians(0.5*x0)) + return b0 + b1 * np.tan(np.radians(0.5 * x0)) @njit(cache=True, nogil=True) @@ -480,20 +498,25 @@ def _mixing_factor_pv(fwhm_g, fwhm_l): @DETAILS: calculates the mixing factor eta to best approximate voight peak shapes """ - fwhm = 
fwhm_g**5 + 2.69269 * fwhm_g**4 * fwhm_l + \ - 2.42843 * fwhm_g**3 * fwhm_l**2 + \ - 4.47163 * fwhm_g**2 * fwhm_l**3 +\ - 0.07842 * fwhm_g * fwhm_l**4 +\ - fwhm_l**5 + fwhm = ( + fwhm_g**5 + + 2.69269 * fwhm_g**4 * fwhm_l + + 2.42843 * fwhm_g**3 * fwhm_l**2 + + 4.47163 * fwhm_g**2 * fwhm_l**3 + + 0.07842 * fwhm_g * fwhm_l**4 + + fwhm_l**5 + ) fwhm = fwhm**0.20 - eta = 1.36603 * (fwhm_l/fwhm) - \ - 0.47719 * (fwhm_l/fwhm)**2 + \ - 0.11116 * (fwhm_l/fwhm)**3 - if eta < 0.: - eta = 0. - elif eta > 1.: - eta = 1. + eta = ( + 1.36603 * (fwhm_l / fwhm) + - 0.47719 * (fwhm_l / fwhm) ** 2 + + 0.11116 * (fwhm_l / fwhm) ** 3 + ) + if eta < 0.0: + eta = 0.0 + elif eta > 1.0: + eta = 1.0 return eta, fwhm @@ -517,15 +540,15 @@ def _gaussian_pink_beam(p, x): del_tth = x - x0 sigsqr = fwhm_g**2 - f1 = alpha*sigsqr + 2.0*del_tth - f2 = beta*sigsqr - 2.0*del_tth - f3 = np.sqrt(2.0)*fwhm_g + f1 = alpha * sigsqr + 2.0 * del_tth + f2 = beta * sigsqr - 2.0 * del_tth + f3 = np.sqrt(2.0) * fwhm_g - u = 0.5*alpha*f1 - v = 0.5*beta*f2 + u = 0.5 * alpha * f1 + v = 0.5 * beta * f2 - y = (f1-del_tth)/f3 - z = (f2+del_tth)/f3 + y = (f1 - del_tth) / f3 + z = (f2 + del_tth) / f3 t1 = erfc(y) t2 = erfc(z) @@ -533,11 +556,11 @@ def _gaussian_pink_beam(p, x): g = np.zeros(x.shape) zmask = np.abs(del_tth) > 5.0 - g[~zmask] = \ - (0.5*(alpha*beta)/(alpha + beta)) * np.exp(u[~zmask])*t1[~zmask] \ - + np.exp(v[~zmask])*t2[~zmask] + g[~zmask] = (0.5 * (alpha * beta) / (alpha + beta)) * np.exp( + u[~zmask] + ) * t1[~zmask] + np.exp(v[~zmask]) * t2[~zmask] mask = np.isnan(g) - g[mask] = 0. + g[mask] = 0.0 g *= A / g.max() return g @@ -561,19 +584,19 @@ def _lorentzian_pink_beam(p, x): del_tth = x - x0 - p = -alpha*del_tth + 1j*0.5*alpha*fwhm_l - q = -beta*del_tth + 1j*0.5*beta*fwhm_l + p = -alpha * del_tth + 1j * 0.5 * alpha * fwhm_l + q = -beta * del_tth + 1j * 0.5 * beta * fwhm_l y = np.zeros(x.shape) f1 = exp1exp(p) f2 = exp1exp(q) - y = -(alpha*beta)/(np.pi*(alpha + beta))*(f1 + f2).imag + y = -(alpha * beta) / (np.pi * (alpha + beta)) * (f1 + f2).imag mask = np.isnan(y) - y[mask] = 0. + y[mask] = 0.0 ymax = y.max() - y *= A/ymax + y *= A / ymax return y @@ -607,7 +630,7 @@ def _pink_beam_dcs_no_bg(p, x): G = _gaussian_pink_beam(p_g, x) L = _lorentzian_pink_beam(p_l, x) - return eta*L + (1. - eta)*G + return eta * L + (1.0 - eta) * G def pink_beam_dcs(p, x): @@ -621,11 +644,12 @@ def pink_beam_dcs(p, x): p has the following 10 parameters p = [A, x0, alpha0, alpha1, beta0, beta1, fwhm_g, fwhm_l, bkg_c0, bkg_c1] """ - return _pink_beam_dcs_no_bg(p[:-2], x) + p[-2] + p[-1]*x + return _pink_beam_dcs_no_bg(p[:-2], x) + p[-2] + p[-1] * x def pink_beam_dcs_lmfit( - x, A, x0, alpha0, alpha1, beta0, beta1, fwhm_g, fwhm_l): + x, A, x0, alpha0, alpha1, beta0, beta1, fwhm_g, fwhm_l +): """ @author Saransh Singh, Lawrence Livermore National Lab @date 10/18/2021 SS 1.0 original @@ -647,7 +671,7 @@ def pink_beam_dcs_lmfit( G = _gaussian_pink_beam(p_g, x) L = _lorentzian_pink_beam(p_l, x) - return eta*L + (1. 
- eta)*G + return eta * L + (1.0 - eta) * G """ @@ -675,7 +699,7 @@ def tanh_stepdown_nobg(p, x): x0 = p[1] w = p[2] - f = A*(0.5*(1.-np.tanh((x-x0)/w))) + f = A * (0.5 * (1.0 - np.tanh((x - x0) / w))) return f @@ -684,12 +708,13 @@ def tanh_stepdown_nobg(p, x): # 2-D Rotation Coordinate Transform # ============================================================================= + def _2d_coord_transform(theta, x0, y0, x, y): - xprime = np.cos(theta)*x+np.sin(theta)*y - yprime = -np.sin(theta)*x+np.cos(theta)*y + xprime = np.cos(theta) * x + np.sin(theta) * y + yprime = -np.sin(theta) * x + np.cos(theta) * y - x0prime = np.cos(theta)*x0+np.sin(theta)*y0 - y0prime = -np.sin(theta)*x0+np.cos(theta)*y0 + x0prime = np.cos(theta) * x0 + np.sin(theta) * y0 + y0prime = -np.sin(theta) * x0 + np.cos(theta) * y0 return x0prime, y0prime, xprime, yprime @@ -698,6 +723,7 @@ def _2d_coord_transform(theta, x0, y0, x, y): # 2-D Gaussian Function # ============================================================================= + def _gaussian2d_no_bg(p, x, y): """ Required Arguments: @@ -710,7 +736,7 @@ def _gaussian2d_no_bg(p, x, y): """ A = p[0] - f = A*_unit_gaussian(p[[1, 3]], x)*_unit_gaussian(p[[2, 4]], y) + f = A * _unit_gaussian(p[[1, 3]], x) * _unit_gaussian(p[[2, 4]], y) return f @@ -728,7 +754,8 @@ def _gaussian2d_rot_no_bg(p, x, y): theta = p[5] x0prime, y0prime, xprime, yprime = _2d_coord_transform( - theta, p[1], p[2], x, y) + theta, p[1], p[2], x, y + ) # this copy was needed so original parameters set isn't changed newp = copy.copy(p) @@ -756,7 +783,7 @@ def gaussian2d_rot(p, x, y): bg1x = p[7] bg1y = p[8] - f = _gaussian2d_rot_no_bg(p[:6], x, y)+(bg0+bg1x*x+bg1y*y) + f = _gaussian2d_rot_no_bg(p[:6], x, y) + (bg0 + bg1x * x + bg1y * y) return f @@ -775,7 +802,7 @@ def gaussian2d(p, x, y): bg1x = p[6] bg1y = p[7] - f = _gaussian2d_no_bg(p[:5], x, y)+(bg0+bg1x*x+bg1y*y) + f = _gaussian2d_no_bg(p[:5], x, y) + (bg0 + bg1x * x + bg1y * y) return f @@ -783,6 +810,7 @@ def gaussian2d(p, x, y): # 2-D Split Psuedo Voigt Function # ============================================================================= + def _split_pvoigt2d_no_bg(p, x, y): """ Required Arguments: @@ -808,23 +836,35 @@ def _split_pvoigt2d_no_bg(p, x, y): # ++ q1 = np.where(xr & yr) - f[q1] = A*_unit_pvoigt1d(p[[1, 4, 8]], x[q1]) * \ - _unit_pvoigt1d(p[[2, 6, 10]], y[q1]) + f[q1] = ( + A + * _unit_pvoigt1d(p[[1, 4, 8]], x[q1]) + * _unit_pvoigt1d(p[[2, 6, 10]], y[q1]) + ) # +- q2 = np.where(xr & yl) - f[q2] = A*_unit_pvoigt1d(p[[1, 4, 8]], x[q2]) * \ - _unit_pvoigt1d(p[[2, 5, 9]], y[q2]) + f[q2] = ( + A + * _unit_pvoigt1d(p[[1, 4, 8]], x[q2]) + * _unit_pvoigt1d(p[[2, 5, 9]], y[q2]) + ) # -+ q3 = np.where(xl & yr) - f[q3] = A*_unit_pvoigt1d(p[[1, 3, 7]], x[q3]) * \ - _unit_pvoigt1d(p[[2, 6, 10]], y[q3]) + f[q3] = ( + A + * _unit_pvoigt1d(p[[1, 3, 7]], x[q3]) + * _unit_pvoigt1d(p[[2, 6, 10]], y[q3]) + ) # -- q4 = np.where(xl & yl) - f[q4] = A*_unit_pvoigt1d(p[[1, 3, 7]], x[q4]) * \ - _unit_pvoigt1d(p[[2, 5, 9]], y[q4]) + f[q4] = ( + A + * _unit_pvoigt1d(p[[1, 3, 7]], x[q4]) + * _unit_pvoigt1d(p[[2, 5, 9]], y[q4]) + ) return f @@ -843,7 +883,8 @@ def _split_pvoigt2d_rot_no_bg(p, x, y): theta = p[11] x0prime, y0prime, xprime, yprime = _2d_coord_transform( - theta, p[1], p[2], x, y) + theta, p[1], p[2], x, y + ) # this copy was needed so original parameters set isn't changed newp = copy.copy(p) @@ -872,7 +913,7 @@ def split_pvoigt2d_rot(p, x, y): bg1x = p[13] bg1y = p[14] - f = _split_pvoigt2d_rot_no_bg(p[:12], x, y)+(bg0+bg1x*x+bg1y*y) + f 
= _split_pvoigt2d_rot_no_bg(p[:12], x, y) + (bg0 + bg1x * x + bg1y * y) return f @@ -881,6 +922,7 @@ def split_pvoigt2d_rot(p, x, y): # 3-D Gaussian Function # ============================================================================= + def _gaussian3d_no_bg(p, x, y, z): """ Required Arguments: @@ -894,9 +936,12 @@ def _gaussian3d_no_bg(p, x, y, z): """ A = p[0] - f = A * _unit_gaussian(p[[1, 4]], x) \ - * _unit_gaussian(p[[2, 5]], y) \ + f = ( + A + * _unit_gaussian(p[[1, 4]], x) + * _unit_gaussian(p[[2, 5]], y) * _unit_gaussian(p[[3, 6]], z) + ) return f @@ -917,7 +962,7 @@ def gaussian3d(p, x, y, z): bg1y = p[9] bg1z = p[10] - f = _gaussian3d_no_bg(p[:5], x, y)+(bg0+bg1x*x+bg1y*y+bg1z*z) + f = _gaussian3d_no_bg(p[:7], x, y, z) + (bg0 + bg1x * x + bg1y * y + bg1z * z) return f @@ -925,6 +970,7 @@ # Multipeak # ============================================================================= + def _mpeak_1d_no_bg(p, x, pktype, num_pks): """ Required Arguments: @@ -947,7 +993,7 @@ def _mpeak_1d_no_bg(p, x, pktype, num_pks): npp = mpeak_nparams_dict[pktype] - p_fit = np.reshape(p[:npp*num_pks], [num_pks, npp]) + p_fit = np.reshape(p[: npp * num_pks], [num_pks, npp]) for ii in np.arange(num_pks): if pktype == 'gaussian': @@ -986,10 +1032,12 @@ def mpeak_1d(p, x, pktype, num_pks, bgtype=None): f = _mpeak_1d_no_bg(p, x, pktype, num_pks) if bgtype == 'linear': - f = f+p[-2]+p[-1]*x # c0=p[-2], c1=p[-1] + f = f + p[-2] + p[-1] * x # c0=p[-2], c1=p[-1] elif bgtype == 'constant': - f = f+p[-1] # c0=p[-1] + f = f + p[-1] # c0=p[-1] elif bgtype == 'quadratic': - f = f+p[-3]+p[-2]*x+p[-1]*x**2 # c0=p[-3], c1=p[-2], c2=p[-1], + f = ( + f + p[-3] + p[-2] * x + p[-1] * x**2 + ) # c0=p[-3], c1=p[-2], c2=p[-1], return f diff --git a/hexrd/core/fitting/spectrum.py b/hexrd/core/fitting/spectrum.py index 4f35b3ad3..264f9f9e4 100644 --- a/hexrd/core/fitting/spectrum.py +++ b/hexrd/core/fitting/spectrum.py @@ -6,7 +6,20 @@ from hexrd.core.constants import fwhm_to_sigma from hexrd.core.imageutil import snip1d -from .utils import _calc_alpha, _calc_beta, _mixing_factor_pv, _gaussian_pink_beam, _lorentzian_pink_beam, _parameter_arg_constructor, _extract_parameters_by_name, _set_bound_constraints, _set_refinement_by_name, _set_width_mixing_bounds, _set_equality_constraints, _set_peak_center_bounds +from .utils import ( + _calc_alpha, + _calc_beta, + _mixing_factor_pv, + _gaussian_pink_beam, + _lorentzian_pink_beam, + _parameter_arg_constructor, + _extract_parameters_by_name, + _set_bound_constraints, + _set_refinement_by_name, + _set_width_mixing_bounds, + _set_equality_constraints, + _set_peak_center_bounds, +) # ============================================================================= # PARAMETERS # ============================================================================= @@ -17,16 +30,22 @@ 'lorentzian': ['amp', 'cen', 'fwhm'], 'pvoigt': ['amp', 'cen', 'fwhm', 'mixing'], 'split_pvoigt': ['amp', 'cen', 'fwhm_l', 'fwhm_h', 'mixing_l', 'mixing_h'], - 'pink_beam_dcs': ['amp', 'cen', - 'alpha0', 'alpha1', - 'beta0', 'beta1', - 'fwhm_g', 'fwhm_l'], + 'pink_beam_dcs': [ + 'amp', + 'cen', + 'alpha0', + 'alpha1', + 'beta0', + 'beta1', + 'fwhm_g', + 'fwhm_l', + ], 'constant': ['c0'], 'linear': ['c0', 'c1'], 'quadratic': ['c0', 'c1', 'c2'], 'cubic': ['c0', 'c1', 'c2', 'c3'], 'quartic': ['c0', 'c1', 'c2', 'c3', 'c4'], - 'quintic': ['c0', 'c1', 'c2', 'c3', 'c4', 'c5'] + 'quintic': ['c0', 'c1', 'c2', 'c3', 'c4', 'c5'], } num_func_params = dict.fromkeys(_function_dict_1d) @@ -59,25 +78,19 @@ def constant_bkg(x, c0): def linear_bkg(x, c0, c1): # return c0 + c1*x - cheb_cls
= chebyshev.Chebyshev( - [c0, c1], domain=(min(x), max(x)) - ) + cheb_cls = chebyshev.Chebyshev([c0, c1], domain=(min(x), max(x))) return cheb_cls(x) def quadratic_bkg(x, c0, c1, c2): # return c0 + c1*x + c2*x**2 - cheb_cls = chebyshev.Chebyshev( - [c0, c1, c2], domain=(min(x), max(x)) - ) + cheb_cls = chebyshev.Chebyshev([c0, c1, c2], domain=(min(x), max(x))) return cheb_cls(x) def cubic_bkg(x, c0, c1, c2, c3): # return c0 + c1*x + c2*x**2 + c3*x**3 - cheb_cls = chebyshev.Chebyshev( - [c0, c1, c2, c3], domain=(min(x), max(x)) - ) + cheb_cls = chebyshev.Chebyshev([c0, c1, c2, c3], domain=(min(x), max(x))) return cheb_cls(x) @@ -103,24 +116,27 @@ def chebyshev_bkg(x, *args): def gaussian_1d(x, amp, cen, fwhm): - return amp * np.exp(-(x - cen)**2 / (2*(fwhm_to_sigma*fwhm)**2)) + return amp * np.exp(-((x - cen) ** 2) / (2 * (fwhm_to_sigma * fwhm) ** 2)) def lorentzian_1d(x, amp, cen, fwhm): - return amp * (0.5*fwhm)**2 / ((x - cen)**2 + (0.5*fwhm)**2) + return amp * (0.5 * fwhm) ** 2 / ((x - cen) ** 2 + (0.5 * fwhm) ** 2) def pvoigt_1d(x, amp, cen, fwhm, mixing): - return mixing*gaussian_1d(x, amp, cen, fwhm) \ - + (1 - mixing)*lorentzian_1d(x, amp, cen, fwhm) + return mixing * gaussian_1d(x, amp, cen, fwhm) + ( + 1 - mixing + ) * lorentzian_1d(x, amp, cen, fwhm) def split_pvoigt_1d(x, amp, cen, fwhm_l, fwhm_h, mixing_l, mixing_h): idx_l = x <= cen idx_h = x > cen return np.concatenate( - [pvoigt_1d(x[idx_l], amp, cen, fwhm_l, mixing_l), - pvoigt_1d(x[idx_h], amp, cen, fwhm_h, mixing_h)] + [ + pvoigt_1d(x[idx_l], amp, cen, fwhm_l, mixing_l), + pvoigt_1d(x[idx_h], amp, cen, fwhm_h, mixing_h), + ] ) @@ -146,18 +162,24 @@ def pink_beam_dcs(x, amp, cen, alpha0, alpha1, beta0, beta1, fwhm_g, fwhm_l): G = _gaussian_pink_beam(p_g, x) L = _lorentzian_pink_beam(p_l, x) - return eta*L + (1. - eta)*G + return eta * L + (1.0 - eta) * G def _amplitude_guess(x, x0, y, fwhm): - pt_l = np.argmin(np.abs(x - (x0 - 0.5*fwhm))) - pt_h = np.argmin(np.abs(x - (x0 + 0.5*fwhm))) - return np.max(y[pt_l:pt_h + 1]) - - -def _initial_guess(peak_positions, x, f, - pktype='pvoigt', bgtype='linear', - fwhm_guess=None, min_ampl=0.): + pt_l = np.argmin(np.abs(x - (x0 - 0.5 * fwhm))) + pt_h = np.argmin(np.abs(x - (x0 + 0.5 * fwhm))) + return np.max(y[pt_l : pt_h + 1]) + + +def _initial_guess( + peak_positions, + x, + f, + pktype='pvoigt', + bgtype='linear', + fwhm_guess=None, + min_ampl=0.0, +): """ Generate function-specific estimate for multi-peak parameters. @@ -187,22 +209,20 @@ def _initial_guess(peak_positions, x, f, num_pks = len(peak_positions) if fwhm_guess is None: - fwhm_guess = (np.max(x) - np.min(x))/(20.*num_pks) + fwhm_guess = (np.max(x) - np.min(x)) / (20.0 * num_pks) fwhm_guess = np.atleast_1d(fwhm_guess) - if(len(fwhm_guess) < 2): - fwhm_guess = fwhm_guess*np.ones(num_pks) + if len(fwhm_guess) < 2: + fwhm_guess = fwhm_guess * np.ones(num_pks) # estimate background with snip1d # !!! 
using a window size based on abscissa bkg = snip1d( np.atleast_2d(f), - w=int(np.floor(len(f)/num_pks/2.)), + w=int(np.floor(len(f) / num_pks / 2.0)), max_workers=1, ).flatten() - bkg_mod = chebyshev.Chebyshev( - [0., 0.], domain=(min(x), max(x)) - ) + bkg_mod = chebyshev.Chebyshev([0.0, 0.0], domain=(min(x), max(x))) fit_bkg = bkg_mod.fit(x, bkg, 1) coeff = fit_bkg.coef @@ -227,7 +247,7 @@ def _initial_guess(peak_positions, x, f, pkparams[ii, :] = [ max(amp_guess, min_ampl), peak_positions[ii], - fwhm_guess[ii] + fwhm_guess[ii], ] elif pktype == 'pvoigt': # x is just 2theta values @@ -240,7 +260,7 @@ def _initial_guess(peak_positions, x, f, max(amp_guess, min_ampl), peak_positions[ii], fwhm_guess[ii], - 0.5 + 0.5, ] elif pktype == 'split_pvoigt': # x is just 2theta values @@ -255,7 +275,7 @@ def _initial_guess(peak_positions, x, f, fwhm_guess[ii], fwhm_guess[ii], 0.5, - 0.5 + 0.5, ] elif pktype == 'pink_beam_dcs': # x is just 2theta values @@ -282,6 +302,7 @@ def _initial_guess(peak_positions, x, f, return np.hstack([pkparams.flatten(), bgparams]) + # ============================================================================= # MODELS # ============================================================================= @@ -324,9 +345,16 @@ def _build_composite_model(npeaks=1, pktype='gaussian', bgtype='linear'): class SpectrumModel(object): - def __init__(self, data, peak_centers, - pktype='pvoigt', bgtype='linear', - fwhm_init=None, min_ampl=1e-4, min_pk_sep=pk_sep_min): + def __init__( + self, + data, + peak_centers, + pktype='pvoigt', + bgtype='linear', + fwhm_init=None, + min_ampl=1e-4, + min_pk_sep=pk_sep_min, + ): """ Instantiates spectrum model. @@ -352,10 +380,12 @@ def __init__(self, data, peak_centers, """ # peak and background spec - assert pktype in _function_dict_1d.keys(), \ + assert pktype in _function_dict_1d.keys(), ( "peak type '%s' not recognized" % pktype - assert bgtype in _function_dict_1d.keys(), \ + ) + assert bgtype in _function_dict_1d.keys(), ( "background type '%s' not recognized" % bgtype + ) self._pktype = pktype self._bgtype = bgtype @@ -364,10 +394,12 @@ def __init__(self, data, peak_centers, # spectrum data data = np.atleast_2d(data) - assert data.shape[1] == 2, \ - "data must be [[tth_0, int_0], ..., [tth_N, int_N]" - assert len(data > 10), \ - "check your input spectrum; you provided fewer than 10 points." + assert ( + data.shape[1] == 2 + ), "data must be [[tth_0, int_0], ..., [tth_N, int_N]]" + assert ( + len(data) > 10 + ), "check your input spectrum; you provided fewer than 10 points."
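# --- Editor's illustrative sketch; not part of the patch. ---
# The asserts above pin down SpectrumModel's expected input: an (N, 2)
# array of (two-theta, intensity) pairs with more than 10 points, plus a
# list of approximate peak centers. A minimal, hypothetical call (the
# synthetic data below is invented for illustration):
#
#   import numpy as np
#   from hexrd.core.fitting.spectrum import SpectrumModel
#
#   tth = np.linspace(5.0, 15.0, 500)                      # two-theta, degrees
#   y = 100.0 * np.exp(-((tth - 10.0) ** 2) / 0.01) + 5.0  # one synthetic peak
#   sm = SpectrumModel(np.column_stack([tth, y]), [10.0],
#                      pktype='pvoigt', bgtype='linear')
#   result = sm.fit()
# --- end sketch ---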
self._data = data xdata, ydata = data.T @@ -378,7 +410,7 @@ def __init__(self, data, peak_centers, num_peaks = len(peak_centers) if fwhm_init is None: - fwhm_init = np.diff(window_range)/(20.*num_peaks) + fwhm_init = np.diff(window_range) / (20.0 * num_peaks) self._min_pk_sep = min_pk_sep @@ -389,9 +421,13 @@ def __init__(self, data, peak_centers, self._model = spectrum_model p0 = _initial_guess( - self._tth0, xdata, ydata, - pktype=self._pktype, bgtype=self._bgtype, - fwhm_guess=fwhm_init, min_ampl=min_ampl + self._tth0, + xdata, + ydata, + pktype=self._pktype, + bgtype=self._bgtype, + fwhm_guess=fwhm_init, + min_ampl=min_ampl, ) psplit = num_func_params[bgtype] p0_pks = np.reshape(p0[:-psplit], (num_peaks, num_func_params[pktype])) @@ -411,10 +447,10 @@ def __init__(self, data, peak_centers, _set_width_mixing_bounds( initial_params_pks, min_w=fwhm_min, - max_w=0.9*float(np.diff(window_range)) + max_w=0.9 * float(np.diff(window_range)), ) _set_bound_constraints( - initial_params_pks, 'amp', min_val=min_ampl, max_val=1.5*ymax + initial_params_pks, 'amp', min_val=min_ampl, max_val=1.5 * ymax ) _set_peak_center_bounds( initial_params_pks, window_range, min_sep=min_pk_sep @@ -426,8 +462,10 @@ def __init__(self, data, peak_centers, _set_refinement_by_name(initial_params_pks, 'beta', vary=False) _set_equality_constraints( initial_params_pks, - zip(_extract_parameters_by_name(initial_params_pks, 'fwhm_g'), - _extract_parameters_by_name(initial_params_pks, 'fwhm_l')) + zip( + _extract_parameters_by_name(initial_params_pks, 'fwhm_g'), + _extract_parameters_by_name(initial_params_pks, 'fwhm_l'), + ), ) elif pktype == 'split_pvoigt': mparams = _extract_parameters_by_name( @@ -435,22 +473,21 @@ def __init__(self, data, peak_centers, ) for mp in mparams[1:]: _set_equality_constraints( - initial_params_pks, ((mp, mparams[0]), ) + initial_params_pks, ((mp, mparams[0]),) ) mparams = _extract_parameters_by_name( initial_params_pks, 'mixing_h' ) for mp in mparams[1:]: _set_equality_constraints( - initial_params_pks, ((mp, mparams[0]), ) + initial_params_pks, ((mp, mparams[0]),) ) # background initial_params_bkg = Parameters() initial_params_bkg.add_many( *_parameter_arg_constructor( - dict(zip(master_keys_bkg, p0_bkg)), - param_hints_DFLT + dict(zip(master_keys_bkg, p0_bkg)), param_hints_DFLT ) ) @@ -513,28 +550,27 @@ def fit(self): _set_refinement_by_name(new_p, 'beta', vary=True) _set_equality_constraints(new_p, 'alpha') _set_equality_constraints(new_p, 'beta') - _set_bound_constraints( - new_p, 'alpha', min_val=-10, max_val=30 - ) - _set_bound_constraints( - new_p, 'beta', min_val=-10, max_val=30 - ) + _set_bound_constraints(new_p, 'alpha', min_val=-10, max_val=30) + _set_bound_constraints(new_p, 'beta', min_val=-10, max_val=30) _set_width_mixing_bounds( new_p, min_w=fwhm_min, - max_w=0.9*float(np.diff(window_range)) + max_w=0.9 * float(np.diff(window_range)), ) # !!! 
not sure on this, but it seems # to give more stable results with many peaks _set_equality_constraints( new_p, - zip(_extract_parameters_by_name(new_p, 'fwhm_g'), - _extract_parameters_by_name(new_p, 'fwhm_l')) + zip( + _extract_parameters_by_name(new_p, 'fwhm_g'), + _extract_parameters_by_name(new_p, 'fwhm_l'), + ), ) try: - _set_peak_center_bounds(new_p, window_range, - min_sep=self.min_pk_sep) - except(RuntimeError): + _set_peak_center_bounds( + new_p, window_range, min_sep=self.min_pk_sep + ) + except RuntimeError: return res0 # refit diff --git a/hexrd/core/fitting/utils.py b/hexrd/core/fitting/utils.py index d0421ff17..52995a616 100644 --- a/hexrd/core/fitting/utils.py +++ b/hexrd/core/fitting/utils.py @@ -3,7 +3,12 @@ import numpy as np from numba import njit -from hexrd.core.constants import c_erf, cnum_exp1exp, cden_exp1exp, c_coeff_exp1exp +from hexrd.core.constants import ( + c_erf, + cnum_exp1exp, + cden_exp1exp, + c_coeff_exp1exp, +) from hexrd.core.matrixutil import uniqueVectors @@ -40,23 +45,27 @@ def _set_equality_constraints(params, pname_spec): raise RuntimeWarning("Only 1 parameter found; exiting") else: for name_pair in pname_spec: - assert len(name_pair) == 2, \ - "entries in name spec must be 2-tuples" + assert len(name_pair) == 2, "entries in name spec must be 2-tuples" params[name_pair[0]].expr = name_pair[1] -def _set_bound_constraints(params, pname_spec, - min_val=-np.inf, max_val=np.inf, - box=None, percentage=False): +def _set_bound_constraints( + params, + pname_spec, + min_val=-np.inf, + max_val=np.inf, + box=None, + percentage=False, +): target_pnames = _extract_parameters_by_name(params, pname_spec) for pname in target_pnames: if box is None: params[pname].min = min_val params[pname].max = max_val else: - hval = 0.5*box + hval = 0.5 * box if percentage: - hval = 0.5*abs(params[pname].value*(box/100.)) + hval = 0.5 * abs(params[pname].value * (box / 100.0)) params[pname].min = params[pname].value - hval params[pname].max = params[pname].value + hval @@ -67,8 +76,8 @@ def _set_width_mixing_bounds(params, min_w=0.01, max_w=np.inf): param.min = min_w param.max = max_w if 'mixing' in pname: - param.min = 0. - param.max = 1. + param.min = 0.0 + param.max = 1.0 def _set_peak_center_bounds(params, window_range, min_sep=0.01): @@ -107,11 +116,13 @@ def _set_peak_center_bounds(params, window_range, min_sep=0.01): for ip, pname in enumerate(sorted_pnames[1:]): curr_peak = params[pname] new_pname = 'pksep%d' % ip - params.add(name=new_pname, - value=curr_peak.value - prev_peak.value, - min=min_sep, - max=window_range[1] - window_range[0], - vary=True) + params.add( + name=new_pname, + value=curr_peak.value - prev_peak.value, + min=min_sep, + max=window_range[1] - window_range[0], + vary=True, + ) curr_peak.expr = '+'.join([prev_peak.name, new_pname]) prev_peak = curr_peak else: @@ -146,10 +157,12 @@ def erfc(x): a1, a2, a3, a4, a5, p = c_erf # A&S formula 7.1.26 - t = 1.0/(1.0 + p*x) - y = 1. - (((((a5*t + a4)*t) + a3)*t + a2)*t + a1)*t*np.exp(-x*x) - erf = sign*y # erf(-x) = -erf(x) - return 1. 
- erf + t = 1.0 / (1.0 + p * x) + y = 1.0 - (((((a5 * t + a4) * t) + a3) * t + a2) * t + a1) * t * np.exp( + -x * x + ) + erf = sign * y # erf(-x) = -erf(x) + return 1.0 - erf """ @@ -164,10 +177,10 @@ def erfc(x): def exp1exp_under1(x): f = np.zeros(x.shape).astype(np.complex128) for i in range(6): - xx = x**(i+1) - f += c_coeff_exp1exp[i]*xx + xx = x ** (i + 1) + f += c_coeff_exp1exp[i] * xx - return (f - np.log(x) - np.euler_gamma)*np.exp(x) + return (f - np.log(x) - np.euler_gamma) * np.exp(x) """ @@ -185,21 +198,21 @@ def exp1exp_over1(x): den = np.zeros(x.shape).astype(np.complex128) for i in range(11): - p = 10-i + p = 10 - i if p != 0: xx = x**p - num += cnum_exp1exp[i]*xx - den += cden_exp1exp[i]*xx + num += cnum_exp1exp[i] * xx + den += cden_exp1exp[i] * xx else: num += cnum_exp1exp[i] den += cden_exp1exp[i] - return (num/den)*(1./x) + return (num / den) * (1.0 / x) @njit(cache=True, nogil=True) def exp1exp(x): - mask = np.sign(x.real)*np.abs(x) > 1. + mask = np.sign(x.real) * np.abs(x) > 1.0 f = np.zeros(x.shape).astype(np.complex128) f[mask] = exp1exp_over1(x[mask]) @@ -211,13 +224,13 @@ def exp1exp(x): @njit(cache=True, nogil=True) def _calc_alpha(alpha, x0): a0, a1 = alpha - return (a0 + a1*np.tan(np.radians(0.5*x0))) + return a0 + a1 * np.tan(np.radians(0.5 * x0)) @njit(cache=True, nogil=True) def _calc_beta(beta, x0): b0, b1 = beta - return b0 + b1*np.tan(np.radians(0.5*x0)) + return b0 + b1 * np.tan(np.radians(0.5 * x0)) @njit(cache=True, nogil=True) @@ -232,20 +245,25 @@ def _mixing_factor_pv(fwhm_g, fwhm_l): @DETAILS: calculates the mixing factor eta to best approximate voight peak shapes """ - fwhm = fwhm_g**5 + 2.69269 * fwhm_g**4 * fwhm_l + \ - 2.42843 * fwhm_g**3 * fwhm_l**2 + \ - 4.47163 * fwhm_g**2 * fwhm_l**3 +\ - 0.07842 * fwhm_g * fwhm_l**4 +\ - fwhm_l**5 + fwhm = ( + fwhm_g**5 + + 2.69269 * fwhm_g**4 * fwhm_l + + 2.42843 * fwhm_g**3 * fwhm_l**2 + + 4.47163 * fwhm_g**2 * fwhm_l**3 + + 0.07842 * fwhm_g * fwhm_l**4 + + fwhm_l**5 + ) fwhm = fwhm**0.20 - eta = 1.36603 * (fwhm_l/fwhm) - \ - 0.47719 * (fwhm_l/fwhm)**2 + \ - 0.11116 * (fwhm_l/fwhm)**3 - if eta < 0.: - eta = 0. - elif eta > 1.: - eta = 1. + eta = ( + 1.36603 * (fwhm_l / fwhm) + - 0.47719 * (fwhm_l / fwhm) ** 2 + + 0.11116 * (fwhm_l / fwhm) ** 3 + ) + if eta < 0.0: + eta = 0.0 + elif eta > 1.0: + eta = 1.0 return eta, fwhm @@ -269,15 +287,15 @@ def _gaussian_pink_beam(p, x): del_tth = x - x0 sigsqr = fwhm_g**2 - f1 = alpha*sigsqr + 2.0*del_tth - f2 = beta*sigsqr - 2.0*del_tth - f3 = np.sqrt(2.0)*fwhm_g + f1 = alpha * sigsqr + 2.0 * del_tth + f2 = beta * sigsqr - 2.0 * del_tth + f3 = np.sqrt(2.0) * fwhm_g - u = 0.5*alpha*f1 - v = 0.5*beta*f2 + u = 0.5 * alpha * f1 + v = 0.5 * beta * f2 - y = (f1-del_tth)/f3 - z = (f2+del_tth)/f3 + y = (f1 - del_tth) / f3 + z = (f2 + del_tth) / f3 t1 = erfc(y) t2 = erfc(z) @@ -285,12 +303,12 @@ def _gaussian_pink_beam(p, x): g = np.zeros(x.shape) zmask = np.abs(del_tth) > 5.0 - g[~zmask] = \ - (0.5*(alpha*beta)/(alpha + beta)) * np.exp(u[~zmask])*t1[~zmask] \ - + np.exp(v[~zmask])*t2[~zmask] + g[~zmask] = (0.5 * (alpha * beta) / (alpha + beta)) * np.exp( + u[~zmask] + ) * t1[~zmask] + np.exp(v[~zmask]) * t2[~zmask] mask = np.isnan(g) - g[mask] = 0. 
+ g[mask] = 0.0 g *= A return g @@ -314,28 +332,30 @@ def _lorentzian_pink_beam(p, x): del_tth = x - x0 - p = -alpha*del_tth + 1j*0.5*alpha*fwhm_l - q = -beta*del_tth + 1j*0.5*beta*fwhm_l + p = -alpha * del_tth + 1j * 0.5 * alpha * fwhm_l + q = -beta * del_tth + 1j * 0.5 * beta * fwhm_l y = np.zeros(x.shape) f1 = exp1exp(p) f2 = exp1exp(q) - y = -(alpha*beta)/(np.pi*(alpha + beta))*(f1 + f2).imag + y = -(alpha * beta) / (np.pi * (alpha + beta)) * (f1 + f2).imag mask = np.isnan(y) - y[mask] = 0. + y[mask] = 0.0 y *= A return y + # ============================================================================= # pseudo-Voigt # ============================================================================= -def fit_ring(tth_centers, lineout, tth_pred, spectrum_kwargs, - int_cutoff, fit_tth_tol): +def fit_ring( + tth_centers, lineout, tth_pred, spectrum_kwargs, int_cutoff, fit_tth_tol +): # tth_centers and tth_pred should be in degrees. # The returned tth_meas is in degrees as well. @@ -347,26 +367,26 @@ def fit_ring(tth_centers, lineout, tth_pred, spectrum_kwargs, npeaks = len(tth_pred) # spectrum fitting - sm = SpectrumModel( - spec_data, tth_pred, - **spectrum_kwargs - ) + sm = SpectrumModel(spec_data, tth_pred, **spectrum_kwargs) fit_results = sm.fit() if not fit_results.success: return - fit_params = np.vstack([ - (fit_results.best_values['pk%d_amp' % i], - fit_results.best_values['pk%d_cen' % i]) - for i in range(npeaks) - ]).T + fit_params = np.vstack( + [ + ( + fit_results.best_values['pk%d_amp' % i], + fit_results.best_values['pk%d_cen' % i], + ) + for i in range(npeaks) + ] + ).T pk_amp, tth_meas = fit_params # !!! this is where we can kick out bunk fits - center_err = 100*abs(tth_meas/tth_pred - 1.) + center_err = 100 * abs(tth_meas / tth_pred - 1.0) failed_fit_heuristic = np.logical_or( - pk_amp < int_cutoff, - center_err > fit_tth_tol + pk_amp < int_cutoff, center_err > fit_tth_tol ) if np.any(failed_fit_heuristic): return diff --git a/hexrd/core/gridutil.py b/hexrd/core/gridutil.py index 3455400f5..3ff8f1d8c 100644 --- a/hexrd/core/gridutil.py +++ b/hexrd/core/gridutil.py @@ -32,7 +32,6 @@ from hexrd.core.constants import sqrt_epsf - def cellIndices(edges, points_1d): """ get indices in a 1-d regular grid. 
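# --- Editor's illustrative sketch; not part of the patch. ---
# For a regular 1-d grid, the cell index of each point follows from the
# edge spacing alone. This is the textbook formula, shown for context;
# it is not hexrd's exact implementation (which has additional boundary
# handling):
#
#   import numpy as np
#
#   def cell_indices_sketch(edges, pts):
#       delta = edges[1] - edges[0]                # uniform spacing assumed
#       idx = np.floor((np.asarray(pts) - edges[0]) / delta).astype(int)
#       idx[(idx < 0) | (idx >= len(edges) - 1)] = -1   # flag outside points
#       return idx
#
#   cell_indices_sketch(np.linspace(0.0, 1.0, 11), [0.05, 0.55, 1.2])
#   # -> array([ 0,  5, -1])
# --- end sketch ---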
@@ -95,13 +94,13 @@ def cellIndices(edges, points_1d): def _fill_connectivity(out, m, n, p): i_con = 0 for k in range(p): - extra = k*(n+1)*(m+1) + extra = k * (n + 1) * (m + 1) for j in range(m): for i in range(n): - out[i_con, 0] = i + j*(n + 1) + 1 + extra - out[i_con, 1] = i + j*(n + 1) + extra - out[i_con, 2] = i + j + n*(j+1) + 1 + extra - out[i_con, 3] = i + j + n*(j+1) + 2 + extra + out[i_con, 0] = i + j * (n + 1) + 1 + extra + out[i_con, 1] = i + j * (n + 1) + extra + out[i_con, 2] = i + j + n * (j + 1) + 1 + extra + out[i_con, 3] = i + j + n * (j + 1) + 2 + extra i_con += 1 @@ -113,14 +112,14 @@ def cellConnectivity(m, n, p=1, origin='ul'): choice will affect handedness (cw or ccw) """ - nele = p*m*n + nele = p * m * n con = np.empty((nele, 4), dtype=int) _fill_connectivity(con, m, n, p) if p > 1: - nele = m*n*(p-1) - tmp_con3 = con.reshape((p, m*n, 4)) + nele = m * n * (p - 1) + tmp_con3 = con.reshape((p, m * n, 4)) hex_con = [] for layer in range(p - 1): hex_con.append(np.hstack([tmp_con3[layer], tmp_con3[layer + 1]])) @@ -135,7 +134,7 @@ def cellCentroids(crd, con): nele, conn_count = con.shape dim = crd.shape[1] out = np.empty((nele, dim)) - inv_conn = 1.0/conn_count + inv_conn = 1.0 / conn_count for i in range(nele): for j in range(dim): acc = 0.0 @@ -151,13 +150,13 @@ def compute_areas(xy_eval_vtx, conn): for i in range(len(conn)): vtx0x, vtx0y = xy_eval_vtx[conn[i, 0]] vtx1x, vtx1y = xy_eval_vtx[conn[i, 1]] - v0x, v0y = vtx1x-vtx0x, vtx1y-vtx0y + v0x, v0y = vtx1x - vtx0x, vtx1y - vtx0y acc = 0 for j in range(2, 4): vtx_x, vtx_y = xy_eval_vtx[conn[i, j]] v1x = vtx_x - vtx0x v1y = vtx_y - vtx0y - acc += v0x*v1y - v1x*v0y + acc += v0x * v1y - v1x * v0y areas[i] = 0.5 * acc return areas @@ -175,24 +174,32 @@ def computeArea(polygon): area = 0 for [s1, s2] in triv: tvp = np.diff( - np.hstack([polygon[s1, :], - polygon[s2, :]]), axis=0).flatten() + np.hstack([polygon[s1, :], polygon[s2, :]]), axis=0 + ).flatten() area += 0.5 * np.cross(tvp[:2], tvp[2:]) return area -def make_tolerance_grid(bin_width, window_width, num_subdivisions, - adjust_window=False, one_sided=False): +def make_tolerance_grid( + bin_width, + window_width, + num_subdivisions, + adjust_window=False, + one_sided=False, +): bin_width = min(bin_width, window_width) if adjust_window: - window_width = np.ceil(window_width/bin_width)*bin_width + window_width = np.ceil(window_width / bin_width) * bin_width if one_sided: - ndiv = abs(int(window_width/bin_width)) - grid = (np.arange(0, 2*ndiv+1) - ndiv)*bin_width + ndiv = abs(int(window_width / bin_width)) + grid = (np.arange(0, 2 * ndiv + 1) - ndiv) * bin_width ndiv *= 2 else: - ndiv = int(num_subdivisions*np.ceil(window_width/float(bin_width))) - grid = np.arange(0, ndiv+1)*window_width/float(ndiv) - 0.5*window_width + ndiv = int(num_subdivisions * np.ceil(window_width / float(bin_width))) + grid = ( + np.arange(0, ndiv + 1) * window_width / float(ndiv) + - 0.5 * window_width + ) return ndiv, grid @@ -217,15 +224,15 @@ def computeIntersection(line1, line2): [x3, y3] = line2[0] [x4, y4] = line2[1] - denom = (x1-x2)*(y3-y4) - (y1-y2)*(x3-x4) + denom = (x1 - x2) * (y3 - y4) - (y1 - y2) * (x3 - x4) if denom == 0: return [] - subterm1 = x1*y2 - y1*x2 - subterm2 = x3*y4 - y3*x4 + subterm1 = x1 * y2 - y1 * x2 + subterm2 = x3 * y4 - y3 * x4 - intersection[0] = (subterm1*(x3-x4) - subterm2*(x1-x2)) / denom - intersection[1] = (subterm1*(y3-y4) - subterm2*(y1-y2)) / denom + intersection[0] = (subterm1 * (x3 - x4) - subterm2 * (x1 - x2)) / denom + intersection[1] = (subterm1 * 
(y3 - y4) - subterm2 * (y1 - y2)) / denom return intersection @@ -233,8 +240,8 @@ def isinside(point, boundary, ccw=True): """ Assumes CCW boundary ordering """ - pointPositionVector = np.hstack([point - boundary[0, :], 0.]) - boundaryVector = np.hstack([boundary[1, :] - boundary[0, :], 0.]) + pointPositionVector = np.hstack([point - boundary[0, :], 0.0]) + boundaryVector = np.hstack([boundary[1, :] - boundary[0, :], 0.0]) crossVector = np.cross(pointPositionVector, boundaryVector) @@ -268,10 +275,7 @@ def sutherlandHodgman(subjectPolygon, clipPolygon): curr_clipVertex = clipPolygon[iClip, :] - clipBoundary = np.vstack( - [curr_clipVertex, - prev_clipVertex] - ) + clipBoundary = np.vstack([curr_clipVertex, prev_clipVertex]) inputList = np.array(outputList) if len(inputList) > 0: diff --git a/hexrd/core/imageseries/__init__.py b/hexrd/core/imageseries/__init__.py index 68dede415..143945e75 100644 --- a/hexrd/core/imageseries/__init__.py +++ b/hexrd/core/imageseries/__init__.py @@ -4,6 +4,7 @@ and a function for loading. Adapters for particular data formats are managed in the "load" subpackage. """ + from .baseclass import ImageSeries from . import imageseriesabc from . import load @@ -12,6 +13,7 @@ from . import process from . import omega + def open(filename, format=None, **kwargs): # find the appropriate adapter based on format specified reg = load.Registry.adapter_registry @@ -21,4 +23,5 @@ def open(filename, format=None, **kwargs): raise RuntimeError("zero length imageseries") return ims + write = save.write diff --git a/hexrd/core/imageseries/baseclass.py b/hexrd/core/imageseries/baseclass.py index 8d0181d5e..775dc655f 100644 --- a/hexrd/core/imageseries/baseclass.py +++ b/hexrd/core/imageseries/baseclass.py @@ -1,5 +1,5 @@ -"""Base class for imageseries -""" +"""Base class for imageseries""" + import numpy as np from .imageseriesabc import ImageSeriesABC, RegionType diff --git a/hexrd/core/imageseries/imageseriesabc.py b/hexrd/core/imageseries/imageseriesabc.py index 65ef7771b..7864b3476 100644 --- a/hexrd/core/imageseries/imageseriesabc.py +++ b/hexrd/core/imageseries/imageseriesabc.py @@ -1,4 +1,5 @@ """Abstract Base Class""" + import collections.abc # Type for extracting regions diff --git a/hexrd/core/imageseries/imageseriesiter.py b/hexrd/core/imageseries/imageseriesiter.py index 9d4855981..868daa90c 100644 --- a/hexrd/core/imageseries/imageseriesiter.py +++ b/hexrd/core/imageseries/imageseriesiter.py @@ -2,6 +2,7 @@ For use by adapter classes. """ + import collections.abc diff --git a/hexrd/core/imageseries/load/__init__.py b/hexrd/core/imageseries/load/__init__.py index e03a1e717..d5d3d7721 100644 --- a/hexrd/core/imageseries/load/__init__.py +++ b/hexrd/core/imageseries/load/__init__.py @@ -7,26 +7,38 @@ # Metaclass for adapter registry + class _RegisterAdapterClass(abc.ABCMeta): def __init__(cls, name, bases, attrs): abc.ABCMeta.__init__(cls, name, bases, attrs) Registry.register(cls) + class ImageSeriesAdapter(ImageSeriesABC, metaclass=_RegisterAdapterClass): format = None def get_region(self, frame_idx: int, region: RegionType) -> np.ndarray: r = region - return self[frame_idx][r[0][0]:r[0][1], r[1][0]:r[1][1]] + return self[frame_idx][r[0][0] : r[0][1], r[1][0] : r[1][1]] def __getitem__(self, _): pass + # import all adapter modules -from . import array, framecache, function, hdf5, imagefiles, rawimage, metadata, trivial +from . 
import ( + array, + framecache, + function, + hdf5, + imagefiles, + rawimage, + metadata, + trivial, +) try: from dectris.compression import decompress @@ -38,9 +50,9 @@ def __getitem__(self, _): from . import eiger_stream_v1 -#for loader, name, ispkg in pkgutil.iter_modules(__path__): +# for loader, name, ispkg in pkgutil.iter_modules(__path__): # if name is not 'registry': # __import__(name, globals=globals()) - # - # couldn't get the following line to work due to relative import issue: - # loader.find_module(name).load_module(name) +# +# couldn't get the following line to work due to relative import issue: +# loader.find_module(name).load_module(name) diff --git a/hexrd/core/imageseries/load/array.py b/hexrd/core/imageseries/load/array.py index 2f199057d..213f365d0 100644 --- a/hexrd/core/imageseries/load/array.py +++ b/hexrd/core/imageseries/load/array.py @@ -1,5 +1,5 @@ -"""Adapter class for numpy array (3D) -""" +"""Adapter class for numpy array (3D)""" + from . import ImageSeriesAdapter from ..imageseriesiter import ImageSeriesIterator @@ -18,6 +18,7 @@ class ArrayImageSeriesAdapter(ImageSeriesAdapter): metadata: dict (optional) the metadata dictionary """ + format = 'array' def __init__(self, fname, **kwargs): @@ -28,9 +29,9 @@ def __init__(self, fname, **kwargs): self._data = data_arr else: raise RuntimeError( - 'input array must be 2-d or 3-d; you provided ndim=%d' - % data_arr.ndim - ) + 'input array must be 2-d or 3-d; you provided ndim=%d' + % data_arr.ndim + ) self._meta = kwargs.pop('meta', dict()) self._shape = self._data.shape diff --git a/hexrd/core/imageseries/load/eiger_stream_v1.py b/hexrd/core/imageseries/load/eiger_stream_v1.py index d8222e9a7..8b6c0e883 100644 --- a/hexrd/core/imageseries/load/eiger_stream_v1.py +++ b/hexrd/core/imageseries/load/eiger_stream_v1.py @@ -1,5 +1,5 @@ -"""HDF5 adapter class -""" +"""HDF5 adapter class""" + import warnings from dectris.compression import decompress @@ -133,6 +133,7 @@ def _decompress_frame(d: dict) -> np.ndarray: if compression_type is None: return np.frombuffer(data, dtype=dtype).reshape(shape) - decompressed_bytes = decompress(data, compression_type, - elem_size=elem_size) + decompressed_bytes = decompress( + data, compression_type, elem_size=elem_size + ) return np.frombuffer(decompressed_bytes, dtype=dtype).reshape(shape) diff --git a/hexrd/core/imageseries/load/framecache.py b/hexrd/core/imageseries/load/framecache.py index 485d0770e..775a2ad99 100644 --- a/hexrd/core/imageseries/load/framecache.py +++ b/hexrd/core/imageseries/load/framecache.py @@ -1,5 +1,5 @@ -"""Adapter class for frame caches -""" +"""Adapter class for frame caches""" + import os from threading import Lock @@ -49,9 +49,11 @@ def __init__(self, fname, style='npz', **kwargs): self._from_yml = False self._load_cache() else: - raise TypeError(f"Unknown style format for loading data: {style}." - "Known style formats: 'npz', 'fch5' 'yml', ", - "'yaml', 'test'") + raise TypeError( + f"Unknown style format for loading data: {style}. " + "Known style formats: 'npz', 'fch5', 'yml', " + "'yaml', 'test'" + ) def _load_yml(self): with open(self._fname, "r") as f: @@ -72,13 +74,16 @@ def _load_cache(self): def _load_cache_fch5(self): with h5py.File(self._fname, "r") as file: if 'HEXRD_FRAMECACHE_VERSION' not in file.attrs.keys(): - raise NotImplementedError("Unsupported file. " - "HEXRD_FRAMECACHE_VERSION " - "is missing!"
+ ) version = file.attrs.get('HEXRD_FRAMECACHE_VERSION', 0) if version != 1: - raise NotImplementedError("Framecache version is not " - f"supported: {version}") + raise NotImplementedError( + "Framecache version is not " f"supported: {version}" + ) self._shape = file["shape"][()] self._nframes = file["nframes"][()] @@ -134,14 +139,16 @@ def _load_framelist_fch5(self): indices = file["indices"] def read_list_arrays_method_thread(i): - frame_data = data[frame_id[2*i]: frame_id[2*i+1]] - frame_indices = indices[frame_id[2*i]: frame_id[2*i+1]] + frame_data = data[frame_id[2 * i] : frame_id[2 * i + 1]] + frame_indices = indices[frame_id[2 * i] : frame_id[2 * i + 1]] row = frame_indices[:, 0] col = frame_indices[:, 1] mat_data = frame_data[:, 0] - frame = csr_matrix((mat_data, (row, col)), - shape=self._shape, - dtype=self._dtype) + frame = csr_matrix( + (mat_data, (row, col)), + shape=self._shape, + dtype=self._dtype, + ) self._framelist[i] = frame return @@ -152,8 +159,11 @@ def read_list_arrays_method_thread(i): # Evaluate the results via `list()`, so that if an exception is # raised in a thread, it will be re-raised and visible to the # user. - list(executor.map(read_list_arrays_method_thread, - range(self._nframes))) + list( + executor.map( + read_list_arrays_method_thread, range(self._nframes) + ) + ) def _load_framelist_npz(self): self._framelist = [] @@ -171,21 +181,20 @@ def _load_framelist_npz(self): row = arrs[f"{i}_row"] col = arrs[f"{i}_col"] data = arrs[f"{i}_data"] - frame = csr_matrix((data, (row, col)), - shape=self._shape, - dtype=self._dtype) + frame = csr_matrix( + (data, (row, col)), shape=self._shape, dtype=self._dtype + ) self._framelist.append(frame) def get_region(self, frame_idx: int, region: RegionType) -> np.ndarray: self._load_framelist_if_needed() csr_frame = self._framelist[frame_idx] r = region - return csr_frame[r[0][0]:r[0][1], r[1][0]:r[1][1]].toarray() + return csr_frame[r[0][0] : r[0][1], r[1][0] : r[1][1]].toarray() @property def metadata(self): - """(read-only) Image sequence metadata - """ + """(read-only) Image sequence metadata""" return self._meta def load_metadata(self, indict): diff --git a/hexrd/core/imageseries/load/function.py b/hexrd/core/imageseries/load/function.py index d06f01b63..499c11190 100644 --- a/hexrd/core/imageseries/load/function.py +++ b/hexrd/core/imageseries/load/function.py @@ -1,6 +1,7 @@ """Adapter class for a custom function that takes an int as an argument and returns a 2D numpy array. """ + from . 
import ImageSeriesAdapter from ..imageseriesiter import ImageSeriesIterator @@ -26,6 +27,7 @@ class FunctionImageSeriesAdapter(ImageSeriesAdapter): metadata: dict (optional) the metadata dictionary """ + format = 'function' def __init__(self, fname, **kwargs): diff --git a/hexrd/core/imageseries/load/hdf5.py b/hexrd/core/imageseries/load/hdf5.py index d5efde499..4f7bb98a4 100644 --- a/hexrd/core/imageseries/load/hdf5.py +++ b/hexrd/core/imageseries/load/hdf5.py @@ -1,5 +1,5 @@ -"""HDF5 adapter class -""" +"""HDF5 adapter class""" + import h5py import warnings @@ -53,7 +53,7 @@ def __del__(self): # an issue arises at some point try: self.close() - except(Exception): + except Exception: warnings.warn("HDF5ImageSeries could not close h5 file") def __getitem__(self, key): @@ -69,7 +69,9 @@ def __getitem__(self, key): def get_region(self, frame_idx: int, region: RegionType) -> np.ndarray: r = region - return self.__image_dataset[frame_idx][r[0][0]:r[0][1], r[1][0]:r[1][1]] + return self.__image_dataset[frame_idx][ + r[0][0] : r[0][1], r[1][0] : r[1][1] + ] def __iter__(self): return ImageSeriesIterator(self) diff --git a/hexrd/core/imageseries/load/imagefiles.py b/hexrd/core/imageseries/load/imagefiles.py index 532d2c006..ee77c5db8 100644 --- a/hexrd/core/imageseries/load/imagefiles.py +++ b/hexrd/core/imageseries/load/imagefiles.py @@ -1,9 +1,8 @@ -"""Adapter class for list of image files -""" - +"""Adapter class for list of image files""" # import sys import os + # import logging import glob @@ -58,7 +57,7 @@ def __getitem__(self, key): # !!! handled in self._process_files try: dinfo = np.iinfo(self._dtype) - except(ValueError): + except ValueError: dinfo = np.finfo(self._dtype) if np.max(data) > dinfo.max: raise RuntimeError("specified dtype will truncate image") @@ -77,8 +76,14 @@ def __str__(self): dtype: %s shape: %s single frames: %s - """ % (self.fabioclass, len(self._files), len(self), - self.dtype, self.shape, self.singleframes) + """ % ( + self.fabioclass, + len(self._files), + len(self), + self.dtype, + self.shape, + self.singleframes, + ) return s @property @@ -114,12 +119,15 @@ def _load_yml(self): self._files.sort() self.optsd = d['options'] if 'options' else None self._empty = self.optsd[EMPTY] if EMPTY in self.optsd else 0 - self._maxframes_tot = self.optsd[MAXTOTF] \ - if MAXTOTF in self.optsd else 0 - self._maxframes_file = self.optsd[MAXFILF] \ - if MAXFILF in self.optsd else 0 - self._dtype = np.dtype(self.optsd[DTYPE]) \ - if DTYPE in self.optsd else None + self._maxframes_tot = ( + self.optsd[MAXTOTF] if MAXTOTF in self.optsd else 0 + ) + self._maxframes_file = ( + self.optsd[MAXFILF] if MAXFILF in self.optsd else 0 + ) + self._dtype = ( + np.dtype(self.optsd[DTYPE]) if DTYPE in self.optsd else None + ) self._meta = yamlmeta(d['meta']) # , path=imgsd) @@ -134,17 +142,19 @@ def _process_files(self): for imgf in self._files: info = FileInfo(imgf, **kw) infolist.append(info) - shp = self._checkvalue(shp, info.shape, - "inconsistent image shapes") + shp = self._checkvalue( + shp, info.shape, "inconsistent image shapes" + ) if self._dtype is not None: dtp = self._dtype else: dtp = self._checkvalue( - dtp, info.dtype, - "inconsistent image dtypes") - fcl = self._checkvalue(fcl, info.fabioclass, - "inconsistent image types") + dtp, info.dtype, "inconsistent image dtypes" + ) + fcl = self._checkvalue( + fcl, info.fabioclass, "inconsistent image types" + ) nf += info.nframes if info.nframes > 1: self._singleframes = False @@ -261,8 +271,13 @@ def __str__(self): fabio class: %s 
frames: %s dtype: %s - shape: %s\n""" % (self.filename, self.fabioclass, - self.nframes, self.dtype, self.shape) + shape: %s\n""" % ( + self.filename, + self.fabioclass, + self.nframes, + self.dtype, + self.shape, + ) return s @@ -308,4 +323,4 @@ def _process_gel_data(array): """Convert a gel data array to regular image data""" # An inversion seems to be necessary for our examples array = np.invert(array) - return array.astype(np.float64)**2 * GEL_SCALE_FACTOR + return array.astype(np.float64) ** 2 * GEL_SCALE_FACTOR diff --git a/hexrd/core/imageseries/load/metadata.py b/hexrd/core/imageseries/load/metadata.py index 882bebd95..edb0ca211 100644 --- a/hexrd/core/imageseries/load/metadata.py +++ b/hexrd/core/imageseries/load/metadata.py @@ -1,23 +1,25 @@ """metadata tools for imageseries""" + import os import yaml import numpy as np + def yamlmeta(meta, path=None): - """ Image sequence metadata + """Image sequence metadata - *path* is a full path or directory used to find the relative location - of files loaded via the trigger mechanism + *path* is a full path or directory used to find the relative location + of files loaded via the trigger mechanism -The usual yaml dictionary is returned with the exception that -if the first word of a multiword string is an exclamation mark ("!"), -it will trigger further processing determined by the rest of the string. -Currently only one trigger is used: + The usual yaml dictionary is returned with the exception that + if the first word of a multiword string is an exclamation mark ("!"), + it will trigger further processing determined by the rest of the string. + Currently only one trigger is used: -! load-numpy-object - the returned value will the numpy object read from the file -""" + ! load-numpy-object + the returned value will be the numpy object read from the file + """ if path is not None: path = os.path.dirname(path) else: @@ -31,7 +33,7 @@ def yamlmeta(meta, path=None): words = v.split() istrigger = (words[0] == "!") and (len(words) > 1) - if v == '++np.array': # old way used in frame-cache (obsolescent) + if v == '++np.array': # old way used in frame-cache (obsolescent) newk = k + '-array' metad[k] = np.array(meta.pop(newk)) metad.pop(newk, None) diff --git a/hexrd/core/imageseries/load/rawimage.py b/hexrd/core/imageseries/load/rawimage.py index a272ea1b8..fc0f84bff 100644 --- a/hexrd/core/imageseries/load/rawimage.py +++ b/hexrd/core/imageseries/load/rawimage.py @@ -1,4 +1,5 @@ -""" Adapter class for raw image reader""" +"""Adapter class for raw image reader""" + import os import threading @@ -86,13 +87,13 @@ def typechars(numtype, bytes_=4, signed=False, little=True): 1: "b", 2: "h", 4: "i", - 8: "l" + 8: "l", } typechar = { "f": "f", "d": "d", - "b": "?"
+ "b": "?", } if numtype == "i": @@ -102,7 +103,7 @@ def typechars(numtype, bytes_=4, signed=False, little=True): else: char = typechar[numtype] - return "<"+char if little else ">"+char + return "<" + char if little else ">" + char def __len__(self): return self._len diff --git a/hexrd/core/imageseries/load/registry.py b/hexrd/core/imageseries/load/registry.py index f87315f45..070c426cd 100644 --- a/hexrd/core/imageseries/load/registry.py +++ b/hexrd/core/imageseries/load/registry.py @@ -1,7 +1,9 @@ -"""Adapter registry -""" +"""Adapter registry""" + + class Registry(object): """Registry for imageseries adapters""" + adapter_registry = dict() @classmethod diff --git a/hexrd/core/imageseries/load/trivial.py b/hexrd/core/imageseries/load/trivial.py index c0d189c83..e1b3162fe 100644 --- a/hexrd/core/imageseries/load/trivial.py +++ b/hexrd/core/imageseries/load/trivial.py @@ -1,6 +1,8 @@ """Trivial adapter: just for testing""" + from . import ImageSeriesAdapter + class TrivialAdapter(ImageSeriesAdapter): def __init__(self, fname): diff --git a/hexrd/core/imageseries/omega.py b/hexrd/core/imageseries/omega.py index f0a61ef2d..0db39d1c6 100644 --- a/hexrd/core/imageseries/omega.py +++ b/hexrd/core/imageseries/omega.py @@ -2,14 +2,17 @@ * OmegaWedges class specifies omega metadata in wedges """ + import numpy as np from .baseclass import ImageSeries OMEGA_KEY = 'omega' + class OmegaImageSeries(ImageSeries): """ImageSeries with omega metadata""" + DFLT_TOL = 1.0e-6 TAU = 360 @@ -40,8 +43,8 @@ def _make_wedges(self, tol=DFLT_TOL): if delta <= 0: raise OmegaSeriesError('omega array must be increasing') # check whether delta changes or ranges not contiguous - d = om[f,1] - om[f,0] - if (np.abs(d - delta) > tol) or (np.abs(om[f,0] - omlast) > tol): + d = om[f, 1] - om[f, 0] + if (np.abs(d - delta) > tol) or (np.abs(om[f, 0] - omlast) > tol): starts.append(f) delta = d omlast = om[f, 1] @@ -55,15 +58,15 @@ def _make_wedges(self, tol=DFLT_TOL): for s in range(nw): ostart = om[starts[s], 0] ostop = om[starts[s + 1] - 1, 1] - steps = starts[s+1] - starts[s] + steps = starts[s + 1] - starts[s] self._omegawedges.addwedge(ostart, ostop, steps) # - delta = (ostop - ostart)/steps + delta = (ostop - ostart) / steps self._wedge_om[s, :] = (ostart, ostop, delta) self._wedge_f[s, 0] = nf0 self._wedge_f[s, 1] = steps nf0 += steps - assert(nf0 == nf) + assert nf0 == nf @property def omega(self): @@ -83,7 +86,7 @@ def nwedges(self): def wedge(self, i): """return i'th wedge as a dictionary""" d = self.omegawedges.wedges[i] - delta = (d['ostop'] - d['ostart'])/d['nsteps'] + delta = (d['ostop'] - d['ostart']) / d['nsteps'] d.update(delta=delta) return d @@ -97,7 +100,9 @@ def omega_to_frame(self, om): omcheck = omin + np.mod(om - omin, self.TAU) if omcheck < omax: odel = self._wedge_om[i, 2] - f = self._wedge_f[i,0] + int(np.floor((omcheck - omin)/odel)) + f = self._wedge_f[i, 0] + int( + np.floor((omcheck - omin) / odel) + ) w = i break @@ -115,7 +120,7 @@ def omegarange_to_frames(self, omin, omax): # if same wedge, require frames be increasing if (w0 == w1) and (f1 > f0): - return list(range(f0, f1+1)) + return list(range(f0, f1 + 1)) # case: adjacent wedges with 2pi jump in omega w0max = self._wedge_om[w0, 1] @@ -137,9 +142,11 @@ class OmegaWedges(object): nframes: int number of frames in imageseries """ + def __init__(self, nframes): self.nframes = nframes self._wedges = [] + # # ============================== API # @@ -147,8 +154,10 @@ def __init__(self, nframes): def omegas(self): """n x 2 array of omega values, 
one per frame""" if self.nframes != self.wframes: - msg = "number of frames (%s) does not match "\ - "number of wedge frames (%s)" %(self.nframes, self.wframes) + msg = ( + "number of frames (%s) does not match " + "number of wedge frames (%s)" % (self.nframes, self.wframes) + ) raise OmegaSeriesError(msg) oa = np.zeros((self.nframes, 2)) @@ -217,5 +226,6 @@ def save_omegas(self, fname): class OmegaSeriesError(Exception): def __init__(self, value): self.value = value + def __str__(self): return repr(self.value) diff --git a/hexrd/core/imageseries/process.py b/hexrd/core/imageseries/process.py index 99924a2a9..aee16f270 100644 --- a/hexrd/core/imageseries/process.py +++ b/hexrd/core/imageseries/process.py @@ -1,4 +1,5 @@ """Class for processing individual frames""" + import copy import numpy as np @@ -20,6 +21,7 @@ class ProcessedImageSeries(ImageSeries): frame_list: list of ints or None, default = None specify subset of frames by list; if None, then all frames are used """ + FLIP = 'flip' DARK = 'dark' RECT = 'rectangle' @@ -32,7 +34,7 @@ def __init__(self, imser, oplist, **kwargs): self._meta = copy.deepcopy(imser.metadata) self._oplist = oplist self._frames = kwargs.pop('frame_list', None) - self._hasframelist = (self._frames is not None) + self._hasframelist = self._frames is not None if self._hasframelist: self._update_omega() self._opdict = {} @@ -56,16 +58,16 @@ def __iter__(self): return (self[i] for i in range(len(self))) def _process_frame(self, key): - # note: key refers to original imageseries + # note: key refers to original imageseries oplist = self.oplist # when rectangle is the first operation we can try to call the # optimized version. If the adapter provides one it should be # significantly faster if not it will fallback to the same # implementation that _rectangle provides. 
- if oplist and oplist[0][0] == self.RECT: + if oplist and oplist[0][0] == self.RECT: region = oplist[0][1] - img = self._rectangle_optimized(key,region) + img = self._rectangle_optimized(key, region) # remove the first operation since we already used it oplist = oplist[1:] @@ -100,7 +102,7 @@ def _rectangle_optimized(self, img_key, r): def _rectangle(self, img, r): # restrict to rectangle - return img[r[0][0]:r[0][1], r[1][0]:r[1][1]] + return img[r[0][0] : r[0][1], r[1][0] : r[1][1]] def _flip(self, img, flip): if flip in ('y', 'v'): # about y-axis (vertical) @@ -134,6 +136,7 @@ def _update_omega(self): if "omega" in self.metadata: omega = self.metadata["omega"] self.metadata["omega"] = omega[self._frames] + # # ==================== API # diff --git a/hexrd/core/imageseries/stats.py b/hexrd/core/imageseries/stats.py index 6f5f7eef3..d39ddab5e 100644 --- a/hexrd/core/imageseries/stats.py +++ b/hexrd/core/imageseries/stats.py @@ -23,13 +23,14 @@ * Perhaps we should rename min -> minimum and max -> maximum to avoid conflicting with the python built-ins """ + import numpy as np from psutil import virtual_memory # Default Buffer Size: half of available memory vmem = virtual_memory() -STATS_BUFFER = int(0.5*vmem.available) +STATS_BUFFER = int(0.5 * vmem.available) del vmem @@ -105,7 +106,7 @@ def average_iter(ims, nchunk, nframes=0): """average over frames Note: average returns a float even if images are uint -""" + """ nf = _nframes(ims, nframes) stops = _chunk_stops(nf, nchunk) s0, stop = 0, stops[0] @@ -135,7 +136,7 @@ def percentile(ims, pctl, nframes=0): return np.percentile(_toarray(ims, nf), pctl, axis=0).astype(np.float32) -def percentile_iter(ims, pctl, nchunks, nframes=0, use_buffer=True): +def percentile_iter(ims, pctl, nchunks, nframes=0, use_buffer=True): """iterator for percentile function""" nf = _nframes(ims, nframes) nr, nc = ims.shape @@ -146,8 +147,7 @@ def percentile_iter(ims, pctl, nchunks, nframes=0, use_buffer=True): for s in stops: r1 = s + 1 img[r0:r1] = np.percentile( - _toarray(ims, nf, rows=(r0, r1), buffer=buffer), - pctl, axis=0 + _toarray(ims, nf, rows=(r0, r1), buffer=buffer), pctl, axis=0 ) r0 = r1 yield img.astype(np.float32) @@ -176,12 +176,12 @@ def _chunk_stops(n, nchunks): n -- number of items to be chunked (e.g. frames/rows) nchunks -- number of chunks -""" + """ if nchunks > n: raise ValueError("number of chunks cannot exceed number of items") - csize = n//nchunks + csize = n // nchunks rem = n % nchunks - pieces = csize*np.ones(nchunks, dtype=int) + pieces = csize * np.ones(nchunks, dtype=int) pieces[:rem] += 1 pieces[0] += -1 @@ -231,8 +231,8 @@ def _toarray(ims, nframes, rows=None, buffer=None): def _alloc_buffer(ims, nf): """Allocate buffer to save as many full frames as possible""" shp, dt = ims.shape, ims.dtype - framesize = shp[0]*shp[1]*dt.itemsize - nf = np.minimum(nf, np.floor(STATS_BUFFER/framesize).astype(int)) + framesize = shp[0] * shp[1] * dt.itemsize + nf = np.minimum(nf, np.floor(STATS_BUFFER / framesize).astype(int)) bshp = (nf,) + shp return np.empty(bshp, dt) diff --git a/hexrd/core/imageutil.py b/hexrd/core/imageutil.py index 7d6e04691..e4592fce5 100644 --- a/hexrd/core/imageutil.py +++ b/hexrd/core/imageutil.py @@ -16,6 +16,7 @@ # BACKGROUND REMOVAL # ============================================================================= + def _scale_image_snip(y, offset, invert=False): """ Log-Log scale image for snip @@ -40,14 +41,13 @@ def _scale_image_snip(y, offset, invert=False): """ if invert: - return (np.exp(np.exp(y) - 1.) 
- 1.)**2 + offset + return (np.exp(np.exp(y) - 1.0) - 1.0) ** 2 + offset else: - return np.log(np.log(np.sqrt(y - offset) + 1.) + 1.) + return np.log(np.log(np.sqrt(y - offset) + 1.0) + 1.0) def fast_snip1d(y, w=4, numiter=2): - """ - """ + """ """ bkg = np.zeros_like(y) min_val = np.nanmin(y) zfull = _scale_image_snip(y, min_val, invert=False) @@ -55,7 +55,7 @@ def fast_snip1d(y, w=4, numiter=2): b = z for i in range(numiter): for p in range(w, 0, -1): - kernel = np.zeros(p*2 + 1) + kernel = np.zeros(p * 2 + 1) kernel[0] = 0.5 kernel[-1] = 0.5 b = np.minimum(b, signal.convolve(z, kernel, mode='same')) @@ -111,14 +111,18 @@ def _run_snip1d_row(task, numiter, w, min_val): b = z for i in range(numiter): for p in range(w, 0, -1): - kernel = np.zeros(p*2 + 1) - kernel[0] = kernel[-1] = 1./2. + kernel = np.zeros(p * 2 + 1) + kernel[0] = kernel[-1] = 1.0 / 2.0 b = np.minimum( b, convolution.convolve( - z, kernel, boundary='extend', mask=mask, - nan_treatment='interpolate', preserve_nan=True - ) + z, + kernel, + boundary='extend', + mask=mask, + nan_treatment='interpolate', + preserve_nan=True, + ), ) z = b return k, _scale_image_snip(b, min_val, invert=True) @@ -134,19 +138,21 @@ def snip1d_quad(y, w=4, numiter=2): N = p * 2 + 1 # linear kernel kern1 = np.zeros(N) - kern1[0] = kern1[-1] = 1./2. + kern1[0] = kern1[-1] = 1.0 / 2.0 # quadratic kernel kern2 = np.zeros(N) - kern2[0] = kern2[-1] = -1./6. - kern2[int(p/2.)] = kern2[int(3.*p/2.)] = 4./6. + kern2[0] = kern2[-1] = -1.0 / 6.0 + kern2[int(p / 2.0)] = kern2[int(3.0 * p / 2.0)] = 4.0 / 6.0 kernels.append([kern1, kern2]) z = b = _scale_image_snip(y, min_val, invert=False) for i in range(numiter): - for (kern1, kern2) in kernels: - c = np.maximum(ndimage.convolve1d(z, kern1, mode='nearest'), - ndimage.convolve1d(z, kern2, mode='nearest')) + for kern1, kern2 in kernels: + c = np.maximum( + ndimage.convolve1d(z, kern1, mode='nearest'), + ndimage.convolve1d(z, kern2, mode='nearest'), + ) b = np.minimum(b, c) z = b @@ -194,16 +200,16 @@ def snip2d(y, w=4, numiter=2, order=1): # linear filter kernel kern1 = np.zeros((N, N)) # initialize a kernel with all zeros xx, yy = np.indices(kern1.shape) # x-y indices of kernel points - ij = np.round( - np.hypot(xx - p1, yy - p1) - ) == p1 # select circular shape + ij = ( + np.round(np.hypot(xx - p1, yy - p1)) == p1 + ) # select circular shape kern1[ij] = 1 / ij.sum() # normalize so sum of kernel elements is 1 kernels.append([kern1]) if order >= 2: # add quadratic filter kernel p2 = p1 // 2 kern2 = np.zeros_like(kern1) - radii, norms = (p2, 2 * p2), (4/3, -1/3) + radii, norms = (p2, 2 * p2), (4 / 3, -1 / 3) for radius, norm in zip(radii, norms): ij = np.round(np.hypot(xx - p1, yy - p1)) == radius kern2[ij] = norm / ij.sum() @@ -214,8 +220,10 @@ def snip2d(y, w=4, numiter=2, order=1): for i in range(numiter): for kk in kernels: if order > 1: - c = maximum(ndimage.convolve(z, kk[0], mode='nearest'), - ndimage.convolve(z, kk[1], mode='nearest')) + c = maximum( + ndimage.convolve(z, kk[0], mode='nearest'), + ndimage.convolve(z, kk[1], mode='nearest'), + ) else: c = ndimage.convolve(z, kk[0], mode='nearest') b = minimum(b, c) @@ -238,21 +246,16 @@ def find_peaks_2d(img, method, method_kwargs): filter_fwhm = method_kwargs['filter_radius'] if filter_fwhm: filt_stdev = fwhm_to_sigma * filter_fwhm - img = -ndimage.filters.gaussian_laplace( - img, filt_stdev - ) + img = -ndimage.filters.gaussian_laplace(img, filt_stdev) labels_t, numSpots_t = ndimage.label( - img > method_kwargs['threshold'], - structureNDI_label - ) + 
img > method_kwargs['threshold'], structureNDI_label + ) coms_t = np.atleast_2d( ndimage.center_of_mass( - img, - labels=labels_t, - index=np.arange(1, np.amax(labels_t) + 1) - ) + img, labels=labels_t, index=np.arange(1, np.amax(labels_t) + 1) ) + ) elif method in ['blob_log', 'blob_dog']: # must scale map # TODO: we should so a parameter study here @@ -265,13 +268,9 @@ def find_peaks_2d(img, method, method_kwargs): # for 'blob_dog': min_sigma=0.5, max_sigma=5, # sigma_ratio=1.6, threshold=0.01, overlap=0.1 if method == 'blob_log': - blobs = np.atleast_2d( - blob_log(scl_map, **method_kwargs) - ) + blobs = np.atleast_2d(blob_log(scl_map, **method_kwargs)) else: # blob_dog - blobs = np.atleast_2d( - blob_dog(scl_map, **method_kwargs) - ) + blobs = np.atleast_2d(blob_dog(scl_map, **method_kwargs)) numSpots_t = len(blobs) coms_t = blobs[:, :2] diff --git a/hexrd/core/instrument/__init__.py b/hexrd/core/instrument/__init__.py index 10f7fda33..c5de3a79b 100644 --- a/hexrd/core/instrument/__init__.py +++ b/hexrd/core/instrument/__init__.py @@ -1,4 +1,15 @@ -from .hedm_instrument import calc_angles_from_beam_vec, calc_beam_vec, centers_of_edge_vec, GenerateEtaOmeMaps, GrainDataWriter, HEDMInstrument, max_tth, switch_xray_source, unwrap_dict_to_h5, unwrap_h5_to_dict +from .hedm_instrument import ( + calc_angles_from_beam_vec, + calc_beam_vec, + centers_of_edge_vec, + GenerateEtaOmeMaps, + GrainDataWriter, + HEDMInstrument, + max_tth, + switch_xray_source, + unwrap_dict_to_h5, + unwrap_h5_to_dict, +) from .cylindrical_detector import CylindricalDetector from .detector import Detector from .planar_detector import PlanarDetector diff --git a/hexrd/core/instrument/constants.py b/hexrd/core/instrument/constants.py index 98ce323b5..85c0b0a2a 100644 --- a/hexrd/core/instrument/constants.py +++ b/hexrd/core/instrument/constants.py @@ -5,27 +5,30 @@ class FILTER_DEFAULTS: TARDIS = { 'material': 'Ge', - 'density' : DENSITY['Ge'], - 'thickness' : 10 # microns + 'density': DENSITY['Ge'], + 'thickness': 10, # microns } PXRDIP = { 'material': 'Cu', - 'density' : DENSITY['Cu'], - 'thickness' : 10 # microns + 'density': DENSITY['Cu'], + 'thickness': 10, # microns } + COATING_DEFAULT = { 'material': 'C10H8O4', 'density': DENSITY_COMPOUNDS['C10H8O4'], - 'thickness': 9 # microns + 'thickness': 9, # microns } PHOSPHOR_DEFAULT = { 'material': 'Ba2263F2263Br1923I339C741H1730N247O494', - 'density': DENSITY_COMPOUNDS['Ba2263F2263Br1923I339C741H1730N247O494'], # g/cc - 'thickness': 115, # microns - 'readout_length': 222, #microns - 'pre_U0': 0.695 + 'density': DENSITY_COMPOUNDS[ + 'Ba2263F2263Br1923I339C741H1730N247O494' + ], # g/cc + 'thickness': 115, # microns + 'readout_length': 222, # microns + 'pre_U0': 0.695, } @@ -34,10 +37,10 @@ class PHYSICS_PACKAGE_DEFAULTS: HED = { 'sample_material': 'Fe', 'sample_density': DENSITY['Fe'], - 'sample_thickness': 15, # in microns + 'sample_thickness': 15, # in microns 'window_material': 'LiF', 'window_density': DENSITY_COMPOUNDS['LiF'], - 'window_thickness': 150, # in microns + 'window_thickness': 150, # in microns } # # Template for HEDM type physics package # HEDM = { @@ -51,14 +54,14 @@ class PHYSICS_PACKAGE_DEFAULTS: # Default pinhole area correction parameters class PINHOLE_DEFAULTS: TARDIS = { - 'pinhole_material' : 'Ta', - 'pinhole_diameter' : 400, # in microns - 'pinhole_thickness' : 100, # in microns - 'pinhole_density' : 16.65, # g/cc + 'pinhole_material': 'Ta', + 'pinhole_diameter': 400, # in microns + 'pinhole_thickness': 100, # in microns + 'pinhole_density': 16.65, 
# g/cc } PXRDIP = { - 'pinhole_material' : 'Ta', - 'pinhole_diameter' : 130, # in microns - 'pinhole_thickness' : 70, # in microns - 'pinhole_density' : 16.65, # g/cc + 'pinhole_material': 'Ta', + 'pinhole_diameter': 130, # in microns + 'pinhole_thickness': 70, # in microns + 'pinhole_density': 16.65, # g/cc } diff --git a/hexrd/core/instrument/cylindrical_detector.py b/hexrd/core/instrument/cylindrical_detector.py index 1edab44f4..e94ca97a7 100644 --- a/hexrd/core/instrument/cylindrical_detector.py +++ b/hexrd/core/instrument/cylindrical_detector.py @@ -3,11 +3,17 @@ import numpy as np from hexrd.core import constants as ct + # TODO: Resolve extra-core dependency from hexrd.hedm import xrdutil from hexrd.core.utils.decorators import memoize -from .detector import Detector, _solid_angle_of_triangle, _row_edge_vec, _col_edge_vec +from .detector import ( + Detector, + _solid_angle_of_triangle, + _row_edge_vec, + _col_edge_vec, +) from functools import partial from hexrd.core.gridutil import cellConnectivity @@ -21,11 +27,11 @@ class CylindricalDetector(Detector): """2D cylindrical detector - A cylindrical detector is a simple rectangular - row-column detector which has been bent in the - shape of a cylinder. Inherting the PlanarDetector - class except for a few changes to account for the - cylinder ray intersection. + A cylindrical detector is a simple rectangular + row-column detector which has been bent in the + shape of a cylinder. Inheriting the PlanarDetector + class except for a few changes to account for the + cylinder ray intersection. """ def __init__(self, radius=49.51, **detector_kwargs): @@ -36,10 +42,15 @@ def __init__(self, radius=49.51, **detector_kwargs): def detector_type(self): return 'cylindrical' - def cart_to_angles(self, xy_data, - rmat_s=None, - tvec_s=None, tvec_c=None, - apply_distortion=False, normalize=True): + def cart_to_angles( + self, + xy_data, + rmat_s=None, + tvec_s=None, + tvec_c=None, + apply_distortion=False, + normalize=True, + ): xy_data = np.asarray(xy_data) if rmat_s is None: rmat_s = ct.identity_3x3 @@ -50,23 +61,30 @@ def cart_to_angles(self, xy_data, if apply_distortion and self.distortion is not None: xy_data = self.distortion.apply(xy_data) - dvecs = xrdutil.utils._warp_to_cylinder(xy_data, - self.tvec, - self.radius, - self.caxis, - self.paxis, - tVec_s=tvec_s, - tVec_c=tvec_c, - rmat_s=rmat_s, - normalize=normalize) + dvecs = xrdutil.utils._warp_to_cylinder( + xy_data, + self.tvec, + self.radius, + self.caxis, + self.paxis, + tVec_s=tvec_s, + tVec_c=tvec_c, + rmat_s=rmat_s, + normalize=normalize, + ) tth, eta = xrdutil.utils._dvec_to_angs(dvecs, self.bvec, self.evec) tth_eta = np.vstack((tth, eta)).T return tth_eta, dvecs - def angles_to_cart(self, tth_eta, - rmat_s=None, tvec_s=None, - rmat_c=None, tvec_c=None, - apply_distortion=False): + def angles_to_cart( + self, + tth_eta, + rmat_s=None, + tvec_s=None, + rmat_c=None, + tvec_c=None, + apply_distortion=False, + ): if rmat_s is None: rmat_s = ct.identity_3x3 if tvec_s is None: @@ -84,15 +102,24 @@ def angles_to_cart(self, tth_eta, ome = np.arccos(rmat_s[0, 0]) angs = np.hstack([tth_eta, np.tile(ome, (len(tth_eta), 1))]) - kwargs = {'beamVec': self.bvec, - 'etaVec': self.evec, - 'tVec_s': tvec_s, - 'rmat_s': rmat_s, - 'tVec_c': tvec_c} - args = (angs, chi, self.tvec, - self.caxis, self.paxis, - self.radius, self.physical_size, - self.angle_extent, self.distortion) + kwargs = { + 'beamVec': self.bvec, + 'etaVec': self.evec, + 'tVec_s': tvec_s, + 'rmat_s': rmat_s, + 'tVec_c': tvec_c, + } + args
= ( + angs, + chi, + self.tvec, + self.caxis, + self.paxis, + self.radius, + self.physical_size, + self.angle_extent, + self.distortion, + ) proj_func = xrdutil.utils._project_on_detector_cylinder valid_xy, rMat_ss, valid_mask = proj_func(*args, **kwargs) @@ -101,20 +128,24 @@ def angles_to_cart(self, tth_eta, xy_det[valid_mask, :] = valid_xy return xy_det - def cart_to_dvecs(self, - xy_data, - tvec_s=ct.zeros_3x1, - rmat_s=ct.identity_3x3, - tvec_c=ct.zeros_3x1): - return xrdutil.utils._warp_to_cylinder(xy_data, - self.tvec, - self.radius, - self.caxis, - self.paxis, - tVec_s=tvec_s, - rmat_s=rmat_s, - tVec_c=tvec_c, - normalize=False) + def cart_to_dvecs( + self, + xy_data, + tvec_s=ct.zeros_3x1, + rmat_s=ct.identity_3x3, + tvec_c=ct.zeros_3x1, + ): + return xrdutil.utils._warp_to_cylinder( + xy_data, + self.tvec, + self.radius, + self.caxis, + self.paxis, + tVec_s=tvec_s, + rmat_s=rmat_s, + tVec_c=tvec_c, + normalize=False, + ) def pixel_angles(self, origin=ct.zeros_3): return _pixel_angles(origin=origin, **self._pixel_angle_kwargs) @@ -131,7 +162,7 @@ def local_normal(self): num = x.shape[0] naxis = np.cross(self.paxis, self.caxis) - th = x/self.radius + th = x / self.radius xp = np.sin(th) xn = -np.cos(th) @@ -163,7 +194,7 @@ def calc_filter_coating_transmission(self, energy): t_f = self.filter.thickness t_c = self.coating.thickness t_p = self.phosphor.thickness - L = self.phosphor.readout_length + L = self.phosphor.readout_length pre_U0 = self.phosphor.pre_U0 det_normal = self.local_normal() @@ -171,19 +202,21 @@ def calc_filter_coating_transmission(self, energy): y, x = self.pixel_coords xy_data = np.vstack((x.flatten(), y.flatten())).T dvecs = self.cart_to_dvecs(xy_data) - dvecs = dvecs/np.tile(np.linalg.norm(dvecs, axis=1), [3, 1]).T + dvecs = dvecs / np.tile(np.linalg.norm(dvecs, axis=1), [3, 1]).T - secb = (1./np.sum(dvecs*det_normal, axis=1)).reshape(self.shape) + secb = (1.0 / np.sum(dvecs * det_normal, axis=1)).reshape(self.shape) - transmission_filter = self.calc_transmission_generic(secb, t_f, al_f) + transmission_filter = self.calc_transmission_generic(secb, t_f, al_f) transmission_coating = self.calc_transmission_generic(secb, t_c, al_c) - transmission_phosphor = ( - self.calc_transmission_phosphor(secb, t_p, al_p, L, energy, pre_U0)) + transmission_phosphor = self.calc_transmission_phosphor( + secb, t_p, al_p, L, energy, pre_U0 + ) - transmission_filter = transmission_filter.reshape(self.shape) + transmission_filter = transmission_filter.reshape(self.shape) transmission_coating = transmission_coating.reshape(self.shape) transmission_filter_coating = ( - transmission_filter * transmission_coating) + transmission_filter * transmission_coating + ) return transmission_filter_coating, transmission_phosphor @@ -234,8 +267,9 @@ def radius(self, r): def physical_size(self): # return physical size of detector # in mm after dewarped to rectangle - return np.array([self.rows*self.pixel_size_row, - self.cols*self.pixel_size_col]) + return np.array( + [self.rows * self.pixel_size_row, self.cols * self.pixel_size_col] + ) @property def beam_position(self): @@ -244,13 +278,24 @@ def beam_position(self): frame {Xd, Yd, Zd}. NaNs if no intersection. 
""" output = np.nan * np.ones(2) - args = (np.atleast_2d(self.bvec), self.caxis, self.paxis, - self.radius, self.tvec) + args = ( + np.atleast_2d(self.bvec), + self.caxis, + self.paxis, + self.radius, + self.tvec, + ) pt_on_cylinder = xrdutil.utils._unitvec_to_cylinder(*args) - args = (pt_on_cylinder, self.tvec, self.caxis, - self.paxis, self.radius, self.physical_size, - self.angle_extent) + args = ( + pt_on_cylinder, + self.tvec, + self.caxis, + self.paxis, + self.radius, + self.physical_size, + self.angle_extent, + ) pt_on_cylinder, _ = xrdutil.utils._clip_to_cylindrical_detector(*args) args = (pt_on_cylinder, self.tvec, self.caxis, self.paxis, self.radius) @@ -282,7 +327,9 @@ def pixel_solid_angles(self): def update_memoization_sizes(all_panels): Detector.update_memoization_sizes(all_panels) - num_matches = sum(isinstance(x, CylindricalDetector) for x in all_panels) + num_matches = sum( + isinstance(x, CylindricalDetector) for x in all_panels + ) funcs = [ _pixel_angles, _pixel_tth_gradient, @@ -299,35 +346,30 @@ def extra_config_kwargs(self): @memoize -def _pixel_angles(origin, - pixel_coords, - distortion, - caxis, - paxis, - tvec_d, - radius, - bvec, - evec, - rows, - cols): +def _pixel_angles( + origin, + pixel_coords, + distortion, + caxis, + paxis, + tvec_d, + radius, + bvec, + evec, + rows, + cols, +): assert len(origin) == 3, "origin must have 3 elements" pix_i, pix_j = pixel_coords - xy = np.ascontiguousarray( - np.vstack([ - pix_j.flatten(), pix_i.flatten() - ]).T - ) + xy = np.ascontiguousarray(np.vstack([pix_j.flatten(), pix_i.flatten()]).T) if distortion is not None: xy = distortion.apply(xy) - dvecs = xrdutil.utils._warp_to_cylinder(xy, - tvec_d-origin, - radius, - caxis, - paxis, - normalize=True) + dvecs = xrdutil.utils._warp_to_cylinder( + xy, tvec_d - origin, radius, caxis, paxis, normalize=True + ) angs = xrdutil.utils._dvec_to_angs(dvecs, bvec, evec) @@ -360,13 +402,21 @@ def _pixel_eta_gradient(origin, **pixel_angle_kwargs): def _fix_branch_cut_in_gradients(pgarray): return np.min( - np.abs(np.stack([pgarray - np.pi, pgarray, pgarray + np.pi])), - axis=0 + np.abs(np.stack([pgarray - np.pi, pgarray, pgarray + np.pi])), axis=0 ) -def _generate_pixel_solid_angles(start_stop, rows, cols, pixel_size_row, - pixel_size_col, caxis, paxis, radius, tvec): +def _generate_pixel_solid_angles( + start_stop, + rows, + cols, + pixel_size_row, + pixel_size_col, + caxis, + paxis, + radius, + tvec, +): start, stop = start_stop row_edge_vec = _row_edge_vec(rows, pixel_size_row) col_edge_vec = _col_edge_vec(cols, pixel_size_col) @@ -377,12 +427,9 @@ def _generate_pixel_solid_angles(start_stop, rows, cols, pixel_size_row, # transform to lab frame using the _warp_to_cylinder # function - pcrd_array_full = xrdutil.utils._warp_to_cylinder(xy_data, - tvec, - radius, - caxis, - paxis, - normalize=False) + pcrd_array_full = xrdutil.utils._warp_to_cylinder( + xy_data, tvec, radius, caxis, paxis, normalize=False + ) conn = cellConnectivity(rows, cols) @@ -391,15 +438,25 @@ def _generate_pixel_solid_angles(start_stop, rows, cols, pixel_size_row, for i, ipix in enumerate(range(start, stop)): pix_conn = conn[ipix] vtx_list = pcrd_array_full[pix_conn, :] - ret[i] = (_solid_angle_of_triangle(vtx_list[[0, 1, 2], :]) + - _solid_angle_of_triangle(vtx_list[[2, 3, 0], :])) + ret[i] = _solid_angle_of_triangle( + vtx_list[[0, 1, 2], :] + ) + _solid_angle_of_triangle(vtx_list[[2, 3, 0], :]) return ret @memoize -def _pixel_solid_angles(rows, cols, pixel_size_row, pixel_size_col, - caxis, paxis, radius, tvec, 
max_workers): +def _pixel_solid_angles( + rows, + cols, + pixel_size_row, + pixel_size_col, + caxis, + paxis, + radius, + tvec, + max_workers, +): # connectivity array for pixels conn = cellConnectivity(rows, cols) @@ -419,8 +476,9 @@ def _pixel_solid_angles(rows, cols, pixel_size_row, pixel_size_col, 'tvec': tvec, } func = partial(_generate_pixel_solid_angles, **kwargs) - with ProcessPoolExecutor(mp_context=ct.mp_context, - max_workers=max_workers) as executor: + with ProcessPoolExecutor( + mp_context=ct.mp_context, max_workers=max_workers + ) as executor: results = executor.map(func, tasks) # Concatenate all the results together diff --git a/hexrd/core/instrument/detector.py b/hexrd/core/instrument/detector.py index 710ac6686..858fa21d7 100644 --- a/hexrd/core/instrument/detector.py +++ b/hexrd/core/instrument/detector.py @@ -3,7 +3,11 @@ import os from typing import Optional -from hexrd.core.instrument.constants import COATING_DEFAULT, FILTER_DEFAULTS, PHOSPHOR_DEFAULT +from hexrd.core.instrument.constants import ( + COATING_DEFAULT, + FILTER_DEFAULTS, + PHOSPHOR_DEFAULT, +) from hexrd.core.instrument.physics_package import AbstractPhysicsPackage import numpy as np import numba @@ -11,6 +15,7 @@ from hexrd.core import constants as ct from hexrd.core import distortion as distortion_pkg from hexrd.core import matrixutil as mutil + # TODO: Resolve extra-core-dependency from hexrd.hedm import xrdutil from hexrd.core.rotations import mapAngle @@ -18,12 +23,22 @@ from hexrd.core.material import crystallography from hexrd.core.material.crystallography import PlaneData -from hexrd.core.transforms.xfcapi import xy_to_gvec, gvec_to_xy, make_beam_rmat, make_rmat_of_expmap, oscill_angles_of_hkls, angles_to_dvec +from hexrd.core.transforms.xfcapi import ( + xy_to_gvec, + gvec_to_xy, + make_beam_rmat, + make_rmat_of_expmap, + oscill_angles_of_hkls, + angles_to_dvec, +) from hexrd.core.utils.decorators import memoize from hexrd.core.gridutil import cellIndices from hexrd.core.instrument import detector_coatings -from hexrd.core.material.utils import calculate_linear_absorption_length, calculate_incoherent_scattering +from hexrd.core.material.utils import ( + calculate_linear_absorption_length, + calculate_incoherent_scattering, +) distortion_registry = distortion_pkg.Registry() @@ -280,7 +295,8 @@ def __init__( if detector_filter is None: detector_filter = detector_coatings.Filter( - **FILTER_DEFAULTS.TARDIS) + **FILTER_DEFAULTS.TARDIS + ) self.filter = detector_filter if detector_coating is None: @@ -531,8 +547,9 @@ def pixel_coords(self): # METHODS # ========================================================================= - def pixel_Q(self, energy: np.floating, - origin: np.ndarray = ct.zeros_3) -> np.ndarray: + def pixel_Q( + self, energy: np.floating, origin: np.ndarray = ct.zeros_3 + ) -> np.ndarray: '''get the equivalent momentum transfer for the angles. 
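For reference, the per-pixel solid angles in the cylindrical_detector.py hunks above are built by splitting each pixel quad into two triangles and summing `_solid_angle_of_triangle` over the `[[0, 1, 2]]` and `[[2, 3, 0]]` vertex subsets. A minimal standalone sketch of that decomposition, assuming the helper evaluates the Van Oosterom-Strackee solid angle of a triangle seen from the origin (the real helper is imported from detector.py and is not shown in this patch):

    import numpy as np

    def solid_angle_of_triangle(vtx):
        # vtx: (3, 3) array of triangle vertices, viewed from the origin
        v1, v2, v3 = vtx
        n1, n2, n3 = (np.linalg.norm(v) for v in vtx)
        numer = np.dot(v1, np.cross(v2, v3))
        denom = (n1 * n2 * n3
                 + np.dot(v1, v2) * n3
                 + np.dot(v1, v3) * n2
                 + np.dot(v2, v3) * n1)
        return 2.0 * np.arctan2(numer, denom)

    # one 0.1 x 0.1 mm pixel quad, 100 mm from the origin, with its
    # vertices listed in perimeter order 0-1-2-3:
    quad = np.array([[0.0, 0.0, 100.0], [0.1, 0.0, 100.0],
                     [0.1, 0.1, 100.0], [0.0, 0.1, 100.0]])
    omega = (solid_angle_of_triangle(quad[[0, 1, 2]])
             + solid_angle_of_triangle(quad[[2, 3, 0]]))  # ~1e-6 sr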
@@ -551,7 +568,7 @@ def pixel_Q(self, energy: np.floating, ''' lam = ct.keVToAngstrom(energy) tth, _ = self.pixel_angles(origin=origin) - return 4.*np.pi*np.sin(tth*0.5)/lam + return 4.0 * np.pi * np.sin(tth * 0.5) / lam def pixel_compton_energy_loss( self, @@ -578,9 +595,9 @@ def pixel_compton_energy_loss( ''' energy = np.asarray(energy) tth, _ = self.pixel_angles() - ang_fact = (1 - np.cos(tth)) - beta = energy/ct.cRestmasskeV - return energy/(1 + beta*ang_fact) + ang_fact = 1 - np.cos(tth) + beta = energy / ct.cRestmasskeV + return energy / (1 + beta * ang_fact) def pixel_compton_attenuation_length( self, @@ -629,8 +646,7 @@ def compute_compton_scattering_intensity( physics_package: AbstractPhysicsPackage, origin: np.array = ct.zeros_3, ) -> tuple[np.ndarray, np.ndarray, np.ndarray]: - - ''' compute the theoretical compton scattering + '''compute the theoretical compton scattering signal on the detector. this value is corrected for the transmission of compton scattered photons and normlaized before getting subtracting from the @@ -653,18 +669,20 @@ def compute_compton_scattering_intensity( q = self.pixel_Q(energy) inc_s = calculate_incoherent_scattering( - physics_package.sample_material, - q.flatten()).reshape(self.shape) + physics_package.sample_material, q.flatten() + ).reshape(self.shape) inc_w = calculate_incoherent_scattering( - physics_package.window_material, - q.flatten()).reshape(self.shape) + physics_package.window_material, q.flatten() + ).reshape(self.shape) t_s = self.calc_compton_physics_package_transmission( - energy, rMat_s, physics_package) + energy, rMat_s, physics_package + ) t_w = self.calc_compton_window_transmission( - energy, rMat_s, physics_package) + energy, rMat_s, physics_package + ) return inc_s * t_s + inc_w * t_w, t_s, t_w @@ -1088,9 +1106,14 @@ def interpolate_nearest(self, xy, img, pad_with_nans=True): int_xy[on_panel] = int_vals return int_xy - def interpolate_bilinear(self, xy, img, pad_with_nans=True, - clip_to_panel=True, - on_panel: Optional[np.ndarray] = None): + def interpolate_bilinear( + self, + xy, + img, + pad_with_nans=True, + clip_to_panel=True, + on_panel: Optional[np.ndarray] = None, + ): """ Interpolate an image array at the specified cartesian points. 
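The `pixel_Q` and `pixel_compton_energy_loss` hunks above encode two standard relations: the momentum transfer Q = 4*pi*sin(tth/2)/lambda and the Compton-shifted energy E' = E / (1 + (E/m_e*c^2) * (1 - cos(tth))). A self-contained sketch, with hc ~= 12.3984 keV*Angstrom and m_e*c^2 ~= 511 keV assumed in place of hexrd's constants module:

    import numpy as np

    HC_KEV_ANGSTROM = 12.3984  # hc in keV * Angstrom (assumed value)
    E_REST_KEV = 511.0         # electron rest energy in keV (assumed value)

    def pixel_q(energy_kev, tth):
        # momentum transfer Q = 4*pi*sin(theta)/lambda, with tth = 2*theta
        lam = HC_KEV_ANGSTROM / energy_kev  # wavelength in Angstrom
        return 4.0 * np.pi * np.sin(0.5 * tth) / lam

    def compton_energy(energy_kev, tth):
        # energy of the inelastically scattered photon at angle tth
        beta = energy_kev / E_REST_KEV
        return energy_kev / (1.0 + beta * (1.0 - np.cos(tth)))

    tth = np.radians(30.0)            # a 30 degree scattering angle
    print(pixel_q(65.0, tth))         # ~17.0 inverse Angstrom at 65 keV
    print(compton_energy(65.0, tth))  # ~63.9 keV after the Compton shift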
@@ -1767,19 +1790,23 @@ def increase_memoization_sizes(funcs, min_size): if cache_info['maxsize'] < min_size: f.set_cache_maxsize(min_size) - def calc_physics_package_transmission(self, energy: np.floating, - rMat_s: np.array, - physics_package: AbstractPhysicsPackage) -> np.float64: + def calc_physics_package_transmission( + self, + energy: np.floating, + rMat_s: np.array, + physics_package: AbstractPhysicsPackage, + ) -> np.float64: """get the transmission from the physics package need to consider HED and HEDM samples separately """ bvec = self.bvec - sample_normal = np.dot(rMat_s, [0., 0., np.sign(bvec[2])]) - seca = 1./np.dot(bvec, sample_normal) + sample_normal = np.dot(rMat_s, [0.0, 0.0, np.sign(bvec[2])]) + seca = 1.0 / np.dot(bvec, sample_normal) tth, eta = self.pixel_angles() - angs = np.vstack((tth.flatten(), eta.flatten(), - np.zeros(tth.flatten().shape))).T + angs = np.vstack( + (tth.flatten(), eta.flatten(), np.zeros(tth.flatten().shape)) + ).T dvecs = angles_to_dvec(angs, beam_vec=bvec) @@ -1792,17 +1819,17 @@ def calc_physics_package_transmission(self, energy: np.floating, cosb < 0, np.isclose( cosb, - 0., - atol=5E-2, - ) + 0.0, + atol=5e-2, + ), ) cosb[mask] = np.nan - secb = 1./cosb.reshape(self.shape) + secb = 1.0 / cosb.reshape(self.shape) T_sample = self.calc_transmission_sample( - seca, secb, energy, physics_package) - T_window = self.calc_transmission_window( - secb, energy, physics_package) + seca, secb, energy, physics_package + ) + T_window = self.calc_transmission_window(secb, energy, physics_package) transmission_physics_package = T_sample * T_window return transmission_physics_package @@ -1819,12 +1846,13 @@ def calc_compton_physics_package_transmission( routine than elastically scattered absorption. ''' bvec = self.bvec - sample_normal = np.dot(rMat_s, [0., 0., np.sign(bvec[2])]) - seca = 1./np.dot(bvec, sample_normal) + sample_normal = np.dot(rMat_s, [0.0, 0.0, np.sign(bvec[2])]) + seca = 1.0 / np.dot(bvec, sample_normal) tth, eta = self.pixel_angles() - angs = np.vstack((tth.flatten(), eta.flatten(), - np.zeros(tth.flatten().shape))).T + angs = np.vstack( + (tth.flatten(), eta.flatten(), np.zeros(tth.flatten().shape)) + ).T dvecs = angles_to_dvec(angs, beam_vec=bvec) @@ -1837,18 +1865,19 @@ def calc_compton_physics_package_transmission( cosb < 0, np.isclose( cosb, - 0., - atol=5E-2, - ) + 0.0, + atol=5e-2, + ), ) cosb[mask] = np.nan - secb = 1./cosb.reshape(self.shape) + secb = 1.0 / cosb.reshape(self.shape) T_sample = self.calc_compton_transmission( - seca, secb, energy, - physics_package, 'sample') + seca, secb, energy, physics_package, 'sample' + ) T_window = self.calc_compton_transmission_window( - secb, energy, physics_package) + secb, energy, physics_package + ) return T_sample * T_window @@ -1865,12 +1894,13 @@ def calc_compton_window_transmission( elastically scattered absorption. 
''' bvec = self.bvec - sample_normal = np.dot(rMat_s, [0., 0., np.sign(bvec[2])]) - seca = 1./np.dot(bvec, sample_normal) + sample_normal = np.dot(rMat_s, [0.0, 0.0, np.sign(bvec[2])]) + seca = 1.0 / np.dot(bvec, sample_normal) tth, eta = self.pixel_angles() - angs = np.vstack((tth.flatten(), eta.flatten(), - np.zeros(tth.flatten().shape))).T + angs = np.vstack( + (tth.flatten(), eta.flatten(), np.zeros(tth.flatten().shape)) + ).T dvecs = angles_to_dvec(angs, beam_vec=bvec) @@ -1883,45 +1913,54 @@ def calc_compton_window_transmission( cosb < 0, np.isclose( cosb, - 0., - atol=5E-2, - ) + 0.0, + atol=5e-2, + ), ) cosb[mask] = np.nan - secb = 1./cosb.reshape(self.shape) + secb = 1.0 / cosb.reshape(self.shape) T_window = self.calc_compton_transmission( - seca, secb, energy, - physics_package, 'window') + seca, secb, energy, physics_package, 'window' + ) T_sample = self.calc_compton_transmission_sample( - seca, energy, physics_package) + seca, energy, physics_package + ) return T_sample * T_window - def calc_transmission_sample(self, seca: np.array, - secb: np.array, energy: np.floating, - physics_package: AbstractPhysicsPackage) -> np.array: + def calc_transmission_sample( + self, + seca: np.array, + secb: np.array, + energy: np.floating, + physics_package: AbstractPhysicsPackage, + ) -> np.array: thickness_s = physics_package.sample_thickness # in microns if np.isclose(thickness_s, 0): return np.ones(self.shape) # in microns^-1 - mu_s = 1./physics_package.sample_absorption_length(energy) - x = (mu_s*thickness_s) - pre = 1./x/(secb - seca) - num = np.exp(-x*seca) - np.exp(-x*secb) + mu_s = 1.0 / physics_package.sample_absorption_length(energy) + x = mu_s * thickness_s + pre = 1.0 / x / (secb - seca) + num = np.exp(-x * seca) - np.exp(-x * secb) return pre * num - def calc_transmission_window(self, secb: np.array, energy: np.floating, - physics_package: AbstractPhysicsPackage) -> np.array: + def calc_transmission_window( + self, + secb: np.array, + energy: np.floating, + physics_package: AbstractPhysicsPackage, + ) -> np.array: material_w = physics_package.window_material thickness_w = physics_package.window_thickness # in microns if material_w is None or np.isclose(thickness_w, 0): return np.ones(self.shape) # in microns^-1 - mu_w = 1./physics_package.window_absorption_length(energy) - return np.exp(-thickness_w*mu_w*secb) + mu_w = 1.0 / physics_package.window_absorption_length(energy) + return np.exp(-thickness_w * mu_w * secb) def calc_compton_transmission( self, @@ -1936,9 +1975,11 @@ def calc_compton_transmission( formula = physics_package.sample_material density = physics_package.sample_density thickness = physics_package.sample_thickness - mu = 1./physics_package.sample_absorption_length(energy) - mu_prime = 1. 
/ self.pixel_compton_attenuation_length( - energy, density, formula, + mu = 1.0 / physics_package.sample_absorption_length(energy) + mu_prime = 1.0 / self.pixel_compton_attenuation_length( + energy, + density, + formula, ) elif pp_layer == 'window': formula = physics_package.window_material @@ -1947,17 +1988,18 @@ def calc_compton_transmission( density = physics_package.window_density thickness = physics_package.window_thickness - mu = 1./physics_package.sample_absorption_length(energy) - mu_prime = 1./self.pixel_compton_attenuation_length( - energy, density, formula) + mu = 1.0 / physics_package.sample_absorption_length(energy) + mu_prime = 1.0 / self.pixel_compton_attenuation_length( + energy, density, formula + ) if thickness <= 0: return np.ones(self.shape) - x1 = mu*thickness*seca - x2 = mu_prime*thickness*secb - num = (np.exp(-x1) - np.exp(-x2)) - return -num/(x1 - x2) + x1 = mu * thickness * seca + x2 = mu_prime * thickness * secb + num = np.exp(-x1) - np.exp(-x2) + return -num / (x1 - x2) def calc_compton_transmission_sample( self, @@ -1967,9 +2009,8 @@ def calc_compton_transmission_sample( ) -> np.ndarray: thickness_s = physics_package.sample_thickness # in microns - mu_s = 1./physics_package.sample_absorption_length( - energy) - return np.exp(-mu_s*thickness_s*seca) + mu_s = 1.0 / physics_package.sample_absorption_length(energy) + return np.exp(-mu_s * thickness_s * seca) def calc_compton_transmission_window( self, @@ -1981,60 +2022,71 @@ def calc_compton_transmission_window( if formula is None: return np.ones(self.shape) - density = physics_package.window_density # in g/cc + density = physics_package.window_density # in g/cc thickness_w = physics_package.window_thickness # in microns - mu_w_prime = 1./self.pixel_compton_attenuation_length( - energy, density, formula) - return np.exp(-mu_w_prime*thickness_w*secb) - - def calc_effective_pinhole_area(self, physics_package: AbstractPhysicsPackage) -> np.array: - """get the effective pinhole area correction - """ - if (np.isclose(physics_package.pinhole_diameter, 0) - or np.isclose(physics_package.pinhole_thickness, 0)): + mu_w_prime = 1.0 / self.pixel_compton_attenuation_length( + energy, density, formula + ) + return np.exp(-mu_w_prime * thickness_w * secb) + + def calc_effective_pinhole_area( + self, physics_package: AbstractPhysicsPackage + ) -> np.array: + """get the effective pinhole area correction""" + if np.isclose(physics_package.pinhole_diameter, 0) or np.isclose( + physics_package.pinhole_thickness, 0 + ): return np.ones(self.shape) - hod = (physics_package.pinhole_thickness / - physics_package.pinhole_diameter) + hod = ( + physics_package.pinhole_thickness + / physics_package.pinhole_diameter + ) bvec = self.bvec tth, eta = self.pixel_angles() - angs = np.vstack((tth.flatten(), eta.flatten(), - np.zeros(tth.flatten().shape))).T + angs = np.vstack( + (tth.flatten(), eta.flatten(), np.zeros(tth.flatten().shape)) + ).T dvecs = angles_to_dvec(angs, beam_vec=bvec) cth = -dvecs[:, 2].reshape(self.shape) tanth = np.tan(np.arccos(cth)) - f = hod*tanth - f[np.abs(f) > 1.] 
= np.nan + f = hod * tanth + f[np.abs(f) > 1.0] = np.nan asinf = np.arcsin(f) return 2 / np.pi * cth * (np.pi / 2 - asinf - f * np.cos(asinf)) - def calc_transmission_generic(self, - secb: np.array, - thickness: np.floating, - absorption_length: np.floating) -> np.array: + def calc_transmission_generic( + self, + secb: np.array, + thickness: np.floating, + absorption_length: np.floating, + ) -> np.array: if np.isclose(thickness, 0): return np.ones(self.shape) - mu = 1./absorption_length # in microns^-1 - return np.exp(-thickness*mu*secb) + mu = 1.0 / absorption_length # in microns^-1 + return np.exp(-thickness * mu * secb) - def calc_transmission_phosphor(self, - secb: np.array, - thickness: np.floating, - readout_length: np.floating, - absorption_length: np.floating, - energy: np.floating, - pre_U0: np.floating) -> np.array: + def calc_transmission_phosphor( + self, + secb: np.array, + thickness: np.floating, + readout_length: np.floating, + absorption_length: np.floating, + energy: np.floating, + pre_U0: np.floating, + ) -> np.array: if np.isclose(thickness, 0): return np.ones(self.shape) - f1 = absorption_length*thickness - f2 = absorption_length*readout_length - arg = (secb + 1/f2) - return pre_U0 * energy*((1.0 - np.exp(-f1*arg))/arg) + f1 = absorption_length * thickness + f2 = absorption_length * readout_length + arg = secb + 1 / f2 + return pre_U0 * energy * ((1.0 - np.exp(-f1 * arg)) / arg) + # ============================================================================= # UTILITY METHODS diff --git a/hexrd/core/instrument/detector_coatings.py b/hexrd/core/instrument/detector_coatings.py index 11a563f8b..d6fae5d03 100644 --- a/hexrd/core/instrument/detector_coatings.py +++ b/hexrd/core/instrument/detector_coatings.py @@ -1,5 +1,8 @@ import numpy as np -from hexrd.core.material.utils import calculate_linear_absorption_length, calculate_energy_absorption_length +from hexrd.core.material.utils import ( + calculate_linear_absorption_length, + calculate_energy_absorption_length, +) class AbstractLayer: @@ -23,12 +26,14 @@ class AbstractLayer: intensity to PSL """ - def __init__(self, - material=None, - density=None, - thickness=None, - readout_length=None, - pre_U0=None): + def __init__( + self, + material=None, + density=None, + thickness=None, + readout_length=None, + pre_U0=None, + ): self._material = material self._density = density self._thickness = thickness @@ -77,10 +82,11 @@ def absorption_length(self, energy): elif isinstance(energy, np.ndarray): energy_inp = energy - args = (self.density, - self.material, - energy_inp, - ) + args = ( + self.density, + self.material, + energy_inp, + ) abs_length = calculate_linear_absorption_length(*args) if abs_length.shape[0] == 1: return abs_length[0] @@ -95,10 +101,11 @@ def energy_absorption_length(self, energy): elif isinstance(energy, np.ndarray): energy_inp = energy - args = (self.density, - self.material, - energy_inp, - ) + args = ( + self.density, + self.material, + energy_inp, + ) abs_length = calculate_energy_absorption_length(*args) if abs_length.shape[0] == 1: return abs_length[0] @@ -112,6 +119,7 @@ def deserialize(self, **kwargs): for key, value in kwargs.items(): setattr(self, key, value) + class Filter(AbstractLayer): def __init__(self, **abstractlayer_kwargs): @@ -133,13 +141,7 @@ def __init__(self, **abstractlayer_kwargs): @property def attributes_to_serialize(self): - return [ - 'material', - 'density', - 'thickness', - 'readout_length', - 'pre_U0' - ] + return ['material', 'density', 'thickness', 'readout_length', 
'pre_U0'] @property def readout_length(self): diff --git a/hexrd/core/instrument/hedm_instrument.py b/hexrd/core/instrument/hedm_instrument.py index 144931acc..bfd56af9d 100644 --- a/hexrd/core/instrument/hedm_instrument.py +++ b/hexrd/core/instrument/hedm_instrument.py @@ -59,7 +59,14 @@ from hexrd.core.fitting.utils import fit_ring from hexrd.core.gridutil import make_tolerance_grid from hexrd.core import matrixutil as mutil -from hexrd.core.transforms.xfcapi import angles_to_gvec, gvec_to_xy, make_sample_rmat, make_rmat_of_expmap, unit_vector +from hexrd.core.transforms.xfcapi import ( + angles_to_gvec, + gvec_to_xy, + make_sample_rmat, + make_rmat_of_expmap, + unit_vector, +) + # TODO: Resolve extra-core-dependency from hexrd.hedm import xrdutil from hexrd.core.material.crystallography import PlaneData @@ -82,9 +89,11 @@ try: from fast_histogram import histogram1d + fast_histogram = True except ImportError: from numpy import histogram as histogram1d + fast_histogram = False logger = logging.getLogger() @@ -107,9 +116,9 @@ pixel_size_DFLT = (0.2, 0.2) tilt_params_DFLT = np.zeros(3) -t_vec_d_DFLT = np.r_[0., 0., -1000.] +t_vec_d_DFLT = np.r_[0.0, 0.0, -1000.0] -chi_DFLT = 0. +chi_DFLT = 0.0 t_vec_s_DFLT = np.zeros(3) multi_ims_key = ct.shared_ims_key @@ -123,8 +132,9 @@ # ============================================================================= -def generate_chunks(nrows, ncols, base_nrows, base_ncols, - row_gap=0, col_gap=0): +def generate_chunks( + nrows, ncols, base_nrows, base_ncols, row_gap=0, col_gap=0 +): """ Generate chunking data for regularly tiled composite detectors. @@ -156,18 +166,15 @@ def generate_chunks(nrows, ncols, base_nrows, base_ncols, [[row_start, row_stop], [col_start, col_stop]] """ - row_starts = np.array([i*(base_nrows + row_gap) for i in range(nrows)]) - col_starts = np.array([i*(base_ncols + col_gap) for i in range(ncols)]) + row_starts = np.array([i * (base_nrows + row_gap) for i in range(nrows)]) + col_starts = np.array([i * (base_ncols + col_gap) for i in range(ncols)]) rr = np.vstack([row_starts, row_starts + base_nrows]) cc = np.vstack([col_starts, col_starts + base_ncols]) rects = [] labels = [] for i in range(nrows): for j in range(ncols): - this_rect = np.array( - [[rr[0, i], rr[1, i]], - [cc[0, j], cc[1, j]]] - ) + this_rect = np.array([[rr[0, i], rr[1, i]], [cc[0, j], cc[1, j]]]) rects.append(this_rect) labels.append('%d_%d' % (i, j)) return rects, labels @@ -193,9 +200,11 @@ def chunk_instrument(instr, rects, labels, use_roi=False): """ icfg_dict = instr.write_config() - new_icfg_dict = dict(beam=icfg_dict['beam'], - oscillation_stage=icfg_dict['oscillation_stage'], - detectors={}) + new_icfg_dict = dict( + beam=icfg_dict['beam'], + oscillation_stage=icfg_dict['oscillation_stage'], + detectors={}, + ) for panel_id, panel in instr.detectors.items(): pcfg_dict = panel.config_dict(instr.chi, instr.tvec)['detector'] @@ -205,7 +214,7 @@ def chunk_instrument(instr, rects, labels, use_roi=False): row_col_dim = np.diff(rect) # (2, 1) shape = tuple(row_col_dim.flatten()) - center = (rect[:, 0].reshape(2, 1) + 0.5*row_col_dim) + center = rect[:, 0].reshape(2, 1) + 0.5 * row_col_dim sp_tvec = np.concatenate( [panel.pixelToCart(center.T).flatten(), np.zeros(1)] @@ -230,7 +239,7 @@ def chunk_instrument(instr, rects, labels, use_roi=False): if panel.panel_buffer is not None: if panel.panel_buffer.ndim == 2: # have a mask array! 
submask = panel.panel_buffer[ - rect[0, 0]:rect[0, 1], rect[1, 0]:rect[1, 1] + rect[0, 0] : rect[0, 1], rect[1, 0] : rect[1, 1] ] new_icfg_dict['detectors'][panel_name]['buffer'] = submask return new_icfg_dict @@ -274,9 +283,7 @@ def _parse_imgser_dict(imgser_dict, det_key, roi=None): images_in = imgser_dict[multi_ims_key] elif np.any(matched_det_keys): if sum(matched_det_keys) != 1: - raise RuntimeError( - f"multiple entries found for '{det_key}'" - ) + raise RuntimeError(f"multiple entries found for '{det_key}'") # use boolean array to index the proper key # !!! these should be in the same order img_keys = img_keys = np.asarray(list(imgser_dict.keys())) @@ -296,7 +303,12 @@ def _parse_imgser_dict(imgser_dict, det_key, roi=None): if isinstance(images_in, ims_classes): # input is an imageseries of some kind - ims = ProcessedImageSeries(images_in, [('rectangle', roi), ]) + ims = ProcessedImageSeries( + images_in, + [ + ('rectangle', roi), + ], + ) if isinstance(images_in, OmegaImageSeries): # if it was an OmegaImageSeries, must re-cast ims = OmegaImageSeries(ims) @@ -304,16 +316,16 @@ def _parse_imgser_dict(imgser_dict, det_key, roi=None): # 2- or 3-d array of images ndim = images_in.ndim if ndim == 2: - ims = images_in[roi[0][0]:roi[0][1], roi[1][0]:roi[1][1]] + ims = images_in[roi[0][0] : roi[0][1], roi[1][0] : roi[1][1]] elif ndim == 3: nrows = roi[0][1] - roi[0][0] ncols = roi[1][1] - roi[1][0] n_images = len(images_in) - ims = np.empty((n_images, nrows, ncols), - dtype=images_in.dtype) + ims = np.empty((n_images, nrows, ncols), dtype=images_in.dtype) for i, image in images_in: - ims[i, :, :] = \ - images_in[roi[0][0]:roi[0][1], roi[1][0]:roi[1][1]] + ims[i, :, :] = images_in[ + roi[0][0] : roi[0][1], roi[1][0] : roi[1][1] + ] else: raise RuntimeError( f"image input dim must be 2 or 3; you gave {ndim}" @@ -331,9 +343,8 @@ def calc_beam_vec(azim, pola): tht = np.radians(azim) phi = np.radians(pola) bv = np.r_[ - np.sin(phi)*np.cos(tht), - np.cos(phi), - np.sin(phi)*np.sin(tht)] + np.sin(phi) * np.cos(tht), np.cos(phi), np.sin(phi) * np.sin(tht) + ] return -bv @@ -344,9 +355,7 @@ def calc_angles_from_beam_vec(bvec): """ bvec = np.atleast_1d(bvec).flatten() nvec = unit_vector(-bvec) - azim = float( - np.degrees(np.arctan2(nvec[2], nvec[0])) - ) + azim = float(np.degrees(np.arctan2(nvec[2], nvec[0]))) pola = float(np.degrees(np.arccos(nvec[1]))) return azim, pola @@ -370,9 +379,9 @@ def angle_in_range(angle, ranges, ccw=True, units='degrees'): WARNING: always clockwise; assumes wedges are not overlapping """ - tau = 360. + tau = 360.0 if units.lower() == 'radians': - tau = 2*np.pi + tau = 2 * np.pi w = np.nan for i, wedge in enumerate(ranges): amin = wedge[0] @@ -404,7 +413,7 @@ def max_tth(instr): tth_max : float The maximum observable Bragg angle by the instrument in radians. """ - tth_max = 0. 
+ tth_max = 0.0 for det in instr.detectors.values(): ptth, peta = det.pixel_angles() tth_max = max(np.max(ptth), tth_max) @@ -436,10 +445,9 @@ def pixel_resolution(instr): ang_ps_full = [] for panel in instr.detectors.values(): angps = panel.angularPixelSize( - np.stack( - panel.pixel_coords, - axis=0 - ).reshape(2, np.cumprod(panel.shape)[-1]).T + np.stack(panel.pixel_coords, axis=0) + .reshape(2, np.cumprod(panel.shape)[-1]) + .T ) ang_ps_full.append(angps) max_tth = min(max_tth, np.min(angps[:, 0])) @@ -471,10 +479,9 @@ def max_resolution(instr): max_eta = np.inf for panel in instr.detectors.values(): angps = panel.angularPixelSize( - np.stack( - panel.pixel_coords, - axis=0 - ).reshape(2, np.cumprod(panel.shape)[-1]).T + np.stack(panel.pixel_coords, axis=0) + .reshape(2, np.cumprod(panel.shape)[-1]) + .T ) max_tth = min(max_tth, np.min(angps[:, 0])) max_eta = min(max_eta, np.min(angps[:, 1])) @@ -482,16 +489,16 @@ def max_resolution(instr): def _gaussian_dist(x, cen, fwhm): - sigm = fwhm/(2*np.sqrt(2*np.log(2))) - return np.exp(-0.5*(x - cen)**2/sigm**2) + sigm = fwhm / (2 * np.sqrt(2 * np.log(2))) + return np.exp(-0.5 * (x - cen) ** 2 / sigm**2) def _sigma_to_fwhm(sigm): - return sigm*ct.sigma_to_fwhm + return sigm * ct.sigma_to_fwhm def _fwhm_to_sigma(fwhm): - return fwhm/ct.sigma_to_fwhm + return fwhm / ct.sigma_to_fwhm # ============================================================================= @@ -507,12 +514,17 @@ class HEDMInstrument(object): * where should reference eta be defined? currently set to default config """ - def __init__(self, instrument_config=None, - image_series=None, eta_vector=None, - instrument_name=None, tilt_calibration_mapping=None, - max_workers=max_workers_DFLT, - physics_package=None, - active_beam_name: Optional[str] = None): + def __init__( + self, + instrument_config=None, + image_series=None, + eta_vector=None, + instrument_name=None, + tilt_calibration_mapping=None, + max_workers=max_workers_DFLT, + physics_package=None, + active_beam_name: Optional[str] = None, + ): self._id = instrument_name_DFLT self._active_beam_name = active_beam_name @@ -537,7 +549,8 @@ def __init__(self, instrument_config=None, # FIXME: must add cylindrical self._detectors = dict( panel_id_DFLT=PlanarDetector( - rows=nrows_DFLT, cols=ncols_DFLT, + rows=nrows_DFLT, + cols=ncols_DFLT, pixel_size=pixel_size_DFLT, tvec=t_vec_d_DFLT, tilt=tilt_params_DFLT, @@ -545,9 +558,11 @@ def __init__(self, instrument_config=None, xrs_dist=self.source_distance, evec=self._eta_vector, distortion=None, - roi=None, group=None, - max_workers=self.max_workers), - ) + roi=None, + group=None, + max_workers=self.max_workers, + ), + ) self._tvec = t_vec_s_DFLT self._chi = chi_DFLT @@ -574,10 +589,7 @@ def __init__(self, instrument_config=None, self.physics_package = instrument_config['physics_package'] xrs_config = instrument_config['beam'] - is_single_beam = ( - 'energy' in xrs_config and - 'vector' in xrs_config - ) + is_single_beam = 'energy' in xrs_config and 'vector' in xrs_config if is_single_beam: # Assume single beam. 
Load the same way as multibeam self._create_default_beam() @@ -634,7 +646,7 @@ def __init__(self, instrument_config=None, elif isinstance(det_buffer, list): panel_buffer = np.asarray(det_buffer) elif np.isscalar(det_buffer): - panel_buffer = det_buffer*np.ones(2) + panel_buffer = det_buffer * np.ones(2) else: raise RuntimeError( "panel buffer spec invalid for %s" % det_id @@ -711,9 +723,9 @@ def mean_detector_center(self) -> np.ndarray: def mean_group_center(self, group: str) -> np.ndarray: """Return the mean center for detectors belonging to a group""" - centers = np.array([ - x.tvec for x in self.detectors_in_group(group).values() - ]) + centers = np.array( + [x.tvec for x in self.detectors_in_group(group).values()] + ) return centers.sum(axis=0) / len(centers) @property @@ -747,10 +759,11 @@ def detector_parameters(self): pdict = {} for key, panel in self.detectors.items(): pdict[key] = panel.config_dict( - self.chi, self.tvec, + self.chi, + self.tvec, beam_energy=self.beam_energy, beam_vector=self.beam_vector, - style='hdf5' + style='hdf5', ) return pdict @@ -854,8 +867,9 @@ def beam_vector(self) -> np.ndarray: def beam_vector(self, x: np.ndarray): x = np.array(x).flatten() if len(x) == 3: - assert sum(x*x) > 1-ct.sqrt_epsf, \ - 'input must have length = 3 and have unit magnitude' + assert ( + sum(x * x) > 1 - ct.sqrt_epsf + ), 'input must have length = 3 and have unit magnitude' bvec = x elif len(x) == 2: bvec = calc_beam_vec(*x) @@ -872,8 +886,9 @@ def source_distance(self): @source_distance.setter def source_distance(self, x): - assert np.isscalar(x), \ - f"'source_distance' must be a scalar; you input '{x}'" + assert np.isscalar( + x + ), f"'source_distance' must be a scalar; you input '{x}'" self.active_beam['distance'] = x self.beam_dict_modified() @@ -884,8 +899,9 @@ def eta_vector(self): @eta_vector.setter def eta_vector(self, x): x = np.array(x).flatten() - assert len(x) == 3 and sum(x*x) > 1-ct.sqrt_epsf, \ - 'input must have length = 3 and have unit magnitude' + assert ( + len(x) == 3 and sum(x * x) > 1 - ct.sqrt_epsf + ), 'input must have length = 3 and have unit magnitude' self._eta_vector = x # ...maybe change dictionary item behavior for 3.x compatibility? for detector_id in self.detectors: @@ -897,10 +913,11 @@ def eta_vector(self, x): # ========================================================================= def write_config(self, file=None, style='yaml', calibration_dict={}): - """ WRITE OUT YAML FILE """ + """WRITE OUT YAML FILE""" # initialize output dictionary - assert style.lower() in ['yaml', 'hdf5'], \ + assert style.lower() in ['yaml', 'hdf5'], ( "style must be either 'yaml', or 'hdf5'; you gave '%s'" % style + ) par_dict = {} @@ -929,10 +946,7 @@ def write_config(self, file=None, style='yaml', calibration_dict={}): if calibration_dict: par_dict['calibration_crystal'] = calibration_dict - ostage = dict( - chi=self.chi, - translation=self.tvec.tolist() - ) + ostage = dict(chi=self.chi, translation=self.tvec.tolist()) par_dict['oscillation_stage'] = ostage det_dict = dict.fromkeys(self.detectors) @@ -940,10 +954,13 @@ def write_config(self, file=None, style='yaml', calibration_dict={}): # grab panel config # !!! don't need beam or tvec # !!! 
have vetted style - pdict = detector.config_dict(chi=self.chi, tvec=self.tvec, - beam_energy=self.beam_energy, - beam_vector=self.beam_vector, - style=style) + pdict = detector.config_dict( + chi=self.chi, + tvec=self.tvec, + beam_energy=self.beam_energy, + beam_vector=self.beam_vector, + style=style, + ) det_dict[det_name] = pdict['detector'] par_dict['detectors'] = det_dict @@ -953,6 +970,7 @@ def write_config(self, file=None, style='yaml', calibration_dict={}): with open(file, 'w') as f: yaml.dump(par_dict, stream=f, Dumper=NumpyToNativeDumper) else: + def _write_group(file): instr_grp = file.create_group('instrument') unwrap_dict_to_h5(instr_grp, par_dict, asattr=False) @@ -968,9 +986,15 @@ def _write_group(file): return par_dict - def extract_polar_maps(self, plane_data, imgser_dict, - active_hkls=None, threshold=None, - tth_tol=None, eta_tol=0.25): + def extract_polar_maps( + self, + plane_data, + imgser_dict, + active_hkls=None, + threshold=None, + tth_tol=None, + eta_tol=0.25, + ): """ Extract eta-omega maps from an imageseries. @@ -994,23 +1018,25 @@ def extract_polar_maps(self, plane_data, imgser_dict, # detectors, so calculate it once # !!! grab first panel panel = next(iter(self.detectors.values())) - pow_angs, pow_xys, tth_ranges, eta_idx, eta_edges = \ + pow_angs, pow_xys, tth_ranges, eta_idx, eta_edges = ( panel.make_powder_rings( - plane_data, merge_hkls=False, - delta_eta=eta_tol, full_output=True + plane_data, + merge_hkls=False, + delta_eta=eta_tol, + full_output=True, ) + ) if active_hkls is not None: - assert hasattr(active_hkls, '__len__'), \ - "active_hkls must be an iterable with __len__" + assert hasattr( + active_hkls, '__len__' + ), "active_hkls must be an iterable with __len__" # need to re-cast for element-wise operations active_hkls = np.array(active_hkls) # these are all active reflection unique hklIDs - active_hklIDs = plane_data.getHKLID( - plane_data.hkls, master=True - ) + active_hklIDs = plane_data.getHKLID(plane_data.hkls, master=True) # find indices idx = np.zeros_like(active_hkls, dtype=int) @@ -1067,9 +1093,14 @@ def extract_polar_maps(self, plane_data, imgser_dict, # Divide up the images among processes tasks = distribute_tasks(len(ims), self.max_workers) - func = partial(_run_histograms, ims=ims, tth_ranges=tth_ranges, - ring_maps=ring_maps, ring_params=ring_params, - threshold=threshold) + func = partial( + _run_histograms, + ims=ims, + tth_ranges=tth_ranges, + ring_maps=ring_maps, + ring_params=ring_params, + threshold=threshold, + ) max_workers = self.max_workers if max_workers == 1 or len(tasks) == 1: @@ -1087,12 +1118,21 @@ def extract_polar_maps(self, plane_data, imgser_dict, return ring_maps_panel, eta_edges - def extract_line_positions(self, plane_data, imgser_dict, - tth_tol=None, eta_tol=1., npdiv=2, - eta_centers=None, - collapse_eta=True, collapse_tth=False, - do_interpolation=True, do_fitting=False, - tth_distortion=None, fitting_kwargs=None): + def extract_line_positions( + self, + plane_data, + imgser_dict, + tth_tol=None, + eta_tol=1.0, + npdiv=2, + eta_centers=None, + collapse_eta=True, + collapse_tth=False, + do_interpolation=True, + do_fitting=False, + tth_distortion=None, + fitting_kwargs=None, + ): """ Perform annular interpolation on diffraction images. 
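As a sanity check on the beam conventions reformatted earlier in this hedm_instrument.py diff, `calc_beam_vec` and `calc_angles_from_beam_vec` are inverses of one another: the polar angle is measured from +Y, the azimuth sweeps the X-Z plane, and the default beam along -Z corresponds to (azim, pola) = (90, 90). A roundtrip sketch assuming only that, with hexrd's `unit_vector` replaced by an inline normalization:

    import numpy as np

    def beam_vec(azim_deg, pola_deg):
        tht, phi = np.radians([azim_deg, pola_deg])
        # polar angle from +Y; azimuth measured in the X-Z plane
        return -np.r_[np.sin(phi) * np.cos(tht),
                      np.cos(phi),
                      np.sin(phi) * np.sin(tht)]

    def beam_angles(bvec):
        n = -np.asarray(bvec, dtype=float)
        n /= np.linalg.norm(n)
        return (float(np.degrees(np.arctan2(n[2], n[0]))),
                float(np.degrees(np.arccos(n[1]))))

    assert np.allclose(beam_vec(90.0, 90.0), [0.0, 0.0, -1.0])
    assert np.allclose(beam_angles(beam_vec(25.0, 80.0)), (25.0, 80.0))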
@@ -1167,8 +1207,12 @@ def extract_line_positions(self, plane_data, imgser_dict, # LOOP OVER DETECTORS # ===================================================================== logger.info("Interpolating ring data") - pbar_dets = partial(tqdm, total=self.num_panels, desc="Detector", - position=self.num_panels) + pbar_dets = partial( + tqdm, + total=self.num_panels, + desc="Detector", + position=self.num_panels, + ) # Split up the workers among the detectors max_workers_per_detector = max(1, self.max_workers // self.num_panels) @@ -1191,23 +1235,26 @@ def extract_line_positions(self, plane_data, imgser_dict, def make_instr_cfg(panel): return panel.config_dict( - chi=self.chi, tvec=self.tvec, + chi=self.chi, + tvec=self.tvec, beam_energy=self.beam_energy, beam_vector=self.beam_vector, - style='hdf5' + style='hdf5', ) images = [] for detector_id, panel in self.detectors.items(): - images.append(_parse_imgser_dict(imgser_dict, detector_id, - roi=panel.roi)) + images.append( + _parse_imgser_dict(imgser_dict, detector_id, roi=panel.roi) + ) panels = [self.detectors[k] for k in self.detectors] instr_cfgs = [make_instr_cfg(x) for x in panels] pbp_array = np.arange(self.num_panels) iter_args = zip(panels, instr_cfgs, images, pbp_array) - with ProcessPoolExecutor(mp_context=constants.mp_context, - max_workers=self.num_panels) as executor: + with ProcessPoolExecutor( + mp_context=constants.mp_context, max_workers=self.num_panels + ) as executor: results = list(pbar_dets(executor.map(func, iter_args))) panel_data = {} @@ -1216,12 +1263,9 @@ def make_instr_cfg(panel): return panel_data - def simulate_powder_pattern(self, - mat_list, - params=None, - bkgmethod=None, - origin=None, - noise=None): + def simulate_powder_pattern( + self, mat_list, params=None, bkgmethod=None, origin=None, noise=None + ): """ Generate powder diffraction iamges from specified materials. @@ -1260,8 +1304,7 @@ def simulate_powder_pattern(self, if origin is None: origin = self.tvec origin = np.asarray(origin).squeeze() - assert len(origin) == 3, \ - "origin must be a 3-element sequence" + assert len(origin) == 3, "origin must be a 3-element sequence" if bkgmethod is None: bkgmethod = {'chebyshev': 3} @@ -1301,7 +1344,7 @@ def simulate_powder_pattern(self, # find min and max tth over all panels tth_mi = np.inf - tth_ma = 0. + tth_ma = 0.0 ptth_dict = dict.fromkeys(self.detectors) for det_key, panel in self.detectors.items(): ptth, peta = panel.pixel_angles(origin=origin) @@ -1323,7 +1366,7 @@ def simulate_powder_pattern(self, ang_res = max_resolution(self) # !!! calc nsteps by oversampling - nsteps = int(np.ceil(2*(tth_ma - tth_mi)/np.degrees(ang_res[0]))) + nsteps = int(np.ceil(2 * (tth_ma - tth_mi) / np.degrees(ang_res[0]))) # evaulation vector for LeBail tth = np.linspace(tth_mi, tth_ma, nsteps) @@ -1332,7 +1375,7 @@ def simulate_powder_pattern(self, wavelength = [ valWUnit('lp', 'length', self.beam_wavelength, 'angstrom'), - 1. 
+ 1.0, ] ''' @@ -1345,23 +1388,25 @@ def simulate_powder_pattern(self, tth = mat.planeData.getTTh() - LP = (1 + np.cos(tth)**2) / \ - np.cos(0.5*tth)/np.sin(0.5*tth)**2 + LP = ( + (1 + np.cos(tth) ** 2) + / np.cos(0.5 * tth) + / np.sin(0.5 * tth) ** 2 + ) intensity[mat.name] = {} - intensity[mat.name]['synchrotron'] = \ + intensity[mat.name]['synchrotron'] = ( mat.planeData.structFact * LP * multiplicity + ) kwargs = { 'expt_spectrum': expt, 'params': params, 'phases': mat_list, - 'wavelength': { - 'synchrotron': wavelength - }, + 'wavelength': {'synchrotron': wavelength}, 'bkgmethod': bkgmethod, 'intensity_init': intensity, - 'peakshape': 'pvtch' + 'peakshape': 'pvtch', } self.WPPFclass = LeBail(**kwargs) @@ -1379,9 +1424,11 @@ def simulate_powder_pattern(self, for det_key, panel in self.detectors.items(): ptth = ptth_dict[det_key] - img = np.interp(np.degrees(ptth), - self.simulated_spectrum.x, - self.simulated_spectrum.y + self.background.y) + img = np.interp( + np.degrees(ptth), + self.simulated_spectrum.x, + self.simulated_spectrum.y + self.background.y, + ) if noise is None: img_dict[det_key] = img @@ -1392,13 +1439,11 @@ def simulate_powder_pattern(self, img /= prev_max if noise.lower() == 'poisson': - im_noise = random_noise(img, - mode='poisson', - clip=True) + im_noise = random_noise(img, mode='poisson', clip=True) mi = im_noise.min() ma = im_noise.max() if ma > mi: - im_noise = (im_noise - mi)/(ma - mi) + im_noise = (im_noise - mi) / (ma - mi) elif noise.lower() == 'gaussian': im_noise = random_noise(img, mode='gaussian', clip=True) @@ -1420,9 +1465,14 @@ def simulate_powder_pattern(self, return img_dict - def simulate_laue_pattern(self, crystal_data, - minEnergy=5., maxEnergy=35., - rmat_s=None, grain_params=None): + def simulate_laue_pattern( + self, + crystal_data, + minEnergy=5.0, + maxEnergy=35.0, + rmat_s=None, + grain_params=None, + ): """ Simulate Laue diffraction over the instrument. @@ -1452,17 +1502,28 @@ def simulate_laue_pattern(self, crystal_data, for det_key, panel in self.detectors.items(): results[det_key] = panel.simulate_laue_pattern( crystal_data, - minEnergy=minEnergy, maxEnergy=maxEnergy, - rmat_s=rmat_s, tvec_s=self.tvec, + minEnergy=minEnergy, + maxEnergy=maxEnergy, + rmat_s=rmat_s, + tvec_s=self.tvec, grain_params=grain_params, - beam_vec=self.beam_vector) + beam_vec=self.beam_vector, + ) return results - def simulate_rotation_series(self, plane_data, grain_param_list, - eta_ranges=[(-np.pi, np.pi), ], - ome_ranges=[(-np.pi, np.pi), ], - ome_period=(-np.pi, np.pi), - wavelength=None): + def simulate_rotation_series( + self, + plane_data, + grain_param_list, + eta_ranges=[ + (-np.pi, np.pi), + ], + ome_ranges=[ + (-np.pi, np.pi), + ], + ome_period=(-np.pi, np.pi), + wavelength=None, + ): """ Simulate a monochromatic rotation series over the instrument. 
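The `LP` expression reformatted above is the unpolarized Lorentz-polarization weight applied to each simulated powder line; `tth` is the full scattering angle 2*theta returned by `planeData.getTTh()`, so `0.5 * tth` is theta. A standalone sketch of the per-reflection weighting (the structure factors and multiplicities below are placeholder values, not hexrd output):

    import numpy as np

    def lorentz_polarization(tth):
        # (1 + cos^2(2*theta)) / (cos(theta) * sin^2(theta)), tth = 2*theta
        theta = 0.5 * tth
        return (1.0 + np.cos(tth) ** 2) / (np.cos(theta) * np.sin(theta) ** 2)

    tth = np.radians([10.0, 20.0, 30.0])      # reflection positions
    struct_fact = np.array([1.0, 0.7, 0.4])   # placeholder |F|^2 values
    multiplicity = np.array([8, 6, 12])       # placeholder multiplicities
    intensity = struct_fact * lorentz_polarization(tth) * multiplicity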
@@ -1491,24 +1552,39 @@ def simulate_rotation_series(self, plane_data, grain_param_list, results = dict.fromkeys(self.detectors) for det_key, panel in self.detectors.items(): results[det_key] = panel.simulate_rotation_series( - plane_data, grain_param_list, + plane_data, + grain_param_list, eta_ranges=eta_ranges, ome_ranges=ome_ranges, ome_period=ome_period, - chi=self.chi, tVec_s=self.tvec, - wavelength=wavelength) + chi=self.chi, + tVec_s=self.tvec, + wavelength=wavelength, + ) return results - def pull_spots(self, plane_data, grain_params, - imgser_dict, - tth_tol=0.25, eta_tol=1., ome_tol=1., - npdiv=2, threshold=10, - eta_ranges=[(-np.pi, np.pi), ], - ome_period=None, - dirname='results', filename=None, output_format='text', - return_spot_list=False, - quiet=True, check_only=False, - interp='nearest'): + def pull_spots( + self, + plane_data, + grain_params, + imgser_dict, + tth_tol=0.25, + eta_tol=1.0, + ome_tol=1.0, + npdiv=2, + threshold=10, + eta_ranges=[ + (-np.pi, np.pi), + ], + ome_period=None, + dirname='results', + filename=None, + output_format='text', + return_spot_list=False, + quiet=True, + check_only=False, + interp='nearest', + ): """ Exctract reflection info from a rotation series. @@ -1568,12 +1644,14 @@ def pull_spots(self, plane_data, grain_params, # WARNING: all imageseries AND all wedges within are assumed to have # the same omega values; put in a check that they are all the same??? oims0 = next(iter(imgser_dict.values())) - ome_ranges = [np.radians([i['ostart'], i['ostop']]) - for i in oims0.omegawedges.wedges] + ome_ranges = [ + np.radians([i['ostart'], i['ostop']]) + for i in oims0.omegawedges.wedges + ] if ome_period is None: ims = next(iter(imgser_dict.values())) ostart = ims.omega[0, 0] - ome_period = np.radians(ostart + np.r_[0., 360.]) + ome_period = np.radians(ostart + np.r_[0.0, 360.0]) # delta omega in DEGREES grabbed from first imageseries in the dict delta_ome = oims0.omega[0, 1] - oims0.omega[0, 0] @@ -1581,7 +1659,10 @@ def pull_spots(self, plane_data, grain_params, # make omega grid for frame expansion around reference frame # in DEGREES ndiv_ome, ome_del = make_tolerance_grid( - delta_ome, ome_tol, 1, adjust_window=True, + delta_ome, + ome_tol, + 1, + adjust_window=True, ) # generate structuring element for connected component labeling @@ -1592,24 +1673,37 @@ def pull_spots(self, plane_data, grain_params, # simulate rotation series sim_results = self.simulate_rotation_series( - plane_data, [grain_params, ], + plane_data, + [ + grain_params, + ], eta_ranges=eta_ranges, ome_ranges=ome_ranges, - ome_period=ome_period) + ome_period=ome_period, + ) # patch vertex generator (global for instrument) - tol_vec = 0.5*np.radians( - [-tth_tol, -eta_tol, - -tth_tol, eta_tol, - tth_tol, eta_tol, - tth_tol, -eta_tol]) + tol_vec = 0.5 * np.radians( + [ + -tth_tol, + -eta_tol, + -tth_tol, + eta_tol, + tth_tol, + eta_tol, + tth_tol, + -eta_tol, + ] + ) # prepare output if requested if filename is not None and output_format.lower() == 'hdf5': this_filename = os.path.join(dirname, filename) writer = GrainDataWriter_h5( os.path.join(dirname, filename), - self.write_config(), grain_params) + self.write_config(), + grain_params, + ) # ===================================================================== # LOOP OVER PANELS @@ -1621,28 +1715,25 @@ def pull_spots(self, plane_data, grain_params, for detector_id, panel in self.detectors.items(): # initialize text-based output writer if filename is not None and output_format.lower() == 'text': - output_dir = os.path.join( - 
dirname, detector_id - ) + output_dir = os.path.join(dirname, detector_id) os.makedirs(output_dir, exist_ok=True) - this_filename = os.path.join( - output_dir, filename - ) + this_filename = os.path.join(output_dir, filename) writer = PatchDataWriter(this_filename) # grab panel instr_cfg = panel.config_dict( - self.chi, self.tvec, + self.chi, + self.tvec, beam_energy=self.beam_energy, beam_vector=self.beam_vector, - style='hdf5' + style='hdf5', ) native_area = panel.pixel_area # pixel ref area # pull out the OmegaImageSeries for this panel from input dict - ome_imgser = _parse_imgser_dict(imgser_dict, - detector_id, - roi=panel.roi) + ome_imgser = _parse_imgser_dict( + imgser_dict, detector_id, roi=panel.roi + ) # extract simulation results sim_results_p = sim_results[detector_id] @@ -1658,19 +1749,24 @@ def pull_spots(self, plane_data, grain_params, # patch vertex array from sim nangs = len(ang_centers) patch_vertices = ( - np.tile(ang_centers[:, :2], (1, 4)) + - np.tile(tol_vec, (nangs, 1)) - ).reshape(4*nangs, 2) - ome_dupl = np.tile( - ang_centers[:, 2], (4, 1) - ).T.reshape(len(patch_vertices), 1) + np.tile(ang_centers[:, :2], (1, 4)) + + np.tile(tol_vec, (nangs, 1)) + ).reshape(4 * nangs, 2) + ome_dupl = np.tile(ang_centers[:, 2], (4, 1)).T.reshape( + len(patch_vertices), 1 + ) # find vertices that all fall on the panel det_xy, rmats_s, on_plane = xrdutil._project_on_detector_plane( np.hstack([patch_vertices, ome_dupl]), - panel.rmat, rMat_c, self.chi, - panel.tvec, tVec_c, self.tvec, - panel.distortion) + panel.rmat, + rMat_c, + self.chi, + panel.tvec, + tVec_c, + self.tvec, + panel.distortion, + ) _, on_panel = panel.clip_to_panel(det_xy, buffer_edges=True) # all vertices must be on... @@ -1701,7 +1797,9 @@ def pull_spots(self, plane_data, grain_params, if not quiet: msg = """ window for (%d %d %d) falls outside omega range - """ % tuple(hkls_p[i_pt, :]) + """ % tuple( + hkls_p[i_pt, :] + ) print(msg) continue else: @@ -1719,11 +1817,16 @@ def pull_spots(self, plane_data, grain_params, # make the tth,eta patches for interpolation patches = xrdutil.make_reflection_patches( instr_cfg, - ang_centers[:, :2], ang_pixel_size, + ang_centers[:, :2], + ang_pixel_size, omega=ang_centers[:, 2], - tth_tol=tth_tol, eta_tol=eta_tol, - rmat_c=rMat_c, tvec_c=tVec_c, - npdiv=npdiv, quiet=True) + tth_tol=tth_tol, + eta_tol=eta_tol, + rmat_c=rMat_c, + tvec_c=tVec_c, + npdiv=npdiv, + quiet=True, + ) # GRAND LOOP over reflections for this panel patch_output = [] @@ -1733,7 +1836,7 @@ def pull_spots(self, plane_data, grain_params, vtx_angs, vtx_xy, conn, areas, xy_eval, ijs = patch prows, pcols = areas.shape - nrm_fac = areas/float(native_area) + nrm_fac = areas / float(native_area) nrm_fac = nrm_fac / np.min(nrm_fac) # grab hkl info @@ -1747,8 +1850,9 @@ def pull_spots(self, plane_data, grain_params, delta_eta = eta_edges[1] - eta_edges[0] # need to reshape eval pts for interpolation - xy_eval = np.vstack([xy_eval[0].flatten(), - xy_eval[1].flatten()]).T + xy_eval = np.vstack( + [xy_eval[0].flatten(), xy_eval[1].flatten()] + ).T # the evaluation omegas; # expand about the central value using tol vector @@ -1763,7 +1867,9 @@ def pull_spots(self, plane_data, grain_params, if not quiet: msg = """ window for (%d%d%d) falls outside omega range - """ % tuple(hkl) + """ % tuple( + hkl + ) print(msg) continue else: @@ -1772,8 +1878,8 @@ def pull_spots(self, plane_data, grain_params, peak_id = next_invalid_peak_id sum_int = np.nan max_int = np.nan - meas_angs = np.nan*np.ones(3) - meas_xy = np.nan*np.ones(2) + 
meas_angs = np.nan * np.ones(3) + meas_xy = np.nan * np.ones(2) # quick check for intensity contains_signal = False @@ -1791,19 +1897,23 @@ def pull_spots(self, plane_data, grain_params, # initialize patch data array for intensities if interp.lower() == 'bilinear': patch_data = np.zeros( - (len(frame_indices), prows, pcols)) + (len(frame_indices), prows, pcols) + ) for i, i_frame in enumerate(frame_indices): - patch_data[i] = \ - panel.interpolate_bilinear( - xy_eval, - ome_imgser[i_frame], - pad_with_nans=False - ).reshape(prows, pcols) # * nrm_fac + patch_data[i] = panel.interpolate_bilinear( + xy_eval, + ome_imgser[i_frame], + pad_with_nans=False, + ).reshape( + prows, pcols + ) # * nrm_fac elif interp.lower() == 'nearest': patch_data = patch_data_raw # * nrm_fac else: - msg = "interpolation option " + \ - "'%s' not understood" + msg = ( + "interpolation option " + + "'%s' not understood" + ) raise RuntimeError(msg % interp) # now have interpolated patch data... @@ -1816,9 +1926,10 @@ def pull_spots(self, plane_data, grain_params, peak_id = iRefl props = regionprops(labels, patch_data) coms = np.vstack( - [x.weighted_centroid for x in props]) + [x.weighted_centroid for x in props] + ) if num_peaks > 1: - center = np.r_[patch_data.shape]*0.5 + center = np.r_[patch_data.shape] * 0.5 center_t = np.tile(center, (num_peaks, 1)) com_diff = coms - center_t closest_peak_idx = np.argmin( @@ -1829,15 +1940,17 @@ def pull_spots(self, plane_data, grain_params, coms = coms[closest_peak_idx] # meas_omes = \ # ome_edges[0] + (0.5 + coms[0])*delta_ome - meas_omes = \ - ome_eval[0] + coms[0]*delta_ome + meas_omes = ome_eval[0] + coms[0] * delta_ome meas_angs = np.hstack( - [tth_edges[0] + (0.5 + coms[2])*delta_tth, - eta_edges[0] + (0.5 + coms[1])*delta_eta, - mapAngle( - np.radians(meas_omes), ome_period - ) - ] + [ + tth_edges[0] + + (0.5 + coms[2]) * delta_tth, + eta_edges[0] + + (0.5 + coms[1]) * delta_eta, + mapAngle( + np.radians(meas_omes), ome_period + ), + ] ) # intensities @@ -1864,15 +1977,21 @@ def pull_spots(self, plane_data, grain_params, meas_angs, chi=self.chi, rmat_c=rMat_c, - beam_vec=self.beam_vector) + beam_vec=self.beam_vector, + ) rMat_s = make_sample_rmat( self.chi, meas_angs[2] ) meas_xy = gvec_to_xy( gvec_c, - panel.rmat, rMat_s, rMat_c, - panel.tvec, self.tvec, tVec_c, - beam_vec=self.beam_vector) + panel.rmat, + rMat_s, + rMat_c, + panel.tvec, + self.tvec, + tVec_c, + beam_vec=self.beam_vector, + ) if panel.distortion is not None: meas_xy = panel.distortion.apply_inverse( np.atleast_2d(meas_xy) @@ -1891,19 +2010,38 @@ def pull_spots(self, plane_data, grain_params, if filename is not None: if output_format.lower() == 'text': writer.dump_patch( - peak_id, hkl_id, hkl, sum_int, max_int, - ang_centers[i_pt], meas_angs, - xy_centers[i_pt], meas_xy) + peak_id, + hkl_id, + hkl, + sum_int, + max_int, + ang_centers[i_pt], + meas_angs, + xy_centers[i_pt], + meas_xy, + ) elif output_format.lower() == 'hdf5': xyc_arr = xy_eval.reshape( prows, pcols, 2 ).transpose(2, 0, 1) writer.dump_patch( - detector_id, iRefl, peak_id, hkl_id, hkl, - tth_edges, eta_edges, np.radians(ome_eval), - xyc_arr, ijs, frame_indices, patch_data, - ang_centers[i_pt], xy_centers[i_pt], - meas_angs, meas_xy) + detector_id, + iRefl, + peak_id, + hkl_id, + hkl, + tth_edges, + eta_edges, + np.radians(ome_eval), + xyc_arr, + ijs, + frame_indices, + patch_data, + ang_centers[i_pt], + xy_centers[i_pt], + meas_angs, + meas_xy, + ) if return_spot_list: # Full output @@ -1911,17 +2049,34 @@ def pull_spots(self, plane_data, 
grain_params, prows, pcols, 2 ).transpose(2, 0, 1) _patch_output = [ - detector_id, iRefl, peak_id, hkl_id, hkl, - tth_edges, eta_edges, np.radians(ome_eval), - xyc_arr, ijs, frame_indices, patch_data, - ang_centers[i_pt], xy_centers[i_pt], - meas_angs, meas_xy + detector_id, + iRefl, + peak_id, + hkl_id, + hkl, + tth_edges, + eta_edges, + np.radians(ome_eval), + xyc_arr, + ijs, + frame_indices, + patch_data, + ang_centers[i_pt], + xy_centers[i_pt], + meas_angs, + meas_xy, ] else: # Trimmed output _patch_output = [ - peak_id, hkl_id, hkl, sum_int, max_int, - ang_centers[i_pt], meas_angs, meas_xy + peak_id, + hkl_id, + hkl, + sum_int, + max_int, + ang_centers[i_pt], + meas_angs, + meas_xy, ] patch_output.append(_patch_output) iRefl += 1 @@ -1939,7 +2094,9 @@ def update_memoization_sizes(self): PlanarDetector.update_memoization_sizes(all_panels) CylindricalDetector.update_memoization_sizes(all_panels) - def calc_transmission(self, rMat_s: np.ndarray = None) -> dict[str, np.ndarray]: + def calc_transmission( + self, rMat_s: np.ndarray = None + ) -> dict[str, np.ndarray]: """calculate the transmission from the filter and polymer coating. the inverse of this number is the intensity correction that needs @@ -1953,26 +2110,31 @@ def calc_transmission(self, rMat_s: np.ndarray = None) -> dict[str, np.ndarray]: transmissions = {} for det_name, det in self.detectors.items(): transmission_filter, transmission_phosphor = ( - det.calc_filter_coating_transmission(energy)) + det.calc_filter_coating_transmission(energy) + ) transmission = transmission_filter * transmission_phosphor if self.physics_package is not None: transmission_physics_package = ( det.calc_physics_package_transmission( - energy, rMat_s, self.physics_package)) + energy, rMat_s, self.physics_package + ) + ) effective_pinhole_area = det.calc_effective_pinhole_area( - self.physics_package) + self.physics_package + ) transmission = ( - transmission * - transmission_physics_package * - effective_pinhole_area + transmission + * transmission_physics_package + * effective_pinhole_area ) transmissions[det_name] = transmission return transmissions + # ============================================================================= # UTILITIES # ============================================================================= @@ -1983,6 +2145,7 @@ class PatchDataWriter(object): def __init__(self, filename): self._delim = ' ' + # fmt: off header_items = ( '# ID', 'PID', 'H', 'K', 'L', @@ -1997,6 +2160,7 @@ def __init__(self, filename): self._delim.join(np.tile('{:<12}', 2)).format(*header_items[5:7]), self._delim.join(np.tile('{:<23}', 10)).format(*header_items[7:17]) ]) + # fmt: on if isinstance(filename, IOBase): self.fid = filename else: @@ -2009,30 +2173,34 @@ def __del__(self): def close(self): self.fid.close() - def dump_patch(self, peak_id, hkl_id, - hkl, spot_int, max_int, - pangs, mangs, pxy, mxy): + def dump_patch( + self, peak_id, hkl_id, hkl, spot_int, max_int, pangs, mangs, pxy, mxy + ): """ !!! 
maybe need to check that last four inputs are arrays """ if mangs is None: spot_int = np.nan max_int = np.nan - mangs = np.nan*np.ones(3) - mxy = np.nan*np.ones(2) - - res = [int(peak_id), int(hkl_id)] \ - + np.array(hkl, dtype=int).tolist() \ - + [spot_int, max_int] \ - + pangs.tolist() \ - + mangs.tolist() \ - + pxy.tolist() \ + mangs = np.nan * np.ones(3) + mxy = np.nan * np.ones(2) + + res = ( + [int(peak_id), int(hkl_id)] + + np.array(hkl, dtype=int).tolist() + + [spot_int, max_int] + + pangs.tolist() + + mangs.tolist() + + pxy.tolist() + mxy.tolist() + ) output_str = self._delim.join( - [self._delim.join(np.tile('{:<6d}', 5)).format(*res[:5]), - self._delim.join(np.tile('{:<12e}', 2)).format(*res[5:7]), - self._delim.join(np.tile('{:<23.16e}', 10)).format(*res[7:])] + [ + self._delim.join(np.tile('{:<6d}', 5)).format(*res[:5]), + self._delim.join(np.tile('{:<12e}', 2)).format(*res[5:7]), + self._delim.join(np.tile('{:<23.16e}', 10)).format(*res[7:]), + ] ) print(output_str, file=self.fid) return output_str @@ -2048,20 +2216,23 @@ def __init__(self, filename=None, array=None): """ if filename is None and array is None: raise RuntimeError( - 'GrainDataWriter must be specified with filename or array') + 'GrainDataWriter must be specified with filename or array' + ) self.array = None self.fid = None # array supersedes filename if array is not None: - assert array.shape[1] == 21, \ - f'grain data table must have 21 columns not {array.shape[21]}' + assert ( + array.shape[1] == 21 + ), f'grain data table must have 21 columns not {array.shape[21]}' self.array = array self._array_row = 0 return self._delim = ' ' + # fmt: off header_items = ( '# grain ID', 'completeness', 'chi^2', 'exp_map_c[0]', 'exp_map_c[1]', 'exp_map_c[2]', @@ -2081,6 +2252,7 @@ def __init__(self, filename=None, array=None): np.tile('{:<23}', len(header_items) - 3) ).format(*header_items[3:])] ) + # fmt: on if isinstance(filename, IOBase): self.fid = filename else: @@ -2094,35 +2266,40 @@ def close(self): if self.fid is not None: self.fid.close() - def dump_grain(self, grain_id, completeness, chisq, - grain_params): - assert len(grain_params) == 12, \ - "len(grain_params) must be 12, not %d" % len(grain_params) + def dump_grain(self, grain_id, completeness, chisq, grain_params): + assert ( + len(grain_params) == 12 + ), "len(grain_params) must be 12, not %d" % len(grain_params) # extract strain emat = logm(np.linalg.inv(mutil.vecMVToSymm(grain_params[6:]))) evec = mutil.symmToVecMV(emat, scale=False) - res = [int(grain_id), completeness, chisq] \ - + grain_params.tolist() \ + res = ( + [int(grain_id), completeness, chisq] + + grain_params.tolist() + evec.tolist() + ) if self.array is not None: row = self._array_row - assert row < self.array.shape[0], \ - f'invalid row {row} in array table' + assert ( + row < self.array.shape[0] + ), f'invalid row {row} in array table' self.array[row] = res self._array_row += 1 return res # (else) format and write to file output_str = self._delim.join( - [self._delim.join( - ['{:<12d}', '{:<12f}', '{:<12e}'] - ).format(*res[:3]), - self._delim.join( - np.tile('{:<23.16e}', len(res) - 3) - ).format(*res[3:])] + [ + self._delim.join(['{:<12d}', '{:<12f}', '{:<12e}']).format( + *res[:3] + ), + self._delim.join(np.tile('{:<23.16e}', len(res) - 3)).format( + *res[3:] + ), + ] ) print(output_str, file=self.fid) return output_str @@ -2152,12 +2329,12 @@ def __init__(self, filename, instr_cfg, grain_params, use_attr=False): vinv_s = np.array(grain_params[6:]).flatten() vmat_s = 
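dump_grain converts the six inverse-stretch components of the grain parameters into a logarithmic strain via a matrix log of the stretch tensor. A sketch of that step, using a plain Voigt packing in place of hexrd's matrixutil helpers (vecMVToSymm/symmToVecMV use a Mandel-Voigt convention with scaled off-diagonals; the unscaled variant below is only illustrative):

import numpy as np
from scipy.linalg import logm

def vec_to_symm(v):
    # order (11, 22, 33, 23, 13, 12), unscaled off-diagonals
    return np.array([[v[0], v[5], v[4]],
                     [v[5], v[1], v[3]],
                     [v[4], v[3], v[2]]])

vinv_s = np.array([1.001, 0.999, 1.000, 0.0, 0.0, 0.0])  # hypothetical
emat = logm(np.linalg.inv(vec_to_symm(vinv_s)))  # log strain tensor
evec = emat[np.triu_indices(3)]  # back to 6 components (illustrative order)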
np.linalg.inv(mutil.vecMVToSymm(vinv_s)) - if use_attr: # attribute version + if use_attr: # attribute version self.grain_grp.attrs.create('rmat_c', rmat_c) self.grain_grp.attrs.create('tvec_c', tvec_c) self.grain_grp.attrs.create('inv(V)_s', vinv_s) self.grain_grp.attrs.create('vmat_s', vmat_s) - else: # dataset version + else: # dataset version self.grain_grp.create_dataset('rmat_c', data=rmat_c) self.grain_grp.create_dataset('tvec_c', data=tvec_c) self.grain_grp.create_dataset('inv(V)_s', data=vinv_s) @@ -2176,11 +2353,26 @@ def __init__(self, filename, instr_cfg, grain_params, use_attr=False): def close(self): self.fid.close() - def dump_patch(self, panel_id, - i_refl, peak_id, hkl_id, hkl, - tth_edges, eta_edges, ome_centers, - xy_centers, ijs, frame_indices, - spot_data, pangs, pxy, mangs, mxy, gzip=1): + def dump_patch( + self, + panel_id, + i_refl, + peak_id, + hkl_id, + hkl, + tth_edges, + eta_edges, + ome_centers, + xy_centers, + ijs, + frame_indices, + spot_data, + pangs, + pxy, + mangs, + mxy, + gzip=1, + ): """ to be called inside loop over patches @@ -2196,10 +2388,10 @@ def dump_patch(self, panel_id, spot_grp.attrs.create('predicted_angles', pangs) spot_grp.attrs.create('predicted_xy', pxy) if mangs is None: - mangs = np.nan*np.ones(3) + mangs = np.nan * np.ones(3) spot_grp.attrs.create('measured_angles', mangs) if mxy is None: - mxy = np.nan*np.ones(3) + mxy = np.nan * np.ones(3) spot_grp.attrs.create('measured_xy', mxy) # get centers crds from edge arrays @@ -2218,27 +2410,55 @@ def dump_patch(self, panel_id, eta_crd = centers_of_edge_vec(eta_edges) shuffle_data = True # reduces size by 20% - spot_grp.create_dataset('tth_crd', data=tth_crd, - compression="gzip", compression_opts=gzip, - shuffle=shuffle_data) - spot_grp.create_dataset('eta_crd', data=eta_crd, - compression="gzip", compression_opts=gzip, - shuffle=shuffle_data) - spot_grp.create_dataset('ome_crd', data=ome_centers, - compression="gzip", compression_opts=gzip, - shuffle=shuffle_data) - spot_grp.create_dataset('xy_centers', data=xy_centers, - compression="gzip", compression_opts=gzip, - shuffle=shuffle_data) - spot_grp.create_dataset('ij_centers', data=ijs, - compression="gzip", compression_opts=gzip, - shuffle=shuffle_data) - spot_grp.create_dataset('frame_indices', data=fi, - compression="gzip", compression_opts=gzip, - shuffle=shuffle_data) - spot_grp.create_dataset('intensities', data=spot_data, - compression="gzip", compression_opts=gzip, - shuffle=shuffle_data) + spot_grp.create_dataset( + 'tth_crd', + data=tth_crd, + compression="gzip", + compression_opts=gzip, + shuffle=shuffle_data, + ) + spot_grp.create_dataset( + 'eta_crd', + data=eta_crd, + compression="gzip", + compression_opts=gzip, + shuffle=shuffle_data, + ) + spot_grp.create_dataset( + 'ome_crd', + data=ome_centers, + compression="gzip", + compression_opts=gzip, + shuffle=shuffle_data, + ) + spot_grp.create_dataset( + 'xy_centers', + data=xy_centers, + compression="gzip", + compression_opts=gzip, + shuffle=shuffle_data, + ) + spot_grp.create_dataset( + 'ij_centers', + data=ijs, + compression="gzip", + compression_opts=gzip, + shuffle=shuffle_data, + ) + spot_grp.create_dataset( + 'frame_indices', + data=fi, + compression="gzip", + compression_opts=gzip, + shuffle=shuffle_data, + ) + spot_grp.create_dataset( + 'intensities', + data=spot_data, + compression="gzip", + compression_opts=gzip, + shuffle=shuffle_data, + ) return @@ -2260,9 +2480,16 @@ class GenerateEtaOmeMaps(object): """ - def __init__(self, image_series_dict, instrument, 
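Every per-spot array above is written with the same gzip-plus-shuffle options, which the comment notes cuts file size by roughly 20%. The pattern, reduced to a minimal h5py example (file and group names here are hypothetical):

import h5py
import numpy as np

data = np.random.rand(40, 120, 120)  # e.g. a stack of patch intensities
with h5py.File('spots_example.h5', 'w') as fid:
    grp = fid.create_group('reflection_0')
    grp.create_dataset(
        'intensities',
        data=data,
        compression='gzip',
        compression_opts=1,  # gzip level, cf. the gzip=1 default above
        shuffle=True,        # byte-shuffle filter before compression
    )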
plane_data, - active_hkls=None, eta_step=0.25, threshold=None, - ome_period=(0, 360)): + def __init__( + self, + image_series_dict, + instrument, + plane_data, + active_hkls=None, + eta_step=0.25, + threshold=None, + ome_period=(0, 360), + ): """ image_series must be OmegaImageSeries class instrument_params must be a dict (loaded from yaml spec) @@ -2276,13 +2503,12 @@ def __init__(self, image_series_dict, instrument, plane_data, # ???: change name of iHKLList? # ???: can we change the behavior of iHKLList? if active_hkls is None: - self._iHKLList = plane_data.getHKLID( - plane_data.hkls, master=True - ) + self._iHKLList = plane_data.getHKLID(plane_data.hkls, master=True) n_rings = len(self._iHKLList) else: - assert hasattr(active_hkls, '__len__'), \ - "active_hkls must be an iterable with __len__" + assert hasattr( + active_hkls, '__len__' + ), "active_hkls must be an iterable with __len__" self._iHKLList = active_hkls n_rings = len(active_hkls) @@ -2297,14 +2523,18 @@ def __init__(self, image_series_dict, instrument, plane_data, omegas_array = this_det_ims.metadata['omega'] # !!! DEGREES delta_ome = omegas_array[0][-1] - omegas_array[0][0] frame_mask = None - ome_period = omegas_array[0, 0] + np.r_[0., 360.] # !!! be careful + ome_period = omegas_array[0, 0] + np.r_[0.0, 360.0] # !!! be careful if this_det_ims.omegawedges.nwedges > 1: - delta_omes = [(i['ostop'] - i['ostart'])/i['nsteps'] - for i in this_det_ims.omegawedges.wedges] - check_wedges = mutil.uniqueVectors(np.atleast_2d(delta_omes), - tol=1e-6).squeeze() - assert check_wedges.size == 1, \ - "all wedges must have the same delta omega to 1e-6" + delta_omes = [ + (i['ostop'] - i['ostart']) / i['nsteps'] + for i in this_det_ims.omegawedges.wedges + ] + check_wedges = mutil.uniqueVectors( + np.atleast_2d(delta_omes), tol=1e-6 + ).squeeze() + assert ( + check_wedges.size == 1 + ), "all wedges must have the same delta omega to 1e-6" # grab representative delta ome # !!! assuming positive delta consistent with OmegaImageSeries delta_ome = delta_omes[0] @@ -2320,9 +2550,9 @@ def __init__(self, image_series_dict, instrument, plane_data, ) # compute total nsteps # FIXME: need check for roundoff badness - nsteps = int((ostop - ostart)/delta_ome) + nsteps = int((ostop - ostart) / delta_ome) ome_edges_full = np.linspace( - ostart, ostop, num=nsteps+1, endpoint=True + ostart, ostop, num=nsteps + 1, endpoint=True ) omegas_array = np.vstack( [ome_edges_full[:-1], ome_edges_full[1:]] @@ -2333,15 +2563,21 @@ def __init__(self, image_series_dict, instrument, plane_data, # !!! this array has -1 outside a wedge # !!! again assuming the valid frame order increases monotonically frame_mask = np.array( - [this_det_ims.omega_to_frame(ome)[0] != -1 - for ome in ome_centers] + [ + this_det_ims.omega_to_frame(ome)[0] != -1 + for ome in ome_centers + ] ) # ???: need to pass a threshold? eta_mapping, etas = instrument.extract_polar_maps( - plane_data, image_series_dict, - active_hkls=active_hkls, threshold=threshold, - tth_tol=None, eta_tol=eta_step) + plane_data, + image_series_dict, + active_hkls=active_hkls, + threshold=threshold, + tth_tol=None, + eta_tol=eta_step, + ) # for convenience grab map shape from first map_shape = next(iter(eta_mapping.values())).shape[1:] @@ -2368,7 +2604,7 @@ def __init__(self, image_series_dict, instrument, plane_data, if frame_mask is not None: # !!! 
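The wedge handling in GenerateEtaOmeMaps first checks that all omega wedges share one step size, then spans them with a single set of evenly spaced edges and masks the bins that were never imaged. A standalone sketch under the same assumptions (wedge values hypothetical):

import numpy as np

wedges = [(10.0, 20.0, 40), (30.0, 40.0, 40)]  # (ostart, ostop, nsteps)
delta_omes = [(stop - start) / n for start, stop, n in wedges]
assert np.ptp(delta_omes) < 1e-6, "wedges must share one delta omega"

delta_ome = delta_omes[0]
ostart, ostop = wedges[0][0], wedges[-1][1]
nsteps = int((ostop - ostart) / delta_ome)  # cf. the roundoff FIXME above
ome_edges_full = np.linspace(ostart, ostop, num=nsteps + 1, endpoint=True)
ome_centers = 0.5 * (ome_edges_full[:-1] + ome_edges_full[1:])

# True where a bin center falls inside some wedge
frame_mask = np.zeros(len(ome_centers), dtype=bool)
for start, stop, _ in wedges:
    frame_mask |= (ome_centers > start) & (ome_centers < stop)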
must expand row dimension to include # skipped omegas - tmp = np.ones((len(frame_mask), map_shape[1]))*np.nan + tmp = np.ones((len(frame_mask), map_shape[1])) * np.nan tmp[frame_mask, :] = full_map full_map = tmp data_store.append(full_map) @@ -2377,11 +2613,11 @@ def __init__(self, image_series_dict, instrument, plane_data, # set required attributes self._omegas = mapAngle( np.radians(np.average(omegas_array, axis=1)), - np.radians(ome_period) + np.radians(ome_period), ) self._omeEdges = mapAngle( np.radians(np.r_[omegas_array[:, 0], omegas_array[-1, 1]]), - np.radians(ome_period) + np.radians(ome_period), ) # !!! must avoid the case where omeEdges[0] = omeEdges[-1] for the @@ -2395,7 +2631,7 @@ def __init__(self, image_series_dict, instrument, plane_data, # WARNING: unlinke the omegas in imageseries metadata, # these are in RADIANS and represent bin centers self._etaEdges = etas - self._etas = self._etaEdges[:-1] + 0.5*np.radians(eta_step) + self._etas = self._etaEdges[:-1] + 0.5 * np.radians(eta_step) @property def dataStore(self): @@ -2431,9 +2667,7 @@ def save(self, filename): def _generate_ring_params(tthr, ptth, peta, eta_edges, delta_eta): # mark pixels in the spec'd tth range - pixels_in_tthr = np.logical_and( - ptth >= tthr[0], ptth <= tthr[1] - ) + pixels_in_tthr = np.logical_and(ptth >= tthr[0], ptth <= tthr[1]) # catch case where ring isn't on detector if not np.any(pixels_in_tthr): @@ -2450,8 +2684,7 @@ def _generate_ring_params(tthr, ptth, peta, eta_edges, delta_eta): def run_fast_histogram(x, bins, weights=None): - return histogram1d(x, len(bins) - 1, (bins[0], bins[-1]), - weights=weights) + return histogram1d(x, len(bins) - 1, (bins[0], bins[-1]), weights=weights) def run_numpy_histogram(x, bins, weights=None): @@ -2469,7 +2702,7 @@ def _run_histograms(rows, ims, tth_ranges, ring_maps, ring_params, threshold): if threshold is not None: # !!! NaNs get preserved image = np.array(image) - image[image < threshold] = 0. 
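run_fast_histogram above relies on fast_histogram.histogram1d agreeing with np.histogram whenever the bin edges are uniform, since it takes only a bin count and a range. A quick check of that equivalence on synthetic data (the two libraries differ on values exactly at the right edge, which uniform floats avoid almost surely):

import numpy as np
from fast_histogram import histogram1d

x = np.random.uniform(0.0, 1.0, 10_000)
bins = np.linspace(0.0, 1.0, 101)  # uniform edges

h_fast = histogram1d(x, len(bins) - 1, (bins[0], bins[-1]))
h_np, _ = np.histogram(x, bins=bins)
assert np.allclose(h_fast, h_np)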
+ image[image < threshold] = 0.0 for i_r, tthr in enumerate(tth_ranges): this_map = ring_maps[i_r] @@ -2486,12 +2719,21 @@ def _run_histograms(rows, ims, tth_ranges, ring_maps, ring_params, threshold): this_map[i_row, bins_on_detector] = result[bins_on_detector] -def _extract_detector_line_positions(iter_args, plane_data, tth_tol, - eta_tol, eta_centers, npdiv, - collapse_tth, collapse_eta, - do_interpolation, do_fitting, - fitting_kwargs, tth_distortion, - max_workers): +def _extract_detector_line_positions( + iter_args, + plane_data, + tth_tol, + eta_tol, + eta_centers, + npdiv, + collapse_tth, + collapse_eta, + do_interpolation, + do_fitting, + fitting_kwargs, + tth_distortion, + max_workers, +): panel, instr_cfg, images, pbp = iter_args if images.ndim == 2: @@ -2506,9 +2748,13 @@ def _extract_detector_line_positions(iter_args, plane_data, tth_tol, tth_distr_cls = tth_distortion[panel.name] pow_angs, pow_xys, tth_ranges = panel.make_powder_rings( - plane_data, merge_hkls=True, - delta_tth=tth_tol, delta_eta=eta_tol, - eta_list=eta_centers, tth_distortion=tth_distr_cls) + plane_data, + merge_hkls=True, + delta_tth=tth_tol, + delta_eta=eta_tol, + eta_list=eta_centers, + tth_distortion=tth_distr_cls, + ) tth_tols = np.degrees(np.hstack([i[1] - i[0] for i in tth_ranges])) @@ -2523,8 +2769,9 @@ def _extract_detector_line_positions(iter_args, plane_data, tth_tol, # ================================================================= # LOOP OVER RING SETS # ================================================================= - pbar_rings = partial(tqdm, total=len(pow_angs), desc="Ringset", - position=pbp) + pbar_rings = partial( + tqdm, total=len(pow_angs), desc="Ringset", position=pbp + ) kwargs = { 'instr_cfg': instr_cfg, @@ -2541,15 +2788,26 @@ def _extract_detector_line_positions(iter_args, plane_data, tth_tol, } func = partial(_extract_ring_line_positions, **kwargs) iter_arg = zip(pow_angs, pow_xys, tth_tols, tth0) - with ProcessPoolExecutor(mp_context=constants.mp_context, - max_workers=max_workers) as executor: + with ProcessPoolExecutor( + mp_context=constants.mp_context, max_workers=max_workers + ) as executor: return list(pbar_rings(executor.map(func, iter_arg))) -def _extract_ring_line_positions(iter_args, instr_cfg, panel, eta_tol, npdiv, - collapse_tth, collapse_eta, images, - do_interpolation, do_fitting, fitting_kwargs, - tth_distortion): +def _extract_ring_line_positions( + iter_args, + instr_cfg, + panel, + eta_tol, + npdiv, + collapse_tth, + collapse_eta, + images, + do_interpolation, + do_fitting, + fitting_kwargs, + tth_distortion, +): """ Extracts data for a single Debye-Scherrer ring . 
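The ring-set loop above uses a common dispatch pattern: freeze the shared keyword arguments with functools.partial, map the per-ring arguments over a process pool, and wrap the result iterator in tqdm for progress. Stripped of the hexrd specifics (the worker below is hypothetical):

from concurrent.futures import ProcessPoolExecutor
from functools import partial
from tqdm import tqdm

def _process_ring(args, scale=1.0):
    lo, hi = args
    return scale * (hi - lo)

if __name__ == '__main__':
    ring_args = [(0.10, 0.12), (0.20, 0.23), (0.31, 0.33)]
    func = partial(_process_ring, scale=2.0)
    with ProcessPoolExecutor(max_workers=2) as executor:
        results = list(tqdm(executor.map(func, ring_args),
                            total=len(ring_args), desc="Ringset"))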
@@ -2597,16 +2855,22 @@ def _extract_ring_line_positions(iter_args, instr_cfg, panel, eta_tol, npdiv, nan_mask = ~np.logical_or(np.isnan(xys), np.isnan(angs)) nan_mask = np.logical_or.reduce(nan_mask, 1) if angs.ndim > 1 and xys.ndim > 1: - angs = angs[nan_mask,:] - xys = xys[nan_mask, :] + angs = angs[nan_mask, :] + xys = xys[nan_mask, :] n_images = len(images) native_area = panel.pixel_area # make the tth,eta patches for interpolation patches = xrdutil.make_reflection_patches( - instr_cfg, angs, panel.angularPixelSize(xys), - tth_tol=tth_tol, eta_tol=eta_tol, npdiv=npdiv, quiet=True) + instr_cfg, + angs, + panel.angularPixelSize(xys), + tth_tol=tth_tol, + eta_tol=eta_tol, + npdiv=npdiv, + quiet=True, + ) # loop over patches # FIXME: fix initialization @@ -2619,9 +2883,7 @@ def _extract_ring_line_positions(iter_args, instr_cfg, panel, eta_tol, npdiv, vtx_angs, vtx_xys, conn, areas, xys_eval, ijs = patch # need to reshape eval pts for interpolation - xy_eval = np.vstack([ - xys_eval[0].flatten(), - xys_eval[1].flatten()]).T + xy_eval = np.vstack([xys_eval[0].flatten(), xys_eval[1].flatten()]).T _, on_panel = panel.clip_to_panel(xy_eval) @@ -2629,25 +2891,20 @@ def _extract_ring_line_positions(iter_args, instr_cfg, panel, eta_tol, npdiv, continue if collapse_tth: - ang_data = (vtx_angs[0][0, [0, -1]], - vtx_angs[1][[0, -1], 0]) + ang_data = (vtx_angs[0][0, [0, -1]], vtx_angs[1][[0, -1], 0]) elif collapse_eta: # !!! yield the tth bin centers tth_centers = np.average( - np.vstack( - [vtx_angs[0][0, :-1], vtx_angs[0][0, 1:]] - ), - axis=0 + np.vstack([vtx_angs[0][0, :-1], vtx_angs[0][0, 1:]]), axis=0 ) - ang_data = (tth_centers, - angs[i_p][-1]) + ang_data = (tth_centers, angs[i_p][-1]) if do_fitting: fit_data = [] else: ang_data = vtx_angs prows, pcols = areas.shape - area_fac = areas/float(native_area) + area_fac = areas / float(native_area) # interpolate if not collapse_tth: @@ -2656,19 +2913,22 @@ def _extract_ring_line_positions(iter_args, instr_cfg, panel, eta_tol, npdiv, # catch interpolation type image = images[j_p] if do_interpolation: - p_img = panel.interpolate_bilinear( + p_img = ( + panel.interpolate_bilinear( xy_eval, image, - ).reshape(prows, pcols)*area_fac + ).reshape(prows, pcols) + * area_fac + ) else: - p_img = image[ijs[0], ijs[1]]*area_fac + p_img = image[ijs[0], ijs[1]] * area_fac # catch flat spectrum data, which will cause # fitting to fail. # ???: best here, or make fitting handle it? mxval = np.max(p_img) mnval = np.min(p_img) - if mxval == 0 or (1. - mnval/mxval) < 0.01: + if mxval == 0 or (1.0 - mnval / mxval) < 0.01: continue # catch collapsing options @@ -2685,11 +2945,16 @@ def _extract_ring_line_positions(iter_args, instr_cfg, panel, eta_tol, npdiv, tmp = tth_distortion.apply( panel.angles_to_cart( np.vstack( - [np.radians(this_tth0), - np.tile(ang_data[-1], len(this_tth0))] + [ + np.radians(this_tth0), + np.tile( + ang_data[-1], len(this_tth0) + ), + ] ).T ), - return_nominal=True) + return_nominal=True, + ) pk_centers = np.degrees(tmp[:, 0]) else: pk_centers = this_tth0 diff --git a/hexrd/core/instrument/physics_package.py b/hexrd/core/instrument/physics_package.py index 7b77f5e10..e0af72b8f 100644 --- a/hexrd/core/instrument/physics_package.py +++ b/hexrd/core/instrument/physics_package.py @@ -43,22 +43,24 @@ class AbstractPhysicsPackage: Readout models for BaFBr0.85I0.15:Eu image plates Rev. Sci. Instrum. 
89, 063101 (2018 """ + # Abstract methods that must be redefined in derived classes @property @abstractmethod def type(self): pass - def __init__(self, - sample_material=None, - sample_density=None, - sample_thickness=None, - pinhole_material=None, - pinhole_density=None, - pinhole_thickness=None, - pinhole_diameter=None, - **kwargs - ): + def __init__( + self, + sample_material=None, + sample_density=None, + sample_thickness=None, + pinhole_material=None, + pinhole_density=None, + pinhole_thickness=None, + pinhole_diameter=None, + **kwargs, + ): self._sample_material = sample_material self._sample_density = sample_density self._sample_thickness = sample_thickness @@ -164,20 +166,23 @@ def absorption_length(self, energy, flag): energy_inp = energy if flag.lower() == 'sample': - args = (self.sample_density, - self.sample_material, - energy_inp, - ) + args = ( + self.sample_density, + self.sample_material, + energy_inp, + ) elif flag.lower() == 'window': - args = (self.window_density, - self.window_material, - energy_inp, - ) + args = ( + self.window_density, + self.window_material, + energy_inp, + ) elif flag.lower() == 'pinhole': - args = (self.pinhole_density, - self.pinhole_material, - energy_inp, - ) + args = ( + self.pinhole_density, + self.pinhole_material, + energy_inp, + ) abs_length = calculate_linear_absorption_length(*args) if abs_length.shape[0] == 1: return abs_length[0] @@ -285,8 +290,10 @@ def sample_diameter(self): if self.sample_geometry == 'cylinder': return self._sample_thickness else: - msg = (f'sample geometry does not have diameter ' - f'associated with it.') + msg = ( + f'sample geometry does not have diameter ' + f'associated with it.' + ) print(msg) return diff --git a/hexrd/core/instrument/planar_detector.py b/hexrd/core/instrument/planar_detector.py index fe0aa104a..749dd9ef2 100644 --- a/hexrd/core/instrument/planar_detector.py +++ b/hexrd/core/instrument/planar_detector.py @@ -1,10 +1,21 @@ import numpy as np from hexrd.core import constants as ct -from hexrd.core.transforms.xfcapi import angles_to_gvec, xy_to_gvec, gvec_to_xy, make_beam_rmat, angles_to_dvec +from hexrd.core.transforms.xfcapi import ( + angles_to_gvec, + xy_to_gvec, + gvec_to_xy, + make_beam_rmat, + angles_to_dvec, +) from hexrd.core.utils.decorators import memoize -from .detector import Detector, _solid_angle_of_triangle, _row_edge_vec, _col_edge_vec +from .detector import ( + Detector, + _solid_angle_of_triangle, + _row_edge_vec, + _col_edge_vec, +) from functools import partial from hexrd.core.gridutil import cellConnectivity @@ -22,10 +33,14 @@ def __init__(self, **detector_kwargs): def detector_type(self): return 'planar' - def cart_to_angles(self, xy_data, - rmat_s=None, - tvec_s=None, tvec_c=None, - apply_distortion=False): + def cart_to_angles( + self, + xy_data, + rmat_s=None, + tvec_s=None, + tvec_c=None, + apply_distortion=False, + ): if rmat_s is None: rmat_s = ct.identity_3x3 if tvec_s is None: @@ -36,16 +51,26 @@ def cart_to_angles(self, xy_data, xy_data = self.distortion.apply(xy_data) rmat_b = make_beam_rmat(self.bvec, self.evec) angs, g_vec = xy_to_gvec( - xy_data, self.rmat, rmat_s, - self.tvec, tvec_s, tvec_c, - rmat_b=rmat_b) + xy_data, + self.rmat, + rmat_s, + self.tvec, + tvec_s, + tvec_c, + rmat_b=rmat_b, + ) tth_eta = np.vstack([angs[0], angs[1]]).T return tth_eta, g_vec - def angles_to_cart(self, tth_eta, - rmat_s=None, tvec_s=None, - rmat_c=None, tvec_c=None, - apply_distortion=False): + def angles_to_cart( + self, + tth_eta, + rmat_s=None, + tvec_s=None, + rmat_c=None, 
+ tvec_c=None, + apply_distortion=False, + ): if rmat_s is None: rmat_s = ct.identity_3x3 if tvec_s is None: @@ -63,13 +88,19 @@ def angles_to_cart(self, tth_eta, ome = np.arccos(rmat_s[0, 0]) angs = np.hstack([tth_eta, np.tile(ome, (len(tth_eta), 1))]) - gvec = angles_to_gvec(angs, beam_vec=self.bvec, eta_vec=self.evec, - chi=chi) + gvec = angles_to_gvec( + angs, beam_vec=self.bvec, eta_vec=self.evec, chi=chi + ) xy_det = gvec_to_xy( gvec, - self.rmat, rmat_s, rmat_c, - self.tvec, tvec_s, tvec_c, - beam_vec=self.bvec) + self.rmat, + rmat_s, + rmat_c, + self.tvec, + tvec_s, + tvec_c, + beam_vec=self.bvec, + ) if apply_distortion and self.distortion is not None: xy_det = self.distortion.apply_inverse(xy_det) return xy_det @@ -80,21 +111,47 @@ def cart_to_dvecs(self, xy_data): return np.dot(crds, self.rmat.T) + self.tvec def pixel_angles(self, origin=ct.zeros_3): - return _pixel_angles(origin, self.pixel_coords, self.distortion, - self.rmat, self.tvec, self.bvec, self.evec, - self.rows, self.cols) + return _pixel_angles( + origin, + self.pixel_coords, + self.distortion, + self.rmat, + self.tvec, + self.bvec, + self.evec, + self.rows, + self.cols, + ) def pixel_tth_gradient(self, origin=ct.zeros_3): - return _pixel_tth_gradient(origin, self.pixel_coords, self.distortion, - self.rmat, self.tvec, self.bvec, self.evec, - self.rows, self.cols) + return _pixel_tth_gradient( + origin, + self.pixel_coords, + self.distortion, + self.rmat, + self.tvec, + self.bvec, + self.evec, + self.rows, + self.cols, + ) def pixel_eta_gradient(self, origin=ct.zeros_3): - return _pixel_eta_gradient(origin, self.pixel_coords, self.distortion, - self.rmat, self.tvec, self.bvec, self.evec, - self.rows, self.cols) + return _pixel_eta_gradient( + origin, + self.pixel_coords, + self.distortion, + self.rmat, + self.tvec, + self.bvec, + self.evec, + self.rows, + self.cols, + ) - def calc_filter_coating_transmission(self, energy: np.floating) -> tuple[np.ndarray, np.ndarray]: + def calc_filter_coating_transmission( + self, energy: np.floating + ) -> tuple[np.ndarray, np.ndarray]: """ calculate thetrnasmission after x-ray beam interacts with the filter and the mylar polymer coating. 
@@ -117,26 +174,29 @@ def calc_filter_coating_transmission(self, energy: np.floating) -> tuple[np.ndar t_f = self.filter.thickness t_c = self.coating.thickness t_p = self.phosphor.thickness - L = self.phosphor.readout_length + L = self.phosphor.readout_length pre_U0 = self.phosphor.pre_U0 det_normal = -self.normal bvec = self.bvec tth, eta = self.pixel_angles() - angs = np.vstack((tth.flatten(), eta.flatten(), - np.zeros(tth.flatten().shape))).T + angs = np.vstack( + (tth.flatten(), eta.flatten(), np.zeros(tth.flatten().shape)) + ).T dvecs = angles_to_dvec(angs, beam_vec=bvec) - secb = 1./np.dot(dvecs, det_normal).reshape(self.shape) + secb = 1.0 / np.dot(dvecs, det_normal).reshape(self.shape) - transmission_filter = self.calc_transmission_generic(secb, t_f, al_f) + transmission_filter = self.calc_transmission_generic(secb, t_f, al_f) transmission_coating = self.calc_transmission_generic(secb, t_c, al_c) - transmission_phosphor = ( - self.calc_transmission_phosphor(secb, t_p, al_p, L, energy, pre_U0)) + transmission_phosphor = self.calc_transmission_phosphor( + secb, t_p, al_p, L, energy, pre_U0 + ) transmission_filter_coating = ( - transmission_filter * transmission_coating) + transmission_filter * transmission_coating + ) return transmission_filter_coating, transmission_phosphor @@ -144,12 +204,9 @@ def calc_filter_coating_transmission(self, energy: np.floating) -> tuple[np.ndar def beam_position(self): output = np.nan * np.ones(2) b_dot_n = np.dot(self.bvec, self.normal) - if np.logical_and( - abs(b_dot_n) > ct.sqrt_epsf, - np.sign(b_dot_n) == -1 - ): + if np.logical_and(abs(b_dot_n) > ct.sqrt_epsf, np.sign(b_dot_n) == -1): u = np.dot(self.normal, self.tvec) / b_dot_n - p2_l = u*self.bvec + p2_l = u * self.bvec p2_d = np.dot(self.rmat.T, p2_l - self.tvec) output = p2_d[:2] return output @@ -182,24 +239,20 @@ def update_memoization_sizes(all_panels): @memoize -def _pixel_angles(origin, pixel_coords, distortion, rmat, tvec, bvec, evec, - rows, cols): +def _pixel_angles( + origin, pixel_coords, distortion, rmat, tvec, bvec, evec, rows, cols +): assert len(origin) == 3, "origin must have 3 elements" pix_i, pix_j = pixel_coords - xy = np.ascontiguousarray( - np.vstack([ - pix_j.flatten(), pix_i.flatten() - ]).T - ) + xy = np.ascontiguousarray(np.vstack([pix_j.flatten(), pix_i.flatten()]).T) if distortion is not None: xy = distortion.apply(xy) rmat_b = make_beam_rmat(bvec, evec) angs, g_vec = xy_to_gvec( - xy, rmat, ct.identity_3x3, - tvec, ct.zeros_3, origin, - rmat_b=rmat_b) + xy, rmat, ct.identity_3x3, tvec, ct.zeros_3, origin, rmat_b=rmat_b + ) tth = angs[0].reshape(rows, cols) eta = angs[1].reshape(rows, cols) @@ -207,20 +260,24 @@ def _pixel_angles(origin, pixel_coords, distortion, rmat, tvec, bvec, evec, @memoize -def _pixel_tth_gradient(origin, pixel_coords, distortion, rmat, tvec, bvec, - evec, rows, cols): +def _pixel_tth_gradient( + origin, pixel_coords, distortion, rmat, tvec, bvec, evec, rows, cols +): assert len(origin) == 3, "origin must have 3 elements" - ptth, _ = _pixel_angles(origin, pixel_coords, distortion, rmat, tvec, - bvec, evec, rows, cols) + ptth, _ = _pixel_angles( + origin, pixel_coords, distortion, rmat, tvec, bvec, evec, rows, cols + ) return np.linalg.norm(np.stack(np.gradient(ptth)), axis=0) @memoize -def _pixel_eta_gradient(origin, pixel_coords, distortion, rmat, tvec, bvec, - evec, rows, cols): +def _pixel_eta_gradient( + origin, pixel_coords, distortion, rmat, tvec, bvec, evec, rows, cols +): assert len(origin) == 3, "origin must have 3 elemnts" - _, peta = 
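calc_filter_coating_transmission above corrects each pixel for obliquity: the path through a flat filter of thickness t grows by sec(beta), the secant of the angle between the diffracted ray and the detector normal, which is exactly the 1/np.dot(dvecs, det_normal) term. A minimal Beer-Lambert sketch of that correction; the exponential form is an assumption here, since calc_transmission_generic is not shown in this patch:

import numpy as np

def transmission_generic(secb, thickness, absorption_length):
    # assumed Beer-Lambert form with path length t * sec(beta)
    return np.exp(-thickness * secb / absorption_length)

det_normal = np.array([0.0, 0.0, 1.0])  # hypothetical
dvec = np.array([0.1, 0.0, 0.995])
dvec /= np.linalg.norm(dvec)
secb = 1.0 / np.dot(dvec, det_normal)   # >= 1 for oblique incidence

T = transmission_generic(secb, thickness=0.01, absorption_length=0.005)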
_pixel_angles(origin, pixel_coords, distortion, rmat, tvec, - bvec, evec, rows, cols) + _, peta = _pixel_angles( + origin, pixel_coords, distortion, rmat, tvec, bvec, evec, rows, cols + ) peta_grad_row = np.gradient(peta, axis=0) peta_grad_col = np.gradient(peta, axis=1) @@ -234,13 +291,13 @@ def _pixel_eta_gradient(origin, pixel_coords, distortion, rmat, tvec, bvec, def _fix_branch_cut_in_gradients(pgarray): return np.min( - np.abs(np.stack([pgarray - np.pi, pgarray, pgarray + np.pi])), - axis=0 + np.abs(np.stack([pgarray - np.pi, pgarray, pgarray + np.pi])), axis=0 ) -def _generate_pixel_solid_angles(start_stop, rows, cols, pixel_size_row, - pixel_size_col, rmat, tvec): +def _generate_pixel_solid_angles( + start_stop, rows, cols, pixel_size_row, pixel_size_col, rmat, tvec +): start, stop = start_stop row_edge_vec = _row_edge_vec(rows, pixel_size_row) col_edge_vec = _col_edge_vec(cols, pixel_size_col) @@ -250,10 +307,12 @@ def _generate_pixel_solid_angles(start_stop, rows, cols, pixel_size_row, pvy, pvx = np.meshgrid(row_edge_vec, col_edge_vec, indexing='ij') # add Z_d coord and transform to lab frame - pcrd_array_full = np.dot( - np.vstack([pvx.flatten(), pvy.flatten(), np.zeros(nvtx)]).T, - rmat.T - ) + tvec + pcrd_array_full = ( + np.dot( + np.vstack([pvx.flatten(), pvy.flatten(), np.zeros(nvtx)]).T, rmat.T + ) + + tvec + ) conn = cellConnectivity(rows, cols) @@ -262,15 +321,17 @@ def _generate_pixel_solid_angles(start_stop, rows, cols, pixel_size_row, for i, ipix in enumerate(range(start, stop)): pix_conn = conn[ipix] vtx_list = pcrd_array_full[pix_conn, :] - ret[i] = (_solid_angle_of_triangle(vtx_list[[0, 1, 2], :]) + - _solid_angle_of_triangle(vtx_list[[2, 3, 0], :])) + ret[i] = _solid_angle_of_triangle( + vtx_list[[0, 1, 2], :] + ) + _solid_angle_of_triangle(vtx_list[[2, 3, 0], :]) return ret @memoize -def _pixel_solid_angles(rows, cols, pixel_size_row, pixel_size_col, - rmat, tvec, max_workers): +def _pixel_solid_angles( + rows, cols, pixel_size_row, pixel_size_col, rmat, tvec, max_workers +): # connectivity array for pixels conn = cellConnectivity(rows, cols) @@ -288,8 +349,9 @@ def _pixel_solid_angles(rows, cols, pixel_size_row, pixel_size_col, 'tvec': tvec, } func = partial(_generate_pixel_solid_angles, **kwargs) - with ProcessPoolExecutor(mp_context=ct.mp_context, - max_workers=max_workers) as executor: + with ProcessPoolExecutor( + mp_context=ct.mp_context, max_workers=max_workers + ) as executor: results = executor.map(func, tasks) # Concatenate all the results together diff --git a/hexrd/core/material/__init__.py b/hexrd/core/material/__init__.py index b46571887..8ce0c9625 100644 --- a/hexrd/core/material/__init__.py +++ b/hexrd/core/material/__init__.py @@ -1 +1,7 @@ -from .material import _angstroms, _kev, load_materials_hdf5, Material, save_materials_hdf5 +from .material import ( + _angstroms, + _kev, + load_materials_hdf5, + Material, + save_materials_hdf5, +) diff --git a/hexrd/core/material/crystallography.py b/hexrd/core/material/crystallography.py index 482c625d9..29e621972 100644 --- a/hexrd/core/material/crystallography.py +++ b/hexrd/core/material/crystallography.py @@ -38,7 +38,13 @@ from hexrd.core.deprecation import deprecated from hexrd.core import constants from hexrd.core.matrixutil import unitVector -from hexrd.core.rotations import rotMatOfExpMap, mapAngle, applySym, ltypeOfLaueGroup, quatOfLaueGroup +from hexrd.core.rotations import ( + rotMatOfExpMap, + mapAngle, + applySym, + ltypeOfLaueGroup, + quatOfLaueGroup, +) from hexrd.core.transforms import 
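_generate_pixel_solid_angles above splits each pixel quad into two triangles, (0, 1, 2) and (2, 3, 0), and sums their solid angles. _solid_angle_of_triangle itself is imported from .detector and not shown in this patch; the standard Van Oosterom-Strackee formula below stands in for it as an assumption:

import numpy as np

def solid_angle_of_triangle(vtx):
    # vtx: 3x3 array of triangle vertices as vectors from the origin
    r1, r2, r3 = vtx
    n1, n2, n3 = (np.linalg.norm(r) for r in vtx)
    numer = np.abs(np.dot(r1, np.cross(r2, r3)))
    denom = (n1 * n2 * n3 + np.dot(r1, r2) * n3
             + np.dot(r1, r3) * n2 + np.dot(r2, r3) * n1)
    return 2.0 * np.arctan2(numer, denom)

# a square pixel 10 length-units from the origin, split as above
quad = np.array([[0., 0., 10.], [1., 0., 10.],
                 [1., 1., 10.], [0., 1., 10.]])
omega = (solid_angle_of_triangle(quad[[0, 1, 2]])
         + solid_angle_of_triangle(quad[[2, 3, 0]]))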
xfcapi from hexrd.core import valunits from hexrd.core.valunits import toFloat @@ -159,6 +165,7 @@ def processWavelength(arg: Union[valunits.valWUnit, float]) -> float: 'wavelength', 'length', constants.keVToAngstrom(arg), 'angstrom' ).getVal(dUnit) + def latticeParameters(lvec): """ Generates direct and reciprocal lattice vector components in a @@ -186,6 +193,7 @@ def latticeParameters(lvec): return [a, b, c, alfa, beta, gama] + def latticePlanes( hkls: np.ndarray, lparms: np.ndarray, @@ -562,6 +570,7 @@ def latticeVectors( 'rparms': rparms, } + def hexagonalIndicesFromRhombohedral(hkl): """ converts rhombohedral hkl to hexagonal indices @@ -909,7 +918,7 @@ def exclusions(self, new_exclusions: Optional[np.ndarray]) -> None: elif len(exclusions.shape) == 2: # treat exclusions as ranges of indices for r in exclusions: - excl[self.tThSort[r[0]:r[1]]] = True + excl[self.tThSort[r[0] : r[1]]] = True else: raise RuntimeError( f'Unclear behavior for shape {exclusions.shape}' @@ -1844,8 +1853,10 @@ def get_exclusions(self): def set_exclusions(self, exclusions): self.exclusions = exclusions - @deprecated(new_func="rotations.ltypeOfLaueGroup(self.laueGroup)", - removal_date="2025-08-01") + @deprecated( + new_func="rotations.ltypeOfLaueGroup(self.laueGroup)", + removal_date="2025-08-01", + ) def getLatticeType(self): return ltypeOfLaueGroup(self.laueGroup) diff --git a/hexrd/core/material/jcpds.py b/hexrd/core/material/jcpds.py index 0affaa8c2..202b29639 100644 --- a/hexrd/core/material/jcpds.py +++ b/hexrd/core/material/jcpds.py @@ -2,7 +2,7 @@ import numpy as np -class JCPDS_extend(): +class JCPDS_extend: def __init__(self, filename=None): self.a0 = 0 self.b0 = 0 @@ -40,15 +40,15 @@ def read_file(self, file): # Construct base name = file without path and without extension name = os.path.splitext(os.path.basename(self.file))[0] self.name = name -# line = '', nd=0 - version = 0. + # line = '', nd=0 + version = 0.0 self.comments = [] self.DiffLines = [] version_status = '' inp = open(file, 'r').readlines() -# my_list = [] # get all the text first and throw into my_list + # my_list = [] # get all the text first and throw into my_list if inp[0][0] in ('2', '3', '4'): version = int(inp[0]) # JCPDS version number @@ -86,44 +86,44 @@ def read_file(self, file): a = float(item[0]) b = a c = a - alpha = 90. - beta = 90. - gamma = 90. + alpha = 90.0 + beta = 90.0 + gamma = 90.0 elif crystal_system == 7: # P, d-sp input a = float(item[0]) b = a c = a - alpha = 90. - beta = 90. - gamma = 90. + alpha = 90.0 + beta = 90.0 + gamma = 90.0 elif crystal_system == 2: # hexagonal a = float(item[0]) c = float(item[1]) b = a - alpha = 90. - beta = 90. - gamma = 120. + alpha = 90.0 + beta = 90.0 + gamma = 120.0 elif crystal_system == 3: # tetragonal a = float(item[0]) c = float(item[1]) b = a - alpha = 90. - beta = 90. - gamma = 90. + alpha = 90.0 + beta = 90.0 + gamma = 90.0 elif crystal_system == 4: # orthorhombic a = float(item[0]) b = float(item[1]) c = float(item[2]) - alpha = 90. - beta = 90. - gamma = 90. + alpha = 90.0 + beta = 90.0 + gamma = 90.0 elif crystal_system == 5: # monoclinic a = float(item[0]) b = float(item[1]) c = float(item[2]) beta = float(item[3]) - alpha = 90. - gamma = 90. + alpha = 90.0 + gamma = 90.0 elif crystal_system == 6: # triclinic a = float(item[0]) b = float(item[1]) @@ -142,7 +142,7 @@ def read_file(self, file): item = str.split(inp[4]) if self.version == 3: - alpha_t = 0. 
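Each crystal-system branch in read_file above reduces to "take the free lengths and angles, fill the rest with symmetry-implied values". Restated compactly, with the same JCPDS numbering (1 cubic, 2 hexagonal, 3 tetragonal, 4 orthorhombic, 5 monoclinic, 6 triclinic, 7 d-spacing input):

def implied_lattice(crystal_system, item):
    # item: the whitespace-split parameter line from the JCPDS file
    a = float(item[0])
    if crystal_system in (1, 7):
        return a, a, a, 90.0, 90.0, 90.0
    if crystal_system == 2:
        return a, a, float(item[1]), 90.0, 90.0, 120.0
    if crystal_system == 3:
        return a, a, float(item[1]), 90.0, 90.0, 90.0
    if crystal_system == 4:
        return a, float(item[1]), float(item[2]), 90.0, 90.0, 90.0
    if crystal_system == 5:
        return a, float(item[1]), float(item[2]), 90.0, float(item[3]), 90.0
    if crystal_system == 6:
        return (a, float(item[1]), float(item[2]),
                float(item[3]), float(item[4]), float(item[5]))
    raise ValueError(f'unknown crystal system {crystal_system}')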
+ alpha_t = 0.0 else: alpha_t = float(item[0]) self.alpha_t = alpha_t @@ -227,32 +227,32 @@ def read_file(self, file): if self.symmetry == 'cubic': self.b0 = self.a0 self.c0 = self.a0 - self.alpha0 = 90. - self.beta0 = 90. - self.gamma0 = 90. + self.alpha0 = 90.0 + self.beta0 = 90.0 + self.gamma0 = 90.0 elif self.symmetry == 'manual': self.b0 = self.a0 self.c0 = self.a0 - self.alpha0 = 90. - self.beta0 = 90. - self.gamma0 = 90. + self.alpha0 = 90.0 + self.beta0 = 90.0 + self.gamma0 = 90.0 elif self.symmetry == 'hexagonal' or self.symmetry == 'trigonal': self.b0 = self.a0 - self.alpha0 = 90. - self.beta0 = 90. - self.gamma0 = 120. + self.alpha0 = 90.0 + self.beta0 = 90.0 + self.gamma0 = 120.0 elif self.symmetry == 'tetragonal': self.b0 = self.a0 - self.alpha0 = 90. - self.beta0 = 90. - self.gamma0 = 90. + self.alpha0 = 90.0 + self.beta0 = 90.0 + self.gamma0 = 90.0 elif self.symmetry == 'orthorhombic': - self.alpha0 = 90. - self.beta0 = 90. - self.gamma0 = 90. + self.alpha0 = 90.0 + self.beta0 = 90.0 + self.gamma0 = 90.0 elif self.symmetry == 'monoclinic': - self.alpha0 = 90. - self.gamma0 = 90. + self.alpha0 = 90.0 + self.gamma0 = 90.0 # elif self.symmetry == 'triclinic': jcpdsfile.close() @@ -317,10 +317,9 @@ def calc_volume_unitcell(self): cb = np.cos(np.radians(self.beta0)) cg = np.cos(np.radians(self.gamma0)) - v0 = self.a0*self.b0*self.c0 - f = np.sqrt(1 - ca**2 - cb**2 - cg**2 - + 2 * ca * cb * cg) - return v0*f + v0 = self.a0 * self.b0 * self.c0 + f = np.sqrt(1 - ca**2 - cb**2 - cg**2 + 2 * ca * cb * cg) + return v0 * f class SymmetryMismatch(Exception): diff --git a/hexrd/core/material/material.py b/hexrd/core/material/material.py index 4500fdadb..e2081b2b2 100644 --- a/hexrd/core/material/material.py +++ b/hexrd/core/material/material.py @@ -46,7 +46,11 @@ import h5py from warnings import warn from hexrd.core.material.mksupport import Write2H5File -from hexrd.core.material.symbols import xtal_sys_dict, Hall_to_sgnum, HM_to_sgnum +from hexrd.core.material.symbols import ( + xtal_sys_dict, + Hall_to_sgnum, + HM_to_sgnum, +) from hexrd.core.utils.compatibility import h5py_read_string from hexrd.core.fitting.peakfunctions import _unit_gaussian @@ -80,6 +84,7 @@ def get_default_sgsetting(sgnum): else: return 0 + # # ---------------------------------------------------CLASS: Material # @@ -486,10 +491,12 @@ def pt_lp_factor(self): def lparms0(self): # Get the lattice parameters for 0 pressure and temperature (at v0) lparms = self.lparms - return np.array([ - *(lparms[:3] / self.pt_lp_factor), - *lparms[3:], - ]) + return np.array( + [ + *(lparms[:3] / self.pt_lp_factor), + *lparms[3:], + ] + ) def calc_pressure(self, volume=None, temperature=None): '''calculate the pressure given the volume @@ -903,13 +910,15 @@ def _readHDFxtal(self, fhdf=DFLT_NAME, xtal=DFLT_NAME): ''' self.pressure = 0 if 'pressure' in gid: - self.pressure = np.array(gid.get('pressure'), - dtype=np.float64).item() + self.pressure = np.array( + gid.get('pressure'), dtype=np.float64 + ).item() self.temperature = 298 if 'temperature' in gid: - self.temperature = np.array(gid.get('temperature'), - dtype=np.float64).item() + self.temperature = np.array( + gid.get('temperature'), dtype=np.float64 + ).item() self.k0 = 100.0 if 'k0' in gid: @@ -949,8 +958,9 @@ def _readHDFxtal(self, fhdf=DFLT_NAME, xtal=DFLT_NAME): if 'dalpha_t_dt' in gid: # this is the temperature derivation of # the pressure derivative of isotropic bulk modulus - dalpha_t_dt = np.array(gid.get('dalpha_t_dt'), - dtype=np.float64).item() + dalpha_t_dt = 
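calc_volume_unitcell above is the general triclinic cell volume, V = a*b*c*sqrt(1 - cos^2(alpha) - cos^2(beta) - cos^2(gamma) + 2*cos(alpha)*cos(beta)*cos(gamma)). A quick sanity check: at 90-degree angles the correction factor is 1 and V = a*b*c:

import numpy as np

def cell_volume(a, b, c, alpha, beta, gamma):
    ca, cb, cg = np.cos(np.radians([alpha, beta, gamma]))
    f = np.sqrt(1 - ca**2 - cb**2 - cg**2 + 2 * ca * cb * cg)
    return a * b * c * f

assert np.isclose(cell_volume(4.05, 4.05, 4.05, 90, 90, 90), 4.05**3)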
np.array( + gid.get('dalpha_t_dt'), dtype=np.float64 + ).item() self.dalpha_t_dt = dalpha_t_dt '''Finished with the BM EOS @@ -1098,11 +1108,13 @@ def lparms(self): def lparms(self, v): # Assume we are in `nm`, since that is what self.lparms returns. # Convert to angstroms and set with latticeParameters - self.latticeParameters = np.array([ - # Convert to angstroms - *(v[:3] * 10), - *v[3:], - ]) + self.latticeParameters = np.array( + [ + # Convert to angstroms + *(v[:3] * 10), + *v[3:], + ] + ) @property def latticeType(self): @@ -1160,8 +1172,7 @@ def sgsetting(self, val): if val in [0, 1]: self._sgsetting = val else: - msg = (f'space group setting must be either 0' - f' or 1.') + msg = f'space group setting must be either 0' f' or 1.' raise ValueError(msg) sgnum = property(_get_sgnum, _set_sgnum, None, "Space group number") @@ -1443,8 +1454,7 @@ def loadMaterialList(cfgFile): return matList -def load_materials_hdf5( - f, dmin=None, kev=None): +def load_materials_hdf5(f, dmin=None, kev=None): """Load materials from an HDF5 file The file uses the HDF5 file format. diff --git a/hexrd/core/material/mksupport.py b/hexrd/core/material/mksupport.py index 17354b39f..9d40712c9 100644 --- a/hexrd/core/material/mksupport.py +++ b/hexrd/core/material/mksupport.py @@ -1,4 +1,11 @@ -from hexrd.core.material.symbols import pstr_Elements, two_origin_choice, PrintPossibleSG, TRIG, pstr_spacegroup, pstr_mkxtal +from hexrd.core.material.symbols import ( + pstr_Elements, + two_origin_choice, + PrintPossibleSG, + TRIG, + pstr_spacegroup, + pstr_mkxtal, +) import h5py import os import numpy as np @@ -21,9 +28,15 @@ def mk(filename, xtalname): space_group, iset = GetSpaceGroup(xtal_sys, bool_trigonal, bool_hexset) AtomInfo = GetAtomInfo() - AtomInfo.update({'file': filename, 'xtalname': xtalname, - 'xtal_sys': xtal_sys, 'SG': space_group, - 'SGsetting': iset}) + AtomInfo.update( + { + 'file': filename, + 'xtalname': xtalname, + 'xtal_sys': xtal_sys, + 'SG': space_group, + 'SGsetting': iset, + } + ) Write2H5File(AtomInfo, lat_param) @@ -31,41 +44,44 @@ def mk(filename, xtalname): def GetXtalSystem(): xtal_sys = input("Crystal System (1-7 use the legend above): ") - if(not xtal_sys.isdigit()): + if not xtal_sys.isdigit(): raise ValueError( "Invalid value. \ - Please enter valid number between 1-7 using the legend above.") + Please enter valid number between 1-7 using the legend above." + ) else: xtal_sys = int(xtal_sys) - if(xtal_sys < 1 or xtal_sys > 7): + if xtal_sys < 1 or xtal_sys > 7: raise ValueError( "Value outside range. \ - Please enter numbers between 1 and 7 using legend above") + Please enter numbers between 1 and 7 using legend above" + ) btrigonal = False bhexset = False - if(xtal_sys == 5): + if xtal_sys == 5: print(" 1. Hexagonal setting \n 2. Rhombohedral setting") hexset = input("(1/2)? : ") - if(not hexset.isdigit()): + if not hexset.isdigit(): raise ValueError("Invalid value.") else: hexset = int(hexset) - if(not hexset in [1, 2]): + if not hexset in [1, 2]: raise ValueError( - "Invalid value of integer. Only 1 or 2 is acceptable.") + "Invalid value of integer. Only 1 or 2 is acceptable." 
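The lparms setter above assumes values arrive in nm (what the getter returns), converts the three lengths to angstroms, and leaves the angles alone. In isolation:

import numpy as np

lparms_nm = np.array([0.405, 0.405, 0.405, 90.0, 90.0, 90.0])
lattice_parameters = np.array([*(lparms_nm[:3] * 10), *lparms_nm[3:]])
# -> [4.05, 4.05, 4.05, 90.0, 90.0, 90.0], angstroms plus degrees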
+ ) btrigonal = True - if(hexset == 1): + if hexset == 1: bhexset = True xtal_sys = 4 # only temporarily set to 4 so that the correct # lattice parameter can be queried next - elif(hexset == 2): + elif hexset == 2: bhexset = False return xtal_sys, btrigonal, bhexset @@ -74,7 +90,7 @@ def GetXtalSystem(): def GetLatticeParameters(xtal_sys, bool_trigonal): a = input("a [nm] : ") - if(not a.replace('.', '', 1).isdigit()): + if not a.replace('.', '', 1).isdigit(): raise ValueError("Invalid floating point value.") else: a = float(a) @@ -84,18 +100,24 @@ def GetLatticeParameters(xtal_sys, bool_trigonal): alpha = 90.0 beta = 90.0 gamma = 90.0 - lat_param = {'a': a, 'b': b, 'c': c, - 'alpha': alpha, 'beta': beta, 'gamma': gamma} + lat_param = { + 'a': a, + 'b': b, + 'c': c, + 'alpha': alpha, + 'beta': beta, + 'gamma': gamma, + } # cubic symmetry - if (xtal_sys == 1): + if xtal_sys == 1: pass # tetragonal symmetry - elif(xtal_sys == 2): + elif xtal_sys == 2: c = input("c [nm] : ") - if(not c.replace('.', '', 1).isdigit()): + if not c.replace('.', '', 1).isdigit(): raise ValueError("Invalid floating point value.") else: c = float(c) @@ -103,15 +125,15 @@ def GetLatticeParameters(xtal_sys, bool_trigonal): lat_param['c'] = c # orthorhombic symmetry - elif(xtal_sys == 3): + elif xtal_sys == 3: b = input("b [nm] : ") - if(not b.replace('.', '', 1).isdigit()): + if not b.replace('.', '', 1).isdigit(): raise ValueError("Invalid floating point value.") else: b = float(b) c = input("c [nm] : ") - if(not c.replace('.', '', 1).isdigit()): + if not c.replace('.', '', 1).isdigit(): raise ValueError("Invalid floating point value.") else: c = float(c) @@ -120,9 +142,9 @@ def GetLatticeParameters(xtal_sys, bool_trigonal): lat_param['c'] = c # hexagonal system - elif(xtal_sys == 4): + elif xtal_sys == 4: c = input("c [nm] : ") - if(not c.replace('.', '', 1).isdigit()): + if not c.replace('.', '', 1).isdigit(): raise ValueError("Invalid floating point value.") else: c = float(c) @@ -130,13 +152,13 @@ def GetLatticeParameters(xtal_sys, bool_trigonal): lat_param['c'] = c lat_param['gamma'] = 120.0 - if(bool_trigonal): + if bool_trigonal: xtal_sys = 5 # rhombohedral system - elif(xtal_sys == 5): + elif xtal_sys == 5: alpha = input("alpha [deg] : ") - if(not alpha.replace('.', '', 1).isdigit()): + if not alpha.replace('.', '', 1).isdigit(): raise ValueError("Invalid floating point value.") else: alpha = float(alpha) @@ -146,21 +168,21 @@ def GetLatticeParameters(xtal_sys, bool_trigonal): lat_param['gamma'] = alpha # monoclinic system - elif(xtal_sys == 6): + elif xtal_sys == 6: b = input("b [nm] : ") - if(not b.replace('.', '', 1).isdigit()): + if not b.replace('.', '', 1).isdigit(): raise ValueError("Invalid floating point value.") else: b = float(b) c = input("c [nm] : ") - if(not c.replace('.', '', 1).isdigit()): + if not c.replace('.', '', 1).isdigit(): raise ValueError("Invalid floating point value.") else: c = float(c) beta = input("beta [deg] : ") - if(not beta.replace('.', '', 1).isdigit()): + if not beta.replace('.', '', 1).isdigit(): raise ValueError("Invalid floating point value.") else: beta = float(beta) @@ -170,33 +192,33 @@ def GetLatticeParameters(xtal_sys, bool_trigonal): lat_param['beta'] = beta # triclinic system - elif(xtal_sys == 7): + elif xtal_sys == 7: b = input("b [nm] : ") - if(not b.replace('.', '', 1).isdigit()): + if not b.replace('.', '', 1).isdigit(): raise ValueError("Invalid floating point value.") else: b = float(b) c = input("c [nm] : ") - if(not c.replace('.', '', 1).isdigit()): + if 
not c.replace('.', '', 1).isdigit(): raise ValueError("Invalid floating point value.") else: c = float(c) alpha = input("alpha [deg] : ") - if(not alpha.replace('.', '', 1).isdigit()): + if not alpha.replace('.', '', 1).isdigit(): raise ValueError("Invalid floating point value.") else: alpha = float(alpha) beta = input("beta [deg] : ") - if(not beta.replace('.', '', 1).isdigit()): + if not beta.replace('.', '', 1).isdigit(): raise ValueError("Invalid floating point value.") else: beta = float(beta) gamma = input("gamma [deg] : ") - if(not gamma.replace('.', '', 1).isdigit()): + if not gamma.replace('.', '', 1).isdigit(): raise ValueError("Invalid floating point value.") else: gamma = float(gamma) @@ -213,58 +235,63 @@ def GetLatticeParameters(xtal_sys, bool_trigonal): def GetSpaceGroup(xtal_sys, btrigonal, bhexset): - if(btrigonal): + if btrigonal: xtal_sys = 5 - if(btrigonal and (not bhexset)): + if btrigonal and (not bhexset): print("\n The space groups below correspond to the ") print("second (rhombohedral) setting.") print(" Please select one of these space groups.\n") for i in range(0, 7): pstr = str(TRIG[i]) + ":" + pstr_spacegroup[TRIG[i]] - if ((i + 1) % 4 == 0 or i == 6): + if (i + 1) % 4 == 0 or i == 6: print(pstr) else: print(pstr, end='') - print(50*"-"+"\n") + print(50 * "-" + "\n") else: sgmin, sgmax = PrintPossibleSG(xtal_sys) sg = input("Space group number (use legend above): ") - if(not sg.isdigit()): + if not sg.isdigit(): raise ValueError( "Invalid value. Please enter valid number between \ - 1 and 230 using the legend above.") + 1 and 230 using the legend above." + ) else: sg = int(sg) - if(btrigonal and (not bhexset)): - if(not sg in TRIG): + if btrigonal and (not bhexset): + if not sg in TRIG: raise ValueError( "Invalid space group entered. \ - Please use one of the space groups from the legend above") - if (sg == 146): + Please use one of the space groups from the legend above" + ) + if sg == 146: sg = 231 - if (sg == 148): + if sg == 148: sg = 232 - if (sg == 155): + if sg == 155: sg = 233 - if (sg == 160): + if sg == 160: sg = 234 - if (sg == 161): + if sg == 161: sg = 235 - if (sg == 166): + if sg == 166: sg = 236 - if (sg == 167): + if sg == 167: sg = 237 else: - if(sg < sgmin or sg > sgmax): + if sg < sgmin or sg > sgmax: raise ValueError( "Value outside range. 
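The chain of equality checks above maps the seven rhombohedral-setting trigonal space groups onto the internal slots 231-237; as a lookup it is simply:

RHOMBOHEDRAL_SETTING = {
    146: 231, 148: 232, 155: 233, 160: 234, 161: 235, 166: 236, 167: 237,
}

sg = 148                                # hypothetical user input
sg = RHOMBOHEDRAL_SETTING.get(sg, sg)   # -> 232; other groups pass through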
Please enter numbers between \ - {} and {} using legend above".format(sgmin, sgmax)) + {} and {} using legend above".format( + sgmin, sgmax + ) + ) iset = SpaceGroupSetting(sg) @@ -274,25 +301,24 @@ def GetSpaceGroup(xtal_sys, btrigonal, bhexset): def SpaceGroupSetting(sgnum): iset = 1 - if(sgnum in two_origin_choice): + if sgnum in two_origin_choice: sitesym1 = two_origin_choice[sgnum][0] sitesym2 = two_origin_choice[sgnum][1] print(' ---------------------------------------------') print(' This space group has two origin settings.') - print(' The first setting has site symmetry : ' + - sitesym1) - print(' the second setting has site symmetry : ' + - sitesym2) + print(' The first setting has site symmetry : ' + sitesym1) + print(' the second setting has site symmetry : ' + sitesym2) iset = input(' Which setting do you wish to use (1/2) : ') - if(not iset.isdigit()): + if not iset.isdigit(): raise ValueError("Invalid integer value for atomic number.") else: iset = int(iset) print(iset) - if(not iset in [1, 2]): + if not iset in [1, 2]: raise ValueError(" Value entered for setting must be 1 or 2 !") - return iset-1 + return iset - 1 + def GetAtomInfo(): @@ -305,10 +331,10 @@ def GetAtomInfo(): stiffness = np.zeros([6, 6]) ques = 'y' - while(ques.strip().lower() == 'y' or ques.strip().lower() == 'yes'): + while ques.strip().lower() == 'y' or ques.strip().lower() == 'yes': tmp = input("Enter atomic number of species : ") - if(not tmp.isdigit()): + if not tmp.isdigit(): raise ValueError("Invalid integer value for atomic number.") else: tmp = int(tmp) @@ -326,32 +352,35 @@ def GetAsymmetricPositions(aniU): asym = input( "Enter asymmetric position of atom in unit cell \ - separated by comma (fractional coordinates) : ") + separated by comma (fractional coordinates) : " + ) asym = [x.strip() for x in asym.split(',')] for i, x in enumerate(asym): tmp = x.split('/') - if(len(tmp) == 2): - if(tmp[1].strip() != '0'): - asym[i] = str(float(tmp[0])/float(tmp[1])) + if len(tmp) == 2: + if tmp[1].strip() != '0': + asym[i] = str(float(tmp[0]) / float(tmp[1])) else: raise ValueError("Division by zero in fractional coordinates.") else: pass - if(len(asym) != 3): + if len(asym) != 3: raise ValueError("Need 3 coordinates in x,y,z fractional coordinates.") for i, x in enumerate(asym): - if(not x.replace('.', '', 1).isdigit()): + if not x.replace('.', '', 1).isdigit(): raise ValueError( - "Invalid floating point value in fractional coordinates.") + "Invalid floating point value in fractional coordinates." + ) else: asym[i] = float(x) - if(asym[i] < 0.0 or asym[i] >= 1.0): + if asym[i] < 0.0 or asym[i] >= 1.0: raise ValueError( " fractional coordinates only in the \ - range [0,1) i.e. 1 excluded") + range [0,1) i.e. 1 excluded" + ) occ, dw = GetOccDW(aniU=aniU) if isinstance(dw, float): @@ -366,73 +395,81 @@ def GetAsymmetricPositions(aniU): def GetOccDW(aniU=0): occ = input("Enter site occupation : ") - if(not occ.replace('.', '', 1).isdigit()): + if not occ.replace('.', '', 1).isdigit(): raise ValueError( - "Invalid floating point value in fractional coordinates.") + "Invalid floating point value in fractional coordinates." + ) else: occ = float(occ) - if(occ > 1.0 or occ <= 0.0): + if occ > 1.0 or occ <= 0.0: raise ValueError( - "site occupation can only in range (0,1.0] i.e. 0 excluded") + "site occupation can only in range (0,1.0] i.e. 0 excluded" + ) - if(aniU != 0): + if aniU != 0: ani = aniU else: ani = input( "Isotropic or anisotropic Debye-Waller factor? 
\n \ - 1 for isotropic, 2 for anisotropic : ") + 1 for isotropic, 2 for anisotropic : " + ) - if(not ani.isdigit()): + if not ani.isdigit(): raise ValueError("Invalid integer value for atomic number.") else: ani = int(ani) - if(ani == 1): + if ani == 1: dw = input("Enter isotropic Debye-Waller factor [nm^(-2)] : ") - if(not dw.replace('.', '', 1).isdigit()): + if not dw.replace('.', '', 1).isdigit(): raise ValueError( - "Invalid floating point value in fractional coordinates.") + "Invalid floating point value in fractional coordinates." + ) else: dw = float(dw) return [occ, dw] - elif(ani == 2): + elif ani == 2: - U = np.zeros([6, ]) + U = np.zeros( + [ + 6, + ] + ) U11 = input("Enter U11 [nm^2] : ") - if(not U11.replace('.', '', 1).isdigit()): + if not U11.replace('.', '', 1).isdigit(): raise ValueError("Invalid floating point value in U11.") else: U[0] = float(U11) U22 = input("Enter U22 [nm^2] : ") - if(not U22.replace('.', '', 1).isdigit()): + if not U22.replace('.', '', 1).isdigit(): raise ValueError("Invalid floating point value in U22.") else: U[1] = float(U22) U33 = input("Enter U33 [nm^2] : ") - if(not U33.replace('.', '', 1).isdigit()): + if not U33.replace('.', '', 1).isdigit(): raise ValueError("Invalid floating point value in U33.") else: U[2] = float(U33) U12 = input("Enter U12 [nm^2] : ") - if(not U12.replace('.', '', 1).isdigit()): + if not U12.replace('.', '', 1).isdigit(): raise ValueError("Invalid floating point value in U12.") else: U[3] = float(U12) U13 = input("Enter U13 [nm^2] : ") - if(not U13.replace('.', '', 1).isdigit()): + if not U13.replace('.', '', 1).isdigit(): raise ValueError("Invalid floating point value in U13.") else: U[4] = float(U13) U23 = input("Enter U23 [nm^2] : ") - if(not U23.replace('.', '', 1).isdigit()): + if not U23.replace('.', '', 1).isdigit(): raise ValueError("Invalid floating point value in U23.") else: U[5] = float(U23) @@ -442,6 +479,7 @@ def GetOccDW(aniU=0): else: raise ValueError("Invalid input. Only 1 or 2 acceptable.") + # write to H5 file @@ -453,10 +491,10 @@ def Write2H5File(AtomInfo, lat_param, path=None): # first check if file exists fexist = os.path.isfile(AtomInfo['file']) - if(fexist): + if fexist: fid = h5py.File(AtomInfo['file'], 'r+') else: - Warning('File doesn''t exist. creating it') + Warning('File doesn' 't exist. 
creating it') fid = h5py.File(AtomInfo['file'], 'x') close = True @@ -493,7 +531,8 @@ def WriteH5Data(fid, AtomInfo, lat_param, path=None): gid = fid.create_group(path) did = gid.create_dataset( - "Atomtypes", (len(AtomInfo['Z']), ), dtype=np.int32) + "Atomtypes", (len(AtomInfo['Z']),), dtype=np.int32 + ) did.write_direct(np.array(AtomInfo['Z'], dtype=np.int32)) did = gid.create_dataset("CrystalSystem", (1,), dtype=np.int32) @@ -547,9 +586,9 @@ def WriteH5Data(fid, AtomInfo, lat_param, path=None): if 'hkls' in AtomInfo: if AtomInfo['hkls'].shape != (0,): - did = gid.create_dataset("hkls", - AtomInfo['hkls'].shape, - dtype=np.int32) + did = gid.create_dataset( + "hkls", AtomInfo['hkls'].shape, dtype=np.int32 + ) did.write_direct(AtomInfo['hkls']) if 'dmin' in AtomInfo: @@ -561,7 +600,8 @@ def WriteH5Data(fid, AtomInfo, lat_param, path=None): did.write_direct(np.array(AtomInfo['kev'], dtype=np.float64)) did = gid.create_dataset( - "AtomData", (4, len(AtomInfo['Z'])), dtype=np.float64) + "AtomData", (4, len(AtomInfo['Z'])), dtype=np.float64 + ) # this is done for contiguous c-allocation arr = np.array(AtomInfo['APOS'], dtype=np.float32).transpose() arr2 = arr.copy() @@ -570,9 +610,9 @@ def WriteH5Data(fid, AtomInfo, lat_param, path=None): if 'charge' in AtomInfo: data = np.array(AtomInfo['charge'], dtype=object) dt = h5py.special_dtype(vlen=str) - did = gid.create_dataset("ChargeStates", - (len(AtomInfo['Z']),), - dtype=dt) + did = gid.create_dataset( + "ChargeStates", (len(AtomInfo['Z']),), dtype=dt + ) did.write_direct(data) ''' @@ -582,7 +622,8 @@ def WriteH5Data(fid, AtomInfo, lat_param, path=None): if not isinstance(AtomInfo['U'][0], np.floating): did = gid.create_dataset( - "U", (6, len(AtomInfo['Z'])), dtype=np.float64) + "U", (6, len(AtomInfo['Z'])), dtype=np.float64 + ) arr = np.array(AtomInfo['U'], dtype=np.float32).transpose() arr2 = arr.copy() did.write_direct(arr2) diff --git a/hexrd/core/material/spacegroup.py b/hexrd/core/material/spacegroup.py index 768227f2d..647f3777b 100644 --- a/hexrd/core/material/spacegroup.py +++ b/hexrd/core/material/spacegroup.py @@ -76,6 +76,7 @@ from hexrd.core.material import symbols, symmetry import numpy as np + # __all__ = ['SpaceGroup'] @@ -134,8 +135,7 @@ def _set_sgnum(self, v): self._pointGroup = pglg[0] self._laueGroup = pglg[1] - sgnum = property(_get_sgnum, _set_sgnum, None, - "Space group number") + sgnum = property(_get_sgnum, _set_sgnum, None, "Space group number") @property def laueGroup(self): @@ -161,7 +161,7 @@ def latticeType(self): Rhombohedral lattices are treated as trigonal using the "obverse" setting. 
-""" + """ return _ltDict[self.laueGroup] @property @@ -273,34 +273,38 @@ def _map_sg_info(hstr): laue_10 = 'th' laue_11 = 'oh' -_laue_international = {laue_1:"-1", -laue_2:"2/m", -laue_3:"mmm", -laue_4:"4/m", -laue_5:"4/mmm", -laue_6:"-3", -laue_7:"-3m", -laue_8:"6/m", -laue_9:"6/mmm", -laue_10:"m3", -laue_11:"m3m"} +_laue_international = { + laue_1: "-1", + laue_2: "2/m", + laue_3: "mmm", + laue_4: "4/m", + laue_5: "4/mmm", + laue_6: "-3", + laue_7: "-3m", + laue_8: "6/m", + laue_9: "6/mmm", + laue_10: "m3", + laue_11: "m3m", +} + -def _sgrange(min, max): return tuple(range(min, max + 1)) # inclusive range +def _sgrange(min, max): + return tuple(range(min, max + 1)) # inclusive range _pgDict = { - _sgrange(1, 1): ('c1', laue_1), # Triclinic - _sgrange(2, 2): ('ci', laue_1), # laue 1 - _sgrange(3, 5): ('c2', laue_2), # Monoclinic - _sgrange(6, 9): ('cs', laue_2), - _sgrange(10, 15): ('c2h', laue_2), # laue 2 - _sgrange(16, 24): ('d2', laue_3), # Orthorhombic - _sgrange(25, 46): ('c2v', laue_3), - _sgrange(47, 74): ('d2h', laue_3), # laue 3 - _sgrange(75, 80): ('c4', laue_4), # Tetragonal - _sgrange(81, 82): ('s4', laue_4), - _sgrange(83, 88): ('c4h', laue_4), # laue 4 - _sgrange(89, 98): ('d4', laue_5), + _sgrange(1, 1): ('c1', laue_1), # Triclinic + _sgrange(2, 2): ('ci', laue_1), # laue 1 + _sgrange(3, 5): ('c2', laue_2), # Monoclinic + _sgrange(6, 9): ('cs', laue_2), + _sgrange(10, 15): ('c2h', laue_2), # laue 2 + _sgrange(16, 24): ('d2', laue_3), # Orthorhombic + _sgrange(25, 46): ('c2v', laue_3), + _sgrange(47, 74): ('d2h', laue_3), # laue 3 + _sgrange(75, 80): ('c4', laue_4), # Tetragonal + _sgrange(81, 82): ('s4', laue_4), + _sgrange(83, 88): ('c4h', laue_4), # laue 4 + _sgrange(89, 98): ('d4', laue_5), _sgrange(99, 110): ('c4v', laue_5), _sgrange(111, 122): ('d2d', laue_5), _sgrange(123, 142): ('d4h', laue_5), # laue 5 @@ -316,9 +320,9 @@ def _sgrange(min, max): return tuple(range(min, max + 1)) # inclusive range _sgrange(183, 186): ('c6v', laue_9), _sgrange(187, 190): ('d3h', laue_9), _sgrange(191, 194): ('d6h', laue_9), # laue 9 - _sgrange(195, 199): ('t', laue_10), # Cubic + _sgrange(195, 199): ('t', laue_10), # Cubic _sgrange(200, 206): ('th', laue_10), # laue 10 - _sgrange(207, 214): ('o', laue_11), + _sgrange(207, 214): ('o', laue_11), _sgrange(215, 220): ('td', laue_11), _sgrange(221, 230): ('oh', laue_11), # laue 11 } @@ -347,7 +351,7 @@ def _sgrange(min, max): return tuple(range(min, max + 1)) # inclusive range laue_8: ltype_6, laue_9: ltype_6, laue_10: ltype_7, - laue_11: ltype_7 + laue_11: ltype_7, } @@ -361,11 +365,11 @@ def _sgrange(min, max): return tuple(range(min, max + 1)) # inclusive range ltype_1: (tuple(range(6)), lambda p: p), # all 6 # note beta ltype_2: ((0, 1, 2, 4), lambda p: (p[0], p[1], p[2], 90, p[3], 90)), - ltype_3: ((0, 1, 2), lambda p: (p[0], p[1], p[2], 90, 90, 90)), - ltype_4: ((0, 2), lambda p: (p[0], p[0], p[1], 90, 90, 90)), - ltype_5: ((0, 2), lambda p: (p[0], p[0], p[1], 90, 90, 120)), - ltype_6: ((0, 2), lambda p: (p[0], p[0], p[1], 90, 90, 120)), - ltype_7: ((0,), lambda p: (p[0], p[0], p[0], 90, 90, 90)), + ltype_3: ((0, 1, 2), lambda p: (p[0], p[1], p[2], 90, 90, 90)), + ltype_4: ((0, 2), lambda p: (p[0], p[0], p[1], 90, 90, 90)), + ltype_5: ((0, 2), lambda p: (p[0], p[0], p[1], 90, 90, 120)), + ltype_6: ((0, 2), lambda p: (p[0], p[0], p[1], 90, 90, 120)), + ltype_7: ((0,), lambda p: (p[0], p[0], p[0], 90, 90, 90)), } @@ -374,47 +378,53 @@ def Allowed_HKLs(sgnum, hkllist): this function checks if a particular g vector is allowed by 
lattice centering, screw axis or glide plane """ - sg_hmsymbol = symbols.pstr_spacegroup[sgnum-1].strip() + sg_hmsymbol = symbols.pstr_spacegroup[sgnum - 1].strip() symmorphic = False - if(sgnum in constants.sgnum_symmorphic): + if sgnum in constants.sgnum_symmorphic: symmorphic = True hkllist = np.atleast_2d(hkllist) centering = sg_hmsymbol[0] - if(centering == 'P'): + if centering == 'P': # all reflections are allowed - mask = np.ones([hkllist.shape[0], ], dtype=bool) - elif(centering == 'F'): + mask = np.ones( + [ + hkllist.shape[0], + ], + dtype=bool, + ) + elif centering == 'F': # same parity - seo = np.sum(np.mod(hkllist+100, 2), axis=1) + seo = np.sum(np.mod(hkllist + 100, 2), axis=1) mask = np.logical_not(np.logical_or(seo == 1, seo == 2)) - elif(centering == 'I'): + elif centering == 'I': # sum is even - seo = np.mod(np.sum(hkllist, axis=1)+100, 2) - mask = (seo == 0) - elif(centering == 'A'): + seo = np.mod(np.sum(hkllist, axis=1) + 100, 2) + mask = seo == 0 + elif centering == 'A': # k+l is even - seo = np.mod(np.sum(hkllist[:, 1:3], axis=1)+100, 2) + seo = np.mod(np.sum(hkllist[:, 1:3], axis=1) + 100, 2) mask = seo == 0 - elif(centering == 'B'): + elif centering == 'B': # h+l is even - seo = np.mod(hkllist[:, 0]+hkllist[:, 2]+100, 2) + seo = np.mod(hkllist[:, 0] + hkllist[:, 2] + 100, 2) mask = seo == 0 - elif(centering == 'C'): + elif centering == 'C': # h+k is even - seo = np.mod(hkllist[:, 0]+hkllist[:, 1]+100, 2) + seo = np.mod(hkllist[:, 0] + hkllist[:, 1] + 100, 2) mask = seo == 0 - elif(centering == 'R'): + elif centering == 'R': # -h+k+l is divisible by 3 - seo = np.mod(-hkllist[:, 0]+hkllist[:, 1]+hkllist[:, 2]+90, 3) + seo = np.mod(-hkllist[:, 0] + hkllist[:, 1] + hkllist[:, 2] + 90, 3) mask = seo == 0 else: raise RuntimeError( - 'IsGAllowed: unknown lattice centering encountered.') + 'IsGAllowed: unknown lattice centering encountered.' 
+ ) hkls = hkllist[mask, :] - if(not symmorphic): + if not symmorphic: hkls = NonSymmorphicAbsences(sgnum, hkls) return hkls.astype(np.int32) @@ -434,118 +444,123 @@ def omitscrewaxisabsences(sgnum, hkllist, ax, iax): """ latticeType = symmetry.latticeType(sgnum) - if(latticeType == 'triclinic'): + if latticeType == 'triclinic': """ - no systematic absences for the triclinic crystals + no systematic absences for the triclinic crystals """ pass - elif(latticeType == 'monoclinic'): - if(ax != '2_1'): + elif latticeType == 'monoclinic': + if ax != '2_1': raise RuntimeError( 'omitscrewaxisabsences: monoclinic systems\ - can only have 2_1 screw axis') + can only have 2_1 screw axis' + ) """ only the unique b-axis will be encoded; it is the user's responsibility to input lattice parameters in the standard setting with the b-axis having the 2-fold symmetry """ - if(iax == 1): + if iax == 1: mask1 = np.logical_and(hkllist[:, 0] == 0, hkllist[:, 2] == 0) - mask2 = np.mod(hkllist[:, 1]+100, 2) != 0 + mask2 = np.mod(hkllist[:, 1] + 100, 2) != 0 mask = np.logical_not(np.logical_and(mask1, mask2)) hkllist = hkllist[mask, :] else: raise RuntimeError( 'omitscrewaxisabsences: only b-axis\ - can have 2_1 screw axis') + can have 2_1 screw axis' + ) - elif(latticeType == 'orthorhombic'): - if(ax != '2_1'): + elif latticeType == 'orthorhombic': + if ax != '2_1': raise RuntimeError( 'omitscrewaxisabsences: orthorhombic systems\ - can only have 2_1 screw axis') + can only have 2_1 screw axis' + ) """ 2_1 screw on primary axis h00 ; h = 2n """ - if(iax == 0): + if iax == 0: mask1 = np.logical_and(hkllist[:, 1] == 0, hkllist[:, 2] == 0) - mask2 = np.mod(hkllist[:, 0]+100, 2) != 0 + mask2 = np.mod(hkllist[:, 0] + 100, 2) != 0 mask = np.logical_not(np.logical_and(mask1, mask2)) hkllist = hkllist[mask, :] - elif(iax == 1): + elif iax == 1: mask1 = np.logical_and(hkllist[:, 0] == 0, hkllist[:, 2] == 0) - mask2 = np.mod(hkllist[:, 1]+100, 2) != 0 + mask2 = np.mod(hkllist[:, 1] + 100, 2) != 0 mask = np.logical_not(np.logical_and(mask1, mask2)) hkllist = hkllist[mask, :] - elif(iax == 2): + elif iax == 2: mask1 = np.logical_and(hkllist[:, 0] == 0, hkllist[:, 1] == 0) - mask2 = np.mod(hkllist[:, 2]+100, 2) != 0 + mask2 = np.mod(hkllist[:, 2] + 100, 2) != 0 mask = np.logical_not(np.logical_and(mask1, mask2)) hkllist = hkllist[mask, :] - elif(latticeType == 'tetragonal'): - if(iax == 0): + elif latticeType == 'tetragonal': + if iax == 0: mask1 = np.logical_and(hkllist[:, 0] == 0, hkllist[:, 1] == 0) - if(ax == '4_2'): - mask2 = np.mod(hkllist[:, 2]+100, 2) != 0 - elif(ax in ['4_1', '4_3']): - mask2 = np.mod(hkllist[:, 2]+100, 4) != 0 + if ax == '4_2': + mask2 = np.mod(hkllist[:, 2] + 100, 2) != 0 + elif ax in ['4_1', '4_3']: + mask2 = np.mod(hkllist[:, 2] + 100, 4) != 0 mask = np.logical_not(np.logical_and(mask1, mask2)) hkllist = hkllist[mask, :] - elif(iax == 1): + elif iax == 1: mask1 = np.logical_and(hkllist[:, 1] == 0, hkllist[:, 2] == 0) mask2 = np.logical_and(hkllist[:, 0] == 0, hkllist[:, 2] == 0) - if(ax == '2_1'): - mask3 = np.mod(hkllist[:, 0]+100, 2) != 0 - mask4 = np.mod(hkllist[:, 1]+100, 2) != 0 + if ax == '2_1': + mask3 = np.mod(hkllist[:, 0] + 100, 2) != 0 + mask4 = np.mod(hkllist[:, 1] + 100, 2) != 0 mask1 = np.logical_not(np.logical_and(mask1, mask3)) mask2 = np.logical_not(np.logical_and(mask2, mask4)) mask = ~np.logical_or(~mask1, ~mask2) hkllist = hkllist[mask, :] - elif(latticeType == 'trigonal'): + elif latticeType == 'trigonal': mask1 = np.logical_and(hkllist[:, 0] == 0, hkllist[:, 1] == 0) - if(iax == 0): -
if(ax in ['3_1', '3_2']): - mask2 = np.mod(hkllist[:, 2]+90, 3) != 0 + if iax == 0: + if ax in ['3_1', '3_2']: + mask2 = np.mod(hkllist[:, 2] + 90, 3) != 0 else: raise RuntimeError( 'omitscrewaxisabsences: trigonal \ - systems can only have screw axis') + systems can only have screw axis' + ) mask = np.logical_not(np.logical_and(mask1, mask2)) hkllist = hkllist[mask, :] - elif(latticeType == 'hexagonal'): + elif latticeType == 'hexagonal': mask1 = np.logical_and(hkllist[:, 0] == 0, hkllist[:, 1] == 0) - if(iax == 0): - if(ax == '6_3'): - mask2 = np.mod(hkllist[:, 2]+100, 2) != 0 - elif(ax in ['3_1', '3_2', '6_2', '6_4']): - mask2 = np.mod(hkllist[:, 2]+90, 3) != 0 - elif(ax in ['6_1', '6_5']): - mask2 = np.mod(hkllist[:, 2]+120, 6) != 0 + if iax == 0: + if ax == '6_3': + mask2 = np.mod(hkllist[:, 2] + 100, 2) != 0 + elif ax in ['3_1', '3_2', '6_2', '6_4']: + mask2 = np.mod(hkllist[:, 2] + 90, 3) != 0 + elif ax in ['6_1', '6_5']: + mask2 = np.mod(hkllist[:, 2] + 120, 6) != 0 else: raise RuntimeError( 'omitscrewaxisabsences: hexagonal \ - systems can only have screw axis') + systems can only have screw axis' + ) mask = np.logical_not(np.logical_and(mask1, mask2)) hkllist = hkllist[mask, :] - elif(latticeType == 'cubic'): + elif latticeType == 'cubic': mask1 = np.logical_and(hkllist[:, 0] == 0, hkllist[:, 1] == 0) mask2 = np.logical_and(hkllist[:, 0] == 0, hkllist[:, 2] == 0) mask3 = np.logical_and(hkllist[:, 1] == 0, hkllist[:, 2] == 0) - if(ax in ['2_1', '4_2']): - mask4 = np.mod(hkllist[:, 2]+100, 2) != 0 - mask5 = np.mod(hkllist[:, 1]+100, 2) != 0 - mask6 = np.mod(hkllist[:, 0]+100, 2) != 0 - elif(ax in ['4_1', '4_3']): - mask4 = np.mod(hkllist[:, 2]+100, 4) != 0 - mask5 = np.mod(hkllist[:, 1]+100, 4) != 0 - mask6 = np.mod(hkllist[:, 0]+100, 4) != 0 + if ax in ['2_1', '4_2']: + mask4 = np.mod(hkllist[:, 2] + 100, 2) != 0 + mask5 = np.mod(hkllist[:, 1] + 100, 2) != 0 + mask6 = np.mod(hkllist[:, 0] + 100, 2) != 0 + elif ax in ['4_1', '4_3']: + mask4 = np.mod(hkllist[:, 2] + 100, 4) != 0 + mask5 = np.mod(hkllist[:, 1] + 100, 4) != 0 + mask6 = np.mod(hkllist[:, 0] + 100, 4) != 0 mask1 = np.logical_not(np.logical_and(mask1, mask4)) mask2 = np.logical_not(np.logical_and(mask2, mask5)) mask3 = np.logical_not(np.logical_and(mask3, mask6)) @@ -569,229 +584,239 @@ def omitglideplaneabsences(sgnum, hkllist, plane, ip): """ latticeType = symmetry.latticeType(sgnum) - if(latticeType == 'triclinic'): + if latticeType == 'triclinic': pass - elif(latticeType == 'monoclinic'): - if(ip == 1): + elif latticeType == 'monoclinic': + if ip == 1: mask1 = hkllist[:, 1] == 0 - if(plane == 'c'): - mask2 = np.mod(hkllist[:, 2]+100, 2) != 0 - elif(plane == 'a'): - mask2 = np.mod(hkllist[:, 0]+100, 2) != 0 - elif(plane == 'n'): - mask2 = np.mod(hkllist[:, 0]+hkllist[:, 2]+100, 2) != 0 + if plane == 'c': + mask2 = np.mod(hkllist[:, 2] + 100, 2) != 0 + elif plane == 'a': + mask2 = np.mod(hkllist[:, 0] + 100, 2) != 0 + elif plane == 'n': + mask2 = np.mod(hkllist[:, 0] + hkllist[:, 2] + 100, 2) != 0 mask = np.logical_not(np.logical_and(mask1, mask2)) hkllist = hkllist[mask, :] - elif(latticeType == 'orthorhombic'): - if(ip == 0): + elif latticeType == 'orthorhombic': + if ip == 0: mask1 = hkllist[:, 0] == 0 - if(plane == 'b'): - mask2 = np.mod(hkllist[:, 1]+100, 2) != 0 - elif(plane == 'c'): - mask2 = np.mod(hkllist[:, 2]+100, 2) != 0 - elif(plane == 'n'): - mask2 = np.mod(hkllist[:, 1]+hkllist[:, 2]+100, 2) != 0 - elif(plane == 'd'): - mask2 = np.mod(hkllist[:, 1]+hkllist[:, 2]+100, 4) != 0 + if plane == 'b': + mask2 = 
np.mod(hkllist[:, 1] + 100, 2) != 0 + elif plane == 'c': + mask2 = np.mod(hkllist[:, 2] + 100, 2) != 0 + elif plane == 'n': + mask2 = np.mod(hkllist[:, 1] + hkllist[:, 2] + 100, 2) != 0 + elif plane == 'd': + mask2 = np.mod(hkllist[:, 1] + hkllist[:, 2] + 100, 4) != 0 mask = np.logical_not(np.logical_and(mask1, mask2)) hkllist = hkllist[mask, :] - elif(ip == 1): + elif ip == 1: mask1 = hkllist[:, 1] == 0 - if(plane == 'c'): - mask2 = np.mod(hkllist[:, 2]+100, 2) != 0 - elif(plane == 'a'): - mask2 = np.mod(hkllist[:, 0]+100, 2) != 0 - elif(plane == 'n'): - mask2 = np.mod(hkllist[:, 0]+hkllist[:, 2]+100, 2) != 0 - elif(plane == 'd'): - mask2 = np.mod(hkllist[:, 0]+hkllist[:, 2]+100, 4) != 0 + if plane == 'c': + mask2 = np.mod(hkllist[:, 2] + 100, 2) != 0 + elif plane == 'a': + mask2 = np.mod(hkllist[:, 0] + 100, 2) != 0 + elif plane == 'n': + mask2 = np.mod(hkllist[:, 0] + hkllist[:, 2] + 100, 2) != 0 + elif plane == 'd': + mask2 = np.mod(hkllist[:, 0] + hkllist[:, 2] + 100, 4) != 0 mask = np.logical_not(np.logical_and(mask1, mask2)) hkllist = hkllist[mask, :] - elif(ip == 2): + elif ip == 2: mask1 = hkllist[:, 2] == 0 - if(plane == 'a'): - mask2 = np.mod(hkllist[:, 0]+100, 2) != 0 - elif(plane == 'b'): - mask2 = np.mod(hkllist[:, 1]+100, 2) != 0 - elif(plane == 'n'): - mask2 = np.mod(hkllist[:, 0]+hkllist[:, 1]+100, 2) != 0 - elif(plane == 'd'): - mask2 = np.mod(hkllist[:, 0]+hkllist[:, 1]+100, 4) != 0 + if plane == 'a': + mask2 = np.mod(hkllist[:, 0] + 100, 2) != 0 + elif plane == 'b': + mask2 = np.mod(hkllist[:, 1] + 100, 2) != 0 + elif plane == 'n': + mask2 = np.mod(hkllist[:, 0] + hkllist[:, 1] + 100, 2) != 0 + elif plane == 'd': + mask2 = np.mod(hkllist[:, 0] + hkllist[:, 1] + 100, 4) != 0 mask = np.logical_not(np.logical_and(mask1, mask2)) hkllist = hkllist[mask, :] - elif(latticeType == 'tetragonal'): - if(ip == 0): + elif latticeType == 'tetragonal': + if ip == 0: mask1 = hkllist[:, 2] == 0 - if(plane == 'a'): - mask2 = np.mod(hkllist[:, 0]+100, 2) != 0 - elif(plane == 'b'): - mask2 = np.mod(hkllist[:, 1]+100, 2) != 0 - elif(plane == 'n'): - mask2 = np.mod(hkllist[:, 0]+hkllist[:, 1]+100, 2) != 0 - elif(plane == 'd'): - mask2 = np.mod(hkllist[:, 0]+hkllist[:, 1]+100, 4) != 0 + if plane == 'a': + mask2 = np.mod(hkllist[:, 0] + 100, 2) != 0 + elif plane == 'b': + mask2 = np.mod(hkllist[:, 1] + 100, 2) != 0 + elif plane == 'n': + mask2 = np.mod(hkllist[:, 0] + hkllist[:, 1] + 100, 2) != 0 + elif plane == 'd': + mask2 = np.mod(hkllist[:, 0] + hkllist[:, 1] + 100, 4) != 0 mask = np.logical_not(np.logical_and(mask1, mask2)) hkllist = hkllist[mask, :] - elif(ip == 1): + elif ip == 1: mask1 = hkllist[:, 0] == 0 mask2 = hkllist[:, 1] == 0 - if(plane in ['a', 'b']): - mask3 = np.mod(hkllist[:, 1]+100, 2) != 0 - mask4 = np.mod(hkllist[:, 0]+100, 2) != 0 - elif(plane == 'c'): - mask3 = np.mod(hkllist[:, 2]+100, 2) != 0 + if plane in ['a', 'b']: + mask3 = np.mod(hkllist[:, 1] + 100, 2) != 0 + mask4 = np.mod(hkllist[:, 0] + 100, 2) != 0 + elif plane == 'c': + mask3 = np.mod(hkllist[:, 2] + 100, 2) != 0 mask4 = mask3 - elif(plane == 'n'): - mask3 = np.mod(hkllist[:, 1]+hkllist[:, 2]+100, 2) != 0 - mask4 = np.mod(hkllist[:, 0]+hkllist[:, 2]+100, 2) != 0 - elif(plane == 'd'): - mask3 = np.mod(hkllist[:, 1]+hkllist[:, 2]+100, 4) != 0 - mask4 = np.mod(hkllist[:, 0]+hkllist[:, 2]+100, 4) != 0 + elif plane == 'n': + mask3 = np.mod(hkllist[:, 1] + hkllist[:, 2] + 100, 2) != 0 + mask4 = np.mod(hkllist[:, 0] + hkllist[:, 2] + 100, 2) != 0 + elif plane == 'd': + mask3 = np.mod(hkllist[:, 1] + hkllist[:, 2] 
+ 100, 4) != 0 + mask4 = np.mod(hkllist[:, 0] + hkllist[:, 2] + 100, 4) != 0 mask1 = np.logical_not(np.logical_and(mask1, mask3)) mask2 = np.logical_not(np.logical_and(mask2, mask4)) mask = ~np.logical_or(~mask1, ~mask2) hkllist = hkllist[mask, :] - elif(ip == 2): + elif ip == 2: mask1 = np.abs(hkllist[:, 0]) == np.abs(hkllist[:, 1]) - if(plane in ['c', 'n']): - mask2 = np.mod(hkllist[:, 2]+100, 2) != 0 - elif(plane == 'd'): - mask2 = np.mod(2*hkllist[:, 0]+hkllist[:, 2]+100, 4) != 0 + if plane in ['c', 'n']: + mask2 = np.mod(hkllist[:, 2] + 100, 2) != 0 + elif plane == 'd': + mask2 = np.mod(2 * hkllist[:, 0] + hkllist[:, 2] + 100, 4) != 0 mask = np.logical_not(np.logical_and(mask1, mask2)) hkllist = hkllist[mask, :] - elif(latticeType == 'trigonal'): - if(plane != 'c'): + elif latticeType == 'trigonal': + if plane != 'c': raise RuntimeError( 'omitglideplaneabsences: only c-glide \ - allowed for trigonal systems') - if(ip == 1): + allowed for trigonal systems' + ) + if ip == 1: mask1 = hkllist[:, 0] == 0 mask2 = hkllist[:, 1] == 0 mask3 = hkllist[:, 0] == -hkllist[:, 1] - if(plane == 'c'): - mask4 = np.mod(hkllist[:, 2]+100, 2) != 0 + if plane == 'c': + mask4 = np.mod(hkllist[:, 2] + 100, 2) != 0 else: raise RuntimeError( 'omitglideplaneabsences: only c-glide \ - allowed for trigonal systems') + allowed for trigonal systems' + ) - elif(ip == 2): + elif ip == 2: mask1 = hkllist[:, 1] == hkllist[:, 0] - mask2 = hkllist[:, 0] == -2*hkllist[:, 1] - mask3 = -2*hkllist[:, 0] == hkllist[:, 1] - if(plane == 'c'): - mask4 = np.mod(hkllist[:, 2]+100, 2) != 0 + mask2 = hkllist[:, 0] == -2 * hkllist[:, 1] + mask3 = -2 * hkllist[:, 0] == hkllist[:, 1] + if plane == 'c': + mask4 = np.mod(hkllist[:, 2] + 100, 2) != 0 else: raise RuntimeError( 'omitglideplaneabsences: only c-glide \ - allowed for trigonal systems') + allowed for trigonal systems' + ) mask1 = np.logical_and(mask1, mask4) mask2 = np.logical_and(mask2, mask4) mask3 = np.logical_and(mask3, mask4) - mask = np.logical_not(np.logical_or( - mask1, np.logical_or(mask2, mask3))) + mask = np.logical_not( + np.logical_or(mask1, np.logical_or(mask2, mask3)) + ) hkllist = hkllist[mask, :] - elif(latticeType == 'hexagonal'): - if(plane != 'c'): + elif latticeType == 'hexagonal': + if plane != 'c': raise RuntimeError( 'omitglideplaneabsences: only c-glide \ - allowed for hexagonal systems') - if(ip == 2): + allowed for hexagonal systems' + ) + if ip == 2: mask1 = hkllist[:, 0] == hkllist[:, 1] - mask2 = hkllist[:, 0] == -2*hkllist[:, 1] - mask3 = -2*hkllist[:, 0] == hkllist[:, 1] - mask4 = np.mod(hkllist[:, 2]+100, 2) != 0 + mask2 = hkllist[:, 0] == -2 * hkllist[:, 1] + mask3 = -2 * hkllist[:, 0] == hkllist[:, 1] + mask4 = np.mod(hkllist[:, 2] + 100, 2) != 0 mask1 = np.logical_and(mask1, mask4) mask2 = np.logical_and(mask2, mask4) mask3 = np.logical_and(mask3, mask4) - mask = np.logical_not(np.logical_or( - mask1, np.logical_or(mask2, mask3))) + mask = np.logical_not( + np.logical_or(mask1, np.logical_or(mask2, mask3)) + ) - elif(ip == 1): + elif ip == 1: mask1 = hkllist[:, 1] == 0 mask2 = hkllist[:, 0] == 0 mask3 = hkllist[:, 1] == -hkllist[:, 0] - mask4 = np.mod(hkllist[:, 2]+100, 2) != 0 + mask4 = np.mod(hkllist[:, 2] + 100, 2) != 0 mask1 = np.logical_and(mask1, mask4) mask2 = np.logical_and(mask2, mask4) mask3 = np.logical_and(mask3, mask4) - mask = np.logical_not(np.logical_or( - mask1, np.logical_or(mask2, mask3))) + mask = np.logical_not( + np.logical_or(mask1, np.logical_or(mask2, mask3)) + ) hkllist = hkllist[mask, :] - elif(latticeType == 
'cubic'): - if(ip == 0): + elif latticeType == 'cubic': + if ip == 0: mask1 = hkllist[:, 0] == 0 mask2 = hkllist[:, 1] == 0 mask3 = hkllist[:, 2] == 0 - mask4 = np.mod(hkllist[:, 0]+100, 2) != 0 - mask5 = np.mod(hkllist[:, 1]+100, 2) != 0 - mask6 = np.mod(hkllist[:, 2]+100, 2) != 0 - if(plane == 'a'): - mask1 = np.logical_or(np.logical_and( - mask1, mask5), np.logical_and(mask1, mask6)) - mask2 = np.logical_or(np.logical_and( - mask2, mask4), np.logical_and(mask2, mask6)) + mask4 = np.mod(hkllist[:, 0] + 100, 2) != 0 + mask5 = np.mod(hkllist[:, 1] + 100, 2) != 0 + mask6 = np.mod(hkllist[:, 2] + 100, 2) != 0 + if plane == 'a': + mask1 = np.logical_or( + np.logical_and(mask1, mask5), np.logical_and(mask1, mask6) + ) + mask2 = np.logical_or( + np.logical_and(mask2, mask4), np.logical_and(mask2, mask6) + ) mask3 = np.logical_and(mask3, mask4) - mask = np.logical_not(np.logical_or( - mask1, np.logical_or(mask2, mask3))) - elif(plane == 'b'): + mask = np.logical_not( + np.logical_or(mask1, np.logical_or(mask2, mask3)) + ) + elif plane == 'b': mask1 = np.logical_and(mask1, mask5) mask3 = np.logical_and(mask3, mask5) mask = np.logical_not(np.logical_or(mask1, mask3)) - elif(plane == 'c'): + elif plane == 'c': mask1 = np.logical_and(mask1, mask6) mask2 = np.logical_and(mask2, mask6) mask = np.logical_not(np.logical_or(mask1, mask2)) - elif(plane == 'n'): - mask4 = np.mod(hkllist[:, 1]+hkllist[:, 2]+100, 2) != 0 - mask5 = np.mod(hkllist[:, 0]+hkllist[:, 2]+100, 2) != 0 - mask6 = np.mod(hkllist[:, 0]+hkllist[:, 1]+100, 2) != 0 + elif plane == 'n': + mask4 = np.mod(hkllist[:, 1] + hkllist[:, 2] + 100, 2) != 0 + mask5 = np.mod(hkllist[:, 0] + hkllist[:, 2] + 100, 2) != 0 + mask6 = np.mod(hkllist[:, 0] + hkllist[:, 1] + 100, 2) != 0 mask1 = np.logical_not(np.logical_and(mask1, mask4)) mask2 = np.logical_not(np.logical_and(mask2, mask5)) mask3 = np.logical_not(np.logical_and(mask3, mask6)) - mask = ~np.logical_or( - ~mask1, np.logical_or(~mask2, ~mask3)) - elif(plane == 'd'): - mask4 = np.mod(hkllist[:, 1]+hkllist[:, 2]+100, 4) != 0 - mask5 = np.mod(hkllist[:, 0]+hkllist[:, 2]+100, 4) != 0 - mask6 = np.mod(hkllist[:, 0]+hkllist[:, 1]+100, 4) != 0 + mask = ~np.logical_or(~mask1, np.logical_or(~mask2, ~mask3)) + elif plane == 'd': + mask4 = np.mod(hkllist[:, 1] + hkllist[:, 2] + 100, 4) != 0 + mask5 = np.mod(hkllist[:, 0] + hkllist[:, 2] + 100, 4) != 0 + mask6 = np.mod(hkllist[:, 0] + hkllist[:, 1] + 100, 4) != 0 mask1 = np.logical_not(np.logical_and(mask1, mask4)) mask2 = np.logical_not(np.logical_and(mask2, mask5)) mask3 = np.logical_not(np.logical_and(mask3, mask6)) - mask = ~np.logical_or( - ~mask1, np.logical_or(~mask2, ~mask3)) + mask = ~np.logical_or(~mask1, np.logical_or(~mask2, ~mask3)) else: raise RuntimeError( 'omitglideplaneabsences: unknown glide \ - plane encountered.') + plane encountered.' 
+ ) hkllist = hkllist[mask, :] - if(ip == 2): + if ip == 2: mask1 = np.abs(hkllist[:, 0]) == np.abs(hkllist[:, 1]) mask2 = np.abs(hkllist[:, 1]) == np.abs(hkllist[:, 2]) mask3 = np.abs(hkllist[:, 0]) == np.abs(hkllist[:, 2]) - if(plane in ['a', 'b', 'c', 'n']): - mask4 = np.mod(hkllist[:, 2]+100, 2) != 0 - mask5 = np.mod(hkllist[:, 0]+100, 2) != 0 - mask6 = np.mod(hkllist[:, 1]+100, 2) != 0 - elif(plane == 'd'): - mask4 = np.mod(2*hkllist[:, 0]+hkllist[:, 2]+100, 4) != 0 - mask5 = np.mod(hkllist[:, 0]+2*hkllist[:, 1]+100, 4) != 0 - mask6 = np.mod(2*hkllist[:, 0]+hkllist[:, 1]+100, 4) != 0 + if plane in ['a', 'b', 'c', 'n']: + mask4 = np.mod(hkllist[:, 2] + 100, 2) != 0 + mask5 = np.mod(hkllist[:, 0] + 100, 2) != 0 + mask6 = np.mod(hkllist[:, 1] + 100, 2) != 0 + elif plane == 'd': + mask4 = np.mod(2 * hkllist[:, 0] + hkllist[:, 2] + 100, 4) != 0 + mask5 = np.mod(hkllist[:, 0] + 2 * hkllist[:, 1] + 100, 4) != 0 + mask6 = np.mod(2 * hkllist[:, 0] + hkllist[:, 1] + 100, 4) != 0 else: raise RuntimeError( 'omitglideplaneabsences: unknown glide \ - plane encountered.') + plane encountered.' + ) mask1 = np.logical_not(np.logical_and(mask1, mask4)) mask2 = np.logical_not(np.logical_and(mask2, mask5)) mask3 = np.logical_not(np.logical_and(mask3, mask6)) @@ -808,14 +833,15 @@ def NonSymmorphicAbsences(sgnum, hkllist): """ planes = constants.SYS_AB[sgnum][0] for ip, p in enumerate(planes): - if(p != ''): + if p != '': hkllist = omitglideplaneabsences(sgnum, hkllist, p, ip) axes = constants.SYS_AB[sgnum][1] for iax, ax in enumerate(axes): - if(ax != ''): + if ax != '': hkllist = omitscrewaxisabsences(sgnum, hkllist, ax, iax) return hkllist + # # ================================================== HKL Enumeration # @@ -826,28 +852,32 @@ def _getHKLsBySS(ss): ss - (int) sum of squares -""" + """ + # # NOTE: the loop below could be sped up by requiring # h >= k >= l, and then applying all permutations # and sign changes. Could possibly save up to # a factor of 48.
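#
# As a rough sketch of that idea (illustrative only -- the helper
# below is hypothetical and not part of hexrd), one would enumerate
# canonical triples with h >= k >= l >= 0 and expand each one:
#
#     from itertools import permutations, product
#
#     def _expand_hkl(h, k, l):
#         # all permutations and sign changes of one canonical triple;
#         # at most 6 permutations x 8 sign patterns = 48 equivalents
#         return {
#             (sh * ph, sk * pk, sl * pl)
#             for ph, pk, pl in permutations((h, k, l))
#             for sh, sk, sl in product((1, -1), repeat=3)
#         }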
# - def pmrange(n): return list(range(n, -(n+1), -1)) # plus/minus range - def iroot(n): return int(floor(sqrt(n))) # integer square root + def pmrange(n): + return list(range(n, -(n + 1), -1)) # plus/minus range + + def iroot(n): + return int(floor(sqrt(n))) # integer square root hkls = [] hmax = iroot(ss) for h in pmrange(hmax): - ss2 = ss - h*h + ss2 = ss - h * h kmax = iroot(ss2) for k in pmrange(kmax): - rem = ss2 - k*k + rem = ss2 - k * k if rem == 0: hkls.append((h, k, 0)) else: l = iroot(rem) - if l*l == rem: + if l * l == rem: hkls += [(h, k, l), (h, k, -l)] return hkls @@ -868,10 +898,12 @@ def testHKLs(): print('==================== Titanium (194)') ssmax = 20 myHKLs = sg.getHKLs(ssmax) - print('Number of HKLs with sum of square %d or less: %d' - % (ssmax, len(myHKLs))) + print( + 'Number of HKLs with sum of squares %d or less: %d' + % (ssmax, len(myHKLs)) + ) for hkl in myHKLs: - ss = hkl[0]**2 + hkl[1]**2 + hkl[2]**2 + ss = hkl[0] ** 2 + hkl[1] ** 2 + hkl[2] ** 2 print((hkl, ss)) # @@ -881,10 +913,12 @@ def testHKLs(): print('==================== Ruby (167)') ssmax = 10 myHKLs = sg.getHKLs(ssmax) - print('Number of HKLs with sum of square %d or less: %d' - % (ssmax, len(myHKLs))) + print( + 'Number of HKLs with sum of squares %d or less: %d' + % (ssmax, len(myHKLs)) + ) for hkl in myHKLs: - ss = hkl[0]**2 + hkl[1]**2 + hkl[2]**2 + ss = hkl[0] ** 2 + hkl[1] ** 2 + hkl[2] ** 2 print((hkl, ss)) # # Test Generic HKLs @@ -899,6 +933,7 @@ def testHKLs(): if __name__ == '__main__': # import sys + # if 'testHKLs' in sys.argv: testHKLs() diff --git a/hexrd/core/material/symbols.py b/hexrd/core/material/symbols.py index 637a5136a..45c55b292 100644 --- a/hexrd/core/material/symbols.py +++ b/hexrd/core/material/symbols.py @@ -1,4 +1,3 @@ - pstr_mkxtal = "\n\n This is a program to create a HDF5 file for storing crystallographic information.\n " pstr_mkxtal = pstr_mkxtal + " The following inputs are required:\n " pstr_mkxtal = pstr_mkxtal + " Crystal System:\n" @@ -10,19 +9,47 @@ pstr_mkxtal = pstr_mkxtal + " 6. Monoclinic\n" pstr_mkxtal = pstr_mkxtal + " 7. Triclinic\n\n" pstr_mkxtal = pstr_mkxtal + " Space group number\n" -pstr_mkxtal = pstr_mkxtal + " Atomic number (Z) for all species in unit cell\n" -pstr_mkxtal = pstr_mkxtal + " Asymmetric positions for all atoms in unit cell\n" -pstr_mkxtal = pstr_mkxtal + " Debye-Waller factors for all atoms in the unit cell\n" -pstr_mkxtal = pstr_mkxtal + " You'll be prompted for these values now\n\n" +pstr_mkxtal = ( + pstr_mkxtal + " Atomic number (Z) for all species in unit cell\n" +) +pstr_mkxtal = ( + pstr_mkxtal + " Asymmetric positions for all atoms in unit cell\n" +) +pstr_mkxtal = ( + pstr_mkxtal + + " Debye-Waller factors for all atoms in the unit cell\n" +) +pstr_mkxtal = ( + pstr_mkxtal + " You'll be prompted for these values now\n\n" +) pstr_mkxtal = pstr_mkxtal + "\n Note about the trigonal system:\n" pstr_mkxtal = pstr_mkxtal + " -------------------------------\n" -pstr_mkxtal = pstr_mkxtal + " Primitive trigonal crystals are defined with respect to a HEXAGONAL\n" -pstr_mkxtal = pstr_mkxtal + " reference frame. Rhombohedral crystals can be referenced with\n" -pstr_mkxtal = pstr_mkxtal + " respect to a HEXAGONAL basis (first setting), or with respect to\n" -pstr_mkxtal = pstr_mkxtal + " a RHOMBOHEDRAL basis (second setting). The default setting for\n" -pstr_mkxtal = pstr_mkxtal + " trigonal symmetry is the hexagonal setting. When you select\n" -pstr_mkxtal = pstr_mkxtal + " crystal system 5 above, you will be prompted for the setting. 
\n" +pstr_mkxtal = ( + pstr_mkxtal + + " Primitive trigonal crystals are defined with respect to a HEXAGONAL\n" +) +pstr_mkxtal = ( + pstr_mkxtal + + " reference frame. Rhombohedral crystals can be referenced with\n" +) +pstr_mkxtal = ( + pstr_mkxtal + + " respect to a HEXAGONAL basis (first setting), or with respect to\n" +) +pstr_mkxtal = ( + pstr_mkxtal + + " a RHOMBOHEDRAL basis (second setting). The default setting for\n" +) +pstr_mkxtal = ( + pstr_mkxtal + + " trigonal symmetry is the hexagonal setting. When you select\n" +) +pstr_mkxtal = ( + pstr_mkxtal + + " crystal system 5 above, you will be prompted for the setting. \n" +) +# fmt: off pstr_spacegroup = [ " P 1 ", " P -1 ", \ # MONOCLINIC SPACE GROUPS @@ -93,18 +120,30 @@ # TRIGONAL GROUPS RHOMBOHEDRAL SETTING " R 3 |146", " R -3 |148", " R 3 2 |155", " R 3 m |160", \ " R 3 c |161", " R -3 m|166", " R -3 c|167"] +# fmt: on - -xtal_dict = {1: 'cubic', 2: 'tetragonal', 3: 'orthorhombic', - 4: 'hexagonal', 5: 'trigonal', 6: 'monoclinic', - 7: 'triclinic'} -xtal_sys_dict = {'cubic': 1, 'tetragonal': 2, 'orthorhombic': 3, - 'hexagonal': 4, 'trigonal': 5, 'monoclinic': 6, - 'triclinic': 7} - +xtal_dict = { + 1: 'cubic', + 2: 'tetragonal', + 3: 'orthorhombic', + 4: 'hexagonal', + 5: 'trigonal', + 6: 'monoclinic', + 7: 'triclinic', +} +xtal_sys_dict = { + 'cubic': 1, + 'tetragonal': 2, + 'orthorhombic': 3, + 'hexagonal': 4, + 'trigonal': 5, + 'monoclinic': 6, + 'triclinic': 7, +} +# fmt: off pstr_pointgroup = [ ' 1', ' -1', ' 2', ' m', ' 2/m', ' 222', ' mm2', ' mmm', ' 4', ' -4', ' 4/m', ' 422', @@ -112,12 +151,13 @@ ' 3m', ' -3m', ' 6', ' -6', ' 6/m', ' 622', ' 6mm', ' -6m2', '6/mmm', ' 23', ' m3', ' 432', ' -43m', ' m-3m', ' 532', ' 822', ' 1022', ' 1222'] - +# fmt: on TRIG = [146, 148, 155, 160, 161, 166, 167] +# fmt: off # symbols and Z for all elements pstr_Elements = ' ------------------------------------ Periodic Table of the Elements --------------------------------------' + "\n" \ '1:H 2:He' + "\n" \ @@ -135,7 +175,7 @@ which have two origin choices the two values are the site symmetries of the origin. 
There are 24 such space groups''' - +# fmt: on two_origin_choice = { 48: ['222', '-1'], @@ -166,32 +206,32 @@ def PrintPossibleSG(xtal_sys): - if(xtal_sys == 1): + if xtal_sys == 1: sgmax = 230 sgmin = 195 - elif(xtal_sys == 2): + elif xtal_sys == 2: sgmax = 142 sgmin = 75 - elif(xtal_sys == 3): + elif xtal_sys == 3: sgmax = 74 sgmin = 16 - elif(xtal_sys == 4): + elif xtal_sys == 4: sgmax = 194 sgmin = 168 - elif(xtal_sys == 5): + elif xtal_sys == 5: sgmax = 167 sgmin = 143 - elif(xtal_sys == 6): + elif xtal_sys == 6: sgmax = 15 sgmin = 3 - elif(xtal_sys == 7): + elif xtal_sys == 7: sgmax = 2 sgmin = 1 - for i in range(sgmin, sgmax+1): + for i in range(sgmin, sgmax + 1): j = i - sgmin + 1 - pstr = str(i) + ":" + pstr_spacegroup[i-1] + "\t" - if(j % 4 == 0 or j == sgmax): + pstr = str(i) + ":" + pstr_spacegroup[i - 1] + "\t" + if j % 4 == 0 or j == sgmax: print(pstr) else: print(pstr, end='') @@ -1299,4 +1339,4 @@ def _buildDict(hstr): lookupHall, Hall_to_sgnum = _buildDict(HALL_STR) -lookupHM, HM_to_sgnum = _buildDict(HM_STR) +lookupHM, HM_to_sgnum = _buildDict(HM_STR) diff --git a/hexrd/core/material/symmetry.py b/hexrd/core/material/symmetry.py index 5cb5dfc0f..ad312ea9f 100644 --- a/hexrd/core/material/symmetry.py +++ b/hexrd/core/material/symmetry.py @@ -39,7 +39,11 @@ from hexrd.core.utils.decorators import memoize # Imports in case others are importing from here -from hexrd.core.rotations import toFundamentalRegion, ltypeOfLaueGroup, quatOfLaueGroup +from hexrd.core.rotations import ( + toFundamentalRegion, + ltypeOfLaueGroup, + quatOfLaueGroup, +) # ============================================================================= @@ -47,11 +51,11 @@ # ============================================================================= eps = constants.sqrt_epsf -sq3by2 = sqrt(3.)/2. -piby2 = pi/2. -piby3 = pi/3. -piby4 = pi/4. -piby6 = pi/6. +sq3by2 = sqrt(3.0) / 2.0 +piby2 = pi / 2.0 +piby3 = pi / 3.0 +piby4 = pi / 4.0 +piby6 = pi / 6.0 # ============================================================================= @@ -68,7 +72,7 @@ def GeneratorString(sgnum): ... and so on ''' - sg = sgnum-1 + sg = sgnum - 1 # sgdict = {146:231, 148:232, 155:233, 160:234, 161:235, 166:236, 167:237} # if(sgnum in sgdict): # sg = sgdict[sgnum]-1 @@ -86,17 +90,17 @@ def MakeGenerators(genstr, setting): centrosymmetric = False # check if space group has inversion symmetry - if(genstr[0] == '1'): + if genstr[0] == '1': t = 'hOOO' mat = SYM_fillgen(t) genmat = np.concatenate((genmat, mat)) centrosymmetric = True n = int(genstr[1]) - if(n > 0): + if n > 0: for i in range(n): istart = 2 + i * 4 - istop = 2 + (i+1) * 4 + istop = 2 + (i + 1) * 4 t = genstr[istart:istop] @@ -108,20 +112,20 @@ def MakeGenerators(genstr, setting): if there is an alternate setting for this space group check if the alternate setting needs to be used ''' - if(genstr[istop] != '0'): - if(setting != 0): - t = genstr[istop+1:istop+4] + if genstr[istop] != '0': + if setting != 0: + t = genstr[istop + 1 : istop + 4] t = 'a' + t # get the translation without any rotation sym = np.squeeze(SYM_fillgen(t, sgn=-1)) sym2 = np.squeeze(SYM_fillgen(t)) for i in range(1, genmat.shape[0]): - generator = np.dot(sym2, np.dot( - np.squeeze(genmat[i, :, :]), - sym)) + generator = np.dot( + sym2, np.dot(np.squeeze(genmat[i, :, :]), sym) + ) frac = np.modf(generator[0:3, 3])[0] - frac[frac < 0.] += 1. 
- frac[np.abs(frac) < 1E-5] = 0.0 - frac[np.abs(frac-1.0) < 1E-5] = 0.0 + frac[frac < 0.0] += 1.0 + frac[np.abs(frac) < 1e-5] = 0.0 + frac[np.abs(frac - 1.0) < 1e-5] = 0.0 generator[0:3, 3] = frac genmat[i, :, :] = generator @@ -130,13 +134,16 @@ def MakeGenerators(genstr, setting): def SYM_fillgen(t, sgn=1): mat = np.zeros([4, 4]) - mat[3, 3] = 1. + mat[3, 3] = 1.0 mat[0:3, 0:3] = constants.SYM_GENERATORS[t[0]] - mat[0:3, 3] = sgn*np.array([constants.SYM_GENERATORS[t[1]], - constants.SYM_GENERATORS[t[2]], - constants.SYM_GENERATORS[t[3]] - ]) + mat[0:3, 3] = sgn * np.array( + [ + constants.SYM_GENERATORS[t[1]], + constants.SYM_GENERATORS[t[2]], + constants.SYM_GENERATORS[t[3]], + ] + ) mat = np.broadcast_to(mat, [1, 4, 4]) return mat @@ -151,7 +158,7 @@ def GenerateSGSym(sgnum, setting=0): genstr = GeneratorString(sgnum) genmat, centrosymmetric = MakeGenerators(genstr, setting) symmorphic = False - if(sgnum in constants.sgnum_symmorphic): + if sgnum in constants.sgnum_symmorphic: symmorphic = True ''' use the generator string to get the rest of the @@ -179,17 +186,17 @@ def GenerateSGSym(sgnum, setting=0): # only fractional parts frac = np.modf(gnew[0:3, 3])[0] - frac[frac < 0.] += 1. - frac[np.abs(frac) < 1E-5] = 0.0 - frac[np.abs(frac-1.0) < 1E-5] = 0.0 + frac[frac < 0.0] += 1.0 + frac[np.abs(frac) < 1e-5] = 0.0 + frac[np.abs(frac - 1.0) < 1e-5] = 0.0 gnew[0:3, 3] = frac - if(isnew(gnew, SYM_SG)): + if isnew(gnew, SYM_SG): gnew = np.broadcast_to(gnew, [1, 4, 4]) SYM_SG = np.concatenate((SYM_SG, gnew)) nsym += 1 - if (nsym >= 192): + if nsym >= 192: k2 = nsym k1 = nsym @@ -200,7 +207,7 @@ def GenerateSGSym(sgnum, setting=0): SYM_PG_d_laue = GeneratePGSym_Laue(SYM_PG_d) for s in SYM_PG_d: - if(np.allclose(-np.eye(3), s)): + if np.allclose(-np.eye(3), s): centrosymmetric = True return SYM_SG, SYM_PG_d, SYM_PG_d_laue, centrosymmetric, symmorphic @@ -226,7 +233,7 @@ def GeneratePGSym(SYM_SG): g = SYM_SG[i, :, :] t = g[0:3, 3] g = g[0:3, 0:3] - if(isnew(g, SYM_PG_d)): + if isnew(g, SYM_PG_d): g = np.broadcast_to(g, [1, 3, 3]) SYM_PG_d = np.concatenate((SYM_PG_d, g)) @@ -246,7 +253,7 @@ def GeneratePGSym_Laue(SYM_PG_d): first check if the group already has the inversion symmetry ''' for s in SYM_PG_d: - if(np.allclose(s, -np.eye(3))): + if np.allclose(s, -np.eye(3)): return SYM_PG_d ''' @@ -270,12 +277,12 @@ def GeneratePGSym_Laue(SYM_PG_d): g2 = np.squeeze(SYM_PG_d_laue[k2, :, :]) gnew = np.dot(g1, g2) - if(isnew(gnew, SYM_PG_d_laue)): + if isnew(gnew, SYM_PG_d_laue): gnew = np.broadcast_to(gnew, [1, 3, 3]) SYM_PG_d_laue = np.concatenate((SYM_PG_d_laue, gnew)) nsym += 1 - if (nsym >= 48): + if nsym >= 48: k2 = nsym k1 = nsym @@ -296,19 +303,19 @@ def isnew(mat, sym_mats): def latticeType(sgnum): - if(sgnum <= 2): + if sgnum <= 2: return 'triclinic' - elif(sgnum > 2 and sgnum <= 15): + elif sgnum > 2 and sgnum <= 15: return 'monoclinic' - elif(sgnum > 15 and sgnum <= 74): + elif sgnum > 15 and sgnum <= 74: return 'orthorhombic' - elif(sgnum > 74 and sgnum <= 142): + elif sgnum > 74 and sgnum <= 142: return 'tetragonal' - elif(sgnum > 142 and sgnum <= 167): + elif sgnum > 142 and sgnum <= 167: return 'trigonal' - elif(sgnum > 167 and sgnum <= 194): + elif sgnum > 167 and sgnum <= 194: return 'hexagonal' - elif(sgnum > 194 and sgnum <= 230): + elif sgnum > 194 and sgnum <= 230: return 'cubic' else: raise RuntimeError('symmetry.latticeType: unknown space group number') @@ -325,7 +332,7 @@ def MakeGenerators_PGSYM(pggenstr): SYM_GEN_PG = np.zeros([ngen, 3, 3]) for i in range(ngen): - s = 
pggenstr[i+1] + s = pggenstr[i + 1] SYM_GEN_PG[i, :, :] = constants.SYM_GENERATORS[s] return SYM_GEN_PG @@ -358,18 +365,18 @@ def GeneratePGSYM(pgsym): g2 = np.squeeze(SYM_GEN_PG[k2, :, :]) gnew = np.dot(g1, g2) - if(isnew(gnew, SYM_GEN_PG)): + if isnew(gnew, SYM_GEN_PG): gnew = np.broadcast_to(gnew, [1, 3, 3]) SYM_GEN_PG = np.concatenate((SYM_GEN_PG, gnew)) nsym += 1 - if (nsym >= 48): + if nsym >= 48: k2 = nsym k1 = nsym k2 += 1 k1 += 1 - SYM_GEN_PG[np.abs(SYM_GEN_PG) < eps] = 0. + SYM_GEN_PG[np.abs(SYM_GEN_PG) < eps] = 0.0 return SYM_GEN_PG diff --git a/hexrd/core/material/unitcell.py b/hexrd/core/material/unitcell.py index 5904732e0..5edb9e97e 100644 --- a/hexrd/core/material/unitcell.py +++ b/hexrd/core/material/unitcell.py @@ -3,6 +3,7 @@ from numba import njit from hexrd.core import constants from hexrd.core.material import spacegroup, symbols, symmetry + # TODO: Resolve extra-core-dependency from hexrd.hedm.ipfcolor import sphere_sector, colorspace from hexrd.core.valunits import valWUnit @@ -40,7 +41,7 @@ def _calcstar(v, sym, mat): for vec in vsym: vv = vp - vec dist = _calclength(vv, mat) - if dist < 1E-3: + if dist < 1e-3: isnew = False break if isnew: @@ -51,7 +52,6 @@ def _calcstar(v, sym, mat): class unitcell: - ''' >> @AUTHOR: Saransh Singh, Lawrence Livermore National Lab, saransh1@llnl.gov >> @DATE: 10/09/2018 SS 1.0 original @@ -62,11 +62,18 @@ class unitcell: # initialize the unitcell class # need lattice parameters and space group data from HDF5 file - def __init__(self, lp, sgnum, - atomtypes, charge, - atominfo, - U, dmin, beamenergy, - sgsetting=0): + def __init__( + self, + lp, + sgnum, + atomtypes, + charge, + atominfo, + U, + dmin, + beamenergy, + sgsetting=0, + ): self._tstart = time.time() self.pref = 0.4178214 @@ -116,10 +123,12 @@ def GetPgLg(self): def CalcWavelength(self): # wavelength in nm - self.wavelength = constants.cPlanck * \ - constants.cLight / \ - constants.cCharge / \ - self.voltage + self.wavelength = ( + constants.cPlanck + * constants.cLight + / constants.cCharge + / self.voltage + ) self.wavelength *= 1e9 def calcBetaij(self): @@ -127,11 +136,11 @@ def calcBetaij(self): self.betaij = np.zeros([3, 3, self.atom_ntype]) for i in range(self.U.shape[0]): U = self.U[i, :] - self.betaij[:, :, i] = np.array([[U[0], U[3], U[4]], - [U[3], U[1], U[5]], - [U[4], U[5], U[2]]]) + self.betaij[:, :, i] = np.array( + [[U[0], U[3], U[4]], [U[3], U[1], U[5]], [U[4], U[5], U[2]]] + ) - self.betaij[:, :, i] *= 2. * np.pi**2 * self._aij + self.betaij[:, :, i] *= 2.0 * np.pi**2 * self._aij def calcmatrices(self): @@ -154,9 +163,13 @@ def calcmatrices(self): ''' direct metric tensor ''' - self._dmt = np.array([[a**2, a*b*cg, a*c*cb], - [a*b*cg, b**2, b*c*ca], - [a*c*cb, b*c*ca, c**2]]) + self._dmt = np.array( + [ + [a**2, a * b * cg, a * c * cb], + [a * b * cg, b**2, b * c * ca], + [a * c * cb, b * c * ca, c**2], + ] + ) self._vol = np.sqrt(np.linalg.det(self.dmt)) if self.vol < 1e-5: @@ -170,30 +183,44 @@ def calcmatrices(self): ''' direct structure matrix ''' - self._dsm = np.array([[a, b*cg, c*cb], - [0., b*sg, -c*(cb*cg - ca)/sg], - [0., 0., self.vol/(a*b*sg)]]) + self._dsm = np.array( + [ + [a, b * cg, c * cb], + [0.0, b * sg, -c * (cb * cg - ca) / sg], + [0.0, 0.0, self.vol / (a * b * sg)], + ] + ) - self._dsm[np.abs(self._dsm) < eps] = 0. 
+ self._dsm[np.abs(self._dsm) < eps] = 0.0 ''' reciprocal structure matrix ''' - self._rsm = np.array([[1./a, 0., 0.], - [-1./(a*tg), 1./(b*sg), 0.], - [b*c*(cg*ca - cb)/(self.vol*sg), - a*c*(cb*cg - ca)/(self.vol*sg), - a*b*sg/self.vol]]) + self._rsm = np.array( + [ + [1.0 / a, 0.0, 0.0], + [-1.0 / (a * tg), 1.0 / (b * sg), 0.0], + [ + b * c * (cg * ca - cb) / (self.vol * sg), + a * c * (cb * cg - ca) / (self.vol * sg), + a * b * sg / self.vol, + ], + ] + ) - self._rsm[np.abs(self._rsm) < eps] = 0. + self._rsm[np.abs(self._rsm) < eps] = 0.0 ast = self.CalcLength([1, 0, 0], 'r') bst = self.CalcLength([0, 1, 0], 'r') cst = self.CalcLength([0, 0, 1], 'r') - self._aij = np.array([[ast**2, ast*bst, ast*cst], - [bst*ast, bst**2, bst*cst], - [cst*ast, cst*bst, cst**2]]) + self._aij = np.array( + [ + [ast**2, ast * bst, ast * cst], + [bst * ast, bst**2, bst * cst], + [cst * ast, cst * bst, cst**2], + ] + ) ''' transform between any crystal space to any other space. choices are 'd' (direct), 'r' (reciprocal) and 'c' (cartesian)''' @@ -208,7 +235,8 @@ def TransSpace(self, v_in, inspace, outspace): v_out = np.dot(self.dsm, v_in) else: raise ValueError( - 'inspace in "d" but outspace can\'t be identified') + 'inspace in "d" but outspace can\'t be identified' + ) elif inspace == 'r': if outspace == 'd': @@ -217,7 +245,8 @@ def TransSpace(self, v_in, inspace, outspace): v_out = np.dot(self.rsm, v_in) else: raise ValueError( - 'inspace in "r" but outspace can\'t be identified') + 'inspace in "r" but outspace can\'t be identified' + ) elif inspace == 'c': if outspace == 'r': @@ -226,7 +255,8 @@ def TransSpace(self, v_in, inspace, outspace): v_out = np.dot(v_in, self.rsm) else: raise ValueError( - 'inspace in "c" but outspace can\'t be identified') + 'inspace in "c" but outspace can\'t be identified' + ) else: raise ValueError('incorrect inspace argument') @@ -269,7 +299,7 @@ def CalcLength(self, u, space): def NormVec(self, u, space): ulen = self.CalcLength(u, space) - return u/ulen + return u / ulen ''' calculate angle between two vectors in any space''' @@ -278,7 +308,7 @@ def CalcAngle(self, u, v, space): ulen = self.CalcLength(u, space) vlen = self.CalcLength(v, space) - dot = self.CalcDot(u, v, space)/ulen/vlen + dot = self.CalcDot(u, v, space) / ulen / vlen if np.isclose(np.abs(dot), 1.0): dot = np.sign(dot) angle = np.arccos(dot) @@ -305,9 +335,13 @@ def CalcCross(self, p, q, inspace, outspace, vol_divide=False): else: vol = 1.0 - pxq = np.array([p[1]*q[2]-p[2]*q[1], - p[2]*q[0]-p[0]*q[2], - p[0]*q[1]-p[1]*q[0]]) + pxq = np.array( + [ + p[1] * q[2] - p[2] * q[1], + p[2] * q[0] - p[0] * q[2], + p[0] * q[1] - p[1] * q[0], + ] + ) if inspace == 'd': ''' @@ -324,7 +358,8 @@ def CalcCross(self, p, q, inspace, outspace, vol_divide=False): pxq = self.TransSpace(pxq, 'r', 'c') else: raise ValueError( - 'inspace is ''d'' but outspace is unidentified') + 'inspace is ' 'd' ' but outspace is unidentified' + ) elif inspace == 'r': ''' @@ -340,7 +375,8 @@ def CalcCross(self, p, q, inspace, outspace, vol_divide=False): pxq = self.TransSpace(pxq, 'd', 'c') else: raise ValueError( - 'inspace is ''r'' but outspace is unidentified') + 'inspace is ' 'r' ' but outspace is unidentified' + ) elif inspace == 'c': ''' @@ -356,7 +392,8 @@ def CalcCross(self, p, q, inspace, outspace, vol_divide=False): pass else: raise ValueError( - 'inspace is ''c'' but outspace is unidentified') + 'inspace is ' 'c' ' but outspace is unidentified' + ) else: raise ValueError('inspace is unidentified') @@ -399,16 +436,17 @@ def 
GenerateCartesianPGSym(self): self.SYM_PG_c.append(np.dot(self.dsm, np.dot(sop, self.rsm.T))) self.SYM_PG_c = np.array(self.SYM_PG_c) - self.SYM_PG_c[np.abs(self.SYM_PG_c) < eps] = 0. + self.SYM_PG_c[np.abs(self.SYM_PG_c) < eps] = 0.0 if self._pointGroup == self._laueGroup: self.SYM_PG_c_laue = self.SYM_PG_c else: for sop in self.SYM_PG_d_laue: self.SYM_PG_c_laue.append( - np.dot(self.dsm, np.dot(sop, self.rsm.T))) + np.dot(self.dsm, np.dot(sop, self.rsm.T)) + ) self.SYM_PG_c_laue = np.array(self.SYM_PG_c_laue) - self.SYM_PG_c_laue[np.abs(self.SYM_PG_c_laue) < eps] = 0. + self.SYM_PG_c_laue[np.abs(self.SYM_PG_c_laue) < eps] = 0.0 ''' use the point group symmetry of the supergroup @@ -441,18 +479,21 @@ def GenerateCartesianPGSym(self): for sop in sym_supergroup: self.SYM_PG_supergroup.append( - np.dot(self.dsm, np.dot(sop, self.rsm.T))) + np.dot(self.dsm, np.dot(sop, self.rsm.T)) + ) self.SYM_PG_supergroup = np.array(self.SYM_PG_supergroup) - self.SYM_PG_supergroup[np.abs(self.SYM_PG_supergroup) < eps] = 0. + self.SYM_PG_supergroup[np.abs(self.SYM_PG_supergroup) < eps] = 0.0 for sop in sym_supergroup_laue: self.SYM_PG_supergroup_laue.append( - np.dot(self.dsm, np.dot(sop, self.rsm.T))) + np.dot(self.dsm, np.dot(sop, self.rsm.T)) + ) self.SYM_PG_supergroup_laue = np.array(self.SYM_PG_supergroup_laue) - self.SYM_PG_supergroup_laue[np.abs( - self.SYM_PG_supergroup_laue) < eps] = 0. + self.SYM_PG_supergroup_laue[ + np.abs(self.SYM_PG_supergroup_laue) < eps + ] = 0.0 ''' the standard setting for the monoclinic system has the b-axis aligned @@ -466,7 +507,7 @@ def GenerateCartesianPGSym(self): ''' if self.latticeType == 'monoclinic': - om = np.array([[1., 0., 0.], [0., 0., 1.], [0., -1., 0.]]) + om = np.array([[1.0, 0.0, 0.0], [0.0, 0.0, 1.0], [0.0, -1.0, 0.0]]) for i, s in enumerate(self.SYM_PG_c): ss = np.dot(om, np.dot(s, om.T)) @@ -483,7 +524,7 @@ def GenerateCartesianPGSym(self): SS 12/10/2020 ''' if self._pointGroup == 'c1': - om = np.array([[1., 0., 0.], [0., 0., 1.], [0., -1., 0.]]) + om = np.array([[1.0, 0.0, 0.0], [0.0, 0.0, 1.0], [0.0, -1.0, 0.0]]) for i, s in enumerate(self.SYM_PG_supergroup): ss = np.dot(om, np.dot(s, om.T)) @@ -512,7 +553,7 @@ def CalcOrbit(self, v, reduceToUC=True): raise RuntimeError("fractional coordinate is not 3-d") r = v # using Wigner-Seitz notation - r = np.hstack((r, 1.)) + r = np.hstack((r, 1.0)) asym_pos = np.broadcast_to(r[0:3], [1, 3]) @@ -525,15 +566,15 @@ def CalcOrbit(self, v, reduceToUC=True): # reduce to fundamental unitcell with fractional # coordinates between 0-1 rr = np.modf(rr)[0] - rr[rr < 0.] += 1. - rr[np.abs(rr) < 1.0E-6] = 0.
+ rr[rr < 0.0] += 1.0 + rr[np.abs(rr) < 1.0e-6] = 0.0 # check if this is new isnew = True for j in range(n): v = rr - asym_pos[j] dist = self.CalcLength(v, 'd') - if dist < 1E-3: + if dist < 1e-3: isnew = False break @@ -594,9 +635,7 @@ def CalcPositions(self): self.numat = np.array(numat) self.asym_pos = asym_pos - def remove_duplicate_atoms(self, - atom_pos=None, - tol=1e-3): + def remove_duplicate_atoms(self, atom_pos=None, tol=1e-3): """ @date 03/04/2021 SS 1.0 original @@ -633,12 +672,11 @@ def remove_duplicate_atoms(self, idx.append(i) else: isclose = False - for j, uniqpos in enumerate(atom_pos_fixed): + for j, uniqpos in enumerate(atom_pos_fixed): pos2 = uniqpos[0:3] occ2 = uniqpos[3] # cases with fractional occupancy on same site - if (np.all(np.isclose(pos, pos2)) and - (occ+occ2 <= 1.)): + if np.all(np.isclose(pos, pos2)) and (occ + occ2 <= 1.0): atom_pos_fixed.append(np.hstack([pos, occ])) idx.append(i) isclose = True @@ -652,7 +690,7 @@ def remove_duplicate_atoms(self, for vvv in vv: # check if distance less than tol # the factor of 10 is for A --> nm - if self.CalcLength(vvv, 'd') < tol/10.: + if self.CalcLength(vvv, 'd') < tol / 10.0: # if true then its a repeated atom isclose = True break @@ -707,11 +745,11 @@ def CalcDensity(self): occ = self.atom_pos[i, 3] # -1 due to 0 indexing in python - self.avA += numat * constants.atom_weights[atype-1] * occ + self.avA += numat * constants.atom_weights[atype - 1] * occ self.avZ += numat * atype - self.density = self.avA / (self.vol * 1.0E-21 * constants.cAvogadro) + self.density = self.avA / (self.vol * 1.0e-21 * constants.cAvogadro) av_natom = np.dot(self.numat, self.atom_pos[:, 3]) @@ -733,19 +771,25 @@ def init_max_g_index(self): def CalcMaxGIndex(self): self.init_max_g_index() - while (1.0 / self.CalcLength( - np.array([self.ih, 0, 0], - dtype=np.float64), 'r') > self.dmin): + while ( + 1.0 + / self.CalcLength(np.array([self.ih, 0, 0], dtype=np.float64), 'r') + > self.dmin + ): self.ih = self.ih + 1 - while (1.0 / self.CalcLength( - np.array([0, self.ik, 0], - dtype=np.float64), 'r') > self.dmin): + while ( + 1.0 + / self.CalcLength(np.array([0, self.ik, 0], dtype=np.float64), 'r') + > self.dmin + ): self.ik = self.ik + 1 - while (1.0 / self.CalcLength( - np.array([0, 0, self.il], - dtype=np.float64), 'r') > self.dmin): + while ( + 1.0 + / self.CalcLength(np.array([0, 0, self.il], dtype=np.float64), 'r') + > self.dmin + ): self.il = self.il + 1 def InitializeInterpTable(self): @@ -764,15 +808,16 @@ def InitializeInterpTable(self): elem = constants.ptableinverse[Z] if Z <= 92: - gid = fid.get('/'+elem) + gid = fid.get('/' + elem) data = np.array(gid.get('data')) - self.pe_cs[elem] = interp1d(data[:, WAV_ID], - data[:, MU_ID]+data[:,COH_INCOH_ID]) + self.pe_cs[elem] = interp1d( + data[:, WAV_ID], data[:, MU_ID] + data[:, COH_INCOH_ID] + ) data = data[:, [WAV_ID, REAL_F1_ID, IMAG_F2_ID]] f_anomalous_data.append(data) else: - wav = np.linspace(1.16E2, 2.86399992e-03, 189) - zs = np.ones_like(wav)*Z + wav = np.linspace(1.16e2, 2.86399992e-03, 189) + zs = np.ones_like(wav) * Z zrs = np.zeros_like(wav) data_zs = np.vstack((wav, zs, zrs)).T self.pe_cs[elem] = interp1d(wav, zrs) @@ -781,7 +826,11 @@ def InitializeInterpTable(self): n = max([x.shape[0] for x in f_anomalous_data]) self.f_anomalous_data = np.zeros([self.atom_ntype, n, 3]) self.f_anomalous_data_sizes = np.zeros( - [self.atom_ntype, ], dtype=np.int32) + [ + self.atom_ntype, + ], + dtype=np.int32, + ) for i in range(self.atom_ntype): nd = f_anomalous_data[i].shape[0] @@ 
-791,19 +840,32 @@ def InitializeInterpTable(self): def CalcXRSF(self, hkl): # TODO: Resolve extra-core dependency from hexrd.powder.wppf.xtal import _calcxrsf + ''' the 1E-2 is to convert to A^-2 since the fitting is done in those units ''' - fNT = np.zeros([self.atom_ntype, ]) - frel = np.zeros([self.atom_ntype, ]) + fNT = np.zeros( + [ + self.atom_ntype, + ] + ) + frel = np.zeros( + [ + self.atom_ntype, + ] + ) scatfac = np.zeros([self.atom_ntype, 11]) f_anomalous_data = self.f_anomalous_data hkl2d = np.atleast_2d(hkl).astype(np.float64) nref = hkl2d.shape[0] - multiplicity = np.ones([nref, ]) + multiplicity = np.ones( + [ + nref, + ] + ) w_int = 1.0 occ = self.atom_pos[:, 3] @@ -828,23 +890,25 @@ def CalcXRSF(self, hkl): frel[i] = constants.frel[elem] fNT[i] = constants.fNT[elem] - sf, sf_raw = _calcxrsf(hkl2d, - nref, - multiplicity, - w_int, - self.wavelength, - self.rmt.astype(np.float64), - self.atom_type, - self.atom_ntype, - betaij, - occ, - self.asym_pos_arr, - self.numat, - scatfac, - fNT, - frel, - f_anomalous_data, - self.f_anomalous_data_sizes) + sf, sf_raw = _calcxrsf( + hkl2d, + nref, + multiplicity, + w_int, + self.wavelength, + self.rmt.astype(np.float64), + self.atom_type, + self.atom_ntype, + betaij, + occ, + self.asym_pos_arr, + self.numat, + scatfac, + fNT, + frel, + f_anomalous_data, + self.f_anomalous_data_sizes, + ) return sf_raw @@ -855,8 +919,8 @@ def CalcXRSF(self, hkl): """ def calc_unitcell_mass(self): - a_mass = constants.atom_weights[self.atom_type-1] - return np.sum(a_mass*self.numat) + a_mass = constants.atom_weights[self.atom_type - 1] + return np.sum(a_mass * self.numat) """ calculate the number density in 1/micron^3 @@ -872,12 +936,15 @@ def calc_number_density(self): def calc_absorption_cross_sec(self): - abs_cs_total = 0. + abs_cs_total = 0.0 for i in range(self.atom_ntype): Z = self.atom_type[i] elem = constants.ptableinverse[Z] - abs_cs_total += self.pe_cs[elem](self.wavelength) *\ - self.numat[i]/np.sum(self.numat) + abs_cs_total += ( + self.pe_cs[elem](self.wavelength) + * self.numat[i] + / np.sum(self.numat) + ) return abs_cs_total """ @@ -901,7 +968,7 @@ def calc_absorption_length(self): abs_cs_total = self.calc_absorption_cross_sec() # the 1e4 factor converts wavelength from cm -> micron - self.absorption_length = 1e4/(abs_cs_total*self.density) + self.absorption_length = 1e4 / (abs_cs_total * self.density) """ calculate bragg angle for a reflection. 
returns NaN if @@ -928,7 +995,7 @@ def ChooseSymmetric(self, hkllist, InversionSymmetry=True): geqv = self.CalcStar(g, 'r', applyLaue=laue) - for r in geqv[1:, ]: + for r in geqv[1:,]: rid = np.where(np.all(r == hkllist, axis=1)) mask[rid] = False @@ -956,8 +1023,14 @@ def SortHKL(self, hkllist): glen.append(np.round(self.CalcLength(g, 'r'), 8)) # glen = np.atleast_2d(np.array(glen,dtype=float)).T - dtype = [('glen', float), ('max', int), ('sum', int), - ('h', int), ('k', int), ('l', int)] + dtype = [ + ('glen', float), + ('max', int), + ('sum', int), + ('h', int), + ('k', int), + ('l', int), + ] a = [] for i, gl in enumerate(glen): @@ -981,16 +1054,21 @@ def getHKLs(self, dmin): ignore all l < 0 ''' - hmin = -self.ih-1 + hmin = -self.ih - 1 hmax = self.ih - kmin = -self.ik-1 + kmin = -self.ik - 1 kmax = self.ik lmin = -1 lmax = self.il - hkllist = np.array([[ih, ik, il] for ih in np.arange(hmax, hmin, -1) - for ik in np.arange(kmax, kmin, -1) - for il in np.arange(lmax, lmin, -1)]) + hkllist = np.array( + [ + [ih, ik, il] + for ih in np.arange(hmax, hmin, -1) + for ik in np.arange(kmax, kmin, -1) + for il in np.arange(lmax, lmin, -1) + ] + ) hkl_allowed = spacegroup.Allowed_HKLs(self.sgnum, hkllist) @@ -1004,7 +1082,7 @@ def getHKLs(self, dmin): # ignore [0 0 0] as it is the direct beam if np.sum(np.abs(g)) != 0: - dspace = 1./self.CalcLength(g, 'r') + dspace = 1.0 / self.CalcLength(g, 'r') if dspace >= dmin: hkl_dsp.append(g) @@ -1031,6 +1109,7 @@ def getHKLs(self, dmin): self.hkls = self.SortHKL(hkl) return self.hkls + ''' set some properties for the unitcell class. only the lattice parameters, space group and asymmetric positions can change, @@ -1046,8 +1125,10 @@ def Required_C(self, C): def MakeStiffnessMatrix(self, inp_Cvals): if len(inp_Cvals) != len(_StiffnessDict[self._laueGroup][0]): x = len(_StiffnessDict[self._laueGroup][0]) - msg = (f"number of constants entered is not correct." - f" need a total of {x} independent constants.") + msg = ( + f"number of constants entered is not correct." + f" need a total of {x} independent constants." + ) raise IOError(msg) # initialize all zeros and fill the supplied values @@ -1115,15 +1196,15 @@ def inside_spheretriangle(self, conn, dir3, hemisphere, switch): number ''' if np.abs(d1) < eps: - d1 = 0. + d1 = 0.0 if np.abs(d2) < eps: - d2 = 0. + d2 = 0.0 if np.abs(d3) < eps: - d3 = 0. + d3 = 0.0 ss = np.unique(np.sign([d1, d2, d3])) if hemisphere == 'upper': - if np.all(ss >= 0.): + if np.all(ss >= 0.0): mask.append(True) else: mask.append(False) @@ -1184,11 +1265,12 @@ def reduce_dirvector(self, dir3, switch='pg'): dir3n = dir3 else: if np.all(np.linalg.norm(dir3) > eps): - dir3n = dir3/np.tile(np.linalg.norm(dir3, axis=1), [3, 1]).T + dir3n = dir3 / np.tile(np.linalg.norm(dir3, axis=1), [3, 1]).T else: raise RuntimeError( "at least one of the input directions seems \ - to be a null vector") + to be a null vector" + ) ''' we need both the symmetry reductions for the point group and laue group @@ -1231,18 +1313,19 @@ def reduce_dirvector(self, dir3, switch='pg'): if hemisphere == 'both': mask = np.ones(dir3_sym.shape[0], dtype=bool) elif hemisphere == 'upper': - mask = dir3_sym[:, 2] >= 0.
+ mask = dir3_sym[:, 2] >= 0.0 else: for ii in range(ntriangle): tmpmask = self.inside_spheretriangle( - connectivity[:, ii], dir3_sym, - hemisphere, switch) + connectivity[:, ii], dir3_sym, hemisphere, switch + ) mask = np.logical_or(mask, tmpmask) if np.sum(mask) > 0: if dir3_reduced.size != 0: dir3_reduced = np.vstack( - (dir3_reduced, dir3_sym[mask, :])) + (dir3_reduced, dir3_sym[mask, :]) + ) idx_red = np.hstack((idx_red, idx[mask])) else: dir3_reduced = np.copy(dir3_sym[mask, :]) @@ -1284,7 +1367,8 @@ class which correctly color the orientations for this crystal class. the ''' dir3_red = self.reduce_dirvector(dir3, switch='laue') dir3_red_supergroup = self.reduce_dirvector( - dir3, switch='superlaue') + dir3, switch='superlaue' + ) switch = 'superlaue' else: @@ -1301,10 +1385,9 @@ class which correctly color the orientations for this crystal class. the rgb = colorspace.hsl2rgb(hsl) return rgb - def color_orientations(self, - rmats, - ref_dir=np.array([0., 0., 1.]), - laueswitch=True): + def color_orientations( + self, rmats, ref_dir=np.array([0.0, 0.0, 1.0]), laueswitch=True + ): ''' @AUTHOR Saransh Singh, Lawrence Livermore National Lab, saransh1@llnl.gov @DATE 11/12/2020 SS 1.0 original @@ -1327,7 +1410,9 @@ def color_orientations(self, if rmats.ndim == 2: rmats = np.atleast_3d(rmats).T else: - assert rmats.ndim == 3, "rotations matrices need to \ + assert ( + rmats.ndim == 3 + ), "rotations matrices need to \ be nx3x3. Please check size." ''' @@ -1366,12 +1451,10 @@ def convert_lp_to_valunits(self, lp): lp_valunit = [] for i in range(6): if i < 3: - lp_valunit.append( - valWUnit('lp', 'length', lp[i], 'nm')) + lp_valunit.append(valWUnit('lp', 'length', lp[i], 'nm')) else: - lp_valunit.append( - valWUnit('lp', 'angle', lp[i], 'degrees')) + lp_valunit.append(valWUnit('lp', 'angle', lp[i], 'degrees')) return lp_valunit @@ -1381,8 +1464,7 @@ def fill_correct_lp_vals(self, lp, val, lp_name): """ index = list(_lpname).index(lp_name) lp[index] = val - lp_red = [lp[i] for i in - _rqpDict[self.latticeType][0]] + lp_red = [lp[i] for i in _rqpDict[self.latticeType][0]] lp = _rqpDict[self.latticeType][1](lp_red) lp_valunit = self.convert_lp_to_valunits(lp) return lp_valunit @@ -1393,20 +1475,18 @@ def compliance(self): if not hasattr(self, 'stiffness'): raise AttributeError('Stiffness not set on unit cell') - return np.linalg.inv(self.stiffness / 1.e3) + return np.linalg.inv(self.stiffness / 1.0e3) @compliance.setter def compliance(self, v): # Compliance in TPa⁻¹. Stiffness is in GPa. 
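# (unit bookkeeping, spelled out as a note: with the stiffness C stored
#  in GPa, C / 1.0e3 is in TPa, so inv(C / 1.0e3) in the getter comes
#  out in TPa^-1; the setter mirrors this, inv(S) giving TPa and the
#  factor 1.0e3 converting back to GPa)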
- self.stiffness = np.linalg.inv(v) * 1.e3 + self.stiffness = np.linalg.inv(v) * 1.0e3 # lattice constants as properties @property def lparms(self): - return [self.a, self.b, - self.c, self.alpha, self.beta, - self.gamma] + return [self.a, self.b, self.c, self.alpha, self.beta, self.gamma] @lparms.setter def lparms(self, lp): @@ -1428,8 +1508,7 @@ def lparms(self, lp): @property def lparms_reduced(self): lp = self.lparms - lp_red = [lp[i] for i in - _rqpDict[self.latticeType][0]] + lp_red = [lp[i] for i in _rqpDict[self.latticeType][0]] return lp_red @property @@ -1440,12 +1519,10 @@ def a(self): def a(self, val): if self.is_editable("a"): lp = self.lparms - lp_valunit = self.fill_correct_lp_vals( - lp, val, "a") + lp_valunit = self.fill_correct_lp_vals(lp, val, "a") self.lparms = lp_valunit else: - msg = (f"not an editable field" - f" for this space group") + msg = f"not an editable field" f" for this space group" raise RuntimeError(msg) @property @@ -1456,12 +1533,10 @@ def b(self): def b(self, val): if self.is_editable("b"): lp = self.lparms - lp_valunit = self.fill_correct_lp_vals( - lp, val, "b") + lp_valunit = self.fill_correct_lp_vals(lp, val, "b") self.lparms = lp_valunit else: - msg = (f"not an editable field" - f" for this space group") + msg = f"not an editable field" f" for this space group" raise RuntimeError(msg) @property @@ -1472,12 +1547,10 @@ def c(self): def c(self, val): if self.is_editable("c"): lp = self.lparms - lp_valunit = self.fill_correct_lp_vals( - lp, val, "c") + lp_valunit = self.fill_correct_lp_vals(lp, val, "c") self.lparms = lp_valunit else: - msg = (f"not an editable field" - f" for this space group") + msg = f"not an editable field" f" for this space group" raise RuntimeError(msg) @property @@ -1488,12 +1561,10 @@ def alpha(self): def alpha(self, val): if self.is_editable("alpha"): lp = self.lparms - lp_valunit = self.fill_correct_lp_vals( - lp, val, "alpha") + lp_valunit = self.fill_correct_lp_vals(lp, val, "alpha") self.lparms = lp_valunit else: - msg = (f"not an editable field" - f" for this space group") + msg = f"not an editable field" f" for this space group" raise RuntimeError(msg) @property @@ -1504,12 +1575,10 @@ def beta(self): def beta(self, val): if self.is_editable("beta"): lp = self.lparms - lp_valunit = self.fill_correct_lp_vals( - lp, val, "beta") + lp_valunit = self.fill_correct_lp_vals(lp, val, "beta") self.lparms = lp_valunit else: - msg = (f"not an editable field" - f" for this space group") + msg = f"not an editable field" f" for this space group" raise RuntimeError(msg) @property @@ -1520,12 +1589,10 @@ def gamma(self): def gamma(self, val): if self.is_editable("gamma"): lp = self.lparms - lp_valunit = self.fill_correct_lp_vals( - lp, val, "gamma") + lp_valunit = self.fill_correct_lp_vals(lp, val, "gamma") self.lparms = lp_valunit else: - msg = (f"not an editable field" - f" for this space group") + msg = f"not an editable field" f" for this space group" raise RuntimeError(msg) @property @@ -1576,17 +1643,21 @@ def sgnum(self): @sgnum.setter def sgnum(self, val): - if not(isinstance(val, int)): + if not (isinstance(val, int)): raise ValueError('space group should be integer') - if not((val >= 1) and (val <= 230)): + if not ((val >= 1) and (val <= 230)): raise ValueError('space group number should be between 1 and 230.') self._sym_sgnum = val - self.sg_hmsymbol = symbols.pstr_spacegroup[val-1].strip() + self.sg_hmsymbol = symbols.pstr_spacegroup[val - 1].strip() - self.SYM_SG, self.SYM_PG_d, self.SYM_PG_d_laue, \ - 
self.centrosymmetric, self.symmorphic = \ - symmetry.GenerateSGSym(self.sgnum, self.sgsetting) + ( + self.SYM_SG, + self.SYM_PG_d, + self.SYM_PG_d_laue, + self.centrosymmetric, + self.symmorphic, + ) = symmetry.GenerateSGSym(self.sgnum, self.sgsetting) self.latticeType = symmetry.latticeType(self.sgnum) @@ -1611,10 +1682,12 @@ def sgnum(self, val): ''' SS 11/11/2020 adding the sphere_sector class initialization here ''' - self.sphere_sector = sphere_sector.sector(self._pointGroup, - self._laueGroup, - self._supergroup, - self._supergroup_laue) + self.sphere_sector = sphere_sector.sector( + self._pointGroup, + self._laueGroup, + self._supergroup, + self._supergroup_laue, + ) self.CalcDensity() self.calc_absorption_length() @@ -1640,10 +1713,12 @@ def atom_pos(self, val): """ if hasattr(self, 'atom_type'): if self.atom_ntype != val.shape[0]: - msg = (f"incorrect number of atom positions." - f" number of atom type = {self.atom_ntype} " - f" and number of" - f" atom positions = {val.shape[0]}.") + msg = ( + f"incorrect number of atom positions." + f" number of atom type = {self.atom_ntype} " + f" and number of" + f" atom positions = {val.shape[0]}." + ) raise ValueError(msg) self._atom_pos = val @@ -1668,8 +1743,9 @@ def asym_pos(self): @asym_pos.setter def asym_pos(self, val): - assert(type(val) == list),\ - 'input type to asymmetric positions should be list' + assert ( + type(val) == list + ), 'input type to asymmetric positions should be list' self._asym_pos = val @property @@ -1678,8 +1754,9 @@ def numat(self): @numat.setter def numat(self, val): - assert(val.shape[0] == - self.atom_ntype), 'shape of numat is not consistent' + assert ( + val.shape[0] == self.atom_ntype + ), 'shape of numat is not consistent' self._numat = val # direct metric tensor is read only @@ -1713,18 +1790,18 @@ def vol(self): @property def vol_per_atom(self): # vol per atom in A^3 - return 1e3*self.vol/self.num_atom + return 1e3 * self.vol / self.num_atom _rqpDict = { 'triclinic': (tuple(range(6)), lambda p: p), # all 6 # note beta 'monoclinic': ((0, 1, 2, 4), lambda p: (p[0], p[1], p[2], 90, p[3], 90)), - 'orthorhombic': ((0, 1, 2), lambda p: (p[0], p[1], p[2], 90, 90, 90)), - 'tetragonal': ((0, 2), lambda p: (p[0], p[0], p[1], 90, 90, 90)), - 'trigonal': ((0, 2), lambda p: (p[0], p[0], p[1], 90, 90, 120)), - 'hexagonal': ((0, 2), lambda p: (p[0], p[0], p[1], 90, 90, 120)), - 'cubic': ((0,), lambda p: (p[0], p[0], p[0], 90, 90, 90)), + 'orthorhombic': ((0, 1, 2), lambda p: (p[0], p[1], p[2], 90, 90, 90)), + 'tetragonal': ((0, 2), lambda p: (p[0], p[0], p[1], 90, 90, 90)), + 'trigonal': ((0, 2), lambda p: (p[0], p[0], p[1], 90, 90, 120)), + 'hexagonal': ((0, 2), lambda p: (p[0], p[0], p[1], 90, 90, 120)), + 'cubic': ((0,), lambda p: (p[0], p[0], p[0], 90, 90, 90)), } _lpname = np.array(['a', 'b', 'c', 'alpha', 'beta', 'gamma']) @@ -1764,78 +1841,68 @@ def vol_per_atom(self): supergroup_11 = 'oh' -def _sgrange(min, max): return tuple(range(min, max + 1)) # inclusive range +def _sgrange(min, max): + return tuple(range(min, max + 1)) # inclusive range + ''' 11/20/2020 SS added supergroup to the list which is used for coloring the fundamental zone IPF ''' _pgDict = { - _sgrange(1, 1): ('c1', laue_1, - supergroup_1, supergroup_00), # Triclinic - _sgrange(2, 2): ('ci', laue_1, \ - supergroup_00, supergroup_00), # laue 1 - _sgrange(3, 5): ('c2', laue_2, \ - supergroup_2, supergroup_3), # Monoclinic - _sgrange(6, 9): ('cs', laue_2, \ - supergroup_1, supergroup_3), - _sgrange(10, 15): ('c2h', laue_2, \ - supergroup_3, 
supergroup_3), # laue 2 - _sgrange(16, 24): ('d2', laue_3, \ - supergroup_3, supergroup_3), # Orthorhombic - _sgrange(25, 46): ('c2v', laue_3, \ - supergroup_2, supergroup_3), - _sgrange(47, 74): ('d2h', laue_3, \ - supergroup_3, supergroup_3), # laue 3 - _sgrange(75, 80): ('c4', laue_4, \ - supergroup_4, supergroup_5), # Tetragonal - _sgrange(81, 82): ('s4', laue_4, \ - supergroup_01, supergroup_5), - _sgrange(83, 88): ('c4h', laue_4, \ - supergroup_5, supergroup_5), # laue 4 - _sgrange(89, 98): ('d4', laue_5, \ - supergroup_5, supergroup_5), - _sgrange(99, 110): ('c4v', laue_5, \ - supergroup_4, supergroup_5), - _sgrange(111, 122): ('d2d', laue_5, \ - supergroup_5, supergroup_5), - _sgrange(123, 142): ('d4h', laue_5, \ - supergroup_5, supergroup_5), # laue 5 + _sgrange(1, 1): ('c1', laue_1, supergroup_1, supergroup_00), # Triclinic + _sgrange(2, 2): ('ci', laue_1, supergroup_00, supergroup_00), # laue 1 + _sgrange(3, 5): ('c2', laue_2, supergroup_2, supergroup_3), # Monoclinic + _sgrange(6, 9): ('cs', laue_2, supergroup_1, supergroup_3), + _sgrange(10, 15): ('c2h', laue_2, supergroup_3, supergroup_3), # laue 2 + _sgrange(16, 24): ( + 'd2', + laue_3, + supergroup_3, + supergroup_3, + ), # Orthorhombic + _sgrange(25, 46): ('c2v', laue_3, supergroup_2, supergroup_3), + _sgrange(47, 74): ('d2h', laue_3, supergroup_3, supergroup_3), # laue 3 + _sgrange(75, 80): ('c4', laue_4, supergroup_4, supergroup_5), # Tetragonal + _sgrange(81, 82): ('s4', laue_4, supergroup_01, supergroup_5), + _sgrange(83, 88): ('c4h', laue_4, supergroup_5, supergroup_5), # laue 4 + _sgrange(89, 98): ('d4', laue_5, supergroup_5, supergroup_5), + _sgrange(99, 110): ('c4v', laue_5, supergroup_4, supergroup_5), + _sgrange(111, 122): ('d2d', laue_5, supergroup_5, supergroup_5), + _sgrange(123, 142): ('d4h', laue_5, supergroup_5, supergroup_5), # laue 5 # Trigonal # laue 6 [also c3i] - _sgrange(143, 146): ('c3', laue_6, \ - supergroup_6, supergroup_02), - _sgrange(147, 148): ('s6', laue_6, \ - supergroup_02, supergroup_02), - _sgrange(149, 155): ('d3', laue_7, \ - supergroup_7, supergroup_9), - _sgrange(156, 161): ('c3v', laue_7, \ - supergroup_6, supergroup_9), - _sgrange(162, 167): ('d3d', laue_7, \ - supergroup_9, supergroup_9), # laue 7 - _sgrange(168, 173): ('c6', laue_8, \ - supergroup_7, supergroup_9), # Hexagonal - _sgrange(174, 174): ('c3h', laue_8, \ - supergroup_7, supergroup_9), - _sgrange(175, 176): ('c6h', laue_8, \ - supergroup_9, supergroup_9), # laue 8 - _sgrange(177, 182): ('d6', laue_9, \ - supergroup_9, supergroup_9), - _sgrange(183, 186): ('c6v', laue_9, \ - supergroup_7, supergroup_9), - _sgrange(187, 190): ('d3h', laue_9, \ - supergroup_9, supergroup_9), - _sgrange(191, 194): ('d6h', laue_9, \ - supergroup_9, supergroup_9), # laue 9 - _sgrange(195, 199): ('t', laue_10, \ - supergroup_10, supergroup_11), # Cubic - _sgrange(200, 206): ('th', laue_10, \ - supergroup_11, supergroup_11), # laue 10 - _sgrange(207, 214): ('o', laue_11, \ - supergroup_11, supergroup_11), - _sgrange(215, 220): ('td', laue_11, \ - supergroup_10, supergroup_11), - _sgrange(221, 230): ('oh', laue_11, \ - supergroup_11, supergroup_11) # laue 11 + _sgrange(143, 146): ('c3', laue_6, supergroup_6, supergroup_02), + _sgrange(147, 148): ('s6', laue_6, supergroup_02, supergroup_02), + _sgrange(149, 155): ('d3', laue_7, supergroup_7, supergroup_9), + _sgrange(156, 161): ('c3v', laue_7, supergroup_6, supergroup_9), + _sgrange(162, 167): ('d3d', laue_7, supergroup_9, supergroup_9), # laue 7 + _sgrange(168, 173): ( + 'c6', + laue_8, + 
supergroup_7, + supergroup_9, + ), # Hexagonal + _sgrange(174, 174): ('c3h', laue_8, supergroup_7, supergroup_9), + _sgrange(175, 176): ('c6h', laue_8, supergroup_9, supergroup_9), # laue 8 + _sgrange(177, 182): ('d6', laue_9, supergroup_9, supergroup_9), + _sgrange(183, 186): ('c6v', laue_9, supergroup_7, supergroup_9), + _sgrange(187, 190): ('d3h', laue_9, supergroup_9, supergroup_9), + _sgrange(191, 194): ('d6h', laue_9, supergroup_9, supergroup_9), # laue 9 + _sgrange(195, 199): ('t', laue_10, supergroup_10, supergroup_11), # Cubic + _sgrange(200, 206): ( + 'th', + laue_10, + supergroup_11, + supergroup_11, + ), # laue 10 + _sgrange(207, 214): ('o', laue_11, supergroup_11, supergroup_11), + _sgrange(215, 220): ('td', laue_11, supergroup_10, supergroup_11), + _sgrange(221, 230): ( + 'oh', + laue_11, + supergroup_11, + supergroup_11, + ), # laue 11 } ''' @@ -1940,7 +2007,8 @@ def _sgrange(min, max): return tuple(range(min, max + 1)) # inclusive range ''' -def identity(x): return x +def identity(x): + return x def C_cyclictet_eq(x): @@ -1959,7 +2027,7 @@ def C_trigonal_eq(x): x[3, 5] = -x[0, 4] x[4, 4] = x[3, 3] x[4, 5] = x[0, 3] - x[5, 5] = 0.5*(x[0, 0]-x[0, 1]) + x[5, 5] = 0.5 * (x[0, 0] - x[0, 1]) return x @@ -1987,5 +2055,5 @@ def C_cubic_eq(x): laue_8: [type8, C_trigonal_eq], # cyclic hexagonal, 5 components needed laue_9: [type8, C_trigonal_eq], # dihedral hexagonal, 5 components laue_10: [type9, C_cubic_eq], # cubic, 3 components - laue_11: [type9, C_cubic_eq] # cubic, 3 components + laue_11: [type9, C_cubic_eq], # cubic, 3 components } diff --git a/hexrd/core/material/utils.py b/hexrd/core/material/utils.py index afbdd8f63..69f85030a 100644 --- a/hexrd/core/material/utils.py +++ b/hexrd/core/material/utils.py @@ -1,6 +1,10 @@ import importlib.resources import hexrd.core.resources -from hexrd.core.constants import cClassicalelectronRad as re, cAvogadro, ATOM_WEIGHTS_DICT +from hexrd.core.constants import ( + cClassicalelectronRad as re, + cAvogadro, + ATOM_WEIGHTS_DICT, +) import chemparse import numpy as np import h5py @@ -13,6 +17,8 @@ @author Saransh Singh, LLNL @date 1.0 original 02/16/2022 """ + + def interpret_formula(formula): """ first interpret if the formula is a dictionary @@ -27,33 +33,35 @@ def interpret_formula(formula): return chemparse.parse_formula(formula) + def calculate_molecular_mass(formula): """ interpret the formula as either a dictionary or a chemical formula """ formula_dict = interpret_formula(formula) - M = 0. 
- for k,v in formula_dict.items(): + M = 0.0 + for k, v in formula_dict.items(): M += v * ATOM_WEIGHTS_DICT[k] return M + """ calculate the number density of element or compound number density is the number of atoms per unit volume @author Saransh Singh, LLNL @date 1.0 original 02/16/2022 """ -def calculate_number_density(density, - formula): + + +def calculate_number_density(density, formula): molecular_mass = calculate_molecular_mass(formula) - return 1e-21*density*cAvogadro/molecular_mass + return 1e-21 * density * cAvogadro / molecular_mass + -def calculate_linear_absorption_length(density, - formula, - energy_vector): +def calculate_linear_absorption_length(density, formula, energy_vector): """ this function calculates the absorption length (in mm) based on both coherent and incoherent scattering cross @@ -86,38 +94,39 @@ def calculate_linear_absorption_length(density, data = importlib.resources.open_binary(hexrd.core.resources, 'mu_en.h5') fid = h5py.File(data, 'r') - formula_dict = interpret_formula(formula) + formula_dict = interpret_formula(formula) molecular_mass = calculate_molecular_mass(formula) density_conv = density mu_rho = 0.0 for k, v in formula_dict.items(): - wi = v*ATOM_WEIGHTS_DICT[k]/molecular_mass + wi = v * ATOM_WEIGHTS_DICT[k] / molecular_mass d = np.array(fid[f"/{k}/data"]) - E = d[:,0] - mu_rho_tab = d[:,1] + E = d[:, 0] + mu_rho_tab = d[:, 1] - val = np.interp(np.log(energy_vector), - np.log(E), - np.log(mu_rho_tab), - left=0.0, - right=0.0) + val = np.interp( + np.log(energy_vector), + np.log(E), + np.log(mu_rho_tab), + left=0.0, + right=0.0, + ) val = np.exp(val) mu_rho += wi * val - mu = mu_rho * density_conv # this is in cm^-1 - mu = mu * 1E-4 # this is in mm^-1 - absorption_length = 1./mu + mu = mu_rho * density_conv # this is in cm^-1 + mu = mu * 1e-4 # this is in mm^-1 + absorption_length = 1.0 / mu return absorption_length -def calculate_energy_absorption_length(density, - formula, - energy_vector): + +def calculate_energy_absorption_length(density, formula, energy_vector): """ this function calculates the absorption length (in mm) based on the total energy absorbed by the medium. this @@ -148,32 +157,34 @@ def calculate_energy_absorption_length(density, data = importlib.resources.open_binary(hexrd.core.resources, 'mu_en.h5') fid = h5py.File(data, 'r') - formula_dict = interpret_formula(formula) + formula_dict = interpret_formula(formula) molecular_mass = calculate_molecular_mass(formula) density_conv = density mu_rho = 0.0 for k, v in formula_dict.items(): - wi = v*ATOM_WEIGHTS_DICT[k]/molecular_mass + wi = v * ATOM_WEIGHTS_DICT[k] / molecular_mass d = np.array(fid[f"/{k}/data"]) - E = d[:,0] - mu_rho_tab = d[:,2] + E = d[:, 0] + mu_rho_tab = d[:, 2] - val = np.interp(np.log(energy_vector), - np.log(E), - np.log(mu_rho_tab), - left=0.0, - right=0.0) + val = np.interp( + np.log(energy_vector), + np.log(E), + np.log(mu_rho_tab), + left=0.0, + right=0.0, + ) val = np.exp(val) mu_rho += wi * val - mu = mu_rho * density_conv # this is in cm^-1 - mu = mu * 1E-4 # this is in microns^-1 - absorption_length = 1./mu + mu = mu_rho * density_conv # this is in cm^-1 + mu = mu * 1e-4 # this is in microns^-1 + absorption_length = 1.0 / mu return absorption_length @@ -208,20 +219,20 @@ def convert_density_to_atoms_per_cubic_angstrom( """ # get_smallest abundance if composition is None: - return 0. 
+ return 0.0 norm_elemental_abundances = normalize_composition(composition) mean_z = 0.0 for element, concentration in norm_elemental_abundances.items(): mean_z += concentration * constants.ATOM_WEIGHTS_DICT[element] - return density / mean_z * .602214129 + return density / mean_z * 0.602214129 def calculate_coherent_scattering_factor( element: str, Q: np.ndarray, ) -> np.ndarray: - s = Q/(4. * np.pi) + s = Q / (4.0 * np.pi) sfact = constants.scatfac[element] fe = sfact[5] for jj in range(5): @@ -261,10 +272,7 @@ def calculate_f_squared_mean( norm_elemental_abundances = normalize_composition(formula) res = 0 for key, value in norm_elemental_abundances.items(): - res += ( - value * - calculate_coherent_scattering_factor(key, Q) ** 2 - ) + res += value * calculate_coherent_scattering_factor(key, Q) ** 2 return res @@ -280,11 +288,8 @@ def calculate_f_mean_squared( norm_elemental_abundances = normalize_composition(formula) res = 0 for key, value in norm_elemental_abundances.items(): - res += ( - value * - calculate_coherent_scattering_factor(key, Q) - ) - return res ** 2 + res += value * calculate_coherent_scattering_factor(key, Q) + return res**2 def calculate_incoherent_scattering( @@ -296,12 +301,8 @@ def calculate_incoherent_scattering( return np.zeros_like(Q) formula = interpret_formula(composition) - norm_elemental_abundances = normalize_composition( - formula) + norm_elemental_abundances = normalize_composition(formula) res = 0 for key, value in norm_elemental_abundances.items(): - res += ( - value * - calculate_incoherent_scattering_factor(key, Q) - ) ** 2 + res += (value * calculate_incoherent_scattering_factor(key, Q)) ** 2 return res diff --git a/hexrd/core/matrixutil.py b/hexrd/core/matrixutil.py index f94c1b048..a5746f73c 100644 --- a/hexrd/core/matrixutil.py +++ b/hexrd/core/matrixutil.py @@ -37,15 +37,15 @@ from hexrd.core import constants # module variables -sqr6i = 1./np.sqrt(6.) -sqr3i = 1./np.sqrt(3.) -sqr2i = 1./np.sqrt(2.) -sqr2 = np.sqrt(2.) -sqr3 = np.sqrt(3.) -sqr2b3 = np.sqrt(2./3.) 
+sqr6i = 1.0 / np.sqrt(6.0) +sqr3i = 1.0 / np.sqrt(3.0) +sqr2i = 1.0 / np.sqrt(2.0) +sqr2 = np.sqrt(2.0) +sqr3 = np.sqrt(3.0) +sqr2b3 = np.sqrt(2.0 / 3.0) fpTol = constants.epsf # 2.220446049250313e-16 -vTol = 100*fpTol +vTol = 100 * fpTol def columnNorm(a): @@ -78,26 +78,27 @@ def unitVector(a): """ normalize array of column vectors (hstacked, axis = 0) """ - assert a.ndim in [1, 2], \ - "incorrect arg shape; must be 1-d or 2-d, yours is %d-d" % (a.ndim) + assert a.ndim in [ + 1, + 2, + ], "incorrect arg shape; must be 1-d or 2-d, yours is %d-d" % (a.ndim) ztol = constants.ten_epsf m = a.shape[0] - nrm = np.tile(np.sqrt(np.sum(np.asarray(a)**2, axis=0)), (m, 1)) + nrm = np.tile(np.sqrt(np.sum(np.asarray(a) ** 2, axis=0)), (m, 1)) # prevent divide by zero nrm[nrm <= ztol] = 1.0 - return a/nrm + return a / nrm def nullSpace(A, tol=vTol): """ computes the null space of the real matrix A """ - assert A.ndim == 2, \ - 'input must be 2-d; yours is %d-d' % (A.ndim) + assert A.ndim == 2, 'input must be 2-d; yours is %d-d' % (A.ndim) n, m = A.shape @@ -108,7 +109,7 @@ def nullSpace(A, tol=vTol): S = np.hstack([S, np.zeros(m - n)]) - null_mask = (S <= tol) + null_mask = S <= tol null_space = V[null_mask, :] return null_space @@ -135,10 +136,10 @@ def blockSparseOfMatArray(matArray): m = matArray.shape[1] n = matArray.shape[2] - mn = m*n - jmax = p*n - imax = p*m - ntot = p*m*n + mn = m * n + jmax = p * n + imax = p * m + ntot = p * m * n rl = np.arange(p) rm = np.arange(m) @@ -146,8 +147,9 @@ def blockSparseOfMatArray(matArray): sij = matArray.transpose(0, 2, 1).reshape(1, ntot).squeeze() j = np.reshape(np.tile(rjmax, (m, 1)).T, (1, ntot)) - i = np.reshape(np.tile(rm, (1, jmax)), (1, ntot)) + \ - np.reshape(np.tile(m*rl, (mn, 1)).T, (1, ntot)) + i = np.reshape(np.tile(rm, (1, jmax)), (1, ntot)) + np.reshape( + np.tile(m * rl, (mn, 1)).T, (1, ntot) + ) ij = np.concatenate((i, j), axis=0) @@ -166,7 +168,7 @@ def symmToVecMV(A, scale=True): if scale: fac = sqr2 else: - fac = 1. + fac = 1.0 mvvec = np.zeros(6, dtype='float64') mvvec[0] = A[0, 0] mvvec[1] = A[1, 1] @@ -186,7 +188,7 @@ def vecMVToSymm(A, scale=True): if scale: fac = sqr2 else: - fac = 1. 
+ fac = 1.0 symm_mat = np.zeros((3, 3), dtype='float64') symm_mat[0, 0] = A[0] symm_mat[1, 1] = A[1] @@ -282,13 +284,15 @@ def nrmlProjOfVecMV(vec): n = unitVector(vec) nmat = np.array( - [n[0, :]**2, - n[1, :]**2, - n[2, :]**2, - sqr2 * n[1, :] * n[2, :], - sqr2 * n[0, :] * n[2, :], - sqr2 * n[0, :] * n[1, :]], - dtype='float64' + [ + n[0, :] ** 2, + n[1, :] ** 2, + n[2, :] ** 2, + sqr2 * n[1, :] * n[2, :], + sqr2 * n[0, :] * n[2, :], + sqr2 * n[0, :] * n[1, :], + ], + dtype='float64', ) return nmat.T @@ -369,7 +373,7 @@ def skew(A): else: raise RuntimeError("this function only works for square arrays") - return np.squeeze(0.5*(A - A.transpose(0, 2, 1))) + return np.squeeze(0.5 * (A - A.transpose(0, 2, 1))) def symm(A): @@ -396,7 +400,7 @@ def symm(A): else: raise RuntimeError("this function only works for square arrays") - return np.squeeze(0.5*(A + A.transpose(0, 2, 1))) + return np.squeeze(0.5 * (A + A.transpose(0, 2, 1))) def skewMatrixOfVector(w): @@ -425,21 +429,11 @@ def skewMatrixOfVector(w): else: stackdim = w.shape[1] else: - raise RuntimeError( - 'input is incorrect shape; expecting ndim = 1 or 2' - ) + raise RuntimeError('input is incorrect shape; expecting ndim = 1 or 2') zs = np.zeros((1, stackdim), dtype='float64') W = np.vstack( - [zs, - -w[2, :], - w[1, :], - w[2, :], - zs, - -w[0, :], - -w[1, :], - w[0, :], - zs] + [zs, -w[2, :], w[1, :], w[2, :], zs, -w[0, :], -w[1, :], w[0, :], zs] ) return np.squeeze(np.reshape(W.T, (stackdim, 3, 3))) @@ -598,7 +592,7 @@ def findDuplicateVectors_old(vec, tol=vTol, equivPM=False): if not equivPM: diff = abs(tvec - dupl.T).sum(0) - match = abs(diff[1:]) <= tol # logical to find duplicates + match = abs(diff[1:]) <= tol # logical to find duplicates else: diffn = abs(tvec - dupl.T).sum(0) matchn = abs(diffn[1:]) <= tol @@ -606,7 +600,7 @@ def findDuplicateVectors_old(vec, tol=vTol, equivPM=False): matchp = abs(diffp[1:]) <= tol match = matchn + matchp - kick = np.hstack([True, match]) # pick self too + kick = np.hstack([True, match]) # pick self too if kick.sum() > 1: eqv += [torid[kick].tolist()] @@ -643,6 +637,7 @@ def findDuplicateVectors_old(vec, tol=vTol, equivPM=False): return eqv, uid + def findDuplicateVectors(vec, tol=vTol, equivPM=False): eqv = _findduplicatevectors(vec, tol, equivPM) uid = np.arange(0, vec.shape[1], dtype=np.int64) @@ -708,18 +703,18 @@ def _findduplicatevectors(vec, tol, equivPM): for ii in range(m): ctr = 0 - eqv_elem = np.zeros((m, ), dtype=np.int64) - for jj in range(ii+1, m): + eqv_elem = np.zeros((m,), dtype=np.int64) + for jj in range(ii + 1, m): if not jj in eqv_elem_master: if equivPM: - diff = np.sum(np.abs(vec[:, ii]-vec2[:, jj])) - diff2 = np.sum(np.abs(vec[:, ii]-vec[:, jj])) + diff = np.sum(np.abs(vec[:, ii] - vec2[:, jj])) + diff2 = np.sum(np.abs(vec[:, ii] - vec[:, jj])) if diff < tol or diff2 < tol: eqv_elem[ctr] = jj eqv_elem_master.append(jj) ctr += 1 else: - diff = np.sum(np.abs(vec[:, ii]-vec[:, jj])) + diff = np.sum(np.abs(vec[:, ii] - vec[:, jj])) if diff < tol: eqv_elem[ctr] = jj eqv_elem_master.append(jj) @@ -746,9 +741,9 @@ def strainTenToVec(strainTen): strainVec[0] = strainTen[0, 0] strainVec[1] = strainTen[1, 1] strainVec[2] = strainTen[2, 2] - strainVec[3] = 2*strainTen[1, 2] - strainVec[4] = 2*strainTen[0, 2] - strainVec[5] = 2*strainTen[0, 1] + strainVec[3] = 2 * strainTen[1, 2] + strainVec[4] = 2 * strainTen[0, 2] + strainVec[5] = 2 * strainTen[0, 1] strainVec = np.atleast_2d(strainVec).T return strainVec @@ -758,12 +753,12 @@ def strainVecToTen(strainVec): strainTen[0, 0] = 
strainVec[0] strainTen[1, 1] = strainVec[1] strainTen[2, 2] = strainVec[2] - strainTen[1, 2] = strainVec[3] / 2. - strainTen[0, 2] = strainVec[4] / 2. - strainTen[0, 1] = strainVec[5] / 2. - strainTen[2, 1] = strainVec[3] / 2. - strainTen[2, 0] = strainVec[4] / 2. - strainTen[1, 0] = strainVec[5] / 2. + strainTen[1, 2] = strainVec[3] / 2.0 + strainTen[0, 2] = strainVec[4] / 2.0 + strainTen[0, 1] = strainVec[5] / 2.0 + strainTen[2, 1] = strainVec[3] / 2.0 + strainTen[2, 0] = strainVec[4] / 2.0 + strainTen[1, 0] = strainVec[5] / 2.0 return strainTen @@ -802,13 +797,13 @@ def ale3dStrainOutToV(vecds): """ eps = np.zeros([3, 3], dtype='float64') # Akk_by_3 = sqr3i * vecds[5] # -p - a = np.exp(vecds[5])**(1./3.) # -p - t1 = sqr2i*vecds[0] - t2 = sqr6i*vecds[1] + a = np.exp(vecds[5]) ** (1.0 / 3.0) # -p + t1 = sqr2i * vecds[0] + t2 = sqr6i * vecds[1] eps[0, 0] = t1 - t2 eps[1, 1] = -t1 - t2 - eps[2, 2] = sqr2b3*vecds[1] + eps[2, 2] = sqr2b3 * vecds[1] eps[1, 0] = vecds[2] * sqr2i eps[2, 0] = vecds[3] * sqr2i eps[2, 1] = vecds[4] * sqr2i @@ -817,10 +812,10 @@ def ale3dStrainOutToV(vecds): eps[0, 2] = eps[2, 0] eps[1, 2] = eps[2, 1] - epstar = eps/a + epstar = eps / a - V = (constants.identity_3x3 + epstar)*a - Vinv = (constants.identity_3x3 - epstar)/a + V = (constants.identity_3x3 + epstar) * a + Vinv = (constants.identity_3x3 - epstar) / a return V, Vinv @@ -829,12 +824,12 @@ def vecdsToSymm(vecds): """convert from vecds representation to symmetry matrix""" A = np.zeros([3, 3], dtype='float64') Akk_by_3 = sqr3i * vecds[5] # -p - t1 = sqr2i*vecds[0] - t2 = sqr6i*vecds[1] + t1 = sqr2i * vecds[0] + t2 = sqr6i * vecds[1] A[0, 0] = t1 - t2 + Akk_by_3 A[1, 1] = -t1 - t2 + Akk_by_3 - A[2, 2] = sqr2b3*vecds[1] + Akk_by_3 + A[2, 2] = sqr2b3 * vecds[1] + Akk_by_3 A[1, 0] = vecds[2] * sqr2i A[2, 0] = vecds[3] * sqr2i A[2, 1] = vecds[4] * sqr2i @@ -857,7 +852,7 @@ def symmToVecds(A): """convert from symmetry matrix to vecds representation""" vecds = np.zeros(6, dtype='float64') vecds[0] = sqr2i * (A[0, 0] - A[1, 1]) - vecds[1] = sqr6i * (2. * A[2, 2] - A[0, 0] - A[1, 1]) + vecds[1] = sqr6i * (2.0 * A[2, 2] - A[0, 0] - A[1, 1]) vecds[2] = sqr2 * A[1, 0] vecds[3] = sqr2 * A[2, 0] vecds[4] = sqr2 * A[2, 1] @@ -902,15 +897,16 @@ def solve_wahba(v, w, weights=None): # compute weighted outer product sum B = np.zeros((3, 3)) for i in range(n_vecs): - B += weights[i]*np.dot(w[i].reshape(3, 1), v[i].reshape(1, 3)) + B += weights[i] * np.dot(w[i].reshape(3, 1), v[i].reshape(1, 3)) # compute svd Us, _, VsT = svd(B) # form diagonal matrix for solution - M = np.diag([1., 1., np.linalg.det(Us)*np.linalg.det(VsT)]) + M = np.diag([1.0, 1.0, np.linalg.det(Us) * np.linalg.det(VsT)]) return np.dot(Us, np.dot(M, VsT)) + # ============================================================================= # Numba-fied frame cache writer # ============================================================================= diff --git a/hexrd/core/projections/spherical.py b/hexrd/core/projections/spherical.py index c98c2b704..0ca57acdd 100644 --- a/hexrd/core/projections/spherical.py +++ b/hexrd/core/projections/spherical.py @@ -2,6 +2,7 @@ from skimage.transform import PiecewiseAffineTransform, warp from hexrd.core import constants + # TODO: Resolve extra-core-dependency from hexrd.hedm.xrdutil.utils import zproject_sph_angles @@ -10,12 +11,18 @@ class SphericalView: """ Creates a spherical mapping of detector images. """ + MAPPING_TYPES = ('stereographic', 'equal-area') VECTOR_TYPES = ('d', 'q') - PROJ_IMG_DIM = 3. 
# 2*np.sqrt(2) rounded up - - def __init__(self, mapping='stereographic', vector_type='d', - output_dim=512, rmat=constants.identity_3x3): + PROJ_IMG_DIM = 3.0 # 2*np.sqrt(2) rounded up + + def __init__( + self, + mapping='stereographic', + vector_type='d', + output_dim=512, + rmat=constants.identity_3x3, + ): self._mapping = mapping self._vector_type = vector_type @@ -62,8 +69,10 @@ def rmat(self): def rmat(self, x): x = np.atleast_2d(x) assert x.shape == (3, 3), "rmat must be (3, 3)" - assert np.linalg.norm(np.dot(x.T, x) - constants.identity_3x3) \ - < constants.ten_epsf, "input matrix is not orthogonal" + assert ( + np.linalg.norm(np.dot(x.T, x) - constants.identity_3x3) + < constants.ten_epsf + ), "input matrix is not orthogonal" self._rmat = x def warp_eta_ome_map(self, eta_ome, map_ids=None, skip=10): @@ -80,14 +89,14 @@ def warp_eta_ome_map(self, eta_ome, map_ids=None, skip=10): etas = eta_ome.etas[::skip] # make grid of angular values - op, ep = np.meshgrid(omes, - etas, - indexing='ij') + op, ep = np.meshgrid(omes, etas, indexing='ij') # make grid of output pixel values - oc, ec = np.meshgrid(np.arange(nrows_in)[::skip], - np.arange(ncols_in)[::skip], - indexing='ij') + oc, ec = np.meshgrid( + np.arange(nrows_in)[::skip], + np.arange(ncols_in)[::skip], + indexing='ij', + ) ps = self.PROJ_IMG_DIM / self.output_dim # output pixel size @@ -99,30 +108,45 @@ def warp_eta_ome_map(self, eta_ome, map_ids=None, skip=10): img = eta_ome.dataStore[map_id] # ??? do we need to use iHKLlist? - angs = np.vstack([ - tths[map_id]*np.ones_like(ep.flatten()), - ep.flatten(), - op.flatten() - ]).T + angs = np.vstack( + [ + tths[map_id] * np.ones_like(ep.flatten()), + ep.flatten(), + op.flatten(), + ] + ).T ppts, nmask = zproject_sph_angles( - angs, method=self.mapping, source=self.vector_type, - invert_z=self.invert_z, use_mask=True + angs, + method=self.mapping, + source=self.vector_type, + invert_z=self.invert_z, + use_mask=True, ) # pixel coords in output image - rp = 0.5*self.output_dim - ppts[:, 1]/ps - cp = ppts[:, 0]/ps + 0.5*self.output_dim + rp = 0.5 * self.output_dim - ppts[:, 1] / ps + cp = ppts[:, 0] / ps + 0.5 * self.output_dim # compute piecewise affine transform - src = np.vstack([ec.flatten(), oc.flatten(), ]).T - dst = np.vstack([cp.flatten(), rp.flatten(), ]).T + src = np.vstack( + [ + ec.flatten(), + oc.flatten(), + ] + ).T + dst = np.vstack( + [ + cp.flatten(), + rp.flatten(), + ] + ).T paxf.estimate(src, dst) wimg = warp( img, inverse_map=paxf.inverse, - output_shape=(self.output_dim, self.output_dim) + output_shape=(self.output_dim, self.output_dim), ) if len(map_ids) == 1: return wimg @@ -145,38 +169,49 @@ def warp_polar_image(self, pimg, skip=10): tth_cen = np.array(pimg['tth_coordinates'])[0, :] eta_cen = np.array(pimg['eta_coordinates'])[:, 0] - tp, ep = np.meshgrid(tth_cen[::skip], - eta_cen[::skip]) - tc, ec = np.meshgrid(np.arange(ncols_in)[::skip], - np.arange(nrows_in)[::skip]) + tp, ep = np.meshgrid(tth_cen[::skip], eta_cen[::skip]) + tc, ec = np.meshgrid( + np.arange(ncols_in)[::skip], np.arange(nrows_in)[::skip] + ) op = np.zeros_like(tp.flatten()) angs = np.radians( - np.vstack([tp.flatten(), - ep.flatten(), - op.flatten()]).T + np.vstack([tp.flatten(), ep.flatten(), op.flatten()]).T ) ppts = zproject_sph_angles( - angs, method='stereographic', source='d', invert_z=self.invert_z, - rmat=self.rmat + angs, + method='stereographic', + source='d', + invert_z=self.invert_z, + rmat=self.rmat, ) # output pixel size ps = self.PROJ_IMG_DIM / self.output_dim # pixel coords in 
output image - rp = 0.5*self.output_dim - ppts[:, 1]/ps - cp = ppts[:, 0]/ps + 0.5*self.output_dim - - src = np.vstack([tc.flatten(), ec.flatten(), ]).T - dst = np.vstack([cp.flatten(), rp.flatten(), ]).T + rp = 0.5 * self.output_dim - ppts[:, 1] / ps + cp = ppts[:, 0] / ps + 0.5 * self.output_dim + + src = np.vstack( + [ + tc.flatten(), + ec.flatten(), + ] + ).T + dst = np.vstack( + [ + cp.flatten(), + rp.flatten(), + ] + ).T paxf.estimate(src, dst) wimg = warp( img, inverse_map=paxf.inverse, - output_shape=(self.output_dim, self.output_dim) + output_shape=(self.output_dim, self.output_dim), ) return wimg diff --git a/hexrd/core/rotations.py b/hexrd/core/rotations.py index 582cdefac..8e8fc4c27 100644 --- a/hexrd/core/rotations.py +++ b/hexrd/core/rotations.py @@ -37,7 +37,13 @@ from hexrd.core.deprecation import deprecated from hexrd.core import constants as cnst -from hexrd.core.matrixutil import columnNorm, unitVector, findDuplicateVectors, multMatArray, nullSpace +from hexrd.core.matrixutil import ( + columnNorm, + unitVector, + findDuplicateVectors, + multMatArray, + nullSpace, +) from hexrd.core.utils.warnings import ignore_warnings @@ -89,6 +95,7 @@ def arccosSafe(cosines): raise RuntimeError("unrecoverable error") return np.arccos(np.clip(cosines, -1.0, 1.0)) + # # ==================== Quaternions # @@ -121,7 +128,7 @@ def fixQuat(q): qfix = unitVector(q) - q0negative = qfix[0, ] < 0 + q0negative = qfix[0,] < 0 qfix[:, q0negative] = -1 * qfix[:, q0negative] if qdims == 3: @@ -310,15 +317,47 @@ def quatProductMatrix(quats, mult='right'): q2 = quats[2, :].copy() q3 = quats[3, :].copy() if mult == 'right': - qmats = np.array([[q0], [q1], [q2], [q3], - [-q1], [q0], [-q3], [q2], - [-q2], [q3], [q0], [-q1], - [-q3], [-q2], [q1], [q0]]) + qmats = np.array( + [ + [q0], + [q1], + [q2], + [q3], + [-q1], + [q0], + [-q3], + [q2], + [-q2], + [q3], + [q0], + [-q1], + [-q3], + [-q2], + [q1], + [q0], + ] + ) elif mult == 'left': - qmats = np.array([[q0], [q1], [q2], [q3], - [-q1], [q0], [q3], [-q2], - [-q2], [-q3], [q0], [q1], - [-q3], [q2], [-q1], [q0]]) + qmats = np.array( + [ + [q0], + [q1], + [q2], + [q3], + [-q1], + [q0], + [q3], + [-q2], + [-q2], + [-q3], + [q0], + [q1], + [-q3], + [q2], + [-q1], + [q0], + ] + ) # some fancy reshuffling... 
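# For reference while reading the reshuffle below: these 4x4 blocks
# linearize the Hamilton product for scalar-first quaternions
# (q0, q1, q2, q3). An illustrative sketch of the product itself,
# not hexrd's API:

import numpy as np

def quat_product(p, q):
    """Hamilton product p * q, scalar component first."""
    p0, p1, p2, p3 = p
    q0, q1, q2, q3 = q
    return np.array([
        p0 * q0 - p1 * q1 - p2 * q2 - p3 * q3,
        p0 * q1 + p1 * q0 + p2 * q3 - p3 * q2,
        p0 * q2 - p1 * q3 + p2 * q0 + p3 * q1,
        p0 * q3 + p1 * q2 - p2 * q1 + p3 * q0,
    ])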
    qmats = qmats.T.reshape((nq, 4, 4)).transpose(0, 2, 1)

     return qmats

@@ -631,8 +670,7 @@ def angleAxisOfRotMat(rot_mat):
     else:
         raise RuntimeError(
             "rot_mat array must be (3, 3) or (n, 3, 3); "
-            "input has dimension %d"
-            % (rdim)
+            "input has dimension %d" % (rdim)
         )

     rot_vec = R.from_matrix(rot_mat).as_rotvec()

@@ -1089,9 +1127,7 @@ def mapAngle(ang, ang_range=None, units=angularUnits):
     elif units.lower() == 'radians':
         period = 2.0 * np.pi
     else:
-        raise RuntimeError(
-            "unknown angular units: " + units
-        )
+        raise RuntimeError("unknown angular units: " + units)

     ang = np.nan_to_num(np.atleast_1d(np.float_(ang)))

@@ -1105,7 +1141,7 @@ def mapAngle(ang, ang_range=None, units=angularUnits):
         min_val = ang_range.min()
         max_val = ang_range.max()

-        if not np.allclose(max_val-min_val, period):
+        if not np.allclose(max_val - min_val, period):
             raise RuntimeError('range is incomplete!')

         val = np.mod(ang - min_val, max_val - min_val) + min_val

@@ -1498,8 +1534,8 @@ def quatOfLaueGroup(tag):
             + "Oh, and have a great day ;-)"
         )

-    angle = angleAxis[0, ]
-    axis = angleAxis[1:, ]
+    angle = angleAxis[0,]
+    axis = angleAxis[1:,]

     # Note: Axis does not need to be normalized in call to quatOfAngleAxis
     # 05/01/2014 JVB -- made output a contiguous C-ordered array
diff --git a/hexrd/core/transforms/__init__.py b/hexrd/core/transforms/__init__.py
index a39f26377..12ff60c85 100644
--- a/hexrd/core/transforms/__init__.py
+++ b/hexrd/core/transforms/__init__.py
@@ -1,29 +1,28 @@
 # ============================================================
-# Copyright (c) 2012, Lawrence Livermore National Security, LLC. 
-# Produced at the Lawrence Livermore National Laboratory. 
-# Written by Joel Bernier and others. 
-# LLNL-CODE-529294. 
+# Copyright (c) 2012, Lawrence Livermore National Security, LLC.
+# Produced at the Lawrence Livermore National Laboratory.
+# Written by Joel Bernier and others.
+# LLNL-CODE-529294.
 # All rights reserved.
-# 
+#
 # This file is part of HEXRD. For details on dowloading the source,
 # see the file COPYING.
-# 
+#
 # Please also see the file LICENSE.
-# 
+#
 # This program is free software; you can redistribute it and/or modify it under the
 # terms of the GNU Lesser General Public License (as published by the Free Software
 # Foundation) version 2.1 dated February 1999.
-# 
+#
 # This program is distributed in the hope that it will be useful, but
-# WITHOUT ANY WARRANTY; without even the IMPLIED WARRANTY OF MERCHANTABILITY 
-# or FITNESS FOR A PARTICULAR PURPOSE. See the terms and conditions of the 
+# WITHOUT ANY WARRANTY; without even the IMPLIED WARRANTY OF MERCHANTABILITY
+# or FITNESS FOR A PARTICULAR PURPOSE. See the terms and conditions of the
 # GNU General Public License for more details.
-# 
+#
 # You should have received a copy of the GNU Lesser General Public
 # License along with this program (see file LICENSE); if not, write to
 # the Free Software Foundation, Inc., 59 Temple Place, Suite 330,
 # Boston, MA 02111-1307 USA or visit <http://www.gnu.org/licenses/>.
 # ============================================================
-"""Tools or X-ray diffraction analysis
-"""
+"""Tools for X-ray diffraction analysis"""
 from . import xfcapi
diff --git a/hexrd/core/transforms/new_capi/reference.py b/hexrd/core/transforms/new_capi/reference.py
index e90e49e06..36dfe681e 100644
--- a/hexrd/core/transforms/new_capi/reference.py
+++ b/hexrd/core/transforms/new_capi/reference.py
@@ -4,6 +4,7 @@
 # easily testable and clear. They will be use to test against in unit tests.
 # They may be slow and not vectorized.
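# A usage sketch for the reference routine below (illustrative only;
# assumes the plane is packed as (nx, ny, nz, D) with a unit normal,
# so points x on the plane satisfy n . x = D):

import numpy as np

ro = np.array([0.0, 0.0, -10.0])      # ray origin
rv = np.array([0.0, 0.0, 1.0])        # ray direction
p = np.array([0.0, 0.0, 1.0, 2.0])    # the plane z = 2
t = (p[3] - p[:3] @ ro) / (p[:3] @ rv)   # parameter along the ray: 12.0
hit = ro + t * rv                     # (0, 0, 2), a point on the plane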
+ def intersect_ray_plane(ro, rv, p): ''' ray-plane intersection @@ -51,6 +52,6 @@ def intersect_ray_plane(ro, rv, p): # the behavior of the function actually relies in IEEE754 with a division # by 0 generating the appropriate infinity, or a NAN if it is a 0/0. with np.errstate(divide='ignore', invalid='ignore'): - t = (D - normal@ro)/(normal@rv) + t = (D - normal @ ro) / (normal @ rv) return t diff --git a/hexrd/core/transforms/old_xfcapi.py b/hexrd/core/transforms/old_xfcapi.py index 3179fd640..220094c68 100644 --- a/hexrd/core/transforms/old_xfcapi.py +++ b/hexrd/core/transforms/old_xfcapi.py @@ -30,27 +30,28 @@ import sys from hexrd.core.extensions import _transforms_CAPI + # Imports so that others can import from this module from hexrd.core.rotations import mapAngle from hexrd.core.matrixutil import columnNorm, rowNorm # ###################################################################### # Module Data -epsf = np.finfo(float).eps # ~2.2e-16 -ten_epsf = 10 * epsf # ~2.2e-15 -sqrt_epsf = np.sqrt(epsf) # ~1.5e-8 +epsf = np.finfo(float).eps # ~2.2e-16 +ten_epsf = 10 * epsf # ~2.2e-15 +sqrt_epsf = np.sqrt(epsf) # ~1.5e-8 -periodDict = {'degrees': 360.0, 'radians': 2*np.pi} -angularUnits = 'radians' # module-level angle units +periodDict = {'degrees': 360.0, 'radians': 2 * np.pi} +angularUnits = 'radians' # module-level angle units # basis vectors -I3 = np.eye(3) # (3, 3) identity -Xl = np.ascontiguousarray(I3[:, 0].reshape(3, 1)) # X in the lab frame -Yl = np.ascontiguousarray(I3[:, 1].reshape(3, 1)) # Y in the lab frame -Zl = np.ascontiguousarray(I3[:, 2].reshape(3, 1)) # Z in the lab frame +I3 = np.eye(3) # (3, 3) identity +Xl = np.ascontiguousarray(I3[:, 0].reshape(3, 1)) # X in the lab frame +Yl = np.ascontiguousarray(I3[:, 1].reshape(3, 1)) # Y in the lab frame +Zl = np.ascontiguousarray(I3[:, 2].reshape(3, 1)) # Z in the lab frame # reference stretch -vInv_ref = np.array([[1., 1., 1., 0., 0., 0.]], order='C').T +vInv_ref = np.array([[1.0, 1.0, 1.0, 0.0, 0.0, 0.0]], order='C').T # reference beam direction and eta=0 ref in LAB FRAME for standard geometry bVec_ref = -Zl @@ -60,7 +61,7 @@ # Funtions -def anglesToGVec(angs, bHat_l=bVec_ref, eHat_l=eta_ref, chi=0., rMat_c=I3): +def anglesToGVec(angs, bHat_l=bVec_ref, eHat_l=eta_ref, chi=0.0, rMat_c=I3): """ from 'eta' frame out to lab (with handy kwargs to go to crystal or sample) @@ -72,12 +73,10 @@ def anglesToGVec(angs, bHat_l=bVec_ref, eHat_l=eta_ref, chi=0., rMat_c=I3): eHat_l = np.ascontiguousarray(eHat_l.flatten()) rMat_c = np.ascontiguousarray(rMat_c) chi = float(chi) - return _transforms_CAPI.anglesToGVec(angs, - bHat_l, eHat_l, - chi, rMat_c) + return _transforms_CAPI.anglesToGVec(angs, bHat_l, eHat_l, chi, rMat_c) -def anglesToDVec(angs, bHat_l=bVec_ref, eHat_l=eta_ref, chi=0., rMat_c=I3): +def anglesToDVec(angs, bHat_l=bVec_ref, eHat_l=eta_ref, chi=0.0, rMat_c=I3): """ from 'eta' frame out to lab (with handy kwargs to go to crystal or sample) @@ -89,9 +88,7 @@ def anglesToDVec(angs, bHat_l=bVec_ref, eHat_l=eta_ref, chi=0., rMat_c=I3): eHat_l = np.ascontiguousarray(eHat_l.flatten()) rMat_c = np.ascontiguousarray(rMat_c) chi = float(chi) - return _transforms_CAPI.anglesToDVec(angs, - bHat_l, eHat_l, - chi, rMat_c) + return _transforms_CAPI.anglesToDVec(angs, bHat_l, eHat_l, chi, rMat_c) def makeGVector(hkl, bMat): @@ -117,10 +114,9 @@ def makeGVector(hkl, bMat): return unitRowVector(np.dot(bMat, hkl)) -def gvecToDetectorXY(gVec_c, - rMat_d, rMat_s, rMat_c, - tVec_d, tVec_s, tVec_c, - beamVec=bVec_ref): +def gvecToDetectorXY( + 
gVec_c, rMat_d, rMat_s, rMat_c, tVec_d, tVec_s, tVec_c, beamVec=bVec_ref +): """ Takes a list of unit reciprocal lattice vectors in crystal frame to the specified detector-relative frame, subject to the conditions: @@ -156,16 +152,14 @@ def gvecToDetectorXY(gVec_c, tVec_s = np.ascontiguousarray(tVec_s.flatten()) tVec_c = np.ascontiguousarray(tVec_c.flatten()) beamVec = np.ascontiguousarray(beamVec.flatten()) - return _transforms_CAPI.gvecToDetectorXY(gVec_c, - rMat_d, rMat_s, rMat_c, - tVec_d, tVec_s, tVec_c, - beamVec) + return _transforms_CAPI.gvecToDetectorXY( + gVec_c, rMat_d, rMat_s, rMat_c, tVec_d, tVec_s, tVec_c, beamVec + ) -def gvecToDetectorXYArray(gVec_c, - rMat_d, rMat_s, rMat_c, - tVec_d, tVec_s, tVec_c, - beamVec=bVec_ref): +def gvecToDetectorXYArray( + gVec_c, rMat_d, rMat_s, rMat_c, tVec_d, tVec_s, tVec_c, beamVec=bVec_ref +): """ Takes a list of unit reciprocal lattice vectors in crystal frame to the specified detector-relative frame, subject to the conditions: @@ -201,16 +195,21 @@ def gvecToDetectorXYArray(gVec_c, tVec_s = np.ascontiguousarray(tVec_s.flatten()) tVec_c = np.ascontiguousarray(tVec_c.flatten()) beamVec = np.ascontiguousarray(beamVec.flatten()) - return _transforms_CAPI.gvecToDetectorXYArray(gVec_c, - rMat_d, rMat_s, rMat_c, - tVec_d, tVec_s, tVec_c, - beamVec) + return _transforms_CAPI.gvecToDetectorXYArray( + gVec_c, rMat_d, rMat_s, rMat_c, tVec_d, tVec_s, tVec_c, beamVec + ) -def detectorXYToGvec(xy_det, - rMat_d, rMat_s, - tVec_d, tVec_s, tVec_c, - beamVec=bVec_ref, etaVec=eta_ref): +def detectorXYToGvec( + xy_det, + rMat_d, + rMat_s, + tVec_d, + tVec_s, + tVec_c, + beamVec=bVec_ref, + etaVec=eta_ref, +): """ Takes a list cartesian (x, y) pairs in the detector coordinates and calculates the associated reciprocal lattice (G) vectors and @@ -249,16 +248,21 @@ def detectorXYToGvec(xy_det, tVec_c = np.ascontiguousarray(tVec_c.flatten()) beamVec = np.ascontiguousarray(beamVec.flatten()) etaVec = np.ascontiguousarray(etaVec.flatten()) - return _transforms_CAPI.detectorXYToGvec(xy_det, - rMat_d, rMat_s, - tVec_d, tVec_s, tVec_c, - beamVec, etaVec) + return _transforms_CAPI.detectorXYToGvec( + xy_det, rMat_d, rMat_s, tVec_d, tVec_s, tVec_c, beamVec, etaVec + ) -def detectorXYToGvecArray(xy_det, - rMat_d, rMat_s, - tVec_d, tVec_s, tVec_c, - beamVec=bVec_ref, etaVec=eta_ref): +def detectorXYToGvecArray( + xy_det, + rMat_d, + rMat_s, + tVec_d, + tVec_s, + tVec_c, + beamVec=bVec_ref, + etaVec=eta_ref, +): """ Takes a list cartesian (x, y) pairs in the detector coordinates and calculates the associated reciprocal lattice (G) vectors and @@ -297,14 +301,21 @@ def detectorXYToGvecArray(xy_det, tVec_c = np.ascontiguousarray(tVec_c.flatten()) beamVec = np.ascontiguousarray(beamVec.flatten()) etaVec = np.ascontiguousarray(etaVec.flatten()) - return _transforms_CAPI.detectorXYToGvec(xy_det, - rMat_d, rMat_s, - tVec_d, tVec_s, tVec_c, - beamVec, etaVec) + return _transforms_CAPI.detectorXYToGvec( + xy_det, rMat_d, rMat_s, tVec_d, tVec_s, tVec_c, beamVec, etaVec + ) -def oscillAnglesOfHKLs(hkls, chi, rMat_c, bMat, wavelength, - vInv=None, beamVec=bVec_ref, etaVec=eta_ref): +def oscillAnglesOfHKLs( + hkls, + chi, + rMat_c, + bMat, + wavelength, + vInv=None, + beamVec=bVec_ref, + etaVec=eta_ref, +): """ Takes a list of unit reciprocal lattice vectors in crystal frame to the specified detector-relative frame, subject to the conditions: @@ -416,8 +427,8 @@ def arccosSafe(temp): print("attempt to take arccos of %s" % temp, file=sys.stderr) raise RuntimeError("unrecoverable 
error") - gte1 = temp >= 1. - lte1 = temp <= -1. + gte1 = temp >= 1.0 + lte1 = temp <= -1.0 temp[gte1] = 1 temp[lte1] = -1 @@ -437,7 +448,7 @@ def angularDifference(angList0, angList1, units=angularUnits): # take difference as arrays diffAngles = np.atleast_1d(angList0) - np.atleast_1d(angList1) - return abs(np.remainder(diffAngles + 0.5*period, period) - 0.5*period) + return abs(np.remainder(diffAngles + 0.5 * period, period) - 0.5 * period) def unitRowVector(vecIn): @@ -447,8 +458,10 @@ def unitRowVector(vecIn): elif vecIn.ndim == 2: return _transforms_CAPI.unitRowVectors(vecIn) else: - assert vecIn.ndim in [1, 2], \ - "arg shape must be 1-d or 2-d, yours is %d-d" % (vecIn.ndim) + assert vecIn.ndim in [ + 1, + 2, + ], "arg shape must be 1-d or 2-d, yours is %d-d" % (vecIn.ndim) def makeDetectorRotMat(tiltAngles): @@ -536,5 +549,6 @@ def homochoricOfQuat(quats): q = np.ascontiguousarray(quats.T) return _transforms_CAPI.homochoricOfQuat(q) + # def rotateVecsAboutAxis(angle, axis, vecs): # return _transforms_CAPI.rotateVecsAboutAxis(angle, axis, vecs) diff --git a/hexrd/core/transforms/xf.py b/hexrd/core/transforms/xf.py index 2941b0f88..32be30ff3 100644 --- a/hexrd/core/transforms/xf.py +++ b/hexrd/core/transforms/xf.py @@ -38,7 +38,12 @@ # Added to not break people importing these methods -from hexrd.core.rotations import mapAngle, quatProductMatrix as quat_product_matrix, arccosSafe, angularDifference +from hexrd.core.rotations import ( + mapAngle, + quatProductMatrix as quat_product_matrix, + arccosSafe, + angularDifference, +) from hexrd.core.matrixutil import columnNorm, rowNorm diff --git a/hexrd/core/transforms/xfcapi.py b/hexrd/core/transforms/xfcapi.py index 8255fc9f5..1e96661b4 100644 --- a/hexrd/core/transforms/xfcapi.py +++ b/hexrd/core/transforms/xfcapi.py @@ -2,7 +2,42 @@ # add and test them. # NOTE: we are only importing what is currently being used in hexrd # and hexrdgui. This is so that we can see clearly what is in use. -from .old_xfcapi import anglesToDVec, anglesToGVec, detectorXYToGvec, gvecToDetectorXY, gvecToDetectorXYArray, oscillAnglesOfHKLs, angularDifference, makeDetectorRotMat, makeEtaFrameRotMat, makeOscillRotMat, makeOscillRotMatArray, makeRotMatOfExpMap, makeRotMatOfQuat, mapAngle, rowNorm, unitRowVector, bVec_ref, eta_ref, Xl, Yl +from .old_xfcapi import ( + anglesToDVec, + anglesToGVec, + detectorXYToGvec, + gvecToDetectorXY, + gvecToDetectorXYArray, + oscillAnglesOfHKLs, + angularDifference, + makeDetectorRotMat, + makeEtaFrameRotMat, + makeOscillRotMat, + makeOscillRotMatArray, + makeRotMatOfExpMap, + makeRotMatOfQuat, + mapAngle, + rowNorm, + unitRowVector, + bVec_ref, + eta_ref, + Xl, + Yl, +) -from .new_capi.xf_new_capi import angles_to_dvec, angles_to_gvec, gvec_to_xy, make_beam_rmat, make_detector_rmat, make_rmat_of_expmap, make_sample_rmat, oscill_angles_of_hkls, quat_distance, rotate_vecs_about_axis, unit_vector, validate_angle_ranges, xy_to_gvec +from .new_capi.xf_new_capi import ( + angles_to_dvec, + angles_to_gvec, + gvec_to_xy, + make_beam_rmat, + make_detector_rmat, + make_rmat_of_expmap, + make_sample_rmat, + oscill_angles_of_hkls, + quat_distance, + rotate_vecs_about_axis, + unit_vector, + validate_angle_ranges, + xy_to_gvec, +) diff --git a/hexrd/core/utils/decorators.py b/hexrd/core/utils/decorators.py index 9916562d7..03cb10ad3 100644 --- a/hexrd/core/utils/decorators.py +++ b/hexrd/core/utils/decorators.py @@ -105,8 +105,7 @@ def convert(x): # Create an sha1 of the data, and throw in a string # and the shape. 
            x = np.ascontiguousarray(x)
-            return ('__type_np.ndarray', x.shape,
-                    xxhash.xxh3_128_hexdigest(x))
+            return ('__type_np.ndarray', x.shape, xxhash.xxh3_128_hexdigest(x))
         elif isinstance(x, (list, tuple)):
             return _make_hashable(x)
         elif isinstance(x, dict):
diff --git a/hexrd/core/utils/hdf5.py b/hexrd/core/utils/hdf5.py
index 954d1af74..e95310169 100644
--- a/hexrd/core/utils/hdf5.py
+++ b/hexrd/core/utils/hdf5.py
@@ -45,8 +45,12 @@ def unwrap_dict_to_h5(grp, d, asattr=False):
         else:
             # probably a string badness
-            if isinstance(item, np.ndarray) and np.issubdtype(item.dtype, 'U'):
-                item = str(item)  # hdf5 files do not support unicode arrays
+            if isinstance(item, np.ndarray) and np.issubdtype(
+                item.dtype, 'U'
+            ):
+                item = str(
+                    item
+                )  # hdf5 files do not support unicode arrays

             grp.create_dataset(key, data=item)

diff --git a/hexrd/core/utils/json.py b/hexrd/core/utils/json.py
index f56e707f1..f11f827ba 100644
--- a/hexrd/core/utils/json.py
+++ b/hexrd/core/utils/json.py
@@ -25,9 +25,7 @@ def default(self, obj):
             np.save(bytes_io, obj, allow_pickle=False)
             data = bytes_io.getvalue()

-            return {
-                ndarray_key: data.decode('raw_unicode_escape')
-            }
+            return {ndarray_key: data.decode('raw_unicode_escape')}

         return super().default(obj)

diff --git a/hexrd/core/utils/multiprocess_generic.py b/hexrd/core/utils/multiprocess_generic.py
index f45a943e5..04a7a526f 100644
--- a/hexrd/core/utils/multiprocess_generic.py
+++ b/hexrd/core/utils/multiprocess_generic.py
@@ -17,8 +17,8 @@ def __init__(self):
         pass

     def _func_queue(self, func, q_in, q_out, *args, **kwargs):
-        """ 
-        Retrive processes from the queue 
+        """
+        Retrieve processes from the queue
         """
         while True:
             pos, var = q_in.get()
@@ -27,13 +27,18 @@ def _func_queue(self, func, q_in, q_out, *args, **kwargs):
             res = func(var, *args, **kwargs)
             q_out.put((pos, res))

-            print("finished azimuthal position #",
-                  pos, "with rwp = ", res[2]*100., "%")
+            print(
+                "finished azimuthal position #",
+                pos,
+                "with rwp = ",
+                res[2] * 100.0,
+                "%",
+            )

         return

     def parallelise_function(self, var, func, *args, **kwargs):
-        """ 
-        Split evaluations of func across processors 
+        """
+        Split evaluations of func across processors
         """
         n = len(var)

@@ -48,9 +53,9 @@ def parallelise_function(self, var, func, *args, **kwargs):

         for i in range(nprocs):
             pass_args = [func, q_in, q_out]

-            p = Process(target=self._func_queue,
-                        args=tuple(pass_args),
-                        kwargs=kwargs)
+            p = Process(
+                target=self._func_queue, args=tuple(pass_args), kwargs=kwargs
+            )

             processes.append(p)

diff --git a/hexrd/core/utils/panel_buffer.py b/hexrd/core/utils/panel_buffer.py
index 192eeb3d5..512f9d3d4 100644
--- a/hexrd/core/utils/panel_buffer.py
+++ b/hexrd/core/utils/panel_buffer.py
@@ -15,18 +15,22 @@ def panel_buffer_as_2d_array(panel: Detector) -> np.ndarray:
     elif panel.panel_buffer.shape == (2,):
         # The two floats are specifying the borders in mm for x and y.
         # Convert to pixel borders. Swap x and y so we have i, j in pixels.
-        borders = np.round([
-            panel.panel_buffer[1] / panel.pixel_size_row,
-            panel.panel_buffer[0] / panel.pixel_size_col,
-        ]).astype(int)
+        borders = np.round(
+            [
+                panel.panel_buffer[1] / panel.pixel_size_row,
+                panel.panel_buffer[0] / panel.pixel_size_col,
+            ]
+        ).astype(int)

         # Convert to array
         panel_buffer = np.zeros(panel.shape, dtype=bool)

         # We can't do `-borders[i]` since that doesn't work for 0,
         # so we must do `panel.shape[i] - borders[i]` instead.
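# The zero-border caveat above, made concrete (a minimal sketch):
# `arr[0:-0]` is `arr[0:0]`, i.e. empty, because -0 == 0, so the
# explicit `shape - border` form is required:

import numpy as np

buf = np.zeros((4, 6), dtype=bool)
borders = [0, 2]
buf[borders[0]:4 - borders[0], borders[1]:6 - borders[1]] = True
# rows are untouched (border 0); columns get a 2-pixel border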
-        panel_buffer[borders[0]:panel.shape[0] - borders[0],
-                     borders[1]:panel.shape[1] - borders[1]] = True
+        panel_buffer[
+            borders[0] : panel.shape[0] - borders[0],
+            borders[1] : panel.shape[1] - borders[1],
+        ] = True
         return panel_buffer
     elif panel.panel_buffer.ndim == 2:
         return panel.panel_buffer
diff --git a/hexrd/core/utils/profiler.py b/hexrd/core/utils/profiler.py
index aeb9076ac..142ddc56c 100644
--- a/hexrd/core/utils/profiler.py
+++ b/hexrd/core/utils/profiler.py
@@ -21,7 +21,6 @@
     pass


-
 def instrument_function(fn_desc):
     """Interpret a record for an instrumented function, and instrument
     accordingly. The record, fn_desc, contains:
@@ -48,7 +47,7 @@ def instrument_function(fn_desc):
     # consume as many as possible with import (ignore last part that is
     # the function name)
     pos = 0
-    for i in range(1, path_parts+1):
+    for i in range(1, path_parts + 1):
         try:
             m = importlib.import_module('.'.join(parts[0:i]))
             pos = i
@@ -71,11 +70,11 @@ def instrument_function(fn_desc):
         warnings.warn('Could not instrument "{0}"'.format(full_name))


-
 def parse_file(filename):
     """Parse a file and instrument the associated functions"""
     try:
         import yaml
+
         with open(filename, 'r') as f:
             cfg = yaml.load(f)

@@ -87,8 +86,10 @@ def parse_file(filename):
         profile_cfg = cfg['profile']
         if 'instrument' in profile_cfg:
             # instrument all
-            [instrument_function(fn_desc) for fn_desc in
-             profile_cfg['instrument']]
+            [
+                instrument_function(fn_desc)
+                for fn_desc in profile_cfg['instrument']
+            ]
     except Exception as e:
         msg = 'Failed to include profile file: {0}'
         warnings.warn(msg.format(filename))
@@ -96,18 +97,17 @@ def parse_file(filename):


 def instrument_all(filenames):
-    """Instrument functions based on a list of profiler configuration files.
-
-    """
+    """Instrument functions based on a list of profiler configuration files."""
     [parse_file(filename) for filename in filenames]
-
+

 def dump_results(args):
     print(" STATS ".center(72, '='))
     fmt = "{2:>14}, {1:>8}, {0:<40}"
     print(fmt.format("FUNCTION", "CALLS", "TIME"))
     fmt = "{2:>14F}, {1:>8}, {0:<40}"
-    sorted_by_time = sorted(nvtx.getstats().iteritems(), key=lambda tup: tup[1][1])
+    sorted_by_time = sorted(
+        nvtx.getstats().items(), key=lambda tup: tup[1][1]
+    )
     for key, val in sorted_by_time:
         print(fmt.format(key, *val))
-
diff --git a/hexrd/core/utils/progressbar.py b/hexrd/core/utils/progressbar.py
index af515b5a1..2f9c8bd66 100644
--- a/hexrd/core/utils/progressbar.py
+++ b/hexrd/core/utils/progressbar.py
@@ -6,12 +6,13 @@

 class ProgressBar(_ProgressBar):
     "overriding the default to delete the progress bar when finished"
+
     def finish(self):
         'Puts the ProgressBar bar in the finished state.'
         self.finished = True
         self.update(self.maxval)
         # clear the progress bar:
-        self.fd.write('\r'+' '*self.term_width+'\r')
+        self.fd.write('\r' + ' ' * self.term_width + '\r')

         if self.signal_set:
             signal.signal(signal.SIGWINCH, signal.SIG_DFL)
diff --git a/hexrd/core/utils/yaml.py b/hexrd/core/utils/yaml.py
index 88555ec41..89f1eff11 100644
--- a/hexrd/core/utils/yaml.py
+++ b/hexrd/core/utils/yaml.py
@@ -11,6 +11,7 @@ class NumpyToNativeDumper(yaml.SafeDumper):
     For instance, np.float128 will raise an error, since
     it cannot be converted to a basic type.
""" + def represent_data(self, data): if isinstance(data, np.ndarray): return self.represent_list(data.tolist()) diff --git a/hexrd/core/valunits.py b/hexrd/core/valunits.py index 50184326d..e0d00dbe7 100644 --- a/hexrd/core/valunits.py +++ b/hexrd/core/valunits.py @@ -58,6 +58,7 @@ class UNames(object): """Units used in this module""" + degrees = 'degrees' radians = 'radians' @@ -72,36 +73,31 @@ class UNames(object): cv_dict = { - (UNames.degrees, UNames.radians): math.pi/180.0, - (UNames.radians, UNames.degrees): 180/math.pi, - - (UNames.m, UNames.mm): 1.0e3, - (UNames.m, UNames.meter): 1.0, + (UNames.degrees, UNames.radians): math.pi / 180.0, + (UNames.radians, UNames.degrees): 180 / math.pi, + (UNames.m, UNames.mm): 1.0e3, + (UNames.m, UNames.meter): 1.0, (UNames.m, UNames.nm): 1.0e9, - (UNames.m, UNames.angstrom): 1.0e10, - - (UNames.meter, UNames.mm): 1.0e3, - (UNames.meter, UNames.m): 1.0, + (UNames.m, UNames.angstrom): 1.0e10, + (UNames.meter, UNames.mm): 1.0e3, + (UNames.meter, UNames.m): 1.0, (UNames.meter, UNames.nm): 1.0e9, - (UNames.meter, UNames.angstrom): 1.0e10, - - (UNames.mm, UNames.m): 1.0e-3, - (UNames.mm, UNames.meter): 1.0e-3, + (UNames.meter, UNames.angstrom): 1.0e10, + (UNames.mm, UNames.m): 1.0e-3, + (UNames.mm, UNames.meter): 1.0e-3, (UNames.mm, UNames.nm): 1.0e6, - (UNames.mm, UNames.angstrom): 1.0e7, - - (UNames.angstrom, UNames.m): 1.0e-10, - (UNames.angstrom, UNames.meter): 1.0e-10, - (UNames.angstrom, UNames.mm): 1.0e-7, + (UNames.mm, UNames.angstrom): 1.0e7, + (UNames.angstrom, UNames.m): 1.0e-10, + (UNames.angstrom, UNames.meter): 1.0e-10, + (UNames.angstrom, UNames.mm): 1.0e-7, (UNames.angstrom, UNames.nm): 1.0e-1, - (UNames.keV, UNames.J): 1.60217646e-16, - (UNames.J, UNames.keV): (1/1.60217646e-16) - } + (UNames.J, UNames.keV): (1 / 1.60217646e-16), +} class valWUnit: - "Value with units""" + "Value with units" "" def __init__(self, name, unitType, value, unit): """Initialization @@ -139,14 +135,15 @@ def __repr__(self): def __mul__(self, other): if isinstance(other, float): - new = valWUnit(self.name, self.uT, self.value*other, self.unit) + new = valWUnit(self.name, self.uT, self.value * other, self.unit) return new elif isinstance(other, valWUnit): - new = valWUnit('%s_times_%s' % (self.name, other.name), - '%s %s' % (self.uT, other.uT), - self.value*other.value, - '(%s)*(%s)' % (self.unit, other.unit) - ) + new = valWUnit( + '%s_times_%s' % (self.name, other.name), + '%s %s' % (self.uT, other.uT), + self.value * other.value, + '(%s)*(%s)' % (self.unit, other.unit), + ) # really need to put in here something to resolve new.uT return new else: @@ -154,12 +151,15 @@ def __mul__(self, other): def __add__(self, other): if isinstance(other, float): - new = valWUnit(self.name, self.uT, - self.value + other, self.unit) + new = valWUnit(self.name, self.uT, self.value + other, self.unit) return new elif isinstance(other, valWUnit): - new = valWUnit(self.name, self.uT, - self.value + other.getVal(self.unit), self.unit) + new = valWUnit( + self.name, + self.uT, + self.value + other.getVal(self.unit), + self.unit, + ) return new else: raise RuntimeError("add with unsupported operand") @@ -169,8 +169,12 @@ def __sub__(self, other): new = valWUnit(self.name, self.uT, self.value - other, self.unit) return new elif isinstance(other, valWUnit): - new = valWUnit(self.name, self.uT, self.value - - other.getVal(self.unit), self.unit) + new = valWUnit( + self.name, + self.uT, + self.value - other.getVal(self.unit), + self.unit, + ) return new else: raise RuntimeError("add 
with unsupported operand") @@ -212,13 +216,15 @@ def _convert(self, toUnit): # from_to = (self.unit, toUnit) try: - return cv_dict[from_to]*self.value - except(KeyError): + return cv_dict[from_to] * self.value + except KeyError: special_case = ('keV', 'angstrom') if from_to == special_case or from_to == special_case[::-1]: return keVToAngstrom(self.value) - raise RuntimeError(f"Unit conversion '{from_to[0]} --> " - + f"{from_to[1]}' not recognized") + raise RuntimeError( + f"Unit conversion '{from_to[0]} --> " + + f"{from_to[1]}' not recognized" + ) def isLength(self): """Return true if quantity is a length""" @@ -332,4 +338,5 @@ def testConversions(): for u in ulist: print((' in ', u, ': ', v.getVal(u))) return + testConversions() diff --git a/hexrd/hed/instrument/__init__.py b/hexrd/hed/instrument/__init__.py new file mode 100644 index 000000000..b5414013c --- /dev/null +++ b/hexrd/hed/instrument/__init__.py @@ -0,0 +1,13 @@ +from .hedm_instrument import ( + calc_angles_from_beam_vec, + calc_beam_vec, + centers_of_edge_vec, + GenerateEtaOmeMaps, + GrainDataWriter, + HEDMInstrument, + max_tth, + switch_xray_source, + unwrap_dict_to_h5, + unwrap_h5_to_dict, +) +from .detector import Detector diff --git a/hexrd/hed/instrument/detector.py b/hexrd/hed/instrument/detector.py index 0c2d89d52..37cb8fcfe 100644 --- a/hexrd/hed/instrument/detector.py +++ b/hexrd/hed/instrument/detector.py @@ -3,7 +3,11 @@ import os from typing import Optional -from hexrd.core.instrument.constants import COATING_DEFAULT, FILTER_DEFAULTS, PHOSPHOR_DEFAULT +from hexrd.core.instrument.constants import ( + COATING_DEFAULT, + FILTER_DEFAULTS, + PHOSPHOR_DEFAULT, +) from hexrd.core.instrument.physics_package import AbstractPhysicsPackage import numpy as np import numba @@ -11,18 +15,29 @@ from hexrd.core import constants as ct from hexrd.core import distortion as distortion_pkg from hexrd.core import matrixutil as mutil + from hexrd.hedm import xrdutil from hexrd.core.rotations import mapAngle from hexrd.core.material import crystallography from hexrd.core.material.crystallography import PlaneData -from hexrd.core.transforms.xfcapi import xy_to_gvec, gvec_to_xy, make_beam_rmat, make_rmat_of_expmap, oscill_angles_of_hkls, angles_to_dvec +from hexrd.core.transforms.xfcapi import ( + xy_to_gvec, + gvec_to_xy, + make_beam_rmat, + make_rmat_of_expmap, + oscill_angles_of_hkls, + angles_to_dvec, +) from hexrd.core.utils.decorators import memoize from hexrd.core.gridutil import cellIndices from hexrd.core.instrument import detector_coatings -from hexrd.core.material.utils import calculate_linear_absorption_length, calculate_incoherent_scattering +from hexrd.core.material.utils import ( + calculate_linear_absorption_length, + calculate_incoherent_scattering, +) distortion_registry = distortion_pkg.Registry() @@ -279,7 +294,8 @@ def __init__( if detector_filter is None: detector_filter = detector_coatings.Filter( - **FILTER_DEFAULTS.TARDIS) + **FILTER_DEFAULTS.TARDIS + ) self.filter = detector_filter if detector_coating is None: @@ -530,8 +546,9 @@ def pixel_coords(self): # METHODS # ========================================================================= - def pixel_Q(self, energy: np.floating, - origin: np.ndarray = ct.zeros_3) -> np.ndarray: + def pixel_Q( + self, energy: np.floating, origin: np.ndarray = ct.zeros_3 + ) -> np.ndarray: '''get the equivalent momentum transfer for the angles. 
@@ -550,7 +567,7 @@ def pixel_Q(self, energy: np.floating,
         '''
         lam = ct.keVToAngstrom(energy)
         tth, _ = self.pixel_angles(origin=origin)
-        return 4.*np.pi*np.sin(tth*0.5)/lam
+        return 4.0 * np.pi * np.sin(tth * 0.5) / lam
 
     def pixel_compton_energy_loss(
         self,
@@ -577,9 +594,9 @@ def pixel_compton_energy_loss(
         '''
         energy = np.asarray(energy)
         tth, _ = self.pixel_angles()
-        ang_fact = (1 - np.cos(tth))
-        beta = energy/ct.cRestmasskeV
-        return energy/(1 + beta*ang_fact)
+        ang_fact = 1 - np.cos(tth)
+        beta = energy / ct.cRestmasskeV
+        return energy / (1 + beta * ang_fact)
 
     def pixel_compton_attenuation_length(
         self,
@@ -628,8 +645,7 @@ def compute_compton_scattering_intensity(
         physics_package: AbstractPhysicsPackage,
         origin: np.array = ct.zeros_3,
     ) -> tuple[np.ndarray, np.ndarray, np.ndarray]:
-
-        ''' compute the theoretical compton scattering
+        '''compute the theoretical compton scattering
         signal on the detector. this value is corrected
         for the transmission of compton scattered photons
         and normalized before being subtracted from the
@@ -652,18 +668,20 @@ def compute_compton_scattering_intensity(
         q = self.pixel_Q(energy)
 
         inc_s = calculate_incoherent_scattering(
-            physics_package.sample_material,
-            q.flatten()).reshape(self.shape)
+            physics_package.sample_material, q.flatten()
+        ).reshape(self.shape)
 
         inc_w = calculate_incoherent_scattering(
-            physics_package.window_material,
-            q.flatten()).reshape(self.shape)
+            physics_package.window_material, q.flatten()
+        ).reshape(self.shape)
 
         t_s = self.calc_compton_physics_package_transmission(
-            energy, rMat_s, physics_package)
+            energy, rMat_s, physics_package
+        )
         t_w = self.calc_compton_window_transmission(
-            energy, rMat_s, physics_package)
+            energy, rMat_s, physics_package
+        )
 
         return inc_s * t_s + inc_w * t_w, t_s, t_w
@@ -1087,9 +1105,14 @@ def interpolate_nearest(self, xy, img, pad_with_nans=True):
             int_xy[on_panel] = int_vals
         return int_xy
 
-    def interpolate_bilinear(self, xy, img, pad_with_nans=True,
-                             clip_to_panel=True,
-                             on_panel: Optional[np.ndarray] = None):
+    def interpolate_bilinear(
+        self,
+        xy,
+        img,
+        pad_with_nans=True,
+        clip_to_panel=True,
+        on_panel: Optional[np.ndarray] = None,
+    ):
         """
         Interpolate an image array at the specified cartesian points.
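Both expressions reformatted above are standard results: the momentum transfer Q = 4*pi*sin(tth/2)/lambda, and the Compton-shifted energy E' = E / (1 + (E/m_e c^2)(1 - cos tth)), with tth the full scattering angle. A standalone numeric check (plain numpy; the two physical constants are inlined here rather than read from hexrd.core.constants):

    import numpy as np

    KEV_ANGSTROM = 12.398419843  # hc in keV * angstrom (approximate)
    REST_MASS_KEV = 510.99895    # electron rest energy (ct.cRestmasskeV)

    energy = 10.0                # keV
    tth = np.radians(90.0)       # full scattering angle

    lam = KEV_ANGSTROM / energy                        # ~1.24 angstrom
    q = 4.0 * np.pi * np.sin(tth * 0.5) / lam          # ~7.17 1/angstrom

    beta = energy / REST_MASS_KEV
    e_prime = energy / (1 + beta * (1 - np.cos(tth)))  # ~9.81 keV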
@@ -1766,19 +1789,23 @@ def increase_memoization_sizes(funcs, min_size): if cache_info['maxsize'] < min_size: f.set_cache_maxsize(min_size) - def calc_physics_package_transmission(self, energy: np.floating, - rMat_s: np.array, - physics_package: AbstractPhysicsPackage) -> np.float64: + def calc_physics_package_transmission( + self, + energy: np.floating, + rMat_s: np.array, + physics_package: AbstractPhysicsPackage, + ) -> np.float64: """get the transmission from the physics package need to consider HED and HEDM samples separately """ bvec = self.bvec - sample_normal = np.dot(rMat_s, [0., 0., np.sign(bvec[2])]) - seca = 1./np.dot(bvec, sample_normal) + sample_normal = np.dot(rMat_s, [0.0, 0.0, np.sign(bvec[2])]) + seca = 1.0 / np.dot(bvec, sample_normal) tth, eta = self.pixel_angles() - angs = np.vstack((tth.flatten(), eta.flatten(), - np.zeros(tth.flatten().shape))).T + angs = np.vstack( + (tth.flatten(), eta.flatten(), np.zeros(tth.flatten().shape)) + ).T dvecs = angles_to_dvec(angs, beam_vec=bvec) @@ -1791,17 +1818,17 @@ def calc_physics_package_transmission(self, energy: np.floating, cosb < 0, np.isclose( cosb, - 0., - atol=5E-2, - ) + 0.0, + atol=5e-2, + ), ) cosb[mask] = np.nan - secb = 1./cosb.reshape(self.shape) + secb = 1.0 / cosb.reshape(self.shape) T_sample = self.calc_transmission_sample( - seca, secb, energy, physics_package) - T_window = self.calc_transmission_window( - secb, energy, physics_package) + seca, secb, energy, physics_package + ) + T_window = self.calc_transmission_window(secb, energy, physics_package) transmission_physics_package = T_sample * T_window return transmission_physics_package @@ -1818,12 +1845,13 @@ def calc_compton_physics_package_transmission( routine than elastically scattered absorption. ''' bvec = self.bvec - sample_normal = np.dot(rMat_s, [0., 0., np.sign(bvec[2])]) - seca = 1./np.dot(bvec, sample_normal) + sample_normal = np.dot(rMat_s, [0.0, 0.0, np.sign(bvec[2])]) + seca = 1.0 / np.dot(bvec, sample_normal) tth, eta = self.pixel_angles() - angs = np.vstack((tth.flatten(), eta.flatten(), - np.zeros(tth.flatten().shape))).T + angs = np.vstack( + (tth.flatten(), eta.flatten(), np.zeros(tth.flatten().shape)) + ).T dvecs = angles_to_dvec(angs, beam_vec=bvec) @@ -1836,18 +1864,19 @@ def calc_compton_physics_package_transmission( cosb < 0, np.isclose( cosb, - 0., - atol=5E-2, - ) + 0.0, + atol=5e-2, + ), ) cosb[mask] = np.nan - secb = 1./cosb.reshape(self.shape) + secb = 1.0 / cosb.reshape(self.shape) T_sample = self.calc_compton_transmission( - seca, secb, energy, - physics_package, 'sample') + seca, secb, energy, physics_package, 'sample' + ) T_window = self.calc_compton_transmission_window( - secb, energy, physics_package) + secb, energy, physics_package + ) return T_sample * T_window @@ -1864,12 +1893,13 @@ def calc_compton_window_transmission( elastically scattered absorption. 
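The transmission routines above share one geometric step: the secants of the beam's entry angle (seca) and of each pixel's exit angle (secb) convert slab thicknesses into path lengths, with grazing or back-facing exit rays masked to NaN. A self-contained sketch of just that masking step (numpy only; the cosb computation itself is elided in the hunk above, so the dot product here is an assumption consistent with the surrounding code, and atol=5e-2 is the tolerance the hunk uses):

    import numpy as np

    def exit_secants(dvecs, sample_normal, atol=5e-2):
        # dvecs: (n, 3) unit scattering directions; sample_normal: (3,)
        cosb = dvecs @ sample_normal        # assumed; not shown in the hunk
        mask = np.logical_or(cosb < 0, np.isclose(cosb, 0.0, atol=atol))
        cosb = cosb.astype(float)
        cosb[mask] = np.nan                 # grazing/back-facing rays
        return 1.0 / cosb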
''' bvec = self.bvec - sample_normal = np.dot(rMat_s, [0., 0., np.sign(bvec[2])]) - seca = 1./np.dot(bvec, sample_normal) + sample_normal = np.dot(rMat_s, [0.0, 0.0, np.sign(bvec[2])]) + seca = 1.0 / np.dot(bvec, sample_normal) tth, eta = self.pixel_angles() - angs = np.vstack((tth.flatten(), eta.flatten(), - np.zeros(tth.flatten().shape))).T + angs = np.vstack( + (tth.flatten(), eta.flatten(), np.zeros(tth.flatten().shape)) + ).T dvecs = angles_to_dvec(angs, beam_vec=bvec) @@ -1882,45 +1912,54 @@ def calc_compton_window_transmission( cosb < 0, np.isclose( cosb, - 0., - atol=5E-2, - ) + 0.0, + atol=5e-2, + ), ) cosb[mask] = np.nan - secb = 1./cosb.reshape(self.shape) + secb = 1.0 / cosb.reshape(self.shape) T_window = self.calc_compton_transmission( - seca, secb, energy, - physics_package, 'window') + seca, secb, energy, physics_package, 'window' + ) T_sample = self.calc_compton_transmission_sample( - seca, energy, physics_package) + seca, energy, physics_package + ) return T_sample * T_window - def calc_transmission_sample(self, seca: np.array, - secb: np.array, energy: np.floating, - physics_package: AbstractPhysicsPackage) -> np.array: + def calc_transmission_sample( + self, + seca: np.array, + secb: np.array, + energy: np.floating, + physics_package: AbstractPhysicsPackage, + ) -> np.array: thickness_s = physics_package.sample_thickness # in microns if np.isclose(thickness_s, 0): return np.ones(self.shape) # in microns^-1 - mu_s = 1./physics_package.sample_absorption_length(energy) - x = (mu_s*thickness_s) - pre = 1./x/(secb - seca) - num = np.exp(-x*seca) - np.exp(-x*secb) + mu_s = 1.0 / physics_package.sample_absorption_length(energy) + x = mu_s * thickness_s + pre = 1.0 / x / (secb - seca) + num = np.exp(-x * seca) - np.exp(-x * secb) return pre * num - def calc_transmission_window(self, secb: np.array, energy: np.floating, - physics_package: AbstractPhysicsPackage) -> np.array: + def calc_transmission_window( + self, + secb: np.array, + energy: np.floating, + physics_package: AbstractPhysicsPackage, + ) -> np.array: material_w = physics_package.window_material thickness_w = physics_package.window_thickness # in microns if material_w is None or np.isclose(thickness_w, 0): return np.ones(self.shape) # in microns^-1 - mu_w = 1./physics_package.window_absorption_length(energy) - return np.exp(-thickness_w*mu_w*secb) + mu_w = 1.0 / physics_package.window_absorption_length(energy) + return np.exp(-thickness_w * mu_w * secb) def calc_compton_transmission( self, @@ -1935,9 +1974,11 @@ def calc_compton_transmission( formula = physics_package.sample_material density = physics_package.sample_density thickness = physics_package.sample_thickness - mu = 1./physics_package.sample_absorption_length(energy) - mu_prime = 1. 
/ self.pixel_compton_attenuation_length( - energy, density, formula, + mu = 1.0 / physics_package.sample_absorption_length(energy) + mu_prime = 1.0 / self.pixel_compton_attenuation_length( + energy, + density, + formula, ) elif pp_layer == 'window': formula = physics_package.window_material @@ -1946,17 +1987,18 @@ def calc_compton_transmission( density = physics_package.window_density thickness = physics_package.window_thickness - mu = 1./physics_package.sample_absorption_length(energy) - mu_prime = 1./self.pixel_compton_attenuation_length( - energy, density, formula) + mu = 1.0 / physics_package.sample_absorption_length(energy) + mu_prime = 1.0 / self.pixel_compton_attenuation_length( + energy, density, formula + ) if thickness <= 0: return np.ones(self.shape) - x1 = mu*thickness*seca - x2 = mu_prime*thickness*secb - num = (np.exp(-x1) - np.exp(-x2)) - return -num/(x1 - x2) + x1 = mu * thickness * seca + x2 = mu_prime * thickness * secb + num = np.exp(-x1) - np.exp(-x2) + return -num / (x1 - x2) def calc_compton_transmission_sample( self, @@ -1966,9 +2008,8 @@ def calc_compton_transmission_sample( ) -> np.ndarray: thickness_s = physics_package.sample_thickness # in microns - mu_s = 1./physics_package.sample_absorption_length( - energy) - return np.exp(-mu_s*thickness_s*seca) + mu_s = 1.0 / physics_package.sample_absorption_length(energy) + return np.exp(-mu_s * thickness_s * seca) def calc_compton_transmission_window( self, @@ -1980,60 +2021,71 @@ def calc_compton_transmission_window( if formula is None: return np.ones(self.shape) - density = physics_package.window_density # in g/cc + density = physics_package.window_density # in g/cc thickness_w = physics_package.window_thickness # in microns - mu_w_prime = 1./self.pixel_compton_attenuation_length( - energy, density, formula) - return np.exp(-mu_w_prime*thickness_w*secb) - - def calc_effective_pinhole_area(self, physics_package: AbstractPhysicsPackage) -> np.array: - """get the effective pinhole area correction - """ - if (np.isclose(physics_package.pinhole_diameter, 0) - or np.isclose(physics_package.pinhole_thickness, 0)): + mu_w_prime = 1.0 / self.pixel_compton_attenuation_length( + energy, density, formula + ) + return np.exp(-mu_w_prime * thickness_w * secb) + + def calc_effective_pinhole_area( + self, physics_package: AbstractPhysicsPackage + ) -> np.array: + """get the effective pinhole area correction""" + if np.isclose(physics_package.pinhole_diameter, 0) or np.isclose( + physics_package.pinhole_thickness, 0 + ): return np.ones(self.shape) - hod = (physics_package.pinhole_thickness / - physics_package.pinhole_diameter) + hod = ( + physics_package.pinhole_thickness + / physics_package.pinhole_diameter + ) bvec = self.bvec tth, eta = self.pixel_angles() - angs = np.vstack((tth.flatten(), eta.flatten(), - np.zeros(tth.flatten().shape))).T + angs = np.vstack( + (tth.flatten(), eta.flatten(), np.zeros(tth.flatten().shape)) + ).T dvecs = angles_to_dvec(angs, beam_vec=bvec) cth = -dvecs[:, 2].reshape(self.shape) tanth = np.tan(np.arccos(cth)) - f = hod*tanth - f[np.abs(f) > 1.] 
= np.nan + f = hod * tanth + f[np.abs(f) > 1.0] = np.nan asinf = np.arcsin(f) return 2 / np.pi * cth * (np.pi / 2 - asinf - f * np.cos(asinf)) - def calc_transmission_generic(self, - secb: np.array, - thickness: np.floating, - absorption_length: np.floating) -> np.array: + def calc_transmission_generic( + self, + secb: np.array, + thickness: np.floating, + absorption_length: np.floating, + ) -> np.array: if np.isclose(thickness, 0): return np.ones(self.shape) - mu = 1./absorption_length # in microns^-1 - return np.exp(-thickness*mu*secb) + mu = 1.0 / absorption_length # in microns^-1 + return np.exp(-thickness * mu * secb) - def calc_transmission_phosphor(self, - secb: np.array, - thickness: np.floating, - readout_length: np.floating, - absorption_length: np.floating, - energy: np.floating, - pre_U0: np.floating) -> np.array: + def calc_transmission_phosphor( + self, + secb: np.array, + thickness: np.floating, + readout_length: np.floating, + absorption_length: np.floating, + energy: np.floating, + pre_U0: np.floating, + ) -> np.array: if np.isclose(thickness, 0): return np.ones(self.shape) - f1 = absorption_length*thickness - f2 = absorption_length*readout_length - arg = (secb + 1/f2) - return pre_U0 * energy*((1.0 - np.exp(-f1*arg))/arg) + f1 = absorption_length * thickness + f2 = absorption_length * readout_length + arg = secb + 1 / f2 + return pre_U0 * energy * ((1.0 - np.exp(-f1 * arg)) / arg) + # ============================================================================= # UTILITY METHODS diff --git a/hexrd/hed/instrument/hedm_instrument.py b/hexrd/hed/instrument/hedm_instrument.py index 6671d2755..6880b1508 100644 --- a/hexrd/hed/instrument/hedm_instrument.py +++ b/hexrd/hed/instrument/hedm_instrument.py @@ -59,8 +59,15 @@ from hexrd.core.fitting.utils import fit_ring from hexrd.core.gridutil import make_tolerance_grid from hexrd.core import matrixutil as mutil -from hexrd.core.transforms.xfcapi import angles_to_gvec, gvec_to_xy, make_sample_rmat, make_rmat_of_expmap, unit_vector -# TODO: Resolve extra-workflow-dependency +from hexrd.core.transforms.xfcapi import ( + angles_to_gvec, + gvec_to_xy, + make_sample_rmat, + make_rmat_of_expmap, + unit_vector, +) + +# TODO: Resolve extra-workflow dependency from hexrd.hedm import xrdutil from hexrd.hedm.material.crystallography import PlaneData from hexrd.core import constants as ct @@ -70,6 +77,7 @@ from hexrd.core.utils.hdf5 import unwrap_dict_to_h5, unwrap_h5_to_dict from hexrd.core.utils.yaml import NumpyToNativeDumper from hexrd.core.valunits import valWUnit + # TODO: Resolve extra-workflow-dependency from hexrd.powder.wppf import LeBail @@ -79,14 +87,15 @@ from skimage.draw import polygon from skimage.util import random_noise -# TODO: Resolve extra-workflow-dependency from hexrd.powder.wppf import wppfsupport try: from fast_histogram import histogram1d + fast_histogram = True except ImportError: from numpy import histogram as histogram1d + fast_histogram = False logger = logging.getLogger() @@ -109,9 +118,9 @@ pixel_size_DFLT = (0.2, 0.2) tilt_params_DFLT = np.zeros(3) -t_vec_d_DFLT = np.r_[0., 0., -1000.] +t_vec_d_DFLT = np.r_[0.0, 0.0, -1000.0] -chi_DFLT = 0. 
+chi_DFLT = 0.0 t_vec_s_DFLT = np.zeros(3) multi_ims_key = ct.shared_ims_key @@ -125,8 +134,9 @@ # ============================================================================= -def generate_chunks(nrows, ncols, base_nrows, base_ncols, - row_gap=0, col_gap=0): +def generate_chunks( + nrows, ncols, base_nrows, base_ncols, row_gap=0, col_gap=0 +): """ Generate chunking data for regularly tiled composite detectors. @@ -158,18 +168,15 @@ def generate_chunks(nrows, ncols, base_nrows, base_ncols, [[row_start, row_stop], [col_start, col_stop]] """ - row_starts = np.array([i*(base_nrows + row_gap) for i in range(nrows)]) - col_starts = np.array([i*(base_ncols + col_gap) for i in range(ncols)]) + row_starts = np.array([i * (base_nrows + row_gap) for i in range(nrows)]) + col_starts = np.array([i * (base_ncols + col_gap) for i in range(ncols)]) rr = np.vstack([row_starts, row_starts + base_nrows]) cc = np.vstack([col_starts, col_starts + base_ncols]) rects = [] labels = [] for i in range(nrows): for j in range(ncols): - this_rect = np.array( - [[rr[0, i], rr[1, i]], - [cc[0, j], cc[1, j]]] - ) + this_rect = np.array([[rr[0, i], rr[1, i]], [cc[0, j], cc[1, j]]]) rects.append(this_rect) labels.append('%d_%d' % (i, j)) return rects, labels @@ -195,9 +202,11 @@ def chunk_instrument(instr, rects, labels, use_roi=False): """ icfg_dict = instr.write_config() - new_icfg_dict = dict(beam=icfg_dict['beam'], - oscillation_stage=icfg_dict['oscillation_stage'], - detectors={}) + new_icfg_dict = dict( + beam=icfg_dict['beam'], + oscillation_stage=icfg_dict['oscillation_stage'], + detectors={}, + ) for panel_id, panel in instr.detectors.items(): pcfg_dict = panel.config_dict(instr.chi, instr.tvec)['detector'] @@ -207,7 +216,7 @@ def chunk_instrument(instr, rects, labels, use_roi=False): row_col_dim = np.diff(rect) # (2, 1) shape = tuple(row_col_dim.flatten()) - center = (rect[:, 0].reshape(2, 1) + 0.5*row_col_dim) + center = rect[:, 0].reshape(2, 1) + 0.5 * row_col_dim sp_tvec = np.concatenate( [panel.pixelToCart(center.T).flatten(), np.zeros(1)] @@ -232,7 +241,7 @@ def chunk_instrument(instr, rects, labels, use_roi=False): if panel.panel_buffer is not None: if panel.panel_buffer.ndim == 2: # have a mask array! submask = panel.panel_buffer[ - rect[0, 0]:rect[0, 1], rect[1, 0]:rect[1, 1] + rect[0, 0] : rect[0, 1], rect[1, 0] : rect[1, 1] ] new_icfg_dict['detectors'][panel_name]['buffer'] = submask return new_icfg_dict @@ -276,9 +285,7 @@ def _parse_imgser_dict(imgser_dict, det_key, roi=None): images_in = imgser_dict[multi_ims_key] elif np.any(matched_det_keys): if sum(matched_det_keys) != 1: - raise RuntimeError( - f"multiple entries found for '{det_key}'" - ) + raise RuntimeError(f"multiple entries found for '{det_key}'") # use boolean array to index the proper key # !!! 
these should be in the same order img_keys = img_keys = np.asarray(list(imgser_dict.keys())) @@ -298,7 +305,12 @@ def _parse_imgser_dict(imgser_dict, det_key, roi=None): if isinstance(images_in, ims_classes): # input is an imageseries of some kind - ims = ProcessedImageSeries(images_in, [('rectangle', roi), ]) + ims = ProcessedImageSeries( + images_in, + [ + ('rectangle', roi), + ], + ) if isinstance(images_in, OmegaImageSeries): # if it was an OmegaImageSeries, must re-cast ims = OmegaImageSeries(ims) @@ -306,16 +318,16 @@ def _parse_imgser_dict(imgser_dict, det_key, roi=None): # 2- or 3-d array of images ndim = images_in.ndim if ndim == 2: - ims = images_in[roi[0][0]:roi[0][1], roi[1][0]:roi[1][1]] + ims = images_in[roi[0][0] : roi[0][1], roi[1][0] : roi[1][1]] elif ndim == 3: nrows = roi[0][1] - roi[0][0] ncols = roi[1][1] - roi[1][0] n_images = len(images_in) - ims = np.empty((n_images, nrows, ncols), - dtype=images_in.dtype) + ims = np.empty((n_images, nrows, ncols), dtype=images_in.dtype) for i, image in images_in: - ims[i, :, :] = \ - images_in[roi[0][0]:roi[0][1], roi[1][0]:roi[1][1]] + ims[i, :, :] = images_in[ + roi[0][0] : roi[0][1], roi[1][0] : roi[1][1] + ] else: raise RuntimeError( f"image input dim must be 2 or 3; you gave {ndim}" @@ -333,9 +345,8 @@ def calc_beam_vec(azim, pola): tht = np.radians(azim) phi = np.radians(pola) bv = np.r_[ - np.sin(phi)*np.cos(tht), - np.cos(phi), - np.sin(phi)*np.sin(tht)] + np.sin(phi) * np.cos(tht), np.cos(phi), np.sin(phi) * np.sin(tht) + ] return -bv @@ -346,9 +357,7 @@ def calc_angles_from_beam_vec(bvec): """ bvec = np.atleast_1d(bvec).flatten() nvec = unit_vector(-bvec) - azim = float( - np.degrees(np.arctan2(nvec[2], nvec[0])) - ) + azim = float(np.degrees(np.arctan2(nvec[2], nvec[0]))) pola = float(np.degrees(np.arccos(nvec[1]))) return azim, pola @@ -372,9 +381,9 @@ def angle_in_range(angle, ranges, ccw=True, units='degrees'): WARNING: always clockwise; assumes wedges are not overlapping """ - tau = 360. + tau = 360.0 if units.lower() == 'radians': - tau = 2*np.pi + tau = 2 * np.pi w = np.nan for i, wedge in enumerate(ranges): amin = wedge[0] @@ -406,7 +415,7 @@ def max_tth(instr): tth_max : float The maximum observable Bragg angle by the instrument in radians. """ - tth_max = 0. 
+ tth_max = 0.0 for det in instr.detectors.values(): ptth, peta = det.pixel_angles() tth_max = max(np.max(ptth), tth_max) @@ -438,10 +447,9 @@ def pixel_resolution(instr): ang_ps_full = [] for panel in instr.detectors.values(): angps = panel.angularPixelSize( - np.stack( - panel.pixel_coords, - axis=0 - ).reshape(2, np.cumprod(panel.shape)[-1]).T + np.stack(panel.pixel_coords, axis=0) + .reshape(2, np.cumprod(panel.shape)[-1]) + .T ) ang_ps_full.append(angps) max_tth = min(max_tth, np.min(angps[:, 0])) @@ -473,10 +481,9 @@ def max_resolution(instr): max_eta = np.inf for panel in instr.detectors.values(): angps = panel.angularPixelSize( - np.stack( - panel.pixel_coords, - axis=0 - ).reshape(2, np.cumprod(panel.shape)[-1]).T + np.stack(panel.pixel_coords, axis=0) + .reshape(2, np.cumprod(panel.shape)[-1]) + .T ) max_tth = min(max_tth, np.min(angps[:, 0])) max_eta = min(max_eta, np.min(angps[:, 1])) @@ -484,16 +491,16 @@ def max_resolution(instr): def _gaussian_dist(x, cen, fwhm): - sigm = fwhm/(2*np.sqrt(2*np.log(2))) - return np.exp(-0.5*(x - cen)**2/sigm**2) + sigm = fwhm / (2 * np.sqrt(2 * np.log(2))) + return np.exp(-0.5 * (x - cen) ** 2 / sigm**2) def _sigma_to_fwhm(sigm): - return sigm*ct.sigma_to_fwhm + return sigm * ct.sigma_to_fwhm def _fwhm_to_sigma(fwhm): - return fwhm/ct.sigma_to_fwhm + return fwhm / ct.sigma_to_fwhm # ============================================================================= @@ -509,12 +516,17 @@ class HEDMInstrument(object): * where should reference eta be defined? currently set to default config """ - def __init__(self, instrument_config=None, - image_series=None, eta_vector=None, - instrument_name=None, tilt_calibration_mapping=None, - max_workers=max_workers_DFLT, - physics_package=None, - active_beam_name: Optional[str] = None): + def __init__( + self, + instrument_config=None, + image_series=None, + eta_vector=None, + instrument_name=None, + tilt_calibration_mapping=None, + max_workers=max_workers_DFLT, + physics_package=None, + active_beam_name: Optional[str] = None, + ): self._id = instrument_name_DFLT self._active_beam_name = active_beam_name @@ -539,7 +551,8 @@ def __init__(self, instrument_config=None, # FIXME: must add cylindrical self._detectors = dict( panel_id_DFLT=PlanarDetector( - rows=nrows_DFLT, cols=ncols_DFLT, + rows=nrows_DFLT, + cols=ncols_DFLT, pixel_size=pixel_size_DFLT, tvec=t_vec_d_DFLT, tilt=tilt_params_DFLT, @@ -547,9 +560,11 @@ def __init__(self, instrument_config=None, xrs_dist=self.source_distance, evec=self._eta_vector, distortion=None, - roi=None, group=None, - max_workers=self.max_workers), - ) + roi=None, + group=None, + max_workers=self.max_workers, + ), + ) self._tvec = t_vec_s_DFLT self._chi = chi_DFLT @@ -576,10 +591,7 @@ def __init__(self, instrument_config=None, self.physics_package = instrument_config['physics_package'] xrs_config = instrument_config['beam'] - is_single_beam = ( - 'energy' in xrs_config and - 'vector' in xrs_config - ) + is_single_beam = 'energy' in xrs_config and 'vector' in xrs_config if is_single_beam: # Assume single beam. 
Load the same way as multibeam self._create_default_beam() @@ -636,7 +648,7 @@ def __init__(self, instrument_config=None, elif isinstance(det_buffer, list): panel_buffer = np.asarray(det_buffer) elif np.isscalar(det_buffer): - panel_buffer = det_buffer*np.ones(2) + panel_buffer = det_buffer * np.ones(2) else: raise RuntimeError( "panel buffer spec invalid for %s" % det_id @@ -713,9 +725,9 @@ def mean_detector_center(self) -> np.ndarray: def mean_group_center(self, group: str) -> np.ndarray: """Return the mean center for detectors belonging to a group""" - centers = np.array([ - x.tvec for x in self.detectors_in_group(group).values() - ]) + centers = np.array( + [x.tvec for x in self.detectors_in_group(group).values()] + ) return centers.sum(axis=0) / len(centers) @property @@ -749,10 +761,11 @@ def detector_parameters(self): pdict = {} for key, panel in self.detectors.items(): pdict[key] = panel.config_dict( - self.chi, self.tvec, + self.chi, + self.tvec, beam_energy=self.beam_energy, beam_vector=self.beam_vector, - style='hdf5' + style='hdf5', ) return pdict @@ -856,8 +869,9 @@ def beam_vector(self) -> np.ndarray: def beam_vector(self, x: np.ndarray): x = np.array(x).flatten() if len(x) == 3: - assert sum(x*x) > 1-ct.sqrt_epsf, \ - 'input must have length = 3 and have unit magnitude' + assert ( + sum(x * x) > 1 - ct.sqrt_epsf + ), 'input must have length = 3 and have unit magnitude' bvec = x elif len(x) == 2: bvec = calc_beam_vec(*x) @@ -874,8 +888,9 @@ def source_distance(self): @source_distance.setter def source_distance(self, x): - assert np.isscalar(x), \ - f"'source_distance' must be a scalar; you input '{x}'" + assert np.isscalar( + x + ), f"'source_distance' must be a scalar; you input '{x}'" self.active_beam['distance'] = x self.beam_dict_modified() @@ -886,8 +901,9 @@ def eta_vector(self): @eta_vector.setter def eta_vector(self, x): x = np.array(x).flatten() - assert len(x) == 3 and sum(x*x) > 1-ct.sqrt_epsf, \ - 'input must have length = 3 and have unit magnitude' + assert ( + len(x) == 3 and sum(x * x) > 1 - ct.sqrt_epsf + ), 'input must have length = 3 and have unit magnitude' self._eta_vector = x # ...maybe change dictionary item behavior for 3.x compatibility? for detector_id in self.detectors: @@ -899,10 +915,11 @@ def eta_vector(self, x): # ========================================================================= def write_config(self, file=None, style='yaml', calibration_dict={}): - """ WRITE OUT YAML FILE """ + """WRITE OUT YAML FILE""" # initialize output dictionary - assert style.lower() in ['yaml', 'hdf5'], \ + assert style.lower() in ['yaml', 'hdf5'], ( "style must be either 'yaml', or 'hdf5'; you gave '%s'" % style + ) par_dict = {} @@ -931,10 +948,7 @@ def write_config(self, file=None, style='yaml', calibration_dict={}): if calibration_dict: par_dict['calibration_crystal'] = calibration_dict - ostage = dict( - chi=self.chi, - translation=self.tvec.tolist() - ) + ostage = dict(chi=self.chi, translation=self.tvec.tolist()) par_dict['oscillation_stage'] = ostage det_dict = dict.fromkeys(self.detectors) @@ -942,10 +956,13 @@ def write_config(self, file=None, style='yaml', calibration_dict={}): # grab panel config # !!! don't need beam or tvec # !!! 
have vetted style - pdict = detector.config_dict(chi=self.chi, tvec=self.tvec, - beam_energy=self.beam_energy, - beam_vector=self.beam_vector, - style=style) + pdict = detector.config_dict( + chi=self.chi, + tvec=self.tvec, + beam_energy=self.beam_energy, + beam_vector=self.beam_vector, + style=style, + ) det_dict[det_name] = pdict['detector'] par_dict['detectors'] = det_dict @@ -955,6 +972,7 @@ def write_config(self, file=None, style='yaml', calibration_dict={}): with open(file, 'w') as f: yaml.dump(par_dict, stream=f, Dumper=NumpyToNativeDumper) else: + def _write_group(file): instr_grp = file.create_group('instrument') unwrap_dict_to_h5(instr_grp, par_dict, asattr=False) @@ -970,9 +988,15 @@ def _write_group(file): return par_dict - def extract_polar_maps(self, plane_data, imgser_dict, - active_hkls=None, threshold=None, - tth_tol=None, eta_tol=0.25): + def extract_polar_maps( + self, + plane_data, + imgser_dict, + active_hkls=None, + threshold=None, + tth_tol=None, + eta_tol=0.25, + ): """ Extract eta-omega maps from an imageseries. @@ -996,23 +1020,25 @@ def extract_polar_maps(self, plane_data, imgser_dict, # detectors, so calculate it once # !!! grab first panel panel = next(iter(self.detectors.values())) - pow_angs, pow_xys, tth_ranges, eta_idx, eta_edges = \ + pow_angs, pow_xys, tth_ranges, eta_idx, eta_edges = ( panel.make_powder_rings( - plane_data, merge_hkls=False, - delta_eta=eta_tol, full_output=True + plane_data, + merge_hkls=False, + delta_eta=eta_tol, + full_output=True, ) + ) if active_hkls is not None: - assert hasattr(active_hkls, '__len__'), \ - "active_hkls must be an iterable with __len__" + assert hasattr( + active_hkls, '__len__' + ), "active_hkls must be an iterable with __len__" # need to re-cast for element-wise operations active_hkls = np.array(active_hkls) # these are all active reflection unique hklIDs - active_hklIDs = plane_data.getHKLID( - plane_data.hkls, master=True - ) + active_hklIDs = plane_data.getHKLID(plane_data.hkls, master=True) # find indices idx = np.zeros_like(active_hkls, dtype=int) @@ -1069,9 +1095,14 @@ def extract_polar_maps(self, plane_data, imgser_dict, # Divide up the images among processes tasks = distribute_tasks(len(ims), self.max_workers) - func = partial(_run_histograms, ims=ims, tth_ranges=tth_ranges, - ring_maps=ring_maps, ring_params=ring_params, - threshold=threshold) + func = partial( + _run_histograms, + ims=ims, + tth_ranges=tth_ranges, + ring_maps=ring_maps, + ring_params=ring_params, + threshold=threshold, + ) max_workers = self.max_workers if max_workers == 1 or len(tasks) == 1: @@ -1089,12 +1120,21 @@ def extract_polar_maps(self, plane_data, imgser_dict, return ring_maps_panel, eta_edges - def extract_line_positions(self, plane_data, imgser_dict, - tth_tol=None, eta_tol=1., npdiv=2, - eta_centers=None, - collapse_eta=True, collapse_tth=False, - do_interpolation=True, do_fitting=False, - tth_distortion=None, fitting_kwargs=None): + def extract_line_positions( + self, + plane_data, + imgser_dict, + tth_tol=None, + eta_tol=1.0, + npdiv=2, + eta_centers=None, + collapse_eta=True, + collapse_tth=False, + do_interpolation=True, + do_fitting=False, + tth_distortion=None, + fitting_kwargs=None, + ): """ Perform annular interpolation on diffraction images. 
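Since generate_chunks, reformatted near the top of this file, is pure index arithmetic, a toy call pins down its contract; a hypothetical 2 x 2 tiling of 100 x 100 subpanels with 10-pixel gaps (import path per this patch; the numbers are made up for illustration):

    from hexrd.hed.instrument.hedm_instrument import generate_chunks

    rects, labels = generate_chunks(2, 2, 100, 100, row_gap=10, col_gap=10)
    print(labels)    # ['0_0', '0_1', '1_0', '1_1']
    print(rects[3])  # [[110 210], [110 210]] -- last tile offset by
                     # base size + gap along both axes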
@@ -1169,8 +1209,12 @@ def extract_line_positions(self, plane_data, imgser_dict, # LOOP OVER DETECTORS # ===================================================================== logger.info("Interpolating ring data") - pbar_dets = partial(tqdm, total=self.num_panels, desc="Detector", - position=self.num_panels) + pbar_dets = partial( + tqdm, + total=self.num_panels, + desc="Detector", + position=self.num_panels, + ) # Split up the workers among the detectors max_workers_per_detector = max(1, self.max_workers // self.num_panels) @@ -1193,23 +1237,26 @@ def extract_line_positions(self, plane_data, imgser_dict, def make_instr_cfg(panel): return panel.config_dict( - chi=self.chi, tvec=self.tvec, + chi=self.chi, + tvec=self.tvec, beam_energy=self.beam_energy, beam_vector=self.beam_vector, - style='hdf5' + style='hdf5', ) images = [] for detector_id, panel in self.detectors.items(): - images.append(_parse_imgser_dict(imgser_dict, detector_id, - roi=panel.roi)) + images.append( + _parse_imgser_dict(imgser_dict, detector_id, roi=panel.roi) + ) panels = [self.detectors[k] for k in self.detectors] instr_cfgs = [make_instr_cfg(x) for x in panels] pbp_array = np.arange(self.num_panels) iter_args = zip(panels, instr_cfgs, images, pbp_array) - with ProcessPoolExecutor(mp_context=constants.mp_context, - max_workers=self.num_panels) as executor: + with ProcessPoolExecutor( + mp_context=constants.mp_context, max_workers=self.num_panels + ) as executor: results = list(pbar_dets(executor.map(func, iter_args))) panel_data = {} @@ -1218,12 +1265,9 @@ def make_instr_cfg(panel): return panel_data - def simulate_powder_pattern(self, - mat_list, - params=None, - bkgmethod=None, - origin=None, - noise=None): + def simulate_powder_pattern( + self, mat_list, params=None, bkgmethod=None, origin=None, noise=None + ): """ Generate powder diffraction iamges from specified materials. @@ -1262,8 +1306,7 @@ def simulate_powder_pattern(self, if origin is None: origin = self.tvec origin = np.asarray(origin).squeeze() - assert len(origin) == 3, \ - "origin must be a 3-element sequence" + assert len(origin) == 3, "origin must be a 3-element sequence" if bkgmethod is None: bkgmethod = {'chebyshev': 3} @@ -1303,7 +1346,7 @@ def simulate_powder_pattern(self, # find min and max tth over all panels tth_mi = np.inf - tth_ma = 0. + tth_ma = 0.0 ptth_dict = dict.fromkeys(self.detectors) for det_key, panel in self.detectors.items(): ptth, peta = panel.pixel_angles(origin=origin) @@ -1325,7 +1368,7 @@ def simulate_powder_pattern(self, ang_res = max_resolution(self) # !!! calc nsteps by oversampling - nsteps = int(np.ceil(2*(tth_ma - tth_mi)/np.degrees(ang_res[0]))) + nsteps = int(np.ceil(2 * (tth_ma - tth_mi) / np.degrees(ang_res[0]))) # evaulation vector for LeBail tth = np.linspace(tth_mi, tth_ma, nsteps) @@ -1334,7 +1377,7 @@ def simulate_powder_pattern(self, wavelength = [ valWUnit('lp', 'length', self.beam_wavelength, 'angstrom'), - 1. 
+ 1.0, ] ''' @@ -1347,23 +1390,25 @@ def simulate_powder_pattern(self, tth = mat.planeData.getTTh() - LP = (1 + np.cos(tth)**2) / \ - np.cos(0.5*tth)/np.sin(0.5*tth)**2 + LP = ( + (1 + np.cos(tth) ** 2) + / np.cos(0.5 * tth) + / np.sin(0.5 * tth) ** 2 + ) intensity[mat.name] = {} - intensity[mat.name]['synchrotron'] = \ + intensity[mat.name]['synchrotron'] = ( mat.planeData.structFact * LP * multiplicity + ) kwargs = { 'expt_spectrum': expt, 'params': params, 'phases': mat_list, - 'wavelength': { - 'synchrotron': wavelength - }, + 'wavelength': {'synchrotron': wavelength}, 'bkgmethod': bkgmethod, 'intensity_init': intensity, - 'peakshape': 'pvtch' + 'peakshape': 'pvtch', } self.WPPFclass = LeBail(**kwargs) @@ -1381,9 +1426,11 @@ def simulate_powder_pattern(self, for det_key, panel in self.detectors.items(): ptth = ptth_dict[det_key] - img = np.interp(np.degrees(ptth), - self.simulated_spectrum.x, - self.simulated_spectrum.y + self.background.y) + img = np.interp( + np.degrees(ptth), + self.simulated_spectrum.x, + self.simulated_spectrum.y + self.background.y, + ) if noise is None: img_dict[det_key] = img @@ -1394,13 +1441,11 @@ def simulate_powder_pattern(self, img /= prev_max if noise.lower() == 'poisson': - im_noise = random_noise(img, - mode='poisson', - clip=True) + im_noise = random_noise(img, mode='poisson', clip=True) mi = im_noise.min() ma = im_noise.max() if ma > mi: - im_noise = (im_noise - mi)/(ma - mi) + im_noise = (im_noise - mi) / (ma - mi) elif noise.lower() == 'gaussian': im_noise = random_noise(img, mode='gaussian', clip=True) @@ -1422,9 +1467,14 @@ def simulate_powder_pattern(self, return img_dict - def simulate_laue_pattern(self, crystal_data, - minEnergy=5., maxEnergy=35., - rmat_s=None, grain_params=None): + def simulate_laue_pattern( + self, + crystal_data, + minEnergy=5.0, + maxEnergy=35.0, + rmat_s=None, + grain_params=None, + ): """ Simulate Laue diffraction over the instrument. @@ -1454,17 +1504,28 @@ def simulate_laue_pattern(self, crystal_data, for det_key, panel in self.detectors.items(): results[det_key] = panel.simulate_laue_pattern( crystal_data, - minEnergy=minEnergy, maxEnergy=maxEnergy, - rmat_s=rmat_s, tvec_s=self.tvec, + minEnergy=minEnergy, + maxEnergy=maxEnergy, + rmat_s=rmat_s, + tvec_s=self.tvec, grain_params=grain_params, - beam_vec=self.beam_vector) + beam_vec=self.beam_vector, + ) return results - def simulate_rotation_series(self, plane_data, grain_param_list, - eta_ranges=[(-np.pi, np.pi), ], - ome_ranges=[(-np.pi, np.pi), ], - ome_period=(-np.pi, np.pi), - wavelength=None): + def simulate_rotation_series( + self, + plane_data, + grain_param_list, + eta_ranges=[ + (-np.pi, np.pi), + ], + ome_ranges=[ + (-np.pi, np.pi), + ], + ome_period=(-np.pi, np.pi), + wavelength=None, + ): """ Simulate a monochromatic rotation series over the instrument. 
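The Lorentz-polarization factor reformatted above is LP = (1 + cos^2(tth)) / (cos(tth/2) * sin^2(tth/2)); it diverges as tth -> 0, which is why low-angle rings dominate a simulated powder pattern unless weighted this way. A quick numeric check:

    import numpy as np

    tth = np.radians([10.0, 30.0, 90.0])  # two-theta of three rings
    lp = (1 + np.cos(tth) ** 2) / np.cos(0.5 * tth) / np.sin(0.5 * tth) ** 2
    print(lp)  # approximately [260.3, 27.0, 2.83]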
@@ -1493,24 +1554,39 @@ def simulate_rotation_series(self, plane_data, grain_param_list, results = dict.fromkeys(self.detectors) for det_key, panel in self.detectors.items(): results[det_key] = panel.simulate_rotation_series( - plane_data, grain_param_list, + plane_data, + grain_param_list, eta_ranges=eta_ranges, ome_ranges=ome_ranges, ome_period=ome_period, - chi=self.chi, tVec_s=self.tvec, - wavelength=wavelength) + chi=self.chi, + tVec_s=self.tvec, + wavelength=wavelength, + ) return results - def pull_spots(self, plane_data, grain_params, - imgser_dict, - tth_tol=0.25, eta_tol=1., ome_tol=1., - npdiv=2, threshold=10, - eta_ranges=[(-np.pi, np.pi), ], - ome_period=None, - dirname='results', filename=None, output_format='text', - return_spot_list=False, - quiet=True, check_only=False, - interp='nearest'): + def pull_spots( + self, + plane_data, + grain_params, + imgser_dict, + tth_tol=0.25, + eta_tol=1.0, + ome_tol=1.0, + npdiv=2, + threshold=10, + eta_ranges=[ + (-np.pi, np.pi), + ], + ome_period=None, + dirname='results', + filename=None, + output_format='text', + return_spot_list=False, + quiet=True, + check_only=False, + interp='nearest', + ): """ Exctract reflection info from a rotation series. @@ -1570,12 +1646,14 @@ def pull_spots(self, plane_data, grain_params, # WARNING: all imageseries AND all wedges within are assumed to have # the same omega values; put in a check that they are all the same??? oims0 = next(iter(imgser_dict.values())) - ome_ranges = [np.radians([i['ostart'], i['ostop']]) - for i in oims0.omegawedges.wedges] + ome_ranges = [ + np.radians([i['ostart'], i['ostop']]) + for i in oims0.omegawedges.wedges + ] if ome_period is None: ims = next(iter(imgser_dict.values())) ostart = ims.omega[0, 0] - ome_period = np.radians(ostart + np.r_[0., 360.]) + ome_period = np.radians(ostart + np.r_[0.0, 360.0]) # delta omega in DEGREES grabbed from first imageseries in the dict delta_ome = oims0.omega[0, 1] - oims0.omega[0, 0] @@ -1583,7 +1661,10 @@ def pull_spots(self, plane_data, grain_params, # make omega grid for frame expansion around reference frame # in DEGREES ndiv_ome, ome_del = make_tolerance_grid( - delta_ome, ome_tol, 1, adjust_window=True, + delta_ome, + ome_tol, + 1, + adjust_window=True, ) # generate structuring element for connected component labeling @@ -1594,24 +1675,37 @@ def pull_spots(self, plane_data, grain_params, # simulate rotation series sim_results = self.simulate_rotation_series( - plane_data, [grain_params, ], + plane_data, + [ + grain_params, + ], eta_ranges=eta_ranges, ome_ranges=ome_ranges, - ome_period=ome_period) + ome_period=ome_period, + ) # patch vertex generator (global for instrument) - tol_vec = 0.5*np.radians( - [-tth_tol, -eta_tol, - -tth_tol, eta_tol, - tth_tol, eta_tol, - tth_tol, -eta_tol]) + tol_vec = 0.5 * np.radians( + [ + -tth_tol, + -eta_tol, + -tth_tol, + eta_tol, + tth_tol, + eta_tol, + tth_tol, + -eta_tol, + ] + ) # prepare output if requested if filename is not None and output_format.lower() == 'hdf5': this_filename = os.path.join(dirname, filename) writer = GrainDataWriter_h5( os.path.join(dirname, filename), - self.write_config(), grain_params) + self.write_config(), + grain_params, + ) # ===================================================================== # LOOP OVER PANELS @@ -1623,28 +1717,25 @@ def pull_spots(self, plane_data, grain_params, for detector_id, panel in self.detectors.items(): # initialize text-based output writer if filename is not None and output_format.lower() == 'text': - output_dir = os.path.join( - 
dirname, detector_id - ) + output_dir = os.path.join(dirname, detector_id) os.makedirs(output_dir, exist_ok=True) - this_filename = os.path.join( - output_dir, filename - ) + this_filename = os.path.join(output_dir, filename) writer = PatchDataWriter(this_filename) # grab panel instr_cfg = panel.config_dict( - self.chi, self.tvec, + self.chi, + self.tvec, beam_energy=self.beam_energy, beam_vector=self.beam_vector, - style='hdf5' + style='hdf5', ) native_area = panel.pixel_area # pixel ref area # pull out the OmegaImageSeries for this panel from input dict - ome_imgser = _parse_imgser_dict(imgser_dict, - detector_id, - roi=panel.roi) + ome_imgser = _parse_imgser_dict( + imgser_dict, detector_id, roi=panel.roi + ) # extract simulation results sim_results_p = sim_results[detector_id] @@ -1660,19 +1751,24 @@ def pull_spots(self, plane_data, grain_params, # patch vertex array from sim nangs = len(ang_centers) patch_vertices = ( - np.tile(ang_centers[:, :2], (1, 4)) + - np.tile(tol_vec, (nangs, 1)) - ).reshape(4*nangs, 2) - ome_dupl = np.tile( - ang_centers[:, 2], (4, 1) - ).T.reshape(len(patch_vertices), 1) + np.tile(ang_centers[:, :2], (1, 4)) + + np.tile(tol_vec, (nangs, 1)) + ).reshape(4 * nangs, 2) + ome_dupl = np.tile(ang_centers[:, 2], (4, 1)).T.reshape( + len(patch_vertices), 1 + ) # find vertices that all fall on the panel det_xy, rmats_s, on_plane = xrdutil._project_on_detector_plane( np.hstack([patch_vertices, ome_dupl]), - panel.rmat, rMat_c, self.chi, - panel.tvec, tVec_c, self.tvec, - panel.distortion) + panel.rmat, + rMat_c, + self.chi, + panel.tvec, + tVec_c, + self.tvec, + panel.distortion, + ) _, on_panel = panel.clip_to_panel(det_xy, buffer_edges=True) # all vertices must be on... @@ -1703,7 +1799,9 @@ def pull_spots(self, plane_data, grain_params, if not quiet: msg = """ window for (%d %d %d) falls outside omega range - """ % tuple(hkls_p[i_pt, :]) + """ % tuple( + hkls_p[i_pt, :] + ) print(msg) continue else: @@ -1721,11 +1819,16 @@ def pull_spots(self, plane_data, grain_params, # make the tth,eta patches for interpolation patches = xrdutil.make_reflection_patches( instr_cfg, - ang_centers[:, :2], ang_pixel_size, + ang_centers[:, :2], + ang_pixel_size, omega=ang_centers[:, 2], - tth_tol=tth_tol, eta_tol=eta_tol, - rmat_c=rMat_c, tvec_c=tVec_c, - npdiv=npdiv, quiet=True) + tth_tol=tth_tol, + eta_tol=eta_tol, + rmat_c=rMat_c, + tvec_c=tVec_c, + npdiv=npdiv, + quiet=True, + ) # GRAND LOOP over reflections for this panel patch_output = [] @@ -1735,7 +1838,7 @@ def pull_spots(self, plane_data, grain_params, vtx_angs, vtx_xy, conn, areas, xy_eval, ijs = patch prows, pcols = areas.shape - nrm_fac = areas/float(native_area) + nrm_fac = areas / float(native_area) nrm_fac = nrm_fac / np.min(nrm_fac) # grab hkl info @@ -1749,8 +1852,9 @@ def pull_spots(self, plane_data, grain_params, delta_eta = eta_edges[1] - eta_edges[0] # need to reshape eval pts for interpolation - xy_eval = np.vstack([xy_eval[0].flatten(), - xy_eval[1].flatten()]).T + xy_eval = np.vstack( + [xy_eval[0].flatten(), xy_eval[1].flatten()] + ).T # the evaluation omegas; # expand about the central value using tol vector @@ -1765,7 +1869,9 @@ def pull_spots(self, plane_data, grain_params, if not quiet: msg = """ window for (%d%d%d) falls outside omega range - """ % tuple(hkl) + """ % tuple( + hkl + ) print(msg) continue else: @@ -1774,8 +1880,8 @@ def pull_spots(self, plane_data, grain_params, peak_id = next_invalid_peak_id sum_int = np.nan max_int = np.nan - meas_angs = np.nan*np.ones(3) - meas_xy = np.nan*np.ones(2) + 
meas_angs = np.nan * np.ones(3) + meas_xy = np.nan * np.ones(2) # quick check for intensity contains_signal = False @@ -1793,19 +1899,23 @@ def pull_spots(self, plane_data, grain_params, # initialize patch data array for intensities if interp.lower() == 'bilinear': patch_data = np.zeros( - (len(frame_indices), prows, pcols)) + (len(frame_indices), prows, pcols) + ) for i, i_frame in enumerate(frame_indices): - patch_data[i] = \ - panel.interpolate_bilinear( - xy_eval, - ome_imgser[i_frame], - pad_with_nans=False - ).reshape(prows, pcols) # * nrm_fac + patch_data[i] = panel.interpolate_bilinear( + xy_eval, + ome_imgser[i_frame], + pad_with_nans=False, + ).reshape( + prows, pcols + ) # * nrm_fac elif interp.lower() == 'nearest': patch_data = patch_data_raw # * nrm_fac else: - msg = "interpolation option " + \ - "'%s' not understood" + msg = ( + "interpolation option " + + "'%s' not understood" + ) raise RuntimeError(msg % interp) # now have interpolated patch data... @@ -1818,9 +1928,10 @@ def pull_spots(self, plane_data, grain_params, peak_id = iRefl props = regionprops(labels, patch_data) coms = np.vstack( - [x.weighted_centroid for x in props]) + [x.weighted_centroid for x in props] + ) if num_peaks > 1: - center = np.r_[patch_data.shape]*0.5 + center = np.r_[patch_data.shape] * 0.5 center_t = np.tile(center, (num_peaks, 1)) com_diff = coms - center_t closest_peak_idx = np.argmin( @@ -1831,15 +1942,17 @@ def pull_spots(self, plane_data, grain_params, coms = coms[closest_peak_idx] # meas_omes = \ # ome_edges[0] + (0.5 + coms[0])*delta_ome - meas_omes = \ - ome_eval[0] + coms[0]*delta_ome + meas_omes = ome_eval[0] + coms[0] * delta_ome meas_angs = np.hstack( - [tth_edges[0] + (0.5 + coms[2])*delta_tth, - eta_edges[0] + (0.5 + coms[1])*delta_eta, - mapAngle( - np.radians(meas_omes), ome_period - ) - ] + [ + tth_edges[0] + + (0.5 + coms[2]) * delta_tth, + eta_edges[0] + + (0.5 + coms[1]) * delta_eta, + mapAngle( + np.radians(meas_omes), ome_period + ), + ] ) # intensities @@ -1866,15 +1979,21 @@ def pull_spots(self, plane_data, grain_params, meas_angs, chi=self.chi, rmat_c=rMat_c, - beam_vec=self.beam_vector) + beam_vec=self.beam_vector, + ) rMat_s = make_sample_rmat( self.chi, meas_angs[2] ) meas_xy = gvec_to_xy( gvec_c, - panel.rmat, rMat_s, rMat_c, - panel.tvec, self.tvec, tVec_c, - beam_vec=self.beam_vector) + panel.rmat, + rMat_s, + rMat_c, + panel.tvec, + self.tvec, + tVec_c, + beam_vec=self.beam_vector, + ) if panel.distortion is not None: meas_xy = panel.distortion.apply_inverse( np.atleast_2d(meas_xy) @@ -1893,19 +2012,38 @@ def pull_spots(self, plane_data, grain_params, if filename is not None: if output_format.lower() == 'text': writer.dump_patch( - peak_id, hkl_id, hkl, sum_int, max_int, - ang_centers[i_pt], meas_angs, - xy_centers[i_pt], meas_xy) + peak_id, + hkl_id, + hkl, + sum_int, + max_int, + ang_centers[i_pt], + meas_angs, + xy_centers[i_pt], + meas_xy, + ) elif output_format.lower() == 'hdf5': xyc_arr = xy_eval.reshape( prows, pcols, 2 ).transpose(2, 0, 1) writer.dump_patch( - detector_id, iRefl, peak_id, hkl_id, hkl, - tth_edges, eta_edges, np.radians(ome_eval), - xyc_arr, ijs, frame_indices, patch_data, - ang_centers[i_pt], xy_centers[i_pt], - meas_angs, meas_xy) + detector_id, + iRefl, + peak_id, + hkl_id, + hkl, + tth_edges, + eta_edges, + np.radians(ome_eval), + xyc_arr, + ijs, + frame_indices, + patch_data, + ang_centers[i_pt], + xy_centers[i_pt], + meas_angs, + meas_xy, + ) if return_spot_list: # Full output @@ -1913,17 +2051,34 @@ def pull_spots(self, plane_data, 
grain_params, prows, pcols, 2 ).transpose(2, 0, 1) _patch_output = [ - detector_id, iRefl, peak_id, hkl_id, hkl, - tth_edges, eta_edges, np.radians(ome_eval), - xyc_arr, ijs, frame_indices, patch_data, - ang_centers[i_pt], xy_centers[i_pt], - meas_angs, meas_xy + detector_id, + iRefl, + peak_id, + hkl_id, + hkl, + tth_edges, + eta_edges, + np.radians(ome_eval), + xyc_arr, + ijs, + frame_indices, + patch_data, + ang_centers[i_pt], + xy_centers[i_pt], + meas_angs, + meas_xy, ] else: # Trimmed output _patch_output = [ - peak_id, hkl_id, hkl, sum_int, max_int, - ang_centers[i_pt], meas_angs, meas_xy + peak_id, + hkl_id, + hkl, + sum_int, + max_int, + ang_centers[i_pt], + meas_angs, + meas_xy, ] patch_output.append(_patch_output) iRefl += 1 @@ -1941,7 +2096,9 @@ def update_memoization_sizes(self): PlanarDetector.update_memoization_sizes(all_panels) CylindricalDetector.update_memoization_sizes(all_panels) - def calc_transmission(self, rMat_s: np.ndarray = None) -> dict[str, np.ndarray]: + def calc_transmission( + self, rMat_s: np.ndarray = None + ) -> dict[str, np.ndarray]: """calculate the transmission from the filter and polymer coating. the inverse of this number is the intensity correction that needs @@ -1955,26 +2112,31 @@ def calc_transmission(self, rMat_s: np.ndarray = None) -> dict[str, np.ndarray]: transmissions = {} for det_name, det in self.detectors.items(): transmission_filter, transmission_phosphor = ( - det.calc_filter_coating_transmission(energy)) + det.calc_filter_coating_transmission(energy) + ) transmission = transmission_filter * transmission_phosphor if self.physics_package is not None: transmission_physics_package = ( det.calc_physics_package_transmission( - energy, rMat_s, self.physics_package)) + energy, rMat_s, self.physics_package + ) + ) effective_pinhole_area = det.calc_effective_pinhole_area( - self.physics_package) + self.physics_package + ) transmission = ( - transmission * - transmission_physics_package * - effective_pinhole_area + transmission + * transmission_physics_package + * effective_pinhole_area ) transmissions[det_name] = transmission return transmissions + # ============================================================================= # UTILITIES # ============================================================================= @@ -1985,6 +2147,7 @@ class PatchDataWriter(object): def __init__(self, filename): self._delim = ' ' + # fmt: off header_items = ( '# ID', 'PID', 'H', 'K', 'L', @@ -1999,6 +2162,8 @@ def __init__(self, filename): self._delim.join(np.tile('{:<12}', 2)).format(*header_items[5:7]), self._delim.join(np.tile('{:<23}', 10)).format(*header_items[7:17]) ]) + + # fmt: on if isinstance(filename, IOBase): self.fid = filename else: @@ -2011,30 +2176,34 @@ def __del__(self): def close(self): self.fid.close() - def dump_patch(self, peak_id, hkl_id, - hkl, spot_int, max_int, - pangs, mangs, pxy, mxy): + def dump_patch( + self, peak_id, hkl_id, hkl, spot_int, max_int, pangs, mangs, pxy, mxy + ): """ !!! 
maybe need to check that last four inputs are arrays """ if mangs is None: spot_int = np.nan max_int = np.nan - mangs = np.nan*np.ones(3) - mxy = np.nan*np.ones(2) - - res = [int(peak_id), int(hkl_id)] \ - + np.array(hkl, dtype=int).tolist() \ - + [spot_int, max_int] \ - + pangs.tolist() \ - + mangs.tolist() \ - + pxy.tolist() \ + mangs = np.nan * np.ones(3) + mxy = np.nan * np.ones(2) + + res = ( + [int(peak_id), int(hkl_id)] + + np.array(hkl, dtype=int).tolist() + + [spot_int, max_int] + + pangs.tolist() + + mangs.tolist() + + pxy.tolist() + mxy.tolist() + ) output_str = self._delim.join( - [self._delim.join(np.tile('{:<6d}', 5)).format(*res[:5]), - self._delim.join(np.tile('{:<12e}', 2)).format(*res[5:7]), - self._delim.join(np.tile('{:<23.16e}', 10)).format(*res[7:])] + [ + self._delim.join(np.tile('{:<6d}', 5)).format(*res[:5]), + self._delim.join(np.tile('{:<12e}', 2)).format(*res[5:7]), + self._delim.join(np.tile('{:<23.16e}', 10)).format(*res[7:]), + ] ) print(output_str, file=self.fid) return output_str @@ -2050,20 +2219,23 @@ def __init__(self, filename=None, array=None): """ if filename is None and array is None: raise RuntimeError( - 'GrainDataWriter must be specified with filename or array') + 'GrainDataWriter must be specified with filename or array' + ) self.array = None self.fid = None # array supersedes filename if array is not None: - assert array.shape[1] == 21, \ - f'grain data table must have 21 columns not {array.shape[21]}' + assert ( + array.shape[1] == 21 + ), f'grain data table must have 21 columns not {array.shape[21]}' self.array = array self._array_row = 0 return self._delim = ' ' + # fmt: off header_items = ( '# grain ID', 'completeness', 'chi^2', 'exp_map_c[0]', 'exp_map_c[1]', 'exp_map_c[2]', @@ -2083,6 +2255,7 @@ def __init__(self, filename=None, array=None): np.tile('{:<23}', len(header_items) - 3) ).format(*header_items[3:])] ) + # fmt: on if isinstance(filename, IOBase): self.fid = filename else: @@ -2096,35 +2269,40 @@ def close(self): if self.fid is not None: self.fid.close() - def dump_grain(self, grain_id, completeness, chisq, - grain_params): - assert len(grain_params) == 12, \ - "len(grain_params) must be 12, not %d" % len(grain_params) + def dump_grain(self, grain_id, completeness, chisq, grain_params): + assert ( + len(grain_params) == 12 + ), "len(grain_params) must be 12, not %d" % len(grain_params) # extract strain emat = logm(np.linalg.inv(mutil.vecMVToSymm(grain_params[6:]))) evec = mutil.symmToVecMV(emat, scale=False) - res = [int(grain_id), completeness, chisq] \ - + grain_params.tolist() \ + res = ( + [int(grain_id), completeness, chisq] + + grain_params.tolist() + evec.tolist() + ) if self.array is not None: row = self._array_row - assert row < self.array.shape[0], \ - f'invalid row {row} in array table' + assert ( + row < self.array.shape[0] + ), f'invalid row {row} in array table' self.array[row] = res self._array_row += 1 return res # (else) format and write to file output_str = self._delim.join( - [self._delim.join( - ['{:<12d}', '{:<12f}', '{:<12e}'] - ).format(*res[:3]), - self._delim.join( - np.tile('{:<23.16e}', len(res) - 3) - ).format(*res[3:])] + [ + self._delim.join(['{:<12d}', '{:<12f}', '{:<12e}']).format( + *res[:3] + ), + self._delim.join(np.tile('{:<23.16e}', len(res) - 3)).format( + *res[3:] + ), + ] ) print(output_str, file=self.fid) return output_str @@ -2154,12 +2332,12 @@ def __init__(self, filename, instr_cfg, grain_params, use_attr=False): vinv_s = np.array(grain_params[6:]).flatten() vmat_s = 
np.linalg.inv(mutil.vecMVToSymm(vinv_s)) - if use_attr: # attribute version + if use_attr: # attribute version self.grain_grp.attrs.create('rmat_c', rmat_c) self.grain_grp.attrs.create('tvec_c', tvec_c) self.grain_grp.attrs.create('inv(V)_s', vinv_s) self.grain_grp.attrs.create('vmat_s', vmat_s) - else: # dataset version + else: # dataset version self.grain_grp.create_dataset('rmat_c', data=rmat_c) self.grain_grp.create_dataset('tvec_c', data=tvec_c) self.grain_grp.create_dataset('inv(V)_s', data=vinv_s) @@ -2178,11 +2356,26 @@ def __init__(self, filename, instr_cfg, grain_params, use_attr=False): def close(self): self.fid.close() - def dump_patch(self, panel_id, - i_refl, peak_id, hkl_id, hkl, - tth_edges, eta_edges, ome_centers, - xy_centers, ijs, frame_indices, - spot_data, pangs, pxy, mangs, mxy, gzip=1): + def dump_patch( + self, + panel_id, + i_refl, + peak_id, + hkl_id, + hkl, + tth_edges, + eta_edges, + ome_centers, + xy_centers, + ijs, + frame_indices, + spot_data, + pangs, + pxy, + mangs, + mxy, + gzip=1, + ): """ to be called inside loop over patches @@ -2198,10 +2391,10 @@ def dump_patch(self, panel_id, spot_grp.attrs.create('predicted_angles', pangs) spot_grp.attrs.create('predicted_xy', pxy) if mangs is None: - mangs = np.nan*np.ones(3) + mangs = np.nan * np.ones(3) spot_grp.attrs.create('measured_angles', mangs) if mxy is None: - mxy = np.nan*np.ones(3) + mxy = np.nan * np.ones(3) spot_grp.attrs.create('measured_xy', mxy) # get centers crds from edge arrays @@ -2220,27 +2413,55 @@ def dump_patch(self, panel_id, eta_crd = centers_of_edge_vec(eta_edges) shuffle_data = True # reduces size by 20% - spot_grp.create_dataset('tth_crd', data=tth_crd, - compression="gzip", compression_opts=gzip, - shuffle=shuffle_data) - spot_grp.create_dataset('eta_crd', data=eta_crd, - compression="gzip", compression_opts=gzip, - shuffle=shuffle_data) - spot_grp.create_dataset('ome_crd', data=ome_centers, - compression="gzip", compression_opts=gzip, - shuffle=shuffle_data) - spot_grp.create_dataset('xy_centers', data=xy_centers, - compression="gzip", compression_opts=gzip, - shuffle=shuffle_data) - spot_grp.create_dataset('ij_centers', data=ijs, - compression="gzip", compression_opts=gzip, - shuffle=shuffle_data) - spot_grp.create_dataset('frame_indices', data=fi, - compression="gzip", compression_opts=gzip, - shuffle=shuffle_data) - spot_grp.create_dataset('intensities', data=spot_data, - compression="gzip", compression_opts=gzip, - shuffle=shuffle_data) + spot_grp.create_dataset( + 'tth_crd', + data=tth_crd, + compression="gzip", + compression_opts=gzip, + shuffle=shuffle_data, + ) + spot_grp.create_dataset( + 'eta_crd', + data=eta_crd, + compression="gzip", + compression_opts=gzip, + shuffle=shuffle_data, + ) + spot_grp.create_dataset( + 'ome_crd', + data=ome_centers, + compression="gzip", + compression_opts=gzip, + shuffle=shuffle_data, + ) + spot_grp.create_dataset( + 'xy_centers', + data=xy_centers, + compression="gzip", + compression_opts=gzip, + shuffle=shuffle_data, + ) + spot_grp.create_dataset( + 'ij_centers', + data=ijs, + compression="gzip", + compression_opts=gzip, + shuffle=shuffle_data, + ) + spot_grp.create_dataset( + 'frame_indices', + data=fi, + compression="gzip", + compression_opts=gzip, + shuffle=shuffle_data, + ) + spot_grp.create_dataset( + 'intensities', + data=spot_data, + compression="gzip", + compression_opts=gzip, + shuffle=shuffle_data, + ) return @@ -2262,9 +2483,16 @@ class GenerateEtaOmeMaps(object): """ - def __init__(self, image_series_dict, instrument, 
plane_data, - active_hkls=None, eta_step=0.25, threshold=None, - ome_period=(0, 360)): + def __init__( + self, + image_series_dict, + instrument, + plane_data, + active_hkls=None, + eta_step=0.25, + threshold=None, + ome_period=(0, 360), + ): """ image_series must be OmegaImageSeries class instrument_params must be a dict (loaded from yaml spec) @@ -2278,13 +2506,12 @@ def __init__(self, image_series_dict, instrument, plane_data, # ???: change name of iHKLList? # ???: can we change the behavior of iHKLList? if active_hkls is None: - self._iHKLList = plane_data.getHKLID( - plane_data.hkls, master=True - ) + self._iHKLList = plane_data.getHKLID(plane_data.hkls, master=True) n_rings = len(self._iHKLList) else: - assert hasattr(active_hkls, '__len__'), \ - "active_hkls must be an iterable with __len__" + assert hasattr( + active_hkls, '__len__' + ), "active_hkls must be an iterable with __len__" self._iHKLList = active_hkls n_rings = len(active_hkls) @@ -2299,14 +2526,18 @@ def __init__(self, image_series_dict, instrument, plane_data, omegas_array = this_det_ims.metadata['omega'] # !!! DEGREES delta_ome = omegas_array[0][-1] - omegas_array[0][0] frame_mask = None - ome_period = omegas_array[0, 0] + np.r_[0., 360.] # !!! be careful + ome_period = omegas_array[0, 0] + np.r_[0.0, 360.0] # !!! be careful if this_det_ims.omegawedges.nwedges > 1: - delta_omes = [(i['ostop'] - i['ostart'])/i['nsteps'] - for i in this_det_ims.omegawedges.wedges] - check_wedges = mutil.uniqueVectors(np.atleast_2d(delta_omes), - tol=1e-6).squeeze() - assert check_wedges.size == 1, \ - "all wedges must have the same delta omega to 1e-6" + delta_omes = [ + (i['ostop'] - i['ostart']) / i['nsteps'] + for i in this_det_ims.omegawedges.wedges + ] + check_wedges = mutil.uniqueVectors( + np.atleast_2d(delta_omes), tol=1e-6 + ).squeeze() + assert ( + check_wedges.size == 1 + ), "all wedges must have the same delta omega to 1e-6" # grab representative delta ome # !!! assuming positive delta consistent with OmegaImageSeries delta_ome = delta_omes[0] @@ -2322,9 +2553,9 @@ def __init__(self, image_series_dict, instrument, plane_data, ) # compute total nsteps # FIXME: need check for roundoff badness - nsteps = int((ostop - ostart)/delta_ome) + nsteps = int((ostop - ostart) / delta_ome) ome_edges_full = np.linspace( - ostart, ostop, num=nsteps+1, endpoint=True + ostart, ostop, num=nsteps + 1, endpoint=True ) omegas_array = np.vstack( [ome_edges_full[:-1], ome_edges_full[1:]] @@ -2335,15 +2566,21 @@ def __init__(self, image_series_dict, instrument, plane_data, # !!! this array has -1 outside a wedge # !!! again assuming the valid frame order increases monotonically frame_mask = np.array( - [this_det_ims.omega_to_frame(ome)[0] != -1 - for ome in ome_centers] + [ + this_det_ims.omega_to_frame(ome)[0] != -1 + for ome in ome_centers + ] ) # ???: need to pass a threshold? eta_mapping, etas = instrument.extract_polar_maps( - plane_data, image_series_dict, - active_hkls=active_hkls, threshold=threshold, - tth_tol=None, eta_tol=eta_step) + plane_data, + image_series_dict, + active_hkls=active_hkls, + threshold=threshold, + tth_tol=None, + eta_tol=eta_step, + ) # for convenience grab map shape from first map_shape = next(iter(eta_mapping.values())).shape[1:] @@ -2370,7 +2607,7 @@ def __init__(self, image_series_dict, instrument, plane_data, if frame_mask is not None: # !!! 
must expand row dimension to include
# skipped omegas
- tmp = np.ones((len(frame_mask), map_shape[1]))*np.nan
+ tmp = np.ones((len(frame_mask), map_shape[1])) * np.nan
tmp[frame_mask, :] = full_map
full_map = tmp
data_store.append(full_map)
@@ -2379,11 +2616,11 @@ def __init__(self, image_series_dict, instrument, plane_data,
# set required attributes
self._omegas = mapAngle(
np.radians(np.average(omegas_array, axis=1)),
- np.radians(ome_period)
+ np.radians(ome_period),
)
self._omeEdges = mapAngle(
np.radians(np.r_[omegas_array[:, 0], omegas_array[-1, 1]]),
- np.radians(ome_period)
+ np.radians(ome_period),
)
# !!! must avoid the case where omeEdges[0] = omeEdges[-1] for the
@@ -2397,7 +2634,7 @@ def __init__(self, image_series_dict, instrument, plane_data,
# WARNING: unlike the omegas in imageseries metadata,
# these are in RADIANS and represent bin centers
self._etaEdges = etas
- self._etas = self._etaEdges[:-1] + 0.5*np.radians(eta_step)
+ self._etas = self._etaEdges[:-1] + 0.5 * np.radians(eta_step)

@property
def dataStore(self):
@@ -2433,9 +2670,7 @@ def save(self, filename):
def _generate_ring_params(tthr, ptth, peta, eta_edges, delta_eta):
# mark pixels in the spec'd tth range
- pixels_in_tthr = np.logical_and(
- ptth >= tthr[0], ptth <= tthr[1]
- )
+ pixels_in_tthr = np.logical_and(ptth >= tthr[0], ptth <= tthr[1])

# catch case where ring isn't on detector
if not np.any(pixels_in_tthr):
@@ -2452,8 +2687,7 @@ def _generate_ring_params(tthr, ptth, peta, eta_edges, delta_eta):
def run_fast_histogram(x, bins, weights=None):
- return histogram1d(x, len(bins) - 1, (bins[0], bins[-1]),
- weights=weights)
+ return histogram1d(x, len(bins) - 1, (bins[0], bins[-1]), weights=weights)

def run_numpy_histogram(x, bins, weights=None):
@@ -2471,7 +2705,7 @@ def _run_histograms(rows, ims, tth_ranges, ring_maps, ring_params, threshold):
if threshold is not None:
# !!! NaNs get preserved
image = np.array(image)
- image[image < threshold] = 0.
+ image[image < threshold] = 0.0

for i_r, tthr in enumerate(tth_ranges):
this_map = ring_maps[i_r]
@@ -2488,12 +2722,21 @@ def _run_histograms(rows, ims, tth_ranges, ring_maps, ring_params, threshold):
this_map[i_row, bins_on_detector] = result[bins_on_detector]

-def _extract_detector_line_positions(iter_args, plane_data, tth_tol,
- eta_tol, eta_centers, npdiv,
- collapse_tth, collapse_eta,
- do_interpolation, do_fitting,
- fitting_kwargs, tth_distortion,
- max_workers):
+def _extract_detector_line_positions(
+ iter_args,
+ plane_data,
+ tth_tol,
+ eta_tol,
+ eta_centers,
+ npdiv,
+ collapse_tth,
+ collapse_eta,
+ do_interpolation,
+ do_fitting,
+ fitting_kwargs,
+ tth_distortion,
+ max_workers,
+):
panel, instr_cfg, images, pbp = iter_args

if images.ndim == 2:
@@ -2508,9 +2751,13 @@ def _extract_detector_line_positions(iter_args, plane_data, tth_tol,
tth_distr_cls = tth_distortion[panel.name]

pow_angs, pow_xys, tth_ranges = panel.make_powder_rings(
- plane_data, merge_hkls=True,
- delta_tth=tth_tol, delta_eta=eta_tol,
- eta_list=eta_centers, tth_distortion=tth_distr_cls)
+ plane_data,
+ merge_hkls=True,
+ delta_tth=tth_tol,
+ delta_eta=eta_tol,
+ eta_list=eta_centers,
+ tth_distortion=tth_distr_cls,
+ )

tth_tols = np.degrees(np.hstack([i[1] - i[0] for i in tth_ranges]))

@@ -2525,8 +2772,9 @@ def _extract_detector_line_positions(iter_args, plane_data, tth_tol,
# =================================================================
# LOOP OVER RING SETS
# =================================================================
- pbar_rings = partial(tqdm, total=len(pow_angs), desc="Ringset",
- position=pbp)
+ pbar_rings = partial(
+ tqdm, total=len(pow_angs), desc="Ringset", position=pbp
+ )

kwargs = {
'instr_cfg': instr_cfg,
@@ -2543,15 +2791,26 @@ def _extract_detector_line_positions(iter_args, plane_data, tth_tol,
}
func = partial(_extract_ring_line_positions, **kwargs)
iter_arg = zip(pow_angs, pow_xys, tth_tols, tth0)
- with ProcessPoolExecutor(mp_context=constants.mp_context,
- max_workers=max_workers) as executor:
+ with ProcessPoolExecutor(
+ mp_context=constants.mp_context, max_workers=max_workers
+ ) as executor:
return list(pbar_rings(executor.map(func, iter_arg)))

-def _extract_ring_line_positions(iter_args, instr_cfg, panel, eta_tol, npdiv,
- collapse_tth, collapse_eta, images,
- do_interpolation, do_fitting, fitting_kwargs,
- tth_distortion):
+def _extract_ring_line_positions(
+ iter_args,
+ instr_cfg,
+ panel,
+ eta_tol,
+ npdiv,
+ collapse_tth,
+ collapse_eta,
+ images,
+ do_interpolation,
+ do_fitting,
+ fitting_kwargs,
+ tth_distortion,
+):
"""
Extracts data for a single Debye-Scherrer ring.
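# A minimal, self-contained sketch of the functools.partial +
# ProcessPoolExecutor pattern used in _extract_detector_line_positions
# above: the fixed keyword arguments are bound once with partial() so
# executor.map() only has to stream the per-ring positional argument.
# The worker and its arguments here are hypothetical stand-ins.
from concurrent.futures import ProcessPoolExecutor
from functools import partial


def _scale_and_shift(item, scale, offset):
    # stand-in for _extract_ring_line_positions(iter_args, **kwargs)
    return item * scale + offset


if __name__ == '__main__':
    func = partial(_scale_and_shift, scale=2.0, offset=1.0)
    with ProcessPoolExecutor(max_workers=2) as executor:
        results = list(executor.map(func, range(5)))
    print(results)  # [1.0, 3.0, 5.0, 7.0, 9.0]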
@@ -2599,16 +2858,22 @@ def _extract_ring_line_positions(iter_args, instr_cfg, panel, eta_tol, npdiv, nan_mask = ~np.logical_or(np.isnan(xys), np.isnan(angs)) nan_mask = np.logical_or.reduce(nan_mask, 1) if angs.ndim > 1 and xys.ndim > 1: - angs = angs[nan_mask,:] - xys = xys[nan_mask, :] + angs = angs[nan_mask, :] + xys = xys[nan_mask, :] n_images = len(images) native_area = panel.pixel_area # make the tth,eta patches for interpolation patches = xrdutil.make_reflection_patches( - instr_cfg, angs, panel.angularPixelSize(xys), - tth_tol=tth_tol, eta_tol=eta_tol, npdiv=npdiv, quiet=True) + instr_cfg, + angs, + panel.angularPixelSize(xys), + tth_tol=tth_tol, + eta_tol=eta_tol, + npdiv=npdiv, + quiet=True, + ) # loop over patches # FIXME: fix initialization @@ -2621,9 +2886,7 @@ def _extract_ring_line_positions(iter_args, instr_cfg, panel, eta_tol, npdiv, vtx_angs, vtx_xys, conn, areas, xys_eval, ijs = patch # need to reshape eval pts for interpolation - xy_eval = np.vstack([ - xys_eval[0].flatten(), - xys_eval[1].flatten()]).T + xy_eval = np.vstack([xys_eval[0].flatten(), xys_eval[1].flatten()]).T _, on_panel = panel.clip_to_panel(xy_eval) @@ -2631,25 +2894,20 @@ def _extract_ring_line_positions(iter_args, instr_cfg, panel, eta_tol, npdiv, continue if collapse_tth: - ang_data = (vtx_angs[0][0, [0, -1]], - vtx_angs[1][[0, -1], 0]) + ang_data = (vtx_angs[0][0, [0, -1]], vtx_angs[1][[0, -1], 0]) elif collapse_eta: # !!! yield the tth bin centers tth_centers = np.average( - np.vstack( - [vtx_angs[0][0, :-1], vtx_angs[0][0, 1:]] - ), - axis=0 + np.vstack([vtx_angs[0][0, :-1], vtx_angs[0][0, 1:]]), axis=0 ) - ang_data = (tth_centers, - angs[i_p][-1]) + ang_data = (tth_centers, angs[i_p][-1]) if do_fitting: fit_data = [] else: ang_data = vtx_angs prows, pcols = areas.shape - area_fac = areas/float(native_area) + area_fac = areas / float(native_area) # interpolate if not collapse_tth: @@ -2658,19 +2916,22 @@ def _extract_ring_line_positions(iter_args, instr_cfg, panel, eta_tol, npdiv, # catch interpolation type image = images[j_p] if do_interpolation: - p_img = panel.interpolate_bilinear( + p_img = ( + panel.interpolate_bilinear( xy_eval, image, - ).reshape(prows, pcols)*area_fac + ).reshape(prows, pcols) + * area_fac + ) else: - p_img = image[ijs[0], ijs[1]]*area_fac + p_img = image[ijs[0], ijs[1]] * area_fac # catch flat spectrum data, which will cause # fitting to fail. # ???: best here, or make fitting handle it? mxval = np.max(p_img) mnval = np.min(p_img) - if mxval == 0 or (1. 
- mnval/mxval) < 0.01: + if mxval == 0 or (1.0 - mnval / mxval) < 0.01: continue # catch collapsing options @@ -2687,11 +2948,16 @@ def _extract_ring_line_positions(iter_args, instr_cfg, panel, eta_tol, npdiv, tmp = tth_distortion.apply( panel.angles_to_cart( np.vstack( - [np.radians(this_tth0), - np.tile(ang_data[-1], len(this_tth0))] + [ + np.radians(this_tth0), + np.tile( + ang_data[-1], len(this_tth0) + ), + ] ).T ), - return_nominal=True) + return_nominal=True, + ) pk_centers = np.degrees(tmp[:, 0]) else: pk_centers = this_tth0 diff --git a/hexrd/hed/xrdutil/phutil.py b/hexrd/hed/xrdutil/phutil.py index 2740bbfb8..d6fc130f4 100644 --- a/hexrd/hed/xrdutil/phutil.py +++ b/hexrd/hed/xrdutil/phutil.py @@ -13,15 +13,20 @@ from numba import njit from hexrd.core import constants as ct -from hexrd.core.instrument import Detector +from hexrd.hed.instrument import Detector from hexrd.core.transforms import xfcapi from hexrd.core.utils.concurrent import distribute_tasks class SampleLayerDistortion: - def __init__(self, panel, - layer_standoff, layer_thickness, - pinhole_thickness, pinhole_radius): + def __init__( + self, + panel, + layer_standoff, + layer_thickness, + pinhole_thickness, + pinhole_radius, + ): self._panel = panel self._layer_standoff = layer_standoff self._layer_thickness = layer_thickness @@ -70,18 +75,20 @@ def pinhole_radius(self, x): self._pinhole_radius = float(x) def apply(self, xy_pts, return_nominal=True): - """ - """ - return tth_corr_sample_layer(self.panel, xy_pts, - self.layer_standoff, self.layer_thickness, - self.pinhole_thickness, - self.pinhole_radius, - return_nominal=return_nominal) + """ """ + return tth_corr_sample_layer( + self.panel, + xy_pts, + self.layer_standoff, + self.layer_thickness, + self.pinhole_thickness, + self.pinhole_radius, + return_nominal=return_nominal, + ) class JHEPinholeDistortion: - def __init__(self, panel, - pinhole_thickness, pinhole_radius): + def __init__(self, panel, pinhole_thickness, pinhole_radius): self._panel = panel self._pinhole_thickness = pinhole_thickness self._pinhole_radius = pinhole_radius @@ -112,11 +119,14 @@ def pinhole_radius(self, x): self._pinhole_radius = float(x) def apply(self, xy_pts, return_nominal=True): - """ - """ - return tth_corr_pinhole(self.panel, xy_pts, - self.pinhole_thickness, self.pinhole_radius, - return_nominal=return_nominal) + """ """ + return tth_corr_pinhole( + self.panel, + xy_pts, + self.pinhole_thickness, + self.pinhole_radius, + return_nominal=return_nominal, + ) # Make an alias to the name for backward compatibility @@ -124,8 +134,14 @@ def apply(self, xy_pts, return_nominal=True): class RyggPinholeDistortion: - def __init__(self, panel, absorption_length, - pinhole_thickness, pinhole_radius, num_phi_elements=60): + def __init__( + self, + panel, + absorption_length, + pinhole_thickness, + pinhole_radius, + num_phi_elements=60, + ): self.panel = panel self.absorption_length = absorption_length @@ -134,17 +150,26 @@ def __init__(self, panel, absorption_length, self.num_phi_elements = num_phi_elements def apply(self, xy_pts, return_nominal=True): - return tth_corr_rygg_pinhole(self.panel, self.absorption_length, - xy_pts, self.pinhole_thickness, - self.pinhole_radius, - return_nominal=return_nominal, - num_phi_elements=self.num_phi_elements) + return tth_corr_rygg_pinhole( + self.panel, + self.absorption_length, + xy_pts, + self.pinhole_thickness, + self.pinhole_radius, + return_nominal=return_nominal, + num_phi_elements=self.num_phi_elements, + ) -def tth_corr_sample_layer(panel, 
xy_pts, - layer_standoff, layer_thickness, - pinhole_thickness, pinhole_radius, - return_nominal=True): +def tth_corr_sample_layer( + panel, + xy_pts, + layer_standoff, + layer_thickness, + pinhole_thickness, + pinhole_radius, + return_nominal=True, +): """ Compute the Bragg angle distortion associated with a specific sample layer in a pinhole camera. @@ -180,11 +205,11 @@ def tth_corr_sample_layer(panel, xy_pts, xy_pts = np.atleast_2d(xy_pts) # !!! full z offset from center of pinhole to center of layer - zs = layer_standoff + 0.5*layer_thickness + 0.5*pinhole_thickness + zs = layer_standoff + 0.5 * layer_thickness + 0.5 * pinhole_thickness - ref_angs, _ = panel.cart_to_angles(xy_pts, - rmat_s=None, tvec_s=None, - tvec_c=None, apply_distortion=True) + ref_angs, _ = panel.cart_to_angles( + xy_pts, rmat_s=None, tvec_s=None, tvec_c=None, apply_distortion=True + ) ref_tth = ref_angs[:, 0] dhats = xfcapi.unit_vector(panel.cart_to_dvecs(xy_pts)) @@ -193,7 +218,9 @@ def tth_corr_sample_layer(panel, xy_pts, cos_beta[np.arccos(cos_beta) > critical_beta] = np.nan cos_tthn = np.cos(ref_tth) sin_tthn = np.sin(ref_tth) - tth_corr = np.arctan(sin_tthn/(source_distance*cos_beta/zs - cos_tthn)) + tth_corr = np.arctan( + sin_tthn / (source_distance * cos_beta / zs - cos_tthn) + ) if return_nominal: return np.vstack([ref_tth - tth_corr, ref_angs[:, 1]]).T else: @@ -201,9 +228,12 @@ def tth_corr_sample_layer(panel, xy_pts, return np.vstack([-tth_corr, ref_angs[:, 1]]).T -def invalidate_past_critical_beta(panel: Detector, xy_pts: np.ndarray, - pinhole_thickness: float, - pinhole_radius: float) -> None: +def invalidate_past_critical_beta( + panel: Detector, + xy_pts: np.ndarray, + pinhole_thickness: float, + pinhole_radius: float, +) -> None: """Set any xy_pts past critical beta to be nan""" # Compute the critical beta angle. Anything past this is invalid. critical_beta = np.arctan(2 * pinhole_radius / pinhole_thickness) @@ -212,9 +242,13 @@ def invalidate_past_critical_beta(panel: Detector, xy_pts: np.ndarray, xy_pts[np.arccos(cos_beta) > critical_beta] = np.nan -def tth_corr_map_sample_layer(instrument, - layer_standoff, layer_thickness, - pinhole_thickness, pinhole_radius): +def tth_corr_map_sample_layer( + instrument, + layer_standoff, + layer_thickness, + pinhole_thickness, + pinhole_radius, +): """ Compute the Bragg angle distortion fields for an instrument associated with a specific sample layer in a pinhole camera. @@ -251,7 +285,7 @@ def tth_corr_map_sample_layer(instrument, # view. But that is something we could do in the future: # critical_beta = np.arctan(2 * pinhole_radius / pinhole_thickness) - zs = layer_standoff + 0.5*layer_thickness + 0.5*pinhole_thickness + zs = layer_standoff + 0.5 * layer_thickness + 0.5 * pinhole_thickness tth_corr = dict.fromkeys(instrument.detectors) for det_key, panel in instrument.detectors.items(): ref_ptth, _ = panel.pixel_angles() @@ -264,14 +298,14 @@ def tth_corr_map_sample_layer(instrument, cos_tthn = np.cos(ref_ptth.flatten()) sin_tthn = np.sin(ref_ptth.flatten()) tth_corr[det_key] = np.arctan( - sin_tthn/(instrument.source_distance*cos_beta/zs - cos_tthn) + sin_tthn / (instrument.source_distance * cos_beta / zs - cos_tthn) ).reshape(panel.shape) return tth_corr -def tth_corr_pinhole(panel, xy_pts, - pinhole_thickness, pinhole_radius, - return_nominal=True): +def tth_corr_pinhole( + panel, xy_pts, pinhole_thickness, pinhole_radius, return_nominal=True +): """ Compute the Bragg angle distortion associated with the pinhole as a source. 
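# A scalar sketch of the sample-layer correction implemented above, using
# hypothetical numbers (lengths in mm, angles in radians). zs is the offset
# from the pinhole center to the layer center and L is the source distance;
# beta is the diffracted-ray angle whose cosine the code takes from the
# unit diffraction vectors (cos_beta = -dhat_z).
import numpy as np

standoff, thickness, ph_thickness = 0.05, 0.02, 0.1
zs = standoff + 0.5 * thickness + 0.5 * ph_thickness
L = 650.0                 # hypothetical source distance
tth_n = np.radians(35.0)  # nominal two-theta from cart_to_angles
beta = np.radians(10.0)
tth_corr = np.arctan(np.sin(tth_n) / (L * np.cos(beta) / zs - np.cos(tth_n)))
print(tth_n - tth_corr)   # corrected two-theta (return_nominal=True branch)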
@@ -305,17 +339,13 @@ def tth_corr_pinhole(panel, xy_pts, cp_det = copy.deepcopy(panel) cp_det.bvec = ct.beam_vec # !!! [0, 0, -1] ref_angs, _ = cp_det.cart_to_angles( - xy_pts, - rmat_s=None, tvec_s=None, - tvec_c=None, apply_distortion=True + xy_pts, rmat_s=None, tvec_s=None, tvec_c=None, apply_distortion=True ) ref_eta = ref_angs[:, 1] # These are the nominal tth values nom_angs, _ = panel.cart_to_angles( - xy_pts, - rmat_s=None, tvec_s=None, - tvec_c=None, apply_distortion=True + xy_pts, rmat_s=None, tvec_s=None, tvec_c=None, apply_distortion=True ) nom_tth = nom_angs[:, 0] @@ -323,8 +353,8 @@ def tth_corr_pinhole(panel, xy_pts, for i, (pxy, reta) in enumerate(zip(xy_pts, ref_eta)): # !!! JHE used pinhole center, but the back surface # seems to hew a bit closer to JRR's solution - origin = -pinhole_radius*np.array( - [np.cos(reta), np.sin(reta), 0.5*pinhole_thickness] + origin = -pinhole_radius * np.array( + [np.cos(reta), np.sin(reta), 0.5 * pinhole_thickness] ) angs, _ = panel.cart_to_angles(np.atleast_2d(pxy), tvec_c=origin) pin_tth[i] = angs[:, 0] @@ -377,8 +407,8 @@ def tth_corr_map_pinhole(instrument, pinhole_thickness, pinhole_radius): for i, (pxy, reta) in enumerate(zip(pcrds, ref_peta)): # !!! JHE used pinhole center, but the back surface # seems to hew a bit closer to JRR's solution - origin = -pinhole_radius*np.array( - [np.cos(reta), np.sin(reta), 0.5*pinhole_thickness] + origin = -pinhole_radius * np.array( + [np.cos(reta), np.sin(reta), 0.5 * pinhole_thickness] ) angs, _ = panel.cart_to_angles(np.atleast_2d(pxy), tvec_c=origin) new_ptth[i] = angs[:, 0] @@ -391,10 +421,10 @@ def calc_phi_x(bvec, eHat_l): returns phi_x in RADIANS """ bv = np.array(bvec) - bv[2] = 0. + bv[2] = 0.0 bv_norm = np.linalg.norm(bv) if np.isclose(bv_norm, 0): - return 0. + return 0.0 else: bv = bv / bv_norm return np.arccos(np.dot(bv, -eHat_l)).item() @@ -453,7 +483,7 @@ def _infer_eHat_l(panel): eHat_l_dict = { 'TARDIS': -ct.lab_x.reshape((3, 1)), - 'PXRDIP': -ct.lab_x.reshape((3, 1)) + 'PXRDIP': -ct.lab_x.reshape((3, 1)), } return eHat_l_dict[instr_type] @@ -470,9 +500,16 @@ def _infer_eta_shift(panel): return eta_shift_dict[instr_type] -def calc_tth_rygg_pinhole(panels, absorption_length, tth, eta, - pinhole_thickness, pinhole_radius, - num_phi_elements=60, clip_to_panel=True): +def calc_tth_rygg_pinhole( + panels, + absorption_length, + tth, + eta, + pinhole_thickness, + pinhole_radius, + num_phi_elements=60, + clip_to_panel=True, +): """Return pinhole twotheta [rad] and effective scattering volume [mm3]. 
num_phi_elements: number of pinhole phi elements for integration @@ -523,8 +560,9 @@ def calc_tth_rygg_pinhole(panels, absorption_length, tth, eta, mu_p = 1000 * mu_p # convert to [mm^-1] # Convert tth and eta to phi_d, beta, and r_d - dvec_arg = np.vstack((tth.flatten(), eta.flatten(), - np.zeros(np.prod(eta.shape)))) + dvec_arg = np.vstack( + (tth.flatten(), eta.flatten(), np.zeros(np.prod(eta.shape))) + ) dvectors = xfcapi.angles_to_dvec(dvec_arg.T, bvec, eta_vec=eHat_l) v0 = np.array([0, 0, 1]) @@ -556,7 +594,7 @@ def calc_tth_rygg_pinhole(panels, absorption_length, tth, eta, dvecs = panel.cart_to_dvecs(cart) full_dvecs = dvecs.T.reshape(3, *tth.shape).T - panel_r_d = np.sqrt(np.sum((full_dvecs)**2, axis=2)).T + panel_r_d = np.sqrt(np.sum((full_dvecs) ** 2, axis=2)).T # Only overwrite positions that are still nan on r_d r_d[np.isnan(r_d)] = panel_r_d[np.isnan(r_d)] @@ -583,24 +621,28 @@ def calc_tth_rygg_pinhole(panels, absorption_length, tth, eta, phi_vec = np.arange(dphi / 2, 2 * np.pi, dphi) # includes elements for X and D edges - z_vec = np.arange(-h_p/2 - dz/2, h_p/2 + dz/1.999, dz) - z_vec[0] = -h_p/2 # X-side edge (negative z) - z_vec[-1] = h_p/2 # D-side edge (positive z) + z_vec = np.arange(-h_p / 2 - dz / 2, h_p / 2 + dz / 1.999, dz) + z_vec[0] = -h_p / 2 # X-side edge (negative z) + z_vec[-1] = h_p / 2 # D-side edge (positive z) phi_i, z_i = np.meshgrid(phi_vec, z_vec) # [Nz x Np] - phi_i = phi_i[:, :, None, None] # [Nz x Np x 1 x 1] - z_i = z_i[:, :, None, None] # axes 0,1 => P; axes 2,3 => D + phi_i = phi_i[:, :, None, None] # [Nz x Np x 1 x 1] + z_i = z_i[:, :, None, None] # axes 0,1 => P; axes 2,3 => D # ------ calculate twotheta_i [a.k.a. qq_i], for each grid element ------ - bx, bd = (d_p / (2 * r_x), d_p / (2 * r_d)) + bx, bd = (d_p / (2 * r_x), d_p / (2 * r_d)) sin_a, cos_a, tan_a = np.sin(alpha), np.cos(alpha), np.tan(alpha) - sin_b, cos_b, tan_b = np.sin(beta), np.cos(beta), np.tan(beta) + sin_b, cos_b, tan_b = np.sin(beta), np.cos(beta), np.tan(beta) sin_phii, cos_phii = np.sin(phi_i), np.cos(phi_i) cos_dphi_x = np.cos(phi_i - phi_x + np.pi) # [Nz x Np x Nu x Nv] - alpha_i = np.arctan2(np.sqrt(sin_a**2 + 2*bx*sin_a*cos_dphi_x + bx**2), - cos_a + z_i/r_x) - phi_xi = np.arctan2(sin_a * np.sin(phi_x) - bx*sin_phii, - sin_a * np.cos(phi_x) - bx * cos_phii) + alpha_i = np.arctan2( + np.sqrt(sin_a**2 + 2 * bx * sin_a * cos_dphi_x + bx**2), + cos_a + z_i / r_x, + ) + phi_xi = np.arctan2( + sin_a * np.sin(phi_x) - bx * sin_phii, + sin_a * np.cos(phi_x) - bx * cos_phii, + ) # !!! This section used 4D arrays before, which was very time consuming # for large grids. Instead, we now loop over the columns and do them @@ -694,26 +736,51 @@ def _compute_qq_p(use_numba=True, *args, **kwargs): with np.errstate(divide='ignore', invalid='ignore'): # Ignore the errors this will inevitably produce - return np.nansum(V_i * qq_i, - axis=(0, 1)) / V_p # [Nu x Nv] <= detector - - -def _compute_vi_qq_i(phi_d, sin_b, bd, sin_phii, cos_phii, alpha_i, phi_xi, - sin_a, cos_dphi_x, cos_a, cos_b, dV_s, dV_e, z_i, h_p, - d_p, tan_b, tan_a, phi_i, r_d): + return ( + np.nansum(V_i * qq_i, axis=(0, 1)) / V_p + ) # [Nu x Nv] <= detector + + +def _compute_vi_qq_i( + phi_d, + sin_b, + bd, + sin_phii, + cos_phii, + alpha_i, + phi_xi, + sin_a, + cos_dphi_x, + cos_a, + cos_b, + dV_s, + dV_e, + z_i, + h_p, + d_p, + tan_b, + tan_a, + phi_i, + r_d, +): # This function can be numbafied, and has a numbafied version below. 
# Compute V_i and qq_i cos_dphi_d = np.cos(phi_i - phi_d + np.pi) - beta_i = np.arctan2(np.sqrt(sin_b**2 + 2*bd*sin_b*cos_dphi_d + bd**2), - cos_b - z_i/r_d) + beta_i = np.arctan2( + np.sqrt(sin_b**2 + 2 * bd * sin_b * cos_dphi_d + bd**2), + cos_b - z_i / r_d, + ) - phi_di = np.arctan2(sin_b * np.sin(phi_d) - bd*sin_phii, - sin_b * np.cos(phi_d) - bd * cos_phii) + phi_di = np.arctan2( + sin_b * np.sin(phi_d) - bd * sin_phii, + sin_b * np.cos(phi_d) - bd * cos_phii, + ) - arg = (np.cos(alpha_i) * np.cos(beta_i) - np.sin(alpha_i) * - np.sin(beta_i) * np.cos(phi_di - phi_xi)) + arg = np.cos(alpha_i) * np.cos(beta_i) - np.sin(alpha_i) * np.sin( + beta_i + ) * np.cos(phi_di - phi_xi) # scattering angle for each P to each D qq_i = np.arccos(np.clip(arg, -1, 1)) @@ -733,12 +800,14 @@ def _compute_vi_qq_i(phi_d, sin_b, bd, sin_phii, cos_phii, alpha_i, phi_xi, # ------ visibility of each grid element ------ # pinhole surface - is_seen = np.logical_and(z_i > h_p/2 - d_p/tan_b * cos_dphi_d, - z_i < -h_p/2 + d_p/tan_a * cos_dphi_x) + is_seen = np.logical_and( + z_i > h_p / 2 - d_p / tan_b * cos_dphi_d, + z_i < -h_p / 2 + d_p / tan_a * cos_dphi_x, + ) # X-side edge - is_seen[0] = np.where(h_p/d_p * tan_b < cos_dphi_d[0], 1, 0) + is_seen[0] = np.where(h_p / d_p * tan_b < cos_dphi_d[0], 1, 0) # D-side edge - is_seen[-1] = np.where(h_p/d_p * tan_a < cos_dphi_x[-1], 1, 0) + is_seen[-1] = np.where(h_p / d_p * tan_a < cos_dphi_x[-1], 1, 0) # ------ weighted sum over elements to obtain average ------ V_i *= is_seen # zero weight to elements with no view of both X and D @@ -746,25 +815,35 @@ def _compute_vi_qq_i(phi_d, sin_b, bd, sin_phii, cos_phii, alpha_i, phi_xi, # The numba version (works better in conjunction with multi-threading) -_compute_vi_qq_i_numba = njit( - nogil=True, cache=True)(_compute_vi_qq_i) - - -def tth_corr_rygg_pinhole(panel, absorption_length, xy_pts, - pinhole_thickness, pinhole_radius, - return_nominal=True, num_phi_elements=60): +_compute_vi_qq_i_numba = njit(nogil=True, cache=True)(_compute_vi_qq_i) + + +def tth_corr_rygg_pinhole( + panel, + absorption_length, + xy_pts, + pinhole_thickness, + pinhole_radius, + return_nominal=True, + num_phi_elements=60, +): # These are the nominal tth values nom_angs, _ = panel.cart_to_angles( - xy_pts, - rmat_s=None, tvec_s=None, - tvec_c=None, apply_distortion=True + xy_pts, rmat_s=None, tvec_s=None, tvec_c=None, apply_distortion=True ) nom_tth, nom_eta = nom_angs[:, :2].T # Don't clip these values to the panel because they will be shifted qq_p = calc_tth_rygg_pinhole( - panel, absorption_length, nom_tth, nom_eta, pinhole_thickness, - pinhole_radius, num_phi_elements, clip_to_panel=False) + panel, + absorption_length, + nom_tth, + nom_eta, + pinhole_thickness, + pinhole_radius, + num_phi_elements, + clip_to_panel=False, + ) # Make the distortion shift to the left instead of the right # FIXME: why is qq_p shifting the data to the right instead of the left? 
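# The njit(...)(func) call above compiles an already-defined function
# without decorating its definition, so the pure-Python version remains
# available (e.g. for debugging) next to the compiled one. A self-contained
# sketch of the same idiom, assuming numba is installed:
import numpy as np
from numba import njit


def _sum_of_squares(a):
    total = 0.0
    for x in a:
        total += x * x
    return total


_sum_of_squares_numba = njit(nogil=True, cache=True)(_sum_of_squares)

a = np.arange(1000.0)
assert np.isclose(_sum_of_squares(a), _sum_of_squares_numba(a))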
@@ -783,23 +862,49 @@ def tth_corr_rygg_pinhole(panel, absorption_length, xy_pts, return angs -def tth_corr_map_rygg_pinhole(instrument, absorption_length, pinhole_thickness, - pinhole_radius, num_phi_elements=60): +def tth_corr_map_rygg_pinhole( + instrument, + absorption_length, + pinhole_thickness, + pinhole_radius, + num_phi_elements=60, +): tth_corr = {} for det_key, panel in instrument.detectors.items(): nom_ptth, nom_peta = panel.pixel_angles() qq_p = calc_tth_rygg_pinhole( - panel, absorption_length, nom_ptth, nom_peta, pinhole_thickness, - pinhole_radius, num_phi_elements) + panel, + absorption_length, + nom_ptth, + nom_peta, + pinhole_thickness, + pinhole_radius, + num_phi_elements, + ) tth_corr[det_key] = nom_ptth - qq_p return tth_corr -def polar_tth_corr_map_rygg_pinhole(tth, eta, instrument, absorption_length, - pinhole_thickness, pinhole_radius, - num_phi_elements=60): +def polar_tth_corr_map_rygg_pinhole( + tth, + eta, + instrument, + absorption_length, + pinhole_thickness, + pinhole_radius, + num_phi_elements=60, +): """Generate a polar tth corr map directly for all panels""" panels = list(instrument.detectors.values()) - return calc_tth_rygg_pinhole(panels, absorption_length, tth, eta, - pinhole_thickness, pinhole_radius, - num_phi_elements) - tth + return ( + calc_tth_rygg_pinhole( + panels, + absorption_length, + tth, + eta, + pinhole_thickness, + pinhole_radius, + num_phi_elements, + ) + - tth + ) diff --git a/hexrd/hed/xrdutil/utils.py b/hexrd/hed/xrdutil/utils.py index 2ebb5c936..073064818 100644 --- a/hexrd/hed/xrdutil/utils.py +++ b/hexrd/hed/xrdutil/utils.py @@ -28,6 +28,7 @@ from typing import Optional, Union, Any, Generator + # TODO: Resolve extra-workflow dependency from hexrd.hedm.material.crystallography import PlaneData from hexrd.core.distortion.distortionabc import DistortionABC @@ -509,9 +510,8 @@ def _filter_hkls_eta_ome( angMask_eta = np.zeros(len(angles), dtype=bool) for etas in eta_range: angMask_eta = np.logical_or( - angMask_eta, xfcapi.validate_angle_ranges( - angles[:, 1], etas[0], etas[1] - ) + angMask_eta, + xfcapi.validate_angle_ranges(angles[:, 1], etas[0], etas[1]), ) ccw = True @@ -992,8 +992,13 @@ def simulateGVecs( # first find valid G-vectors angList = np.vstack( xfcapi.oscill_angles_of_hkls( - full_hkls[:, 1:], chi, rMat_c, bMat, wlen, v_inv=vInv_s, - beam_vec=beam_vector + full_hkls[:, 1:], + chi, + rMat_c, + bMat, + wlen, + v_inv=vInv_s, + beam_vec=beam_vector, ) ) allAngs, allHKLs = _filter_hkls_eta_ome( @@ -1009,8 +1014,15 @@ def simulateGVecs( else: # ??? preallocate for speed? det_xy, rMat_ss, _ = _project_on_detector_plane( - allAngs, rMat_d, rMat_c, chi, tVec_d, tVec_c, tVec_s, distortion, - beamVec=beam_vector + allAngs, + rMat_d, + rMat_c, + chi, + tVec_d, + tVec_c, + tVec_s, + distortion, + beamVec=beam_vector, ) on_panel = np.logical_and( @@ -1473,7 +1485,7 @@ def make_reflection_patches( def extract_detector_transformation( - detector_params: Union[dict[str, Any], np.ndarray] + detector_params: Union[dict[str, Any], np.ndarray], ) -> tuple[np.ndarray, np.ndarray, float, np.ndarray]: """ Construct arrays from detector parameters. diff --git a/hexrd/hedm/cli/__init__.py b/hexrd/hedm/cli/__init__.py index 09b5763f7..634ae5cd1 100644 --- a/hexrd/hedm/cli/__init__.py +++ b/hexrd/hedm/cli/__init__.py @@ -7,5 +7,4 @@ # Consult LICENSE.txt or http://opensource.org/licenses/BSD-3-Clause. 
- # from .main import main diff --git a/hexrd/hedm/cli/documentation.py b/hexrd/hedm/cli/documentation.py index 69c2b481a..4ac2559aa 100644 --- a/hexrd/hedm/cli/documentation.py +++ b/hexrd/hedm/cli/documentation.py @@ -12,4 +12,5 @@ def execute(args, parser): import webbrowser import hexrd + webbrowser.open_new_tab(hexrd.doc_url) diff --git a/hexrd/hedm/cli/find_orientations.py b/hexrd/hedm/cli/find_orientations.py index efad7ec46..8e299d08e 100644 --- a/hexrd/hedm/cli/find_orientations.py +++ b/hexrd/hedm/cli/find_orientations.py @@ -10,7 +10,10 @@ from hexrd.hedm import config from hexrd.core import instrument from hexrd.core.transforms import xfcapi -from hexrd.hedm.findorientations import find_orientations, write_scored_orientations +from hexrd.hedm.findorientations import ( + find_orientations, + write_scored_orientations, +) descr = 'Process rotation image series to find grain orientations' @@ -22,36 +25,42 @@ def configure_parser(sub_parsers): p = sub_parsers.add_parser( - 'find-orientations', - description=descr, - help=descr - ) - p.add_argument( - 'yml', type=str, - help='YAML configuration file' - ) + 'find-orientations', description=descr, help=descr + ) + p.add_argument('yml', type=str, help='YAML configuration file') p.add_argument( - '-q', '--quiet', action='store_true', - help="don't report progress in terminal" - ) + '-q', + '--quiet', + action='store_true', + help="don't report progress in terminal", + ) p.add_argument( - '-f', '--force', action='store_true', - help='overwrites existing analysis' - ) + '-f', + '--force', + action='store_true', + help='overwrites existing analysis', + ) p.add_argument( - '-c', '--clean', action='store_true', - help='overwrites existing analysis, including maps' - ) + '-c', + '--clean', + action='store_true', + help='overwrites existing analysis, including maps', + ) p.add_argument( - '--hkls', metavar='HKLs', type=str, default=None, + '--hkls', + metavar='HKLs', + type=str, + default=None, help="""\ list hkl entries in the materials file to use for fitting; - if None, defaults to list specified in the yml file""" - ) + if None, defaults to list specified in the yml file""", + ) p.add_argument( - '-p', '--profile', action='store_true', + '-p', + '--profile', + action='store_true', help='runs the analysis with cProfile enabled', - ) + ) p.set_defaults(func=execute) @@ -61,16 +70,15 @@ def write_results(results, cfg): # Write accepted orientations. qbar_filename = str(cfg.find_orientations.accepted_orientations_file) - np.savetxt(qbar_filename, results['qbar'].T, - fmt='%.18e', delimiter='\t') + np.savetxt(qbar_filename, results['qbar'].T, fmt='%.18e', delimiter='\t') # Write grains.out. gw = instrument.GrainDataWriter(cfg.find_orientations.grains_file) for gid, q in enumerate(results['qbar'].T): - phi = 2*np.arccos(q[0]) + phi = 2 * np.arccos(q[0]) n = xfcapi.unit_vector(q[1:]) - grain_params = np.hstack([phi*n, const.zeros_3, const.identity_6x1]) - gw.dump_grain(gid, 1., 0., grain_params) + grain_params = np.hstack([phi * n, const.zeros_3, const.identity_6x1]) + gw.dump_grain(gid, 1.0, 0.0, grain_params) gw.close() @@ -93,7 +101,7 @@ def execute(args, parser): ch.setLevel(logging.CRITICAL if args.quiet else log_level) ch.setFormatter( logging.Formatter('%(asctime)s - %(message)s', '%y-%m-%d %H:%M:%S') - ) + ) logger.addHandler(ch) logger.info('=== begin find-orientations ===') @@ -106,7 +114,7 @@ def execute(args, parser): if (quats_f.exists()) and not (args.force or args.clean): logger.error( '%s already exists. 
Change yml file or specify "force" or "clean"',
- quats_f
+ quats_f,
)
sys.exit()
@@ -119,10 +127,9 @@ def execute(args, parser):
fh.setLevel(log_level)
fh.setFormatter(
logging.Formatter(
- '%(asctime)s - %(name)s - %(message)s',
- '%m-%d %H:%M:%S'
- )
+ '%(asctime)s - %(name)s - %(message)s', '%m-%d %H:%M:%S'
)
+ )
logger.info("logging to %s", logfile)
logger.addHandler(fh)
@@ -130,15 +137,13 @@ def execute(args, parser):
import cProfile as profile
import pstats
from io import StringIO
+
pr = profile.Profile()
pr.enable()

# process the data
results = find_orientations(
- cfg,
- hkls=args.hkls,
- clean=args.clean,
- profile=args.profile
+ cfg, hkls=args.hkls, clean=args.clean, profile=args.profile
)

# Write out the results
diff --git a/hexrd/hedm/cli/fit_grains.py b/hexrd/hedm/cli/fit_grains.py
index 1018f433e..3860b60a5 100644
--- a/hexrd/hedm/cli/fit_grains.py
+++ b/hexrd/hedm/cli/fit_grains.py
@@ -22,7 +22,13 @@
"""
_flds = [
- "id", "completeness", "chisq", "expmap", "centroid", "inv_Vs", "ln_Vs"
+ "id",
+ "completeness",
+ "chisq",
+ "expmap",
+ "centroid",
+ "inv_Vs",
+ "ln_Vs",
]
_BaseGrainData = namedtuple("_BaseGrainData", _flds)
del _flds
@@ -65,7 +71,7 @@ def from_grains_out(cls, fname):
def from_array(cls, a):
"""Return GrainData instance from numpy datatype array"""
return cls(
- id=a[:,0].astype(int),
+ id=a[:, 0].astype(int),
completeness=a[:, 1],
chisq=a[:, 2],
expmap=a[:, 3:6],
@@ -93,13 +99,12 @@ def num_grains(self):
@property
def quaternions(self):
- """Return quaternions as array(num_grains, 4).
- """
+ """Return quaternions as array(num_grains, 4)."""
return rotations.quatOfExpMap(self.expmap.T).T

@property
def rotation_matrices(self):
- """"Return rotation matrices from exponential map parameters"""
+ """Return rotation matrices from exponential map parameters"""
#
# Compute the rotation matrices only once, the first time this is
# called, and save the results.
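# A sketch of the compute-once caching described in the comment above,
# using a hypothetical class; the real GrainData delegates the conversion
# to hexrd's rotations module. Rodrigues' formula turns an exponential-map
# vector phi*n into a rotation matrix.
import numpy as np


class GrainOrientation:
    def __init__(self, expmap):
        self.expmap = np.asarray(expmap, dtype=float)
        self._rmat = None

    @property
    def rotation_matrix(self):
        if self._rmat is None:  # computed on first access, then reused
            self._rmat = self._compute_rmat()
        return self._rmat

    def _compute_rmat(self):
        phi = np.linalg.norm(self.expmap)
        if np.isclose(phi, 0.0):
            return np.eye(3)
        n = self.expmap / phi
        K = np.array([
            [0.0, -n[2], n[1]],
            [n[2], 0.0, -n[0]],
            [-n[1], n[0], 0.0],
        ])
        return np.eye(3) + np.sin(phi) * K + (1.0 - np.cos(phi)) * (K @ K)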
@@ -140,44 +145,55 @@ def select(self, min_completeness=0.0, max_chisq=None):
sel = sel_comp & (self.chisq <= max_chisq) if has_chisq else sel_comp

return __class__(
- self.id[sel], self.completeness[sel], self.chisq[sel],
- self.expmap[sel], self.centroid[sel], self.inv_Vs[sel],
- self.ln_Vs[sel]
+ self.id[sel],
+ self.completeness[sel],
+ self.chisq[sel],
+ self.expmap[sel],
+ self.centroid[sel],
+ self.inv_Vs[sel],
+ self.ln_Vs[sel],
)

def configure_parser(sub_parsers):
p = sub_parsers.add_parser('fit-grains', description=descr, help=descr)
+ p.add_argument('yml', type=str, help='YAML configuration file')
p.add_argument(
- 'yml', type=str,
- help='YAML configuration file'
- )
+ '-g',
+ '--grains',
+ type=str,
+ default=None,
+ help="comma-separated list of IDs to refine, defaults to all",
+ )
p.add_argument(
- '-g', '--grains', type=str, default=None,
- help="comma-separated list of IDs to refine, defaults to all"
- )
- p.add_argument(
- '-q', '--quiet', action='store_true',
- help="don't report progress in terminal"
- )
+ '-q',
+ '--quiet',
+ action='store_true',
+ help="don't report progress in terminal",
+ )
p.add_argument(
- '-c', '--clean', action='store_true',
- help='overwrites existing analysis, uses initial orientations'
- )
+ '-c',
+ '--clean',
+ action='store_true',
+ help='overwrites existing analysis, uses initial orientations',
+ )
p.add_argument(
- '-f', '--force', action='store_true',
- help='overwrites existing analysis'
- )
+ '-f',
+ '--force',
+ action='store_true',
+ help='overwrites existing analysis',
+ )
p.add_argument(
- '-p', '--profile', action='store_true',
+ '-p',
+ '--profile',
+ action='store_true',
help='runs the analysis with cProfile enabled',
- )
+ )
p.set_defaults(func=execute)

def write_results(
- fit_results, cfg,
- grains_filename='grains.out', grains_npz='grains.npz'
+ fit_results, cfg, grains_filename='grains.out', grains_npz='grains.npz'
):
instr = cfg.instrument.hedm
nfit = len(fit_results)
@@ -187,7 +203,7 @@ def write_results(
for det_key in instr.detectors:
(cfg.analysis_dir / det_key).mkdir(parents=True, exist_ok=True)

- gw = instrument.GrainDataWriter(str(cfg.analysis_dir /grains_filename))
+ gw = instrument.GrainDataWriter(str(cfg.analysis_dir / grains_filename))
gd_array = np.zeros((nfit, 21))
gwa = instrument.GrainDataWriter(array=gd_array)
for fit_result in fit_results:
@@ -242,9 +258,11 @@ def execute(args, parser):
if have_orientations:
try:
qbar = np.loadtxt(quats_f, ndmin=2).T
- except(IOError):
- raise(RuntimeError,
- "error loading indexing results '%s'" % quats_f)
+ except IOError:
+ raise RuntimeError(
+ "error loading indexing results '%s'"
+ % quats_f
+ )
else:
logger.info("Missing %s, running find-orientations", quats_f)
logger.removeHandler(ch)
@@ -263,8 +281,8 @@ def execute(args, parser):
logger.error(
'Analysis "%s" already exists. '
'Change yml file or specify "force"',
- cfg.analysis_name
- )
+ cfg.analysis_name,
+ )
sys.exit()

# Set up analysis directory and output directories.
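# The grains tables written above seed each grain from a unit quaternion
# q = (w, x, y, z): phi = 2*arccos(w) is the rotation angle, the normalized
# vector part is the axis, and phi*n is the exponential map stored in
# grain_params. A standalone numeric check of that conversion:
import numpy as np

q = np.array([np.cos(0.15), 0.0, 0.0, np.sin(0.15)])  # 0.3 rad about z
phi = 2.0 * np.arccos(q[0])
n = q[1:] / np.linalg.norm(q[1:])
assert np.isclose(phi, 0.3)
assert np.allclose(phi * n, [0.0, 0.0, 0.3])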
@@ -288,9 +306,7 @@ def execute(args, parser):
maxtth = np.radians(cfg.fit_grains.tth_max)
excl_p = excl_p._replace(tthmax=maxtth)

- cfg.material.plane_data.exclude(
- **excl_p._asdict()
- )
+ cfg.material.plane_data.exclude(**excl_p._asdict())
using_nhkls = np.count_nonzero(
np.logical_not(cfg.material.plane_data.exclusions)
)
@@ -303,9 +319,8 @@ def execute(args, parser):
fh = logging.FileHandler(logfile, mode='w')
fh.setLevel(log_level)
ff = logging.Formatter(
- '%(asctime)s - %(name)s - %(message)s',
- '%m-%d %H:%M:%S'
- )
+ '%(asctime)s - %(name)s - %(message)s', '%m-%d %H:%M:%S'
+ )
fh.setFormatter(ff)
logger.info("logging to %s", logfile)
logger.addHandler(fh)
@@ -318,7 +333,6 @@ def execute(args, parser):
pr = profile.Profile()
pr.enable()

-
# some conditionals for arg handling
existing_analysis = grains_filename.exists()
fit_estimate = cfg.fit_grains.estimate
@@ -352,29 +366,32 @@ def execute(args, parser):
# grains.out file
gw = instrument.GrainDataWriter(grains_filename)
for i_g, q in enumerate(qbar.T):
- phi = 2*np.arccos(q[0])
+ phi = 2 * np.arccos(q[0])
n = xfcapi.unit_vector(q[1:])
grain_params = np.hstack(
- [phi*n, cnst.zeros_3, cnst.identity_6x1]
+ [phi * n, cnst.zeros_3, cnst.identity_6x1]
)
- gw.dump_grain(int(i_g), 1., 0., grain_params)
+ gw.dump_grain(int(i_g), 1.0, 0.0, grain_params)
gw.close()
- except(IOError):
- raise(RuntimeError,
- "indexing results '%s' not found!"
- % str(grains_filename))
+ except IOError:
+ raise RuntimeError(
+ "indexing results '%s' not found!"
+ % str(grains_filename)
+ )
elif force_with_estimate or new_with_estimate:
grains_filename = fit_estimate
logger.info("using initial estimate '%s'", fit_estimate)
elif existing_analysis and not clobber:
- raise(RuntimeError,
- "fit results '%s' exist, " % grains_filename
- + "but --clean or --force options not specified")
+ raise RuntimeError(
+ "fit results '%s' exist, " % grains_filename
+ + "but --clean or --force options not specified"
+ )

# get grain parameters by loading grains table
try:
grains_table = np.loadtxt(grains_filename, ndmin=2)
- except(IOError):
+ except IOError:
raise RuntimeError("problem loading '%s'" % grains_filename)

# process the data
@@ -387,7 +404,7 @@ def execute(args, parser):
grains_table,
show_progress=not args.quiet,
ids_to_refine=gid_list,
- )
+ )

if args.profile:
pr.disable()
diff --git a/hexrd/hedm/cli/help.py b/hexrd/hedm/cli/help.py
index 5a7afbe90..0f2a09dc5 100644
--- a/hexrd/hedm/cli/help.py
+++ b/hexrd/hedm/cli/help.py
@@ -1,18 +1,15 @@
-
-
descr = "Displays a list of available hexrd commands and their help strings."
+
def configure_parser(sub_parsers):
- p = sub_parsers.add_parser('help',
- description = descr,
- help = descr)
+ p = sub_parsers.add_parser('help', description=descr, help=descr)

p.add_argument(
'command',
- metavar = 'COMMAND',
- action = "store",
- nargs = '?',
- help = "print help information for COMMAND "
- "(same as: conda COMMAND -h)",
+ metavar='COMMAND',
+ action="store",
+ nargs='?',
+ help="print help information for COMMAND "
+ "(same as: hexrd COMMAND -h)",
)
p.set_defaults(func=execute)
diff --git a/hexrd/hedm/cli/pickle23.py b/hexrd/hedm/cli/pickle23.py
index 34b23712d..e499e67a2 100644
--- a/hexrd/hedm/cli/pickle23.py
+++ b/hexrd/hedm/cli/pickle23.py
@@ -1,4 +1,5 @@
"""Convert python 2 hexrd pickles to hexrd3"""
+
import sys
import shutil
@@ -8,15 +9,10 @@
def configure_parser(sub_parsers):
- p = sub_parsers.add_parser('pickle23',
- description = descr,
- help = descr)
+ p = sub_parsers.add_parser('pickle23', description=descr, help=descr)
p.set_defaults(func=execute)

- p.add_argument(
- 'file', type=str,
- help='name of file to convert'
- )
+ p.add_argument('file', type=str, help='name of file to convert')

def execute(args, p):
@@ -24,9 +20,9 @@ def execute(args, p):
fname = args.file
fback = fname + ".bak"
shutil.copy(fname, fback)
- with open(fname, "w") as fnew:
+ with open(fname, "w") as fnew:
with open(fback, "r") as f:
- for l in f:
+ for l in f:
l = l.replace('hexrd.xrd.', 'hexrd.')
fnew.write(l)
return
diff --git a/hexrd/hedm/cli/test.py b/hexrd/hedm/cli/test.py
index 9387e9525..7d92a854b 100644
--- a/hexrd/hedm/cli/test.py
+++ b/hexrd/hedm/cli/test.py
@@ -1,6 +1,5 @@
"""Command to run tests"""

-
descr = 'runs the hexrd test suite'
example = """
examples:
@@ -9,17 +8,19 @@
def configure_parser(sub_parsers):
- p = sub_parsers.add_parser('test', description = descr, help = descr)
+ p = sub_parsers.add_parser('test', description=descr, help=descr)
p.set_defaults(func=execute)

p.add_argument(
- '-v', '--verbose', action='store_true',
- help="report detailed results in terminal"
- )
+ '-v',
+ '--verbose',
+ action='store_true',
+ help="report detailed results in terminal",
+ )

def execute(args, parser):
import unittest

suite = unittest.TestLoader().discover('hexrd')
- unittest.TextTestRunner(verbosity = args.verbose + 1).run(suite)
+ unittest.TextTestRunner(verbosity=args.verbose + 1).run(suite)
diff --git a/hexrd/hedm/config/dumper.py b/hexrd/hedm/config/dumper.py
index 92d3596da..4067ed93c 100644
--- a/hexrd/hedm/config/dumper.py
+++ b/hexrd/hedm/config/dumper.py
@@ -8,7 +8,7 @@ def _dict_path_by_id(d, value, path=()):
return path
elif isinstance(d, dict):
for k, v in d.items():
- p = _dict_path_by_id(v, value, path + (k, ))
+ p = _dict_path_by_id(v, value, path + (k,))
if p is not None:
return p
elif isinstance(d, list):
@@ -32,6 +32,7 @@ class NumPyIncludeDumper(yaml.Dumper):
The ndarray would be saved in foo/bar.npy.
""" + def __init__(self, stream, **kwargs): super().__init__(stream, **kwargs) @@ -58,5 +59,6 @@ def represent(self, data): return super().represent(data) -NumPyIncludeDumper.add_representer(np.ndarray, - NumPyIncludeDumper.ndarray_representer) +NumPyIncludeDumper.add_representer( + np.ndarray, NumPyIncludeDumper.ndarray_representer +) diff --git a/hexrd/hedm/config/findorientations.py b/hexrd/hedm/config/findorientations.py index 09a78a315..1b86e2ab4 100644 --- a/hexrd/hedm/config/findorientations.py +++ b/hexrd/hedm/config/findorientations.py @@ -13,12 +13,16 @@ # TODO: set these as defaults seed_search_methods = { 'label': dict(filter_radius=1, threshold=1), - 'blob_log': dict(min_sigma=0.5, max_sigma=5, - num_sigma=10, threshold=0.01, - overlap=0.1), - 'blob_dog': dict(min_sigma=0.5, max_sigma=5, - sigma_ratio=1.6, - threshold=0.01, overlap=0.1) + 'blob_log': dict( + min_sigma=0.5, max_sigma=5, num_sigma=10, threshold=0.01, overlap=0.1 + ), + 'blob_dog': dict( + min_sigma=0.5, + max_sigma=5, + sigma_ratio=1.6, + threshold=0.01, + overlap=0.1, + ), } @@ -50,9 +54,7 @@ def accepted_orientations_file(self): newname = f"accepted-orientations-{actmat}.dat" aof_path = self.parent.analysis_dir / newname else: - oldname = ( - 'accepted_orientations_%s.dat' % self.parent.analysis_id - ) + oldname = 'accepted_orientations_%s.dat' % self.parent.analysis_id aof_path = self.parent.working_dir / oldname return aof_path @@ -98,16 +100,13 @@ def use_quaternion_grid(self): temp = os.path.join(self._cfg.working_dir, temp) if os.path.isfile(temp): return temp - raise IOError( - '"%s": "%s" does not exist' % (key, temp) - ) + raise IOError('"%s": "%s" does not exist' % (key, temp)) @property def extract_measured_g_vectors(self): return self._cfg.get( - 'find_orientations:extract_measured_g_vectors', - False - ) + 'find_orientations:extract_measured_g_vectors', False + ) class ClusteringConfig(Config): @@ -122,7 +121,7 @@ def algorithm(self): raise RuntimeError( '"%s": "%s" not recognized, must be one of %s' % (key, temp, choices) - ) + ) @property def completeness(self): @@ -130,9 +129,7 @@ def completeness(self): temp = self._cfg.get(key, None) if temp is not None: return temp - raise RuntimeError( - '"%s" must be specified' % key - ) + raise RuntimeError('"%s" must be specified' % key) @property def radius(self): @@ -140,9 +137,7 @@ def radius(self): temp = self._cfg.get(key, None) if temp is not None: return temp - raise RuntimeError( - '"%s" must be specified' % key - ) + raise RuntimeError('"%s" must be specified' % key) class OmegaConfig(Config): @@ -153,22 +148,21 @@ class OmegaConfig(Config): def period(self): # FIXME: this is deprecated and now set from the imageseries key = 'find_orientations:omega:period' - temp = self._cfg.get(key, [-180., 180]) - range = np.abs(temp[1]-temp[0]) + temp = self._cfg.get(key, [-180.0, 180]) + range = np.abs(temp[1] - temp[0]) logger.warning('omega period specification is deprecated') if range != 360: raise RuntimeError( '"%s": range must be 360 degrees, range of %s is %g' % (key, temp, range) - ) + ) return temp @property def tolerance(self): return self._cfg.get( - 'find_orientations:omega:tolerance', - self.tolerance_dflt - ) + 'find_orientations:omega:tolerance', self.tolerance_dflt + ) class EtaConfig(Config): @@ -178,9 +172,8 @@ class EtaConfig(Config): @property def tolerance(self): return self._cfg.get( - 'find_orientations:eta:tolerance', - self.tolerance_dflt - ) + 'find_orientations:eta:tolerance', self.tolerance_dflt + ) @property def mask(self): @@ 
-191,7 +184,9 @@ def range(self): mask = self.mask if mask is None: return mask - return np.array([[-90. + mask, 90. - mask], [90. + mask, 270. - mask]]) + return np.array( + [[-90.0 + mask, 90.0 - mask], [90.0 + mask, 270.0 - mask]] + ) class SeedSearchConfig(Config): @@ -202,41 +197,39 @@ def hkl_seeds(self): try: temp = self._cfg.get(key) if isinstance(temp, int): - temp = [temp, ] + temp = [ + temp, + ] return temp except: if self._cfg.find_orientations.use_quaternion_grid is None: raise RuntimeError( '"%s" must be defined for seeded search' % key - ) + ) @property def fiber_step(self): return self._cfg.get( 'find_orientations:seed_search:fiber_step', - self._cfg.find_orientations.omega.tolerance - ) + self._cfg.find_orientations.omega.tolerance, + ) @property def method(self): key = 'find_orientations:seed_search:method' try: temp = self._cfg.get(key) - assert len(temp) == 1., \ - "method must have exactly one key" + assert len(temp) == 1.0, "method must have exactly one key" if isinstance(temp, dict): method_spec = next(iter(list(temp.keys()))) if method_spec.lower() not in seed_search_methods: raise RuntimeError( - 'invalid seed search method "%s"' - % method_spec + 'invalid seed search method "%s"' % method_spec ) else: return temp except: - raise RuntimeError( - '"%s" must be defined for seeded search' % key - ) + raise RuntimeError('"%s" must be defined for seeded search' % key) @property def fiber_ndiv(self): @@ -249,7 +242,7 @@ class OrientationMapsConfig(Config): def active_hkls(self): temp = self._cfg.get( 'find_orientations:orientation_maps:active_hkls', default='all' - ) + ) if isinstance(temp, int): temp = [temp] if temp == 'all': @@ -260,13 +253,13 @@ def active_hkls(self): def bin_frames(self): return self._cfg.get( 'find_orientations:orientation_maps:bin_frames', default=1 - ) + ) @property def eta_step(self): return self._cfg.get( 'find_orientations:orientation_maps:eta_step', default=0.25 - ) + ) @property def file(self): @@ -287,8 +280,7 @@ def file(self): # Now check the YAML. 
temp = self._cfg.get(
- 'find_orientations:orientation_maps:file',
- default=None
+ 'find_orientations:orientation_maps:file', default=None
)
if temp is None:
return mapf
@@ -321,5 +313,6 @@ def threshold(self):
@property
def filter_maps(self):
- return self._cfg.get('find_orientations:orientation_maps:filter_maps',
- default=False)
+ return self._cfg.get(
+ 'find_orientations:orientation_maps:filter_maps', default=False
+ )
diff --git a/hexrd/hedm/config/fitgrains.py b/hexrd/hedm/config/fitgrains.py
index 7755ec0ca..432a01048 100644
--- a/hexrd/hedm/config/fitgrains.py
+++ b/hexrd/hedm/config/fitgrains.py
@@ -99,7 +99,7 @@ def refit(self):
raise RuntimeError(
'"%s" must be None, a scalar, or a list, got "%s"'
% (key, temp)
- )
+ )
if isinstance(temp, (int, float)):
temp = [temp, temp]
return temp
@@ -107,6 +107,7 @@ def refit(self):
"""
TODO: evaluate the need for this
"""
+
@property
def skip_on_estimate(self):
key = 'fit_grains:skip_on_estimate'
@@ -115,7 +116,7 @@ def skip_on_estimate(self):
return temp
raise RuntimeError(
'"%s" must be true or false, got "%s"' % (key, temp)
- )
+ )

@property
def fit_only(self):
@@ -125,7 +126,7 @@ def fit_only(self):
return temp
raise RuntimeError(
'"%s" must be true or false, got "%s"' % (key, temp)
- )
+ )

@property
def tth_max(self):
@@ -138,4 +139,4 @@ def tth_max(self):
return temp
raise RuntimeError(
'"%s" must be > 0, true, or false, got "%s"' % (key, temp)
- )
+ )
diff --git a/hexrd/hedm/config/instrument.py b/hexrd/hedm/config/instrument.py
index 651e66341..35fbebe4b 100644
--- a/hexrd/hedm/config/instrument.py
+++ b/hexrd/hedm/config/instrument.py
@@ -31,7 +31,7 @@ def hedm(self):
try:
icfg = h5py.File(self.configuration, 'r')
- except(OSError):
+ except OSError:
with open(self.configuration, 'r') as f:
icfg = yaml.load(f, Loader=NumPyIncludeLoader)
@@ -47,7 +47,7 @@ def hedm(self, icfg_fname):
"""Set the HEDMInstrument class."""
try:
icfg = h5py.File(icfg_fname, 'r')
- except(OSError):
+ except OSError:
with open(icfg_fname, 'r') as f:
icfg = yaml.load(f, Loader=NumPyIncludeLoader)
diff --git a/hexrd/hedm/config/root.py b/hexrd/hedm/config/root.py
index 3f8c17d50..c8254d8a9 100644
--- a/hexrd/hedm/config/root.py
+++ b/hexrd/hedm/config/root.py
@@ -67,8 +67,10 @@ def analysis_dir(self):
@property
def analysis_id(self):
return '_'.join(
- [self.analysis_name.strip().replace(' ', '-'),
- self.material.active.strip().replace(' ', '-')]
+ [
+ self.analysis_name.strip().replace(' ', '-'),
+ self.material.active.strip().replace(' ', '-'),
+ ]
)

@property
@@ -134,8 +136,9 @@ def multiprocessing(self):
if multiproc > ncpus:
logger.warning(
'Requested %s processes, %d available',
- multiproc, ncpus
- )
+ multiproc,
+ ncpus,
+ )
res = ncpus
else:
res = multiproc if multiproc else 1
@@ -144,17 +147,15 @@ def multiprocessing(self):
if temp < 1:
logger.warning(
'Cannot use less than 1 process, requested %d of %d',
- temp, ncpus
- )
+ temp,
+ ncpus,
+ )
res = 1
else:
res = temp
else:
temp = ncpus - 1
- logger.warning(
- "Invalid value %s for multiprocessing",
- multiproc
- )
+ logger.warning("Invalid value %s for multiprocessing", multiproc)
res = temp
return res
@@ -163,13 +164,13 @@ def multiprocessing(self, val):
isint = isinstance(val, int)
if val in ('half', 'all', -1):
self.set('multiprocessing', val)
- elif (isint and val >= 0 and val <= mp.cpu_count()):
+ elif isint and val >= 0 and val <= mp.cpu_count():
self.set('multiprocessing', int(val))
else:
raise RuntimeError(
'"multiprocessing": must be 1:%d, got %s'
% (mp.cpu_count(), val)
- )
+ )
@property def image_series(self): @@ -189,10 +190,10 @@ def image_series(self): panel = '_'.join(panel) elif panel is None: panel = shared_ims_key - except(KeyError): + except KeyError: try: panel = oms.metadata['panel'] - except(KeyError): + except KeyError: panel = shared_ims_key self._image_dict[panel] = oms diff --git a/hexrd/hedm/config/utils.py b/hexrd/hedm/config/utils.py index e31322f1b..4733a6efe 100644 --- a/hexrd/hedm/config/utils.py +++ b/hexrd/hedm/config/utils.py @@ -4,12 +4,21 @@ ExclusionParameters = namedtuple( - 'ExclusionParameters', ["dmin", "dmax", "tthmin", "tthmax", - "sfacmin", "sfacmax", "pintmin", "pintmax"] + 'ExclusionParameters', + [ + "dmin", + "dmax", + "tthmin", + "tthmax", + "sfacmin", + "sfacmax", + "pintmin", + "pintmax", + ], ) -class Null(): +class Null: pass @@ -52,22 +61,22 @@ def get_exclusion_parameters(cfg, prefix): if sfmin_dflt is not None: warnings.warn( '"min_sfac_ratio" is deprecated, use "sfacmin" instead', - DeprecationWarning + DeprecationWarning, ) # Default for reset_exclusions is True so that old config files will # produce the same behavior. - reset_exclusions= cfg.get(yaml_key("reset_exclusions"), True) + reset_exclusions = cfg.get(yaml_key("reset_exclusions"), True) - return( + return ( reset_exclusions, ExclusionParameters( - dmin = cfg.get(yaml_key("dmin"), None), - dmax = cfg.get(yaml_key("dmax"), None), - tthmin = cfg.get(yaml_key("tthmin"), None), - tthmax = cfg.get(yaml_key("tthmax"), None), - sfacmin = cfg.get(yaml_key("sfacmin"), sfmin_dflt), - sfacmax = cfg.get(yaml_key("sfacmax"), None), - pintmin = cfg.get(yaml_key("pintmin"), None), - pintmax = cfg.get(yaml_key("pintmax"), None), - ) + dmin=cfg.get(yaml_key("dmin"), None), + dmax=cfg.get(yaml_key("dmax"), None), + tthmin=cfg.get(yaml_key("tthmin"), None), + tthmax=cfg.get(yaml_key("tthmax"), None), + sfacmin=cfg.get(yaml_key("sfacmin"), sfmin_dflt), + sfacmax=cfg.get(yaml_key("sfacmax"), None), + pintmin=cfg.get(yaml_key("pintmin"), None), + pintmax=cfg.get(yaml_key("pintmax"), None), + ), ) diff --git a/hexrd/hedm/findorientations.py b/hexrd/hedm/findorientations.py index 043cd5fe1..1dbbb8a7d 100644 --- a/hexrd/hedm/findorientations.py +++ b/hexrd/hedm/findorientations.py @@ -5,6 +5,7 @@ import timeit import numpy as np + # np.seterr(over='ignore', invalid='ignore') # import tqdm @@ -26,6 +27,7 @@ try: from sklearn.cluster import dbscan from sklearn.metrics.pairwise import pairwise_distances + have_sklearn = True except ImportError: pass @@ -54,18 +56,15 @@ def write_scored_orientations(results, cfg): """ np.savez_compressed( cfg.find_orientations.orientation_maps.scored_orientations_file, - **results['scored_orientations'] + **results['scored_orientations'], ) def _process_omegas(omegaimageseries_dict): """Extract omega period and ranges from an OmegaImageseries dictionary.""" oims = next(iter(omegaimageseries_dict.values())) - ome_period = oims.omega[0, 0] + np.r_[0., 360.] - ome_ranges = [ - ([i['ostart'], i['ostop']]) - for i in oims.omegawedges.wedges - ] + ome_period = oims.omega[0, 0] + np.r_[0.0, 360.0] + ome_ranges = [([i['ostart'], i['ostop']]) for i in oims.omegawedges.wedges] return ome_period, ome_ranges @@ -97,8 +96,7 @@ def generate_orientation_fibers(cfg, eta_ome): # default values for each case? They must be specified as of now. 
method = next(iter(method_dict.keys()))
method_kwargs = method_dict[method]
- logger.info('\tusing "%s" method for fiber generation'
- % method)
+ logger.info('\tusing "%s" method for fiber generation' % method)

# crystallography data from the pd object
pd = eta_ome.planeData
@@ -108,24 +106,16 @@ def generate_orientation_fibers(cfg, eta_ome):
# !!! changed recently where iHKLList are now master hklIDs
pd_hkl_ids = eta_ome.iHKLList[seed_hkl_ids]
- pd_hkl_idx = pd.getHKLID(
- pd.getHKLs(*eta_ome.iHKLList).T,
- master=False
- )
+ pd_hkl_idx = pd.getHKLID(pd.getHKLs(*eta_ome.iHKLList).T, master=False)

seed_hkls = pd.getHKLs(*pd_hkl_ids)
seed_tths = tTh[pd_hkl_idx][seed_hkl_ids]
- logger.info('\tusing seed hkls: %s'
- % [str(i) for i in seed_hkls])
+ logger.info('\tusing seed hkls: %s' % [str(i) for i in seed_hkls])

# grab angular grid info from maps
del_ome = eta_ome.omegas[1] - eta_ome.omegas[0]
del_eta = eta_ome.etas[1] - eta_ome.etas[0]

- params = dict(
- bMat=bMat,
- chi=chi,
- csym=csym,
- fiber_ndiv=fiber_ndiv)
+ params = dict(bMat=bMat, chi=chi, csym=csym, fiber_ndiv=fiber_ndiv)

# =========================================================================
# Labeling of spots from seed hkls
@@ -144,8 +134,12 @@ def generate_orientation_fibers(cfg, eta_ome):
for i, (this_hkl, this_tth) in enumerate(zip(seed_hkls, seed_tths)):
for ispot in range(numSpots[i]):
if not np.isnan(coms[i][ispot][0]):
- ome_c = eta_ome.omeEdges[0] + (0.5 + coms[i][ispot][0])*del_ome
- eta_c = eta_ome.etaEdges[0] + (0.5 + coms[i][ispot][1])*del_eta
+ ome_c = (
+ eta_ome.omeEdges[0] + (0.5 + coms[i][ispot][0]) * del_ome
+ )
+ eta_c = (
+ eta_ome.etaEdges[0] + (0.5 + coms[i][ispot][1]) * del_eta
+ )
input_p.append(np.hstack([this_hkl, this_tth, eta_c, ome_c]))

# do the mapping
@@ -154,12 +148,9 @@ def generate_orientation_fibers(cfg, eta_ome):
if ncpus > 1:
# multiple process version
# ???: Need a chunksize in map?
- chunksize = max(1, len(input_p)//(10*ncpus))
- pool = mp.Pool(ncpus, discretefiber_init, (params, ))
- qfib = pool.map(
- discretefiber_reduced, input_p,
- chunksize=chunksize
- )
+ chunksize = max(1, len(input_p) // (10 * ncpus))
+ pool = mp.Pool(ncpus, discretefiber_init, (params,))
+ qfib = pool.map(discretefiber_reduced, input_p, chunksize=chunksize)
'''
# This is an experiment...
ntotal= 10*ncpus + np.remainder(len(input_p), 10*ncpus) > 0 @@ -182,7 +173,7 @@ def generate_orientation_fibers(cfg, eta_ome): qfib = list(map(discretefiber_reduced, input_p)) discretefiber_cleanup() - elapsed = (timeit.default_timer() - start) + elapsed = timeit.default_timer() - start logger.info("\tfiber generation took %.3f seconds", elapsed) return np.hstack(qfib) @@ -212,25 +203,20 @@ def discretefiber_reduced(params_in): gVec_s = xfcapi.angles_to_gvec( np.atleast_2d(params_in[3:]), chi=chi, - ).T + ).T tmp = mutil.uniqueVectors( rot.discreteFiber( - hkl, - gVec_s, - B=bMat, - ndiv=fiber_ndiv, - invert=False, - csym=csym - )[0] - ) + hkl, gVec_s, B=bMat, ndiv=fiber_ndiv, invert=False, csym=csym + )[0] + ) return tmp -def run_cluster(compl, qfib, qsym, cfg, - min_samples=None, compl_thresh=None, radius=None): - """ - """ +def run_cluster( + compl, qfib, qsym, cfg, min_samples=None, compl_thresh=None, radius=None +): + """ """ algorithm = cfg.find_orientations.clustering.algorithm cl_radius = cfg.find_orientations.clustering.radius @@ -263,8 +249,7 @@ def run_cluster(compl, qfib, qsym, cfg, def quat_distance(x, y): return xfcapi.quat_distance( - np.array(x, order='C'), np.array(y, order='C'), - qsym + np.array(x, order='C'), np.array(y, order='C'), qsym ) qfib_r = qfib[:, np.array(compl) > min_compl] @@ -282,19 +267,22 @@ def quat_distance(x, y): logger.info( "Feeding %d orientations above %.1f%% to clustering", - num_ors, 100*min_compl - ) + num_ors, + 100 * min_compl, + ) if algorithm == 'dbscan' and not have_sklearn: algorithm = 'fclusterdata' logger.warning( "sklearn >= 0.14 required for dbscan; using fclusterdata" - ) + ) if algorithm in ['dbscan', 'ort-dbscan', 'sph-dbscan']: # munge min_samples according to options - if min_samples is None \ - or cfg.find_orientations.use_quaternion_grid is not None: + if ( + min_samples is None + or cfg.find_orientations.use_quaternion_grid is not None + ): min_samples = 1 if algorithm == 'sph-dbscan': @@ -302,7 +290,7 @@ def quat_distance(x, y): # compute distance matrix pdist = pairwise_distances( qfib_r.T, metric=quat_distance, n_jobs=1 - ) + ) # run dbscan core_samples, labels = dbscan( @@ -311,16 +299,16 @@ def quat_distance(x, y): min_samples=min_samples, metric='precomputed', n_jobs=ncpus, - ) + ) else: if algorithm == 'ort-dbscan': logger.info("using euclidean orthographic DBSCAN") pts = qfib_r[1:, :].T - eps = 0.25*np.radians(cl_radius) + eps = 0.25 * np.radians(cl_radius) else: logger.info("using euclidean DBSCAN") pts = qfib_r.T - eps = 0.5*np.radians(cl_radius) + eps = 0.5 * np.radians(cl_radius) # run dbscan core_samples, labels = dbscan( @@ -330,7 +318,7 @@ def quat_distance(x, y): metric='minkowski', p=2, n_jobs=ncpus, - ) + ) # extract cluster labels cl = np.array(labels, dtype=int) # convert to array @@ -344,12 +332,12 @@ def quat_distance(x, y): qfib_r.T, np.radians(cl_radius), criterion='distance', - metric=quat_distance - ) + metric=quat_distance, + ) else: raise RuntimeError( "Clustering algorithm %s not recognized" % algorithm - ) + ) # extract number of clusters if np.any(cl == -1): @@ -365,18 +353,20 @@ def quat_distance(x, y): qfib_r[:, cl == i + 1], qsym ).flatten() - if algorithm in ('dbscan', 'ort-dbscan') and qbar.size/4 > 1: + if algorithm in ('dbscan', 'ort-dbscan') and qbar.size / 4 > 1: logger.info("\tchecking for duplicate orientations...") cl = cluster.hierarchy.fclusterdata( qbar.T, np.radians(cl_radius), criterion='distance', - metric=quat_distance) + metric=quat_distance, + ) nblobs_new = len(np.unique(cl)) 
if nblobs_new < nblobs: logger.info( "\tfound %d duplicates within %f degrees", - nblobs - nblobs_new, cl_radius + nblobs - nblobs_new, + cl_radius, ) tmp = np.zeros((4, nblobs_new)) for i in range(nblobs_new): @@ -390,10 +380,10 @@ def quat_distance(x, y): logger.info( "Found %d orientation clusters with >=%.1f%% completeness" " and %2f misorientation", - qbar.size/4, - 100.*min_compl, - cl_radius - ) + qbar.size / 4, + 100.0 * min_compl, + cl_radius, + ) return np.atleast_2d(qbar), cl @@ -438,7 +428,7 @@ def load_eta_ome_maps(cfg, pd, image_series, hkls=None, clean=False): shkls = pd.getHKLs(*res.iHKLList, asStr=True) logger.info( 'hkls used to generate orientation maps: %s', - [f'[{i}]' for i in shkls] + [f'[{i}]' for i in shkls], ) except (AttributeError, IOError): logger.info( @@ -462,12 +452,10 @@ def filter_maps_if_requested(eta_ome, cfg): if filter_maps: if not isinstance(filter_maps, bool): sigm = const.fwhm_to_sigma * filter_maps - logger.info("filtering eta/ome maps incl LoG with %.2f std dev", - sigm) - _filter_eta_ome_maps( - eta_ome, - filter_stdev=sigm + logger.info( + "filtering eta/ome maps incl LoG with %.2f std dev", sigm ) + _filter_eta_ome_maps(eta_ome, filter_stdev=sigm) else: logger.info("filtering eta/ome maps") _filter_eta_ome_maps(eta_ome) @@ -532,8 +520,9 @@ def generate_eta_ome_maps(cfg, hkls=None, save=True): # we have actual hkls hkls = plane_data.getHKLID(temp.tolist(), master=True) else: - raise RuntimeError('active_hkls spec must be 1-d or 2-d, not %d-d' - % temp.ndim) + raise RuntimeError( + 'active_hkls spec must be 1-d or 2-d, not %d-d' % temp.ndim + ) # apply some checks to active_hkls specificaton if not use_all: @@ -558,8 +547,7 @@ def generate_eta_ome_maps(cfg, hkls=None, save=True): # logging output shkls = plane_data.getHKLs(*active_hklIDs, asStr=True) logger.info( - "building eta_ome maps using hkls: %s", - [f'[{i}]' for i in shkls] + "building eta_ome maps using hkls: %s", [f'[{i}]' for i in shkls] ) # grad imageseries dict from cfg @@ -572,11 +560,14 @@ def generate_eta_ome_maps(cfg, hkls=None, save=True): # make eta_ome maps eta_ome = instrument.GenerateEtaOmeMaps( - imsd, cfg.instrument.hedm, plane_data, + imsd, + cfg.instrument.hedm, + plane_data, active_hkls=active_hklIDs, eta_step=cfg.find_orientations.orientation_maps.eta_step, threshold=cfg.find_orientations.orientation_maps.threshold, - ome_period=ome_period) + ome_period=ome_period, + ) logger.info("\t\t...took %f seconds", timeit.default_timer() - start) @@ -667,18 +658,22 @@ def create_clustering_parameters(cfg, eta_ome): # !!! 
default to use 100 grains ngrains = 100 rand_q = mutil.unitVector(np.random.randn(4, ngrains)) - rand_e = np.tile(2.*np.arccos(rand_q[0, :]), (3, 1)) \ - * mutil.unitVector(rand_q[1:, :]) + rand_e = np.tile(2.0 * np.arccos(rand_q[0, :]), (3, 1)) * mutil.unitVector( + rand_q[1:, :] + ) grain_param_list = np.vstack( - [rand_e, - np.zeros((3, ngrains)), - np.tile(const.identity_6x1, (ngrains, 1)).T] - ).T + [ + rand_e, + np.zeros((3, ngrains)), + np.tile(const.identity_6x1, (ngrains, 1)).T, + ] + ).T sim_results = instr.simulate_rotation_series( - plane_data, grain_param_list, - eta_ranges=np.radians(eta_ranges), - ome_ranges=np.radians(ome_ranges), - ome_period=np.radians(ome_period) + plane_data, + grain_param_list, + eta_ranges=np.radians(eta_ranges), + ome_ranges=np.radians(ome_ranges), + ome_period=np.radians(ome_period), ) refl_per_grain = np.zeros(ngrains) @@ -687,21 +682,20 @@ def create_clustering_parameters(cfg, eta_ome): for i, refl_ids in enumerate(sim_result[0]): refl_per_grain[i] += len(refl_ids) seed_refl_per_grain[i] += np.sum( - [sum(refl_ids == hkl_id) for hkl_id in seed_hkl_ids] - ) + [sum(refl_ids == hkl_id) for hkl_id in seed_hkl_ids] + ) min_samples = max( - int(np.floor(0.5*compl_thresh*min(seed_refl_per_grain))), - 2 + int(np.floor(0.5 * compl_thresh * min(seed_refl_per_grain))), 2 ) mean_rpg = int(np.round(np.average(refl_per_grain))) return min_samples, mean_rpg -def find_orientations(cfg, - hkls=None, clean=False, profile=False, - use_direct_testing=False): +def find_orientations( + cfg, hkls=None, clean=False, profile=False, use_direct_testing=False +): """ @@ -758,16 +752,17 @@ def find_orientations(cfg, if use_direct_testing: npdiv_DFLT = 2 params = dict( - plane_data=plane_data, - instrument=instr, - imgser_dict=imsd, - tth_tol=tth_tol, - eta_tol=eta_tol, - ome_tol=ome_tol, - eta_ranges=np.radians(eta_ranges), - ome_period=np.radians(ome_period), - npdiv=npdiv_DFLT, - threshold=image_threshold) + plane_data=plane_data, + instrument=instr, + imgser_dict=imsd, + tth_tol=tth_tol, + eta_tol=eta_tol, + ome_tol=ome_tol, + eta_ranges=np.radians(eta_ranges), + ome_period=np.radians(ome_period), + npdiv=npdiv_DFLT, + threshold=image_threshold, + ) logger.info("\tusing direct search on %d processes", ncpus) @@ -776,36 +771,33 @@ def find_orientations(cfg, # doing seeded search logger.info("Will perform seeded search") logger.info( - "\tgenerating search quaternion list using %d processes", - ncpus + "\tgenerating search quaternion list using %d processes", ncpus ) start = timeit.default_timer() # need maps - eta_ome = load_eta_ome_maps(cfg, plane_data, imsd, - hkls=hkls, clean=clean) + eta_ome = load_eta_ome_maps( + cfg, plane_data, imsd, hkls=hkls, clean=clean + ) # generate trial orientations qfib = generate_orientation_fibers(cfg, eta_ome) - logger.info("\t\t...took %f seconds", - timeit.default_timer() - start) + logger.info( + "\t\t...took %f seconds", timeit.default_timer() - start + ) else: # doing grid search try: qfib = np.load(cfg.find_orientations.use_quaternion_grid) - except(IOError): + except IOError: raise RuntimeError( "specified quaternion grid file '%s' not found!" 
% cfg.find_orientations.use_quaternion_grid ) # execute direct search - pool = mp.Pool( - ncpus, - indexer.test_orientation_FF_init, - (params, ) - ) + pool = mp.Pool(ncpus, indexer.test_orientation_FF_init, (params,)) completeness = pool.map(indexer.test_orientation_FF_reduced, qfib.T) pool.close() pool.join() @@ -815,26 +807,27 @@ def find_orientations(cfg, start = timeit.default_timer() # handle eta-ome maps - eta_ome = load_eta_ome_maps(cfg, plane_data, imsd, - hkls=hkls, clean=clean) + eta_ome = load_eta_ome_maps( + cfg, plane_data, imsd, hkls=hkls, clean=clean + ) # handle search space if cfg.find_orientations.use_quaternion_grid is None: # doing seeded search logger.info( - "\tgenerating search quaternion list using %d processes", - ncpus + "\tgenerating search quaternion list using %d processes", ncpus ) start = timeit.default_timer() qfib = generate_orientation_fibers(cfg, eta_ome) - logger.info("\t\t...took %f seconds", - timeit.default_timer() - start) + logger.info( + "\t\t...took %f seconds", timeit.default_timer() - start + ) else: # doing grid search try: qfib = np.load(cfg.find_orientations.use_quaternion_grid) - except(IOError): + except IOError: raise RuntimeError( "specified quaternion grid file '%s' not found!" % cfg.find_orientations.use_quaternion_grid @@ -842,8 +835,9 @@ def find_orientations(cfg, # do map-based indexing start = timeit.default_timer() - logger.info("will test %d quaternions using %d processes", - qfib.shape[1], ncpus) + logger.info( + "will test %d quaternions using %d processes", qfib.shape[1], ncpus + ) completeness = indexer.paintGrid( qfib, @@ -854,19 +848,21 @@ def find_orientations(cfg, omePeriod=np.radians(cfg.find_orientations.omega.period), threshold=on_map_threshold, doMultiProc=ncpus > 1, - nCPUs=ncpus - ) - logger.info("\t\t...took %f seconds", - timeit.default_timer() - start) + nCPUs=ncpus, + ) + logger.info("\t\t...took %f seconds", timeit.default_timer() - start) completeness = np.array(completeness) - logger.info("\tSaving %d scored orientations with max completeness %f%%", - qfib.shape[1], 100*np.max(completeness)) + logger.info( + "\tSaving %d scored orientations with max completeness %f%%", + qfib.shape[1], + 100 * np.max(completeness), + ) results = {} results['scored_orientations'] = { 'test_quaternions': qfib, - 'score': completeness + 'score': completeness, } # ========================================================================= @@ -887,10 +883,14 @@ def find_orientations(cfg, logger.info("\tneighborhood size: %d", min_samples) qbar, cl = run_cluster( - completeness, qfib, plane_data.q_sym, cfg, + completeness, + qfib, + plane_data.q_sym, + cfg, min_samples=min_samples, compl_thresh=compl_thresh, - radius=cl_radius) + radius=cl_radius, + ) logger.info("\t\t...took %f seconds", (timeit.default_timer() - start)) logger.info("\tfound %d grains", qbar.shape[1]) diff --git a/hexrd/hedm/fitgrains.py b/hexrd/hedm/fitgrains.py index 461aaa639..57ebba311 100644 --- a/hexrd/hedm/fitgrains.py +++ b/hexrd/hedm/fitgrains.py @@ -107,17 +107,23 @@ def fit_grain_FF_reduced(grain_id): for tols in zip(tth_tol, eta_tol, ome_tol): complvec, results = instrument.pull_spots( - plane_data, grain_params, + plane_data, + grain_params, imgser_dict, tth_tol=tols[0], eta_tol=tols[1], ome_tol=tols[2], - npdiv=npdiv, threshold=threshold, + npdiv=npdiv, + threshold=threshold, eta_ranges=eta_ranges, ome_period=ome_period, - dirname=analysis_dirname, filename=spots_filename, + dirname=analysis_dirname, + filename=spots_filename, return_spot_list=False, - 
quiet=True, check_only=False, interp='nearest') + quiet=True, + check_only=False, + interp='nearest', + ) # ======= DETERMINE VALID REFLECTIONS ======= @@ -153,8 +159,9 @@ def fit_grain_FF_reduced(grain_id): # find unsaturated spots on this panel unsat_spots = np.ones(len(valid_refl_ids), dtype=bool) if panel.saturation_level is not None: - unsat_spots[valid_refl_ids] = \ + unsat_spots[valid_refl_ids] = ( max_int[valid_refl_ids] < panel.saturation_level + ) idx = np.logical_and(valid_refl_ids, unsat_spots) @@ -163,15 +170,15 @@ def fit_grain_FF_reduced(grain_id): try: ot = np.load( os.path.join( - analysis_dirname, os.path.join( - det_key, OVERLAP_TABLE_FILE - ) + analysis_dirname, + os.path.join(det_key, OVERLAP_TABLE_FILE), ) ) for key in ot.keys(): for this_table in ot[key]: these_overlaps = np.where( - this_table[:, 0] == grain_id)[0] + this_table[:, 0] == grain_id + )[0] if len(these_overlaps) > 0: mark_these = np.array( this_table[these_overlaps, 1], dtype=int @@ -183,7 +190,7 @@ def fit_grain_FF_reduced(grain_id): overlaps[otidx] = True idx = np.logical_and(idx, ~overlaps) # logger.info("found overlap table for '%s'", det_key) - except(IOError, IndexError): + except (IOError, IndexError): # logger.info("no overlap table found for '%s'", det_key) pass @@ -198,7 +205,7 @@ def fit_grain_FF_reduced(grain_id): # try: completeness = num_refl_valid / float(num_refl_tot) - except(ZeroDivisionError): + except ZeroDivisionError: raise RuntimeError( "simulated number of relfections is 0; " + "check instrument config or grain parameters" @@ -206,38 +213,51 @@ def fit_grain_FF_reduced(grain_id): # ======= DO LEASTSQ FIT ======= - if num_refl_valid <= 12: # not enough reflections to fit... exit + if num_refl_valid <= 12: # not enough reflections to fit... exit warnings.warn( f'Not enough valid reflections ({num_refl_valid}) to fit, ' f'exiting', - RuntimeWarning + RuntimeWarning, ) return grain_id, completeness, np.inf, grain_params else: grain_params = fitGrain( - grain_params, instrument, culled_results, - plane_data.latVecOps['B'], plane_data.wavelength - ) + grain_params, + instrument, + culled_results, + plane_data.latVecOps['B'], + plane_data.wavelength, + ) # get chisq # TODO: do this while evaluating fit??? 
chisq = objFuncFitGrain( - grain_params[gFlag_ref], grain_params, gFlag_ref, - instrument, - culled_results, - plane_data.latVecOps['B'], plane_data.wavelength, - ome_period, - simOnly=False, return_value_flag=2) + grain_params[gFlag_ref], + grain_params, + gFlag_ref, + instrument, + culled_results, + plane_data.latVecOps['B'], + plane_data.wavelength, + ome_period, + simOnly=False, + return_value_flag=2, + ) if refit is not None: # first get calculated x, y, ome from previous solution # NOTE: this result is a dict xyo_det_fit_dict = objFuncFitGrain( - grain_params[gFlag_ref], grain_params, gFlag_ref, + grain_params[gFlag_ref], + grain_params, + gFlag_ref, instrument, culled_results, - plane_data.latVecOps['B'], plane_data.wavelength, + plane_data.latVecOps['B'], + plane_data.wavelength, ome_period, - simOnly=True, return_value_flag=2) + simOnly=True, + return_value_flag=2, + ) # make dict to contain new culled results culled_results_r = dict.fromkeys(culled_results) @@ -250,7 +270,7 @@ def fit_grain_FF_reduced(grain_id): continue ims = next(iter(imgser_dict.values())) # grab first for the omes - ome_step = sum(np.r_[-1, 1]*ims.metadata['omega'][0, :]) + ome_step = sum(np.r_[-1, 1] * ims.metadata['omega'][0, :]) xyo_det = np.atleast_2d( np.vstack([np.r_[x[7], x[6][-1]] for x in presults]) @@ -258,25 +278,25 @@ def fit_grain_FF_reduced(grain_id): xyo_det_fit = xyo_det_fit_dict[det_key] - xpix_tol = refit[0]*panel.pixel_size_col - ypix_tol = refit[0]*panel.pixel_size_row - fome_tol = refit[1]*ome_step + xpix_tol = refit[0] * panel.pixel_size_col + ypix_tol = refit[0] * panel.pixel_size_row + fome_tol = refit[1] * ome_step # define difference vectors for spot fits x_diff = abs(xyo_det[:, 0] - xyo_det_fit['calc_xy'][:, 0]) y_diff = abs(xyo_det[:, 1] - xyo_det_fit['calc_xy'][:, 1]) ome_diff = np.degrees( - rotations.angularDifference(xyo_det[:, 2], - xyo_det_fit['calc_omes']) + rotations.angularDifference( + xyo_det[:, 2], xyo_det_fit['calc_omes'] ) + ) # filter out reflections with centroids more than # a pixel and delta omega away from predicted value idx_new = np.logical_and( x_diff <= xpix_tol, - np.logical_and(y_diff <= ypix_tol, - ome_diff <= fome_tol) - ) + np.logical_and(y_diff <= ypix_tol, ome_diff <= fome_tol), + ) # attach to proper dict entry culled_results_r[det_key] = [ @@ -288,28 +308,37 @@ def fit_grain_FF_reduced(grain_id): # only execute fit if left with enough reflections if num_refl_valid > 12: grain_params = fitGrain( - grain_params, instrument, culled_results_r, - plane_data.latVecOps['B'], plane_data.wavelength + grain_params, + instrument, + culled_results_r, + plane_data.latVecOps['B'], + plane_data.wavelength, ) # get chisq # TODO: do this while evaluating fit??? chisq = objFuncFitGrain( - grain_params[gFlag_ref], - grain_params, gFlag_ref, - instrument, - culled_results_r, - plane_data.latVecOps['B'], plane_data.wavelength, - ome_period, - simOnly=False, return_value_flag=2) + grain_params[gFlag_ref], + grain_params, + gFlag_ref, + instrument, + culled_results_r, + plane_data.latVecOps['B'], + plane_data.wavelength, + ome_period, + simOnly=False, + return_value_flag=2, + ) return grain_id, completeness, chisq, grain_params -def fit_grains(cfg, - grains_table, - show_progress=False, - ids_to_refine=None, - write_spots_files=True, - check_if_canceled_func=None): +def fit_grains( + cfg, + grains_table, + show_progress=False, + ids_to_refine=None, + write_spots_files=True, + check_if_canceled_func=None, +): """ Performs optimization of grain parameters. 
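The refit branch in fit_grain_FF_reduced above keeps a spot only when its measured centroid falls within refit[0] pixels and refit[1] omega steps of the first-pass prediction. A minimal standalone sketch of that acceptance test, assuming xyo_det rows of [x, y, omega (rad)], fitted calc_xy and calc_omes, pixel pitches in detector units, and an omega step in degrees; refit_mask and its argument names are illustrative, not part of the module:

    import numpy as np

    def refit_mask(
        xyo_det, calc_xy, calc_omes, refit, pix_row, pix_col, ome_step_deg
    ):
        # tolerances: a multiple of the pixel pitch and of the omega step
        xpix_tol = refit[0] * pix_col
        ypix_tol = refit[0] * pix_row
        fome_tol = refit[1] * ome_step_deg

        x_diff = np.abs(xyo_det[:, 0] - calc_xy[:, 0])
        y_diff = np.abs(xyo_det[:, 1] - calc_xy[:, 1])

        # wrapped angular difference in degrees, standing in for
        # rotations.angularDifference
        d = xyo_det[:, 2] - calc_omes
        ome_diff = np.degrees(np.abs(np.arctan2(np.sin(d), np.cos(d))))

        return (
            (x_diff <= xpix_tol)
            & (y_diff <= ypix_tol)
            & (ome_diff <= fome_tol)
        )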
@@ -340,7 +369,7 @@ def fit_grains(cfg, # handle omega period # !!! we assume all detector ims have the same ome ranges, so any will do! oims = next(iter(imsd.values())) - ome_period = np.radians(oims.omega[0, 0] + np.r_[0., 360.]) + ome_period = np.radians(oims.omega[0, 0] + np.r_[0.0, 360.0]) # number of processes ncpus = cfg.multiprocessing @@ -353,20 +382,21 @@ def fit_grains(cfg, spots_filename = SPOTS_OUT_FILE if write_spots_files else None params = dict( - grains_table=grains_table, - plane_data=cfg.material.plane_data, - instrument=instr, - imgser_dict=imsd, - tth_tol=cfg.fit_grains.tolerance.tth, - eta_tol=cfg.fit_grains.tolerance.eta, - ome_tol=cfg.fit_grains.tolerance.omega, - npdiv=cfg.fit_grains.npdiv, - refit=cfg.fit_grains.refit, - threshold=threshold, - eta_ranges=eta_ranges, - ome_period=ome_period, - analysis_dirname=cfg.analysis_dir, - spots_filename=spots_filename) + grains_table=grains_table, + plane_data=cfg.material.plane_data, + instrument=instr, + imgser_dict=imsd, + tth_tol=cfg.fit_grains.tolerance.tth, + eta_tol=cfg.fit_grains.tolerance.eta, + ome_tol=cfg.fit_grains.tolerance.omega, + npdiv=cfg.fit_grains.npdiv, + refit=cfg.fit_grains.refit, + threshold=threshold, + eta_ranges=eta_ranges, + ome_period=ome_period, + analysis_dirname=cfg.analysis_dir, + spots_filename=spots_filename, + ) # ===================================================================== # EXECUTE MP FIT @@ -378,27 +408,25 @@ def fit_grains(cfg, start = timeit.default_timer() fit_grain_FF_init(params) fit_results = list( - map(fit_grain_FF_reduced, - np.array(grains_table[:, 0], dtype=int)) + map(fit_grain_FF_reduced, np.array(grains_table[:, 0], dtype=int)) ) fit_grain_FF_cleanup() elapsed = timeit.default_timer() - start else: nproc = min(ncpus, len(grains_table)) - chunksize = max(1, len(grains_table)//ncpus) - logger.info("\tstarting fit on %d processes with chunksize %d", - nproc, chunksize) - start = timeit.default_timer() - pool = multiprocessing.Pool( + chunksize = max(1, len(grains_table) // ncpus) + logger.info( + "\tstarting fit on %d processes with chunksize %d", nproc, - fit_grain_FF_init, - (params, ) + chunksize, ) + start = timeit.default_timer() + pool = multiprocessing.Pool(nproc, fit_grain_FF_init, (params,)) async_result = pool.map_async( fit_grain_FF_reduced, np.array(grains_table[:, 0], dtype=int), - chunksize=chunksize + chunksize=chunksize, ) while not async_result.ready(): if check_if_canceled_func and check_if_canceled_func(): diff --git a/hexrd/hedm/fitting/calibration/grain.py b/hexrd/hedm/fitting/calibration/grain.py index 6044a5c13..0995e5c44 100644 --- a/hexrd/hedm/fitting/calibration/grain.py +++ b/hexrd/hedm/fitting/calibration/grain.py @@ -7,7 +7,9 @@ from hexrd.core.transforms import xfcapi from ....core.fitting.calibration.abstract_grain import AbstractGrainCalibrator -from ....core.fitting.calibration.lmfit_param_handling import DEFAULT_EULER_CONVENTION +from ....core.fitting.calibration.lmfit_param_handling import ( + DEFAULT_EULER_CONVENTION, +) from .. 
import grains as grainutil logger = logging.getLogger(__name__) @@ -15,14 +17,27 @@ class GrainCalibrator(AbstractGrainCalibrator): """This is for HEDM grain calibration""" + type = 'grain' - def __init__(self, instr, material, grain_params, ome_period, - index=0, default_refinements=None, calibration_picks=None, - euler_convention=DEFAULT_EULER_CONVENTION): + def __init__( + self, + instr, + material, + grain_params, + ome_period, + index=0, + default_refinements=None, + calibration_picks=None, + euler_convention=DEFAULT_EULER_CONVENTION, + ): super().__init__( - instr, material, grain_params, default_refinements, - calibration_picks, euler_convention, + instr, + material, + grain_params, + default_refinements, + calibration_picks, + euler_convention, ) self.ome_period = ome_period self.index = index @@ -58,22 +73,32 @@ def residual(self): pick_hkls_dict, pick_xys_dict = self._evaluate() return sxcal_obj_func( - [self.grain_params], self.instr, pick_xys_dict, pick_hkls_dict, - self.bmatx, self.ome_period + [self.grain_params], + self.instr, + pick_xys_dict, + pick_hkls_dict, + self.bmatx, + self.ome_period, ) def model(self): pick_hkls_dict, pick_xys_dict = self._evaluate() return sxcal_obj_func( - [self.grain_params], self.instr, pick_xys_dict, pick_hkls_dict, - self.bmatx, self.ome_period, sim_only=True + [self.grain_params], + self.instr, + pick_xys_dict, + pick_hkls_dict, + self.bmatx, + self.ome_period, + sim_only=True, ) # Objective function for multigrain fitting -def sxcal_obj_func(grain_params, instr, xyo_det, hkls_idx, - bmat, ome_period, sim_only=False): +def sxcal_obj_func( + grain_params, instr, xyo_det, hkls_idx, bmat, ome_period, sim_only=False +): ngrains = len(grain_params) # assign some useful params @@ -108,7 +133,7 @@ def sxcal_obj_func(grain_params, instr, xyo_det, hkls_idx, xy_unwarped[det_key].append(xyo[:, :2]) meas_omes[det_key].append(xyo[:, 2]) - if panel.distortion is not None: # do unwarping + if panel.distortion is not None: # do unwarping xy_unwarped[det_key][ig] = panel.distortion.apply( xy_unwarped[det_key][ig] ) @@ -128,22 +153,28 @@ def sxcal_obj_func(grain_params, instr, xyo_det, hkls_idx, ghat_c = np.dot(rmat_c.T, ghat_s) match_omes, calc_omes_tmp = grainutil.matchOmegas( - xyo, ghkls.T, - chi, rmat_c, bmat, wavelength, + xyo, + ghkls.T, + chi, + rmat_c, + bmat, + wavelength, vInv=vinv_s, beamVec=bvec, - omePeriod=ome_period) + omePeriod=ome_period, + ) rmat_s_arr = xfcapi.make_sample_rmat( chi, np.ascontiguousarray(calc_omes_tmp) ) calc_xy_tmp = xfcapi.gvec_to_xy( - ghat_c.T, rmat_d, rmat_s_arr, rmat_c, - tvec_d, tvec_s, tvec_c + ghat_c.T, rmat_d, rmat_s_arr, rmat_c, tvec_d, tvec_s, tvec_c ) if np.any(np.isnan(calc_xy_tmp)): - logger.warning("infeasible parameters: may want to scale back " - "finite difference step size") + logger.warning( + "infeasible parameters: may want to scale back " + "finite difference step size" + ) calc_omes[det_key].append(calc_omes_tmp) calc_xy[det_key].append(calc_xy_tmp) @@ -178,7 +209,6 @@ def sxcal_obj_func(grain_params, instr, xyo_det, hkls_idx, diff_vecs_xy = calc_xy_all - meas_xy_all diff_ome = angularDifference(calc_omes_all, meas_omes_all) retval = np.hstack( - [diff_vecs_xy, - diff_ome.reshape(npts_tot, 1)] + [diff_vecs_xy, diff_ome.reshape(npts_tot, 1)] ).flatten() return retval diff --git a/hexrd/hedm/fitting/calibration/multigrain.py b/hexrd/hedm/fitting/calibration/multigrain.py index afa1ef198..9e7bf59a2 100644 --- a/hexrd/hedm/fitting/calibration/multigrain.py +++ 
b/hexrd/hedm/fitting/calibration/multigrain.py @@ -15,22 +15,33 @@ logger.setLevel('INFO') # grains +# fmt: off grain_flags_DFLT = np.array( [1, 1, 1, 1, 0, 1, 0, 0, 0, 0, 0, 0], dtype=bool ) +# fmt: on -ext_eta_tol = np.radians(5.) # for HEDM cal, may make this a user param +ext_eta_tol = np.radians(5.0) # for HEDM cal, may make this a user param def calibrate_instrument_from_sx( - instr, grain_params, bmat, xyo_det, hkls_idx, - param_flags=None, grain_flags=None, - ome_period=None, - xtol=cnst.sqrt_epsf, ftol=cnst.sqrt_epsf, - factor=10., sim_only=False, use_robust_lsq=False): + instr, + grain_params, + bmat, + xyo_det, + hkls_idx, + param_flags=None, + grain_flags=None, + ome_period=None, + xtol=cnst.sqrt_epsf, + ftol=cnst.sqrt_epsf, + factor=10.0, + sim_only=False, + use_robust_lsq=False, +): """ arguments xyo_det, hkls_idx are DICTs over panels @@ -51,8 +62,7 @@ def calibrate_instrument_from_sx( for det_key in instr.detectors: for ig in range(ngrains): xyo_det[det_key][ig][:, 2] = rotations.mapAngle( - xyo_det[det_key][ig][:, 2], - ome_period + xyo_det[det_key][ig][:, 2], ome_period ) # first grab the instrument parameters @@ -66,25 +76,35 @@ def calibrate_instrument_from_sx( if grain_flags is None: grain_flags = np.tile(grain_flags_DFLT, ngrains) - plist_full = np.concatenate( - [plist_full, np.hstack(grain_params)] - ) + plist_full = np.concatenate([plist_full, np.hstack(grain_params)]) plf_copy = np.copy(plist_full) # concatenate refinement flags refine_flags = np.hstack([param_flags, grain_flags]) plist_fit = plist_full[refine_flags] - fit_args = (plist_full, - param_flags, grain_flags, - instr, xyo_det, hkls_idx, - bmat, ome_period) + fit_args = ( + plist_full, + param_flags, + grain_flags, + instr, + xyo_det, + hkls_idx, + bmat, + ome_period, + ) if sim_only: return sxcal_obj_func( - plist_fit, plist_full, - param_flags, grain_flags, - instr, xyo_det, hkls_idx, - bmat, ome_period, - sim_only=True) + plist_fit, + plist_full, + param_flags, + grain_flags, + instr, + xyo_det, + hkls_idx, + bmat, + ome_period, + sim_only=True, + ) else: logger.info("Set up to refine:") for i in np.where(refine_flags)[0]: @@ -93,9 +113,13 @@ def calibrate_instrument_from_sx( # run optimization if use_robust_lsq: result = least_squares( - sxcal_obj_func, plist_fit, args=fit_args, - xtol=xtol, ftol=ftol, - loss='soft_l1', method='trf' + sxcal_obj_func, + plist_fit, + args=fit_args, + xtol=xtol, + ftol=ftol, + loss='soft_l1', + method='trf', ) x = result.x resd = result.fun @@ -104,9 +128,13 @@ def calibrate_instrument_from_sx( else: # do least squares problem x, cov_x, infodict, mesg, ierr = leastsq( - sxcal_obj_func, plist_fit, args=fit_args, - factor=factor, xtol=xtol, ftol=ftol, - full_output=1 + sxcal_obj_func, + plist_fit, + args=fit_args, + factor=factor, + xtol=xtol, + ftol=ftol, + full_output=1, ) resd = infodict['fvec'] if ierr not in [1, 2, 3, 4]: @@ -121,11 +149,17 @@ def calibrate_instrument_from_sx( # run simulation with optimized results sim_final = sxcal_obj_func( - x, plist_full, - param_flags, grain_flags, - instr, xyo_det, hkls_idx, - bmat, ome_period, - sim_only=True) + x, + plist_full, + param_flags, + grain_flags, + instr, + xyo_det, + hkls_idx, + bmat, + ome_period, + sim_only=True, + ) # ??? reset instrument here? 
instr.update_from_parameter_list(fit_params) @@ -133,8 +167,10 @@ def calibrate_instrument_from_sx( # report final logger.info("Optimization Reults:") for i in np.where(refine_flags)[0]: - logger.info("\t%s = %1.7e --> %1.7e" - % (pnames[i], plf_copy[i], fit_params[i])) + logger.info( + "\t%s = %1.7e --> %1.7e" + % (pnames[i], plf_copy[i], fit_params[i]) + ) return fit_params, resd, sim_final @@ -162,9 +198,7 @@ def generate_parameter_names(instr, grain_params): # now add distortion if there if panel.distortion is not None: for j in range(len(panel.distortion.params)): - pnames.append( - '{:>24s}'.format('%s dparam[%d]' % (det_key, j)) - ) + pnames.append('{:>24s}'.format('%s dparam[%d]' % (det_key, j))) grain_params = np.atleast_2d(grain_params) for ig, grain in enumerate(grain_params): @@ -180,26 +214,31 @@ def generate_parameter_names(instr, grain_params): '{:>24s}'.format('grain %d vinv_s[2]' % ig), '{:>24s}'.format('grain %d vinv_s[3]' % ig), '{:>24s}'.format('grain %d vinv_s[4]' % ig), - '{:>24s}'.format('grain %d vinv_s[5]' % ig) + '{:>24s}'.format('grain %d vinv_s[5]' % ig), ] return pnames -def sxcal_obj_func(plist_fit, plist_full, - param_flags, grain_flags, - instr, xyo_det, hkls_idx, - bmat, ome_period, - sim_only=False, return_value_flag=None): - """ - """ +def sxcal_obj_func( + plist_fit, + plist_full, + param_flags, + grain_flags, + instr, + xyo_det, + hkls_idx, + bmat, + ome_period, + sim_only=False, + return_value_flag=None, +): + """ """ npi = len(instr.calibration_parameters) NP_GRN = 12 # stack flags and force bool repr - refine_flags = np.array( - np.hstack([param_flags, grain_flags]), - dtype=bool) + refine_flags = np.array(np.hstack([param_flags, grain_flags]), dtype=bool) # fill out full parameter list # !!! no scaling for now @@ -247,7 +286,7 @@ def sxcal_obj_func(plist_fit, plist_full, xy_unwarped[det_key].append(xyo[:, :2]) meas_omes[det_key].append(xyo[:, 2]) - if panel.distortion is not None: # do unwarping + if panel.distortion is not None: # do unwarping xy_unwarped[det_key][ig] = panel.distortion.apply( xy_unwarped[det_key][ig] ) @@ -267,22 +306,28 @@ def sxcal_obj_func(plist_fit, plist_full, ghat_c = np.dot(rmat_c.T, ghat_s) match_omes, calc_omes_tmp = grainutil.matchOmegas( - xyo, ghkls.T, - chi, rmat_c, bmat, wavelength, + xyo, + ghkls.T, + chi, + rmat_c, + bmat, + wavelength, vInv=vinv_s, beamVec=bvec, - omePeriod=ome_period) + omePeriod=ome_period, + ) rmat_s_arr = xfcapi.make_sample_rmat( chi, np.ascontiguousarray(calc_omes_tmp) ) calc_xy_tmp = xfcapi.gvec_to_xy( - ghat_c.T, rmat_d, rmat_s_arr, rmat_c, - tvec_d, tvec_s, tvec_c + ghat_c.T, rmat_d, rmat_s_arr, rmat_c, tvec_d, tvec_s, tvec_c ) if np.any(np.isnan(calc_xy_tmp)): - logger.warning("infeasible parameters: may want to scale back " - "finite difference step size") + logger.warning( + "infeasible parameters: may want to scale back " + "finite difference step size" + ) calc_omes[det_key].append(calc_omes_tmp) calc_xy[det_key].append(calc_xy_tmp) @@ -317,18 +362,17 @@ def sxcal_obj_func(plist_fit, plist_full, diff_vecs_xy = calc_xy_all - meas_xy_all diff_ome = rotations.angularDifference(calc_omes_all, meas_omes_all) retval = np.hstack( - [diff_vecs_xy, - diff_ome.reshape(npts_tot, 1)] + [diff_vecs_xy, diff_ome.reshape(npts_tot, 1)] ).flatten() if return_value_flag == 1: retval = sum(abs(retval)) elif return_value_flag == 2: - denom = npts_tot - len(plist_fit) - 1. + denom = npts_tot - len(plist_fit) - 1.0 if denom != 0: - nu_fac = 1. / denom + nu_fac = 1.0 / denom else: - nu_fac = 1. 
-        nu_fac = 1 / (npts_tot - len(plist_fit) - 1.)
+            nu_fac = 1.0
+        nu_fac = 1 / (npts_tot - len(plist_fit) - 1.0)
         retval = nu_fac * sum(retval**2)

     return retval


@@ -346,15 +390,14 @@ def parse_reflection_tables(cfg, instr, grain_ids, refit_idx=None):
         idx_0[det_key] = []
         for ig, grain_id in enumerate(grain_ids):
             spots_filename = os.path.join(
-                cfg.analysis_dir, os.path.join(
-                    det_key, 'spots_%05d.out' % grain_id
-                )
+                cfg.analysis_dir,
+                os.path.join(det_key, 'spots_%05d.out' % grain_id),
             )

             # load pull_spots output table
             gtable = np.loadtxt(spots_filename, ndmin=2)
             if len(gtable) == 0:
-                gtable = np.nan*np.ones((1, 17))
+                gtable = np.nan * np.ones((1, 17))

             # apply conditions for accepting valid data
             valid_reflections = gtable[:, 0] >= 0  # is indexed
@@ -362,30 +405,37 @@
             # throw away extremem etas
             p90 = rotations.angularDifference(gtable[:, 8], cnst.piby2)
             m90 = rotations.angularDifference(gtable[:, 8], -cnst.piby2)
-            accept_etas = np.logical_or(p90 > ext_eta_tol,
-                                        m90 > ext_eta_tol)
+            accept_etas = np.logical_or(p90 > ext_eta_tol, m90 > ext_eta_tol)

             logger.info(f"panel '{det_key}', grain {grain_id}")
-            logger.info(f"{sum(valid_reflections)} of {len(gtable)} "
-                        "reflections are indexed")
-            logger.info(f"{sum(not_saturated)} of {sum(valid_reflections)}"
-                        " valid reflections be are below" +
-                        f" saturation threshold of {panel.saturation_level}")
-            logger.info(f"{sum(accept_etas)} of {len(gtable)}"
-                        " reflections be are greater than " +
-                        f" {np.degrees(ext_eta_tol)} from the rotation axis")
+            logger.info(
+                f"{sum(valid_reflections)} of {len(gtable)} "
+                "reflections are indexed"
+            )
+            logger.info(
+                f"{sum(not_saturated)} of {sum(valid_reflections)}"
+                " valid reflections are below"
+                + f" saturation threshold of {panel.saturation_level}"
+            )
+            logger.info(
+                f"{sum(accept_etas)} of {len(gtable)}"
+                " reflections are greater than"
+                + f" {np.degrees(ext_eta_tol)} from the rotation axis"
+            )

             # valid reflections index
             if refit_idx is None:
                 idx = np.logical_and(
                     valid_reflections,
-                    np.logical_and(not_saturated, accept_etas)
+                    np.logical_and(not_saturated, accept_etas),
                 )
                 idx_0[det_key].append(idx)
             else:
                 idx = refit_idx[det_key][ig]
                 idx_0[det_key].append(idx)
-            logger.info(f"input reflection specify {sum(idx)} of "
-                        f"{len(gtable)} total valid reflections")
+            logger.info(
+                f"input reflections specify {sum(idx)} of "
+                f"{len(gtable)} total valid reflections"
+            )

             hkls[det_key].append(gtable[idx, 2:5])
             meas_omes = gtable[idx, 12].reshape(sum(idx), 1)
diff --git a/hexrd/hedm/fitting/grains.py b/hexrd/hedm/fitting/grains.py
index 9549f1c05..034bfe8c0 100644
--- a/hexrd/hedm/fitting/grains.py
+++ b/hexrd/hedm/fitting/grains.py
@@ -19,7 +19,7 @@
 bVec_ref = constants.beam_vec
 eta_ref = constants.eta_vec
-vInv_ref = np.r_[1., 1., 1., 0., 0., 0.]
+vInv_ref = np.r_[1.0, 1.0, 1.0, 0.0, 0.0, 0.0]

 # for grain parameters
@@ -27,11 +27,19 @@
 gScl_ref = np.ones(12, dtype=bool)


-def fitGrain(gFull, instrument, reflections_dict,
-             bMat, wavelength,
-             gFlag=gFlag_ref, gScl=gScl_ref,
-             omePeriod=None,
-             factor=0.1, xtol=sqrt_epsf, ftol=sqrt_epsf):
+def fitGrain(
+    gFull,
+    instrument,
+    reflections_dict,
+    bMat,
+    wavelength,
+    gFlag=gFlag_ref,
+    gScl=gScl_ref,
+    omePeriod=None,
+    factor=0.1,
+    xtol=sqrt_epsf,
+    ftol=sqrt_epsf,
+):
     """
     Perform least-squares optimization of grain parameters.
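When called with return_value_flag == 2, sxcal_obj_func (above) and objFuncFitGrain (below) both collapse the residual vector to a chi-squared normalized by npts - p - 1 degrees of freedom, guarding against a zero denominator. A minimal sketch of that figure of merit; reduced_chisq is an illustrative name, not part of the module:

    import numpy as np

    def reduced_chisq(residuals, n_points, n_fit_params):
        # DOF-normalized sum of squared residuals, with the same guard
        # against a zero denominator used in the objective functions
        denom = n_points - n_fit_params - 1.0
        nu_fac = 1.0 / denom if denom != 0 else 1.0
        return nu_fac * np.sum(np.asarray(residuals) ** 2)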
@@ -78,11 +86,24 @@ def fitGrain(gFull, instrument, reflections_dict, gFit = gFull[gFlag] - fitArgs = (gFull, gFlag, instrument, reflections_dict, - bMat, wavelength, omePeriod) - results = optimize.leastsq(objFuncFitGrain, gFit, args=fitArgs, - diag=1./gScl[gFlag].flatten(), - factor=0.1, xtol=xtol, ftol=ftol) + fitArgs = ( + gFull, + gFlag, + instrument, + reflections_dict, + bMat, + wavelength, + omePeriod, + ) + results = optimize.leastsq( + objFuncFitGrain, + gFit, + args=fitArgs, + diag=1.0 / gScl[gFlag].flatten(), + factor=0.1, + xtol=xtol, + ftol=ftol, + ) gFit_opt = results[0] @@ -91,13 +112,18 @@ def fitGrain(gFull, instrument, reflections_dict, return retval -def objFuncFitGrain(gFit, gFull, gFlag, - instrument, - reflections_dict, - bMat, wavelength, - omePeriod, - simOnly=False, - return_value_flag=return_value_flag): +def objFuncFitGrain( + gFit, + gFull, + gFlag, + instrument, + reflections_dict, + bMat, + wavelength, + omePeriod, + simOnly=False, + return_value_flag=return_value_flag, +): """ Calculate residual between measured and simulated ff-HEDM G-vectors. @@ -182,7 +208,8 @@ def objFuncFitGrain(gFit, gFull, gFlag, det_keys_ordered.append(det_key) rMat_d, tVec_d, chi, tVec_s = extract_detector_transformation( - instrument.detector_parameters[det_key]) + instrument.detector_parameters[det_key] + ) results = reflections_dict[det_key] if len(results) == 0: @@ -204,9 +231,7 @@ def objFuncFitGrain(gFit, gFull, gFlag, # WARNING: hkls and derived vectors below must be columnwise; # strictly necessary??? change affected APIs instead? # - hkls = np.atleast_2d( - np.vstack([x[2] for x in results]) - ).T + hkls = np.atleast_2d(np.vstack([x[2] for x in results])).T meas_xyo = np.atleast_2d( np.vstack([np.r_[x[7], x[6][-1]] for x in results]) @@ -236,19 +261,33 @@ def objFuncFitGrain(gFit, gFull, gFlag, # !!!: check that this operates on UNWARPED xy match_omes, calc_omes = matchOmegas( - meas_xyo, hkls, chi, rMat_c, bMat, wavelength, - vInv=vInv_s, beamVec=bVec, etaVec=eVec, - omePeriod=omePeriod) + meas_xyo, + hkls, + chi, + rMat_c, + bMat, + wavelength, + vInv=vInv_s, + beamVec=bVec, + etaVec=eVec, + omePeriod=omePeriod, + ) # append to omes dict calc_omes_dict[det_key] = calc_omes # TODO: try Numba implementations rMat_s = xfcapi.make_sample_rmat(chi, calc_omes) - calc_xy = xfcapi.gvec_to_xy(gHat_c.T, - rMat_d, rMat_s, rMat_c, - tVec_d, tVec_s, tVec_c, - beam_vec=bVec) + calc_xy = xfcapi.gvec_to_xy( + gHat_c.T, + rMat_d, + rMat_s, + rMat_c, + tVec_d, + tVec_s, + tVec_c, + beam_vec=bVec, + ) # append to xy dict calc_xy_dict[det_key] = calc_xy @@ -265,8 +304,9 @@ def objFuncFitGrain(gFit, gFull, gFlag, npts = len(meas_xyo_all) if np.any(np.isnan(calc_xy)): raise RuntimeError( - "infeasible pFull: may want to scale" + - "back finite difference step size") + "infeasible pFull: may want to scale" + + "back finite difference step size" + ) # return values if simOnly: @@ -276,8 +316,10 @@ def objFuncFitGrain(gFit, gFull, gFlag, else: rd = dict.fromkeys(det_keys_ordered) for det_key in det_keys_ordered: - rd[det_key] = {'calc_xy': calc_xy_dict[det_key], - 'calc_omes': calc_omes_dict[det_key]} + rd[det_key] = { + 'calc_xy': calc_xy_dict[det_key], + 'calc_omes': calc_omes_dict[det_key], + } retval = rd else: # return residual vector @@ -286,27 +328,34 @@ def objFuncFitGrain(gFit, gFull, gFlag, diff_ome = rotations.angularDifference( calc_omes_all, meas_xyo_all[:, 2] ) - retval = np.hstack([diff_vecs_xy, - diff_ome.reshape(npts, 1) - ]).flatten() + retval = np.hstack([diff_vecs_xy, 
diff_ome.reshape(npts, 1)]).flatten() if return_value_flag == 1: # return scalar sum of squared residuals retval = sum(abs(retval)) elif return_value_flag == 2: # return DOF-normalized chisq # TODO: check this calculation - denom = 3*npts - len(gFit) - 1. + denom = 3 * npts - len(gFit) - 1.0 if denom != 0: - nu_fac = 1. / denom + nu_fac = 1.0 / denom else: - nu_fac = 1. + nu_fac = 1.0 retval = nu_fac * sum(retval**2) return retval -def matchOmegas(xyo_det, hkls_idx, chi, rMat_c, bMat, wavelength, - vInv=vInv_ref, beamVec=bVec_ref, etaVec=eta_ref, - omePeriod=None): +def matchOmegas( + xyo_det, + hkls_idx, + chi, + rMat_c, + bMat, + wavelength, + vInv=vInv_ref, + beamVec=bVec_ref, + etaVec=eta_ref, + omePeriod=None, +): """ For a given list of (x, y, ome) points, outputs the index into the results from oscillAnglesOfHKLs, including the calculated omega values. @@ -318,10 +367,15 @@ def matchOmegas(xyo_det, hkls_idx, chi, rMat_c, bMat, wavelength, meas_omes = xyo_det[:, 2] oangs0, oangs1 = xfcapi.oscill_angles_of_hkls( - hkls_idx.T, chi, rMat_c, bMat, wavelength, - v_inv=vInv, - beam_vec=beamVec, - eta_vec=etaVec) + hkls_idx.T, + chi, + rMat_c, + bMat, + wavelength, + v_inv=vInv, + beam_vec=beamVec, + eta_vec=etaVec, + ) if np.any(np.isnan(oangs0)): # debugging # TODO: remove this @@ -337,8 +391,12 @@ def matchOmegas(xyo_det, hkls_idx, chi, rMat_c, bMat, wavelength, # CAPI version gives vstacked angles... must be (2, nhkls) calc_omes = np.vstack([oangs0[:, 2], oangs1[:, 2]]) if omePeriod is not None: - calc_omes = np.vstack([rotations.mapAngle(oangs0[:, 2], omePeriod), - rotations.mapAngle(oangs1[:, 2], omePeriod)]) + calc_omes = np.vstack( + [ + rotations.mapAngle(oangs0[:, 2], omePeriod), + rotations.mapAngle(oangs1[:, 2], omePeriod), + ] + ) # do angular difference diff_omes = rotations.angularDifference( np.tile(meas_omes, (2, 1)), calc_omes diff --git a/hexrd/hedm/grainmap/__init__.py b/hexrd/hedm/grainmap/__init__.py index 25873f0f5..d91f97694 100644 --- a/hexrd/hedm/grainmap/__init__.py +++ b/hexrd/hedm/grainmap/__init__.py @@ -1,28 +1,27 @@ # ============================================================ -# Copyright (c) 2012, Lawrence Livermore National Security, LLC. -# Produced at the Lawrence Livermore National Laboratory. -# Written by Joel Bernier and others. -# LLNL-CODE-529294. +# Copyright (c) 2012, Lawrence Livermore National Security, LLC. +# Produced at the Lawrence Livermore National Laboratory. +# Written by Joel Bernier and others. +# LLNL-CODE-529294. # All rights reserved. -# +# # This file is part of HEXRD. For details on dowloading the source, # see the file COPYING. -# +# # Please also see the file LICENSE. -# +# # This program is free software; you can redistribute it and/or modify it under the # terms of the GNU Lesser General Public License (as published by the Free Software # Foundation) version 2.1 dated February 1999. -# +# # This program is distributed in the hope that it will be useful, but -# WITHOUT ANY WARRANTY; without even the IMPLIED WARRANTY OF MERCHANTABILITY -# or FITNESS FOR A PARTICULAR PURPOSE. See the terms and conditions of the +# WITHOUT ANY WARRANTY; without even the IMPLIED WARRANTY OF MERCHANTABILITY +# or FITNESS FOR A PARTICULAR PURPOSE. See the terms and conditions of the # GNU General Public License for more details. 
-# 
+#
 # You should have received a copy of the GNU Lesser General Public
 # License along with this program (see file LICENSE); if not, write to
 # the Free Software Foundation, Inc., 59 Temple Place, Suite 330,
 # Boston, MA 02111-1307 USA or visit .
 # ============================================================
-"""Tools or X-ray diffraction analysis
-"""
+"""Tools for X-ray diffraction analysis"""
diff --git a/hexrd/hedm/grainmap/nfutil.py b/hexrd/hedm/grainmap/nfutil.py
index 8030622d8..7b2f348f4 100644
--- a/hexrd/hedm/grainmap/nfutil.py
+++ b/hexrd/hedm/grainmap/nfutil.py
@@ -40,6 +40,7 @@
 rank = 0
 try:
     from mpi4py import MPI
+
     comm = MPI.COMM_WORLD
     world_size = comm.Get_size()
     rank = comm.Get_rank()
@@ -56,7 +57,7 @@
 try:
     import imageio as imgio
-except(ImportError):
+except ImportError:
     from skimage import io as imgio
@@ -65,6 +66,7 @@ def load_instrument(yml):
         icfg = yaml.load(f, Loader=yaml.FullLoader)
     return instrument.HEDMInstrument(instrument_config=icfg)
+

 # %%
@@ -83,8 +85,13 @@ class ProcessController:
     track the results of the process as well as to provide clues of the
     progress of the process"""

-    def __init__(self, result_handler=None, progress_observer=None, ncpus=1,
-                 chunk_size=100):
+    def __init__(
+        self,
+        result_handler=None,
+        progress_observer=None,
+        ncpus=1,
+        chunk_size=100,
+    ):
         self.rh = result_handler
         self.po = progress_observer
         self.ncpus = ncpus
@@ -105,8 +112,12 @@ def finish(self, name):
         entry = self.timing.pop()
         assert name == entry[0]
         total = t - entry[2]
-        logging.info("%s took %8.3fs (%8.6fs per item).",
-                     entry[0], total, total/entry[1])
+        logging.info(
+            "%s took %8.3fs (%8.6fs per item).",
+            entry[0],
+            total,
+            total / entry[1],
+        )

     def update(self, value):
         self.po.update(value)
@@ -163,8 +174,9 @@ class ProgressBarProgressObserver:
     def start(self, name, count):
         from progressbar import ProgressBar, Percentage, Bar

-        self.pbar = ProgressBar(widgets=[name, Percentage(), Bar()],
-                                maxval=count)
+        self.pbar = ProgressBar(
+            widgets=[name, Percentage(), Bar()], maxval=count
+        )
         self.pbar.start()

     def update(self, value):
@@ -187,6 +199,7 @@ def handle_result(self, key, value):
 def saving_result_handler(filename):
     """returns a result handler that saves the resulting arrays into a file
     with name filename"""
+
     class SavingResultHandler:
         def __init__(self, file_name):
             self.filename = file_name
@@ -216,6 +229,7 @@ def checking_result_handler(filename):
     match.
A FULL PASS will happen when all existing results match """ + class CheckingResultHandler: def __init__(self, reference_file): """Checks the result against those save in 'reference_file'""" @@ -242,8 +256,9 @@ def handle_result(self, key, value): value = value.T check_len = min(len(reference), len(value)) - test_passed = np.allclose(value[:check_len], - reference[:check_len]) + test_passed = np.allclose( + value[:check_len], reference[:check_len] + ) if not test_passed: msg = "'{0}': FAIL" @@ -267,10 +282,11 @@ def handle_result(self, key, value): # %% OPTIMIZED BITS # ============================================================================= + # Some basic 3d algebra ======================================================= @numba.njit(nogil=True, cache=True) def _v3_dot(a, b): - return a[0]*b[0] + a[1]*b[1] + a[2]*b[2] + return a[0] * b[0] + a[1] * b[1] + a[2] * b[2] @numba.njit(nogil=True, cache=True) @@ -278,9 +294,9 @@ def _m33_v3_multiply(m, v, dst): v0 = v[0] v1 = v[1] v2 = v[2] - dst[0] = m[0, 0]*v0 + m[0, 1]*v1 + m[0, 2]*v2 - dst[1] = m[1, 0]*v0 + m[1, 1]*v1 + m[1, 2]*v2 - dst[2] = m[2, 0]*v0 + m[2, 1]*v1 + m[2, 2]*v2 + dst[0] = m[0, 0] * v0 + m[0, 1] * v1 + m[0, 2] * v2 + dst[1] = m[1, 0] * v0 + m[1, 1] * v1 + m[1, 2] * v2 + dst[2] = m[2, 0] * v0 + m[2, 1] * v1 + m[2, 2] * v2 return dst @@ -290,8 +306,8 @@ def _v3_normalized(src, dst): v0 = src[0] v1 = src[1] v2 = src[2] - sqr_norm = v0*v0 + v1*v1 + v2*v2 - inv_norm = 1.0 if sqr_norm == 0.0 else 1./np.sqrt(sqr_norm) + sqr_norm = v0 * v0 + v1 * v1 + v2 * v2 + inv_norm = 1.0 if sqr_norm == 0.0 else 1.0 / np.sqrt(sqr_norm) dst[0] = v0 * inv_norm dst[1] = v1 * inv_norm @@ -306,21 +322,22 @@ def _make_binary_rot_mat(src, dst): v1 = src[1] v2 = src[2] - dst[0, 0] = 2.0*v0*v0 - 1.0 - dst[0, 1] = 2.0*v0*v1 - dst[0, 2] = 2.0*v0*v2 - dst[1, 0] = 2.0*v1*v0 - dst[1, 1] = 2.0*v1*v1 - 1.0 - dst[1, 2] = 2.0*v1*v2 - dst[2, 0] = 2.0*v2*v0 - dst[2, 1] = 2.0*v2*v1 - dst[2, 2] = 2.0*v2*v2 - 1.0 + dst[0, 0] = 2.0 * v0 * v0 - 1.0 + dst[0, 1] = 2.0 * v0 * v1 + dst[0, 2] = 2.0 * v0 * v2 + dst[1, 0] = 2.0 * v1 * v0 + dst[1, 1] = 2.0 * v1 * v1 - 1.0 + dst[1, 2] = 2.0 * v1 * v2 + dst[2, 0] = 2.0 * v2 * v0 + dst[2, 1] = 2.0 * v2 * v1 + dst[2, 2] = 2.0 * v2 * v2 - 1.0 return dst # code transcribed in numba from transforms module ============================ + # This is equivalent to the transform module anglesToGVec, but written in # numba. 
This should end in a module to share with other scripts @numba.njit(nogil=True, cache=True) @@ -328,29 +345,41 @@ def _anglesToGVec(angs, rMat_ss, rMat_c): """From a set of angles return them in crystal space""" result = np.empty_like(angs) for i in range(len(angs)): - cx = np.cos(0.5*angs[i, 0]) - sx = np.sin(0.5*angs[i, 0]) + cx = np.cos(0.5 * angs[i, 0]) + sx = np.sin(0.5 * angs[i, 0]) cy = np.cos(angs[i, 1]) sy = np.sin(angs[i, 1]) - g0 = cx*cy - g1 = cx*sy + g0 = cx * cy + g1 = cx * sy g2 = sx # with g being [cx*xy, cx*sy, sx] # result = dot(rMat_c, dot(rMat_ss[i], g)) - t0_0 = \ - rMat_ss[i, 0, 0]*g0 + rMat_ss[i, 1, 0]*g1 + rMat_ss[i, 2, 0]*g2 - t0_1 = \ - rMat_ss[i, 0, 1]*g0 + rMat_ss[i, 1, 1]*g1 + rMat_ss[i, 2, 1]*g2 - t0_2 = \ - rMat_ss[i, 0, 2]*g0 + rMat_ss[i, 1, 2]*g1 + rMat_ss[i, 2, 2]*g2 - - result[i, 0] = \ - rMat_c[0, 0]*t0_0 + rMat_c[1, 0]*t0_1 + rMat_c[2, 0]*t0_2 - result[i, 1] = \ - rMat_c[0, 1]*t0_0 + rMat_c[1, 1]*t0_1 + rMat_c[2, 1]*t0_2 - result[i, 2] = \ - rMat_c[0, 2]*t0_0 + rMat_c[1, 2]*t0_1 + rMat_c[2, 2]*t0_2 + t0_0 = ( + rMat_ss[i, 0, 0] * g0 + + rMat_ss[i, 1, 0] * g1 + + rMat_ss[i, 2, 0] * g2 + ) + t0_1 = ( + rMat_ss[i, 0, 1] * g0 + + rMat_ss[i, 1, 1] * g1 + + rMat_ss[i, 2, 1] * g2 + ) + t0_2 = ( + rMat_ss[i, 0, 2] * g0 + + rMat_ss[i, 1, 2] * g1 + + rMat_ss[i, 2, 2] * g2 + ) + + result[i, 0] = ( + rMat_c[0, 0] * t0_0 + rMat_c[1, 0] * t0_1 + rMat_c[2, 0] * t0_2 + ) + result[i, 1] = ( + rMat_c[0, 1] * t0_0 + rMat_c[1, 1] * t0_1 + rMat_c[2, 1] * t0_2 + ) + result[i, 2] = ( + rMat_c[0, 2] * t0_0 + rMat_c[1, 2] * t0_1 + rMat_c[2, 2] * t0_2 + ) return result @@ -361,13 +390,14 @@ def _anglesToGVec(angs, rMat_ss, rMat_c): # temporary arrays is not competitive with the stack allocation using in # the C version of the code (WiP) + # tC varies per coord # gvec_cs, rSm varies per grain # # gvec_cs @numba.njit(nogil=True, cache=True) def _gvec_to_detector_array(vG_sn, rD, rSn, rC, tD, tS, tC): - """ beamVec is the beam vector: (0, 0, -1) in this case """ + """beamVec is the beam vector: (0, 0, -1) in this case""" ztol = xrdutil.epsf p3_l = np.empty((3,)) tmp_vec = np.empty((3,)) @@ -409,8 +439,8 @@ def _gvec_to_detector_array(vG_sn, rD, rSn, rC, tD, tS, tC): result[i, 1] = np.nan continue - u = num/denom - tmp_res = u*tD_l - p3_minus_p1_l + u = num / denom + tmp_res = u * tD_l - p3_minus_p1_l result[i, 0] = _v3_dot(tmp_res, rD[:, 0]) result[i, 1] = _v3_dot(tmp_res, rD[:, 1]) @@ -418,8 +448,9 @@ def _gvec_to_detector_array(vG_sn, rD, rSn, rC, tD, tS, tC): @numba.njit(nogil=True, cache=True) -def _quant_and_clip_confidence(coords, angles, image, - base, inv_deltas, clip_vals, bsp): +def _quant_and_clip_confidence( + coords, angles, image, base, inv_deltas, clip_vals, bsp +): """quantize and clip the parametric coordinates in coords + angles coords - (..., 2) array: input 2d parametric coordinates @@ -444,9 +475,8 @@ def _quant_and_clip_confidence(coords, angles, image, xf = coords[i, 0] yf = coords[i, 1] - # does not count intensity which is covered by the beamstop dcp 5.13.21 - if np.abs(yf-bsp[0])<(bsp[1]/2.): + if np.abs(yf - bsp[0]) < (bsp[1] / 2.0): continue xf = np.floor((xf - base[0]) * inv_deltas[0]) @@ -476,22 +506,24 @@ def _quant_and_clip_confidence(coords, angles, image, if image[z, y, x]: matches += 1 - return 0 if in_sensor == 0 else float(matches)/float(in_sensor) + return 0 if in_sensor == 0 else float(matches) / float(in_sensor) # ============================================================================== # %% DIFFRACTION SIMULATION # 
============================================================================== -def get_simulate_diffractions(grain_params, experiment, - cache_file='gold_cubes.npy', - controller=None): + +def get_simulate_diffractions( + grain_params, experiment, cache_file='gold_cubes.npy', controller=None +): """getter functions that handles the caching of the simulation""" try: image_stack = np.load(cache_file, mmap_mode='r', allow_pickle=False) except Exception: - image_stack = simulate_diffractions(grain_params, experiment, - controller=controller) + image_stack = simulate_diffractions( + grain_params, experiment, controller=controller + ) np.save(cache_file, image_stack) controller.handle_result('image_stack', image_stack) @@ -503,9 +535,11 @@ def simulate_diffractions(grain_params, experiment, controller): """actual forward simulation of the diffraction""" # use a packed array for the image_stack - array_dims = (experiment.nframes, - experiment.ncols, - ((experiment.nrows - 1)//8) + 1) + array_dims = ( + experiment.nframes, + experiment.ncols, + ((experiment.nrows - 1) // 8) + 1, + ) image_stack = np.zeros(array_dims, dtype=np.uint8) count = len(grain_params) @@ -518,7 +552,9 @@ def simulate_diffractions(grain_params, experiment, controller): tS = experiment.tVec_s distortion = experiment.distortion - eta_range = [(-np.pi, np.pi), ] + eta_range = [ + (-np.pi, np.pi), + ] ome_range = experiment.ome_range ome_period = (-np.pi, np.pi) @@ -542,11 +578,16 @@ def simulate_diffractions(grain_params, experiment, controller): ) all_angs[:, 2] = rotations.mapAngle(all_angs[:, 2], ome_period) - proj_pts = _project(all_angs, rD, rC, chi, tD, - tC, tS, distortion) + proj_pts = _project(all_angs, rD, rC, chi, tD, tC, tS, distortion) det_xy = proj_pts[0] - _write_pixels(det_xy, all_angs[:, 2], image_stack, experiment.base, - experiment.inv_deltas, experiment.clip_vals) + _write_pixels( + det_xy, + all_angs[:, 2], + image_stack, + experiment.base, + experiment.inv_deltas, + experiment.clip_vals, + ) controller.update(i + 1) @@ -559,15 +600,18 @@ def simulate_diffractions(grain_params, experiment, controller): # ============================================================================== -def get_dilated_image_stack(image_stack, experiment, controller, - cache_file='gold_cubes_dilated.npy'): +def get_dilated_image_stack( + image_stack, experiment, controller, cache_file='gold_cubes_dilated.npy' +): try: - dilated_image_stack = np.load(cache_file, mmap_mode='r', - allow_pickle=False) + dilated_image_stack = np.load( + cache_file, mmap_mode='r', allow_pickle=False + ) except Exception: - dilated_image_stack = dilate_image_stack(image_stack, experiment, - controller) + dilated_image_stack = dilate_image_stack( + image_stack, experiment, controller + ) np.save(cache_file, dilated_image_stack) return dilated_image_stack @@ -577,20 +621,19 @@ def dilate_image_stack(image_stack, experiment, controller): # first, perform image dilation =========================================== # perform image dilation (using scikit_image dilation) subprocess = 'dilate image_stack' - dilation_shape = np.ones((2*experiment.row_dilation + 1, - 2*experiment.col_dilation + 1), - dtype=np.uint8) + dilation_shape = np.ones( + (2 * experiment.row_dilation + 1, 2 * experiment.col_dilation + 1), + dtype=np.uint8, + ) image_stack_dilated = np.empty_like(image_stack) dilated = np.empty( - (image_stack.shape[-2], image_stack.shape[-1] << 3), - dtype=bool + (image_stack.shape[-2], image_stack.shape[-1] << 3), dtype=bool ) n_images = 
len(image_stack) controller.start(subprocess, n_images) for i_image in range(n_images): to_dilate = np.unpackbits(image_stack[i_image], axis=-1) - ski_dilation(to_dilate, dilation_shape, - out=dilated) + ski_dilation(to_dilate, dilation_shape, out=dilated) image_stack_dilated[i_image] = np.packbits(dilated, axis=-1) controller.update(i_image + 1) controller.finish(subprocess) @@ -607,6 +650,7 @@ def dilate_image_stack(image_stack, experiment, controller): # booleans, an array of uint8 could be used so the image is stored # with a bit per pixel. + @numba.njit(nogil=True, cache=True) def _write_pixels(coords, angles, image, base, inv_deltas, clip_vals): count = len(coords) @@ -625,7 +669,8 @@ def _write_pixels(coords, angles, image, base, inv_deltas, clip_vals): x_byte = x // 8 x_off = 7 - (x % 8) - image[z, y, x_byte] |= (1 << x_off) + image[z, y, x_byte] |= 1 << x_off + def get_offset_size(n_coords): offset = 0 @@ -640,6 +685,7 @@ def get_offset_size(n_coords): return (offset, size) + def gather_confidence(controller, confidence, n_grains, n_coords): if rank == 0: global_confidence = np.empty(n_grains * n_coords, dtype=np.float64) @@ -649,7 +695,9 @@ def gather_confidence(controller, confidence, n_grains, n_coords): # Calculate the send buffer sizes coords_per_rank = n_coords // world_size send_counts = np.full(world_size, coords_per_rank * n_grains) - send_counts[-1] = (n_coords - (coords_per_rank * (world_size-1))) * n_grains + send_counts[-1] = ( + n_coords - (coords_per_rank * (world_size - 1)) + ) * n_grains if rank == 0: # Time how long it takes to perform the MPI gather @@ -657,16 +705,25 @@ def gather_confidence(controller, confidence, n_grains, n_coords): # Transpose so the data will be more easily re-shaped into its final shape # Must be flattened as well so the underlying data is modified... - comm.Gatherv(confidence.T.flatten(), (global_confidence, send_counts), root=0) + comm.Gatherv( + confidence.T.flatten(), (global_confidence, send_counts), root=0 + ) if rank == 0: controller.finish('gather_confidence') confidence = global_confidence.reshape(n_coords, n_grains).T controller.handle_result("confidence", confidence) + # ============================================================================== # %% ORIENTATION TESTING # ============================================================================== -def test_orientations(image_stack, experiment, test_crds, controller, multiprocessing_start_method='fork'): +def test_orientations( + image_stack, + experiment, + test_crds, + controller, + multiprocessing_start_method='fork', +): """grand loop precomputing the grown image stack image-stack -- is the dilated image stack to be tested against. @@ -716,30 +773,43 @@ def test_orientations(image_stack, experiment, test_crds, controller, multiproce # grand loop ============================================================== # The near field simulation 'grand loop'. Where the bulk of computing is # performed. 
We are looking for a confidence matrix that has a n_grains - chunks = range(offset, offset+size, chunk_size) + chunks = range(offset, offset + size, chunk_size) subprocess = 'grand_loop' controller.start(subprocess, n_coords) finished = 0 ncpus = min(ncpus, len(chunks)) - logging.info(f'For {rank=}, {offset=}, {size=}, {chunks=}, {len(chunks)=}, {ncpus=}') + logging.info( + f'For {rank=}, {offset=}, {size=}, {chunks=}, {len(chunks)=}, {ncpus=}' + ) - logging.info('Checking confidence for %d coords, %d grains.', - n_coords, n_grains) + logging.info( + 'Checking confidence for %d coords, %d grains.', n_coords, n_grains + ) confidence = np.empty((n_grains, size)) if ncpus > 1: global _multiprocessing_start_method - _multiprocessing_start_method=multiprocessing_start_method - logging.info('Running multiprocess %d processes (%s)', - ncpus, _multiprocessing_start_method) - with grand_loop_pool(ncpus=ncpus, - state=(chunk_size, - image_stack, - all_angles, precomp, - test_crds, experiment)) as pool: - for rslice, rvalues in pool.imap_unordered(multiproc_inner_loop, - chunks): + _multiprocessing_start_method = multiprocessing_start_method + logging.info( + 'Running multiprocess %d processes (%s)', + ncpus, + _multiprocessing_start_method, + ) + with grand_loop_pool( + ncpus=ncpus, + state=( + chunk_size, + image_stack, + all_angles, + precomp, + test_crds, + experiment, + ), + ) as pool: + for rslice, rvalues in pool.imap_unordered( + multiproc_inner_loop, chunks + ): count = rvalues.shape[1] # We need to adjust this slice for the offset rslice = slice(rslice.start - offset, rslice.stop - offset) @@ -749,12 +819,15 @@ def test_orientations(image_stack, experiment, test_crds, controller, multiproce else: logging.info('Running in a single process') for chunk_start in chunks: - chunk_stop = min(n_coords, chunk_start+chunk_size) + chunk_stop = min(n_coords, chunk_start + chunk_size) rslice, rvalues = _grand_loop_inner( - image_stack, all_angles, - precomp, test_crds, experiment, + image_stack, + all_angles, + precomp, + test_crds, + experiment, start=chunk_start, - stop=chunk_stop + stop=chunk_stop, ) count = rvalues.shape[1] # We need to adjust this slice for the offset @@ -771,7 +844,6 @@ def test_orientations(image_stack, experiment, test_crds, controller, multiproce else: controller.handle_result("confidence", confidence) - return confidence @@ -794,17 +866,19 @@ def evaluate_diffraction_angles(experiment, controller=None): subprocess = 'evaluate diffraction angles' pbar = controller.start(subprocess, len(exp_maps)) all_angles = [] - ref_gparams = np.array([0., 0., 0., 1., 1., 1., 0., 0., 0.]) + ref_gparams = np.array([0.0, 0.0, 0.0, 1.0, 1.0, 1.0, 0.0, 0.0, 0.0]) for i, exp_map in enumerate(exp_maps): gparams = np.hstack([exp_map, ref_gparams]) - sim_results = xrdutil.simulateGVecs(plane_data, - detector_params, - gparams, - panel_dims=panel_dims_expanded, - pixel_pitch=pixel_size, - ome_range=ome_range, - ome_period=ome_period, - distortion=None) + sim_results = xrdutil.simulateGVecs( + plane_data, + detector_params, + gparams, + panel_dims=panel_dims_expanded, + pixel_pitch=pixel_size, + ome_range=ome_range, + ome_period=ome_period, + distortion=None, + ) all_angles.append(sim_results[2]) controller.update(i + 1) controller.finish(subprocess) @@ -812,8 +886,9 @@ def evaluate_diffraction_angles(experiment, controller=None): return all_angles -def _grand_loop_inner(image_stack, angles, precomp, - coords, experiment, start=0, stop=None): +def _grand_loop_inner( + image_stack, angles, precomp, 
coords, experiment, start=0, stop=None +): """Actual simulation code for a chunk of data. It will be used both, in single processor and multiprocessor cases. Chunking is performed on the coords. @@ -842,7 +917,7 @@ def _grand_loop_inner(image_stack, angles, precomp, inv_deltas = experiment.inv_deltas clip_vals = experiment.clip_vals distortion = experiment.distortion - bsp = experiment.bsp #beam stop vertical center and width + bsp = experiment.bsp # beam stop vertical center and width _to_detector = xfcapi.gvec_to_xy # _to_detector = _gvec_to_detector_array @@ -856,7 +931,7 @@ def _grand_loop_inner(image_stack, angles, precomp, acc_detector = 0.0 acc_distortion = 0.0 acc_quant_clip = 0.0 - confidence = np.zeros((n_angles, stop-start)) + confidence = np.zeros((n_angles, stop - start)) grains = 0 crds = 0 @@ -872,8 +947,15 @@ def _grand_loop_inner(image_stack, angles, precomp, gvec_cs, rD, rMat_ss, rC, tD, tS, coords[icrd] ) t1 = timeit.default_timer() - c = _quant_and_clip_confidence(det_xy, angs[:, 2], image_stack, - base, inv_deltas, clip_vals, bsp) + c = _quant_and_clip_confidence( + det_xy, + angs[:, 2], + image_stack, + base, + inv_deltas, + clip_vals, + bsp, + ) t2 = timeit.default_timer() acc_detector += t1 - t0 acc_quant_clip += t2 - t1 @@ -889,12 +971,19 @@ def _grand_loop_inner(image_stack, angles, precomp, t0 = timeit.default_timer() tmp_xys = _to_detector( gvec_cs, rD, rMat_ss, rC, tD, tS, coords[icrd] - ) #changed to tmp_xys from det_xy, dcp 2021_05_30 + ) # changed to tmp_xys from det_xy, dcp 2021_05_30 t1 = timeit.default_timer() det_xy = distortion_fn(tmp_xys, distortion_args, invert=True) t2 = timeit.default_timer() - c = _quant_and_clip_confidence(det_xy, angs[:, 2], image_stack, - base, inv_deltas, clip_vals,bsp) + c = _quant_and_clip_confidence( + det_xy, + angs[:, 2], + image_stack, + base, + inv_deltas, + clip_vals, + bsp, + ) t3 = timeit.default_timer() acc_detector += t1 - t0 acc_distortion += t2 - t1 @@ -926,6 +1015,7 @@ def generate_test_grid(low, top, samples): # would be less efficient in memory (as joblib memmaps by default the big # arrays, meaning they may be shared between processes). + def multiproc_inner_loop(chunk): """function to use in multiprocessing that computes the simulation over the task's alloted chunk of data""" @@ -935,7 +1025,7 @@ def multiproc_inner_loop(chunk): (offset, size) = get_offset_size(n_coords) - chunk_stop = min(offset+size, chunk+chunk_size) + chunk_stop = min(offset + size, chunk + chunk_size) return _grand_loop_inner(*_mp_state[1:], start=chunk, stop=chunk_stop) @@ -982,7 +1072,7 @@ def grand_loop_pool(ncpus, state): _mp_state = state pool = multiprocessing.Pool(ncpus) yield pool - del (_mp_state) + del _mp_state else: # Use SPAWN multiprocessing. @@ -994,19 +1084,23 @@ def grand_loop_pool(ncpus, state): # joblib). In theory, joblib uses memmap for arrays if they are not # compressed, so no compression is used for the bigger arrays. import joblib + tmp_dir = tempfile.mkdtemp(suffix='-nf-grand-loop') try: # dumb dumping doesn't seem to work very well.. 
do something ad-hoc logging.info('Using "%s" as temporary directory.', tmp_dir) - id_exp = joblib.dump(state[-1], - os.path.join(tmp_dir, - 'grand-loop-experiment.gz'), - compress=True) - id_state = joblib.dump(state[:-1], - os.path.join(tmp_dir, 'grand-loop-data')) - pool = multiprocessing.Pool(ncpus, worker_init, - (id_state[0], id_exp[0])) + id_exp = joblib.dump( + state[-1], + os.path.join(tmp_dir, 'grand-loop-experiment.gz'), + compress=True, + ) + id_state = joblib.dump( + state[:-1], os.path.join(tmp_dir, 'grand-loop-data') + ) + pool = multiprocessing.Pool( + ncpus, worker_init, (id_state[0], id_exp[0]) + ) yield pool finally: logging.info('Deleting "%s".', tmp_dir) @@ -1018,37 +1112,53 @@ def grand_loop_pool(ncpus, state): def gen_nf_test_grid(cross_sectional_dim, v_bnds, voxel_spacing): - Zs_list=np.arange(-cross_sectional_dim/2.+voxel_spacing/2.,cross_sectional_dim/2.,voxel_spacing) - Xs_list=np.arange(-cross_sectional_dim/2.+voxel_spacing/2.,cross_sectional_dim/2.,voxel_spacing) - + Zs_list = np.arange( + -cross_sectional_dim / 2.0 + voxel_spacing / 2.0, + cross_sectional_dim / 2.0, + voxel_spacing, + ) + Xs_list = np.arange( + -cross_sectional_dim / 2.0 + voxel_spacing / 2.0, + cross_sectional_dim / 2.0, + voxel_spacing, + ) - if v_bnds[0]==v_bnds[1]: - Xs,Ys,Zs=np.meshgrid(Xs_list,v_bnds[0],Zs_list) + if v_bnds[0] == v_bnds[1]: + Xs, Ys, Zs = np.meshgrid(Xs_list, v_bnds[0], Zs_list) else: - Xs,Ys,Zs=np.meshgrid(Xs_list,np.arange(v_bnds[0]+voxel_spacing/2.,v_bnds[1],voxel_spacing),Zs_list) - #note numpy shaping of arrays is goofy, returns(length(y),length(x),length(z)) - - + Xs, Ys, Zs = np.meshgrid( + Xs_list, + np.arange( + v_bnds[0] + voxel_spacing / 2.0, v_bnds[1], voxel_spacing + ), + Zs_list, + ) + # note numpy shaping of arrays is goofy, returns(length(y),length(x),length(z)) test_crds = np.vstack([Xs.flatten(), Ys.flatten(), Zs.flatten()]).T n_crds = len(test_crds) - return test_crds, n_crds, Xs, Ys, Zs def gen_nf_test_grid_tomo(x_dim_pnts, z_dim_pnts, v_bnds, voxel_spacing): - if v_bnds[0]==v_bnds[1]: - Xs,Ys,Zs=np.meshgrid(np.arange(x_dim_pnts),v_bnds[0],np.arange(z_dim_pnts)) + if v_bnds[0] == v_bnds[1]: + Xs, Ys, Zs = np.meshgrid( + np.arange(x_dim_pnts), v_bnds[0], np.arange(z_dim_pnts) + ) else: - Xs,Ys,Zs=np.meshgrid(np.arange(x_dim_pnts),np.arange(v_bnds[0]+voxel_spacing/2.,v_bnds[1],voxel_spacing),np.arange(z_dim_pnts)) - #note numpy shaping of arrays is goofy, returns(length(y),length(x),length(z)) - - - Zs=(Zs-(z_dim_pnts/2))*voxel_spacing - Xs=(Xs-(x_dim_pnts/2))*voxel_spacing + Xs, Ys, Zs = np.meshgrid( + np.arange(x_dim_pnts), + np.arange( + v_bnds[0] + voxel_spacing / 2.0, v_bnds[1], voxel_spacing + ), + np.arange(z_dim_pnts), + ) + # note numpy shaping of arrays is goofy, returns(length(y),length(x),length(z)) + Zs = (Zs - (z_dim_pnts / 2)) * voxel_spacing + Xs = (Xs - (x_dim_pnts / 2)) * voxel_spacing test_crds = np.vstack([Xs.flatten(), Ys.flatten(), Zs.flatten()]).T n_crds = len(test_crds) @@ -1058,22 +1168,38 @@ def gen_nf_test_grid_tomo(x_dim_pnts, z_dim_pnts, v_bnds, voxel_spacing): # %% -def gen_nf_dark(data_folder,img_nums,num_for_dark,nrows,ncols,dark_type='median',stem='nf_',num_digits=5,ext='.tif'): - dark_stack=np.zeros([num_for_dark,nrows,ncols]) +def gen_nf_dark( + data_folder, + img_nums, + num_for_dark, + nrows, + ncols, + dark_type='median', + stem='nf_', + num_digits=5, + ext='.tif', +): + + dark_stack = np.zeros([num_for_dark, nrows, ncols]) print('Loading data for dark generation...') for ii in np.arange(num_for_dark): print('Image 
#: ' + str(ii))
-        dark_stack[ii,:,:]=imgio.imread(data_folder+'%s'%(stem)+str(img_nums[ii]).zfill(num_digits)+ext)
-        #image_stack[ii,:,:]=np.flipud(tmp_img>threshold)
+        dark_stack[ii, :, :] = imgio.imread(
+            data_folder
+            + '%s' % (stem)
+            + str(img_nums[ii]).zfill(num_digits)
+            + ext
+        )
+        # image_stack[ii,:,:]=np.flipud(tmp_img>threshold)

-    if dark_type=='median':
+    if dark_type == 'median':
         print('making median...')
-        dark=np.median(dark_stack,axis=0)
-    elif dark_type=='min':
+        dark = np.median(dark_stack, axis=0)
+    elif dark_type == 'min':
         print('making min...')
-        dark=np.min(dark_stack,axis=0)
+        dark = np.min(dark_stack, axis=0)
+    else:
+        # any other dark_type would leave `dark` undefined
+        raise ValueError("dark_type must be 'median' or 'min'")

     return dark

@@ -1081,49 +1207,77 @@ def gen_nf_dark(data_folder,img_nums,num_for_dark,nrows,ncols,dark_type='median'
 # %%

-def
gen_nf_cleaned_image_stack(data_folder,img_nums,dark,nrows,ncols, \ # %% -def gen_trial_exp_data(grain_out_file,det_file,mat_file, mat_name, max_tth, comp_thresh, chi2_thresh, misorientation_bnd, \ - misorientation_spacing,ome_range_deg, nframes, beam_stop_parms): - +def gen_trial_exp_data( + grain_out_file, + det_file, + mat_file, + mat_name, + max_tth, + comp_thresh, + chi2_thresh, + misorientation_bnd, + misorientation_spacing, + ome_range_deg, + nframes, + beam_stop_parms, +): print('Loading Grain Data...') - #gen_grain_data - ff_data=np.loadtxt(grain_out_file) - - #ff_data=np.atleast_2d(ff_data[2,:]) + # gen_grain_data + ff_data = np.loadtxt(grain_out_file) - exp_maps=ff_data[:,3:6] - t_vec_ds=ff_data[:,6:9] + # ff_data=np.atleast_2d(ff_data[2,:]) + exp_maps = ff_data[:, 3:6] + t_vec_ds = ff_data[:, 6:9] # - completeness=ff_data[:,1] + completeness = ff_data[:, 1] - chi2=ff_data[:,2] + chi2 = ff_data[:, 2] - n_grains=exp_maps.shape[0] + n_grains = exp_maps.shape[0] rMat_c = rotations.rotMatOfExpMap(exp_maps.T) - cut=np.where(np.logical_and(completeness>comp_thresh,chi2 comp_thresh, chi2 < chi2_thresh) + )[0] + exp_maps = exp_maps[cut, :] + t_vec_ds = t_vec_ds[cut, :] + chi2 = chi2[cut] # Add Misorientation - mis_amt=misorientation_bnd*np.pi/180. - spacing=misorientation_spacing*np.pi/180. - - mis_steps = int(misorientation_bnd/misorientation_spacing) + mis_amt = misorientation_bnd * np.pi / 180.0 + spacing = misorientation_spacing * np.pi / 180.0 - ori_pts = np.arange(-mis_amt, (mis_amt+(spacing*0.999)),spacing) - num_ori_grid_pts=ori_pts.shape[0]**3 - num_oris=exp_maps.shape[0] + mis_steps = int(misorientation_bnd / misorientation_spacing) + ori_pts = np.arange(-mis_amt, (mis_amt + (spacing * 0.999)), spacing) + num_ori_grid_pts = ori_pts.shape[0] ** 3 + num_oris = exp_maps.shape[0] XsO, YsO, ZsO = np.meshgrid(ori_pts, ori_pts, ori_pts) grid0 = np.vstack([XsO.flatten(), YsO.flatten(), ZsO.flatten()]).T - - exp_maps_expanded=np.zeros([num_ori_grid_pts*num_oris,3]) - t_vec_ds_expanded=np.zeros([num_ori_grid_pts*num_oris,3]) - + exp_maps_expanded = np.zeros([num_ori_grid_pts * num_oris, 3]) + t_vec_ds_expanded = np.zeros([num_ori_grid_pts * num_oris, 3]) for ii in np.arange(num_oris): - pts_to_use=np.arange(num_ori_grid_pts)+ii*num_ori_grid_pts - exp_maps_expanded[pts_to_use,:]=grid0+np.r_[exp_maps[ii,:] ] - t_vec_ds_expanded[pts_to_use,:]=np.r_[t_vec_ds[ii,:] ] + pts_to_use = np.arange(num_ori_grid_pts) + ii * num_ori_grid_pts + exp_maps_expanded[pts_to_use, :] = grid0 + np.r_[exp_maps[ii, :]] + t_vec_ds_expanded[pts_to_use, :] = np.r_[t_vec_ds[ii, :]] + exp_maps = exp_maps_expanded + t_vec_ds = t_vec_ds_expanded - exp_maps=exp_maps_expanded - t_vec_ds=t_vec_ds_expanded - - n_grains=exp_maps.shape[0] + n_grains = exp_maps.shape[0] rMat_c = rotations.rotMatOfExpMap(exp_maps.T) - print('Loading Instrument Data...') - ome_period_deg=(ome_range_deg[0][0], (ome_range_deg[0][0]+360.)) #degrees - ome_step_deg=(ome_range_deg[0][1]-ome_range_deg[0][0])/nframes #degrees - - - ome_period = (ome_period_deg[0]*np.pi/180.,ome_period_deg[1]*np.pi/180.) - ome_range = [(ome_range_deg[0][0]*np.pi/180.,ome_range_deg[0][1]*np.pi/180.)] - ome_step = ome_step_deg*np.pi/180. 
- - - - ome_edges = np.arange(nframes+1)*ome_step+ome_range[0][0]#fixed 2/26/17 + ome_period_deg = ( + ome_range_deg[0][0], + (ome_range_deg[0][0] + 360.0), + ) # degrees + ome_step_deg = ( + ome_range_deg[0][1] - ome_range_deg[0][0] + ) / nframes # degrees + + ome_period = ( + ome_period_deg[0] * np.pi / 180.0, + ome_period_deg[1] * np.pi / 180.0, + ) + ome_range = [ + ( + ome_range_deg[0][0] * np.pi / 180.0, + ome_range_deg[0][1] * np.pi / 180.0, + ) + ] + ome_step = ome_step_deg * np.pi / 180.0 + ome_edges = ( + np.arange(nframes + 1) * ome_step + ome_range[0][0] + ) # fixed 2/26/17 - instr=load_instrument(det_file) + instr = load_instrument(det_file) panel = next(iter(instr.detectors.values())) # !!! there is only 1 - # tranform paramters + # tranform paramters # Sample chi = instr.chi tVec_s = instr.tvec @@ -1228,8 +1399,7 @@ def gen_trial_exp_data(grain_out_file,det_file,mat_file, mat_name, max_tth, comp ncols = panel.cols # panel dimensions - panel_dims = [tuple(panel.corner_ll), - tuple(panel.corner_ur)] + panel_dims = [tuple(panel.corner_ll), tuple(panel.corner_ur)] x_col_edges = panel.col_edge_vec y_row_edges = panel.row_edge_vec @@ -1242,13 +1412,15 @@ def gen_trial_exp_data(grain_out_file,det_file,mat_file, mat_name, max_tth, comp # a different parametrization for the sensor # (makes for faster quantization) - base = np.array([x_col_edges[0], - y_row_edges[0], - ome_edges[0]]) - deltas = np.array([x_col_edges[1] - x_col_edges[0], - y_row_edges[1] - y_row_edges[0], - ome_edges[1] - ome_edges[0]]) - inv_deltas = 1.0/deltas + base = np.array([x_col_edges[0], y_row_edges[0], ome_edges[0]]) + deltas = np.array( + [ + x_col_edges[1] - x_col_edges[0], + y_row_edges[1] - y_row_edges[0], + ome_edges[1] - ome_edges[0], + ] + ) + inv_deltas = 1.0 / deltas clip_vals = np.array([ncols, nrows]) # # dilation @@ -1256,27 +1428,28 @@ def gen_trial_exp_data(grain_out_file,det_file,mat_file, mat_name, max_tth, comp # row_dilation = int(np.ceil(0.5 * max_diameter/row_ps)) # col_dilation = int(np.ceil(0.5 * max_diameter/col_ps)) - - print('Loading Materials Data...') # crystallography data - beam_energy = valunits.valWUnit("beam_energy", "energy", instr.beam_energy, "keV") + beam_energy = valunits.valWUnit( + "beam_energy", "energy", instr.beam_energy, "keV" + ) beam_wavelength = constants.keVToAngstrom(beam_energy.getVal('keV')) - dmin = valunits.valWUnit("dmin", "length", - 0.5*beam_wavelength/np.sin(0.5*max_pixel_tth), - "angstrom") + dmin = valunits.valWUnit( + "dmin", + "length", + 0.5 * beam_wavelength / np.sin(0.5 * max_pixel_tth), + "angstrom", + ) # material loading - mats = material.load_materials_hdf5(mat_file, dmin=dmin,kev=beam_energy) + mats = material.load_materials_hdf5(mat_file, dmin=dmin, kev=beam_energy) pd = mats[mat_name].planeData if max_tth is not None: - pd.tThMax = np.amax(np.radians(max_tth)) + pd.tThMax = np.amax(np.radians(max_tth)) else: pd.tThMax = np.amax(max_pixel_tth) - - print('Final Assembly...') experiment = argparse.Namespace() # grains related information @@ -1309,62 +1482,109 @@ def gen_trial_exp_data(grain_out_file,det_file,mat_file, mat_name, max_tth, comp experiment.clip_vals = clip_vals experiment.bsp = beam_stop_parms - - if mis_steps ==0: + if mis_steps == 0: nf_to_ff_id_map = cut else: - nf_to_ff_id_map=np.tile(cut,3**3*mis_steps) + nf_to_ff_id_map = np.tile(cut, 3**3 * mis_steps) return experiment, nf_to_ff_id_map -def process_raw_confidence(raw_confidence,vol_shape=None,id_remap=None,min_thresh=0.0): + +def process_raw_confidence( + raw_confidence, 
vol_shape=None, id_remap=None, min_thresh=0.0 +): print('Compiling Confidence Map...') if vol_shape == None: - confidence_map=np.max(raw_confidence,axis=0) - grain_map=np.argmax(raw_confidence,axis=0) + confidence_map = np.max(raw_confidence, axis=0) + grain_map = np.argmax(raw_confidence, axis=0) else: - confidence_map=np.max(raw_confidence,axis=0).reshape(vol_shape) - grain_map=np.argmax(raw_confidence,axis=0).reshape(vol_shape) - - - #fix grain indexing - not_indexed=np.where(confidence_map<=min_thresh) - grain_map[not_indexed] =-1 + confidence_map = np.max(raw_confidence, axis=0).reshape(vol_shape) + grain_map = np.argmax(raw_confidence, axis=0).reshape(vol_shape) + # fix grain indexing + not_indexed = np.where(confidence_map <= min_thresh) + grain_map[not_indexed] = -1 if id_remap is not None: - max_grain_no=np.max(grain_map) - grain_map_copy=copy.copy(grain_map) + max_grain_no = np.max(grain_map) + grain_map_copy = copy.copy(grain_map) print('Remapping grain ids to ff...') for ii in np.arange(max_grain_no): - this_grain=np.where(grain_map==ii) - grain_map_copy[this_grain]=id_remap[ii] - grain_map=grain_map_copy + this_grain = np.where(grain_map == ii) + grain_map_copy[this_grain] = id_remap[ii] + grain_map = grain_map_copy return grain_map.astype(int), confidence_map # %% -def save_raw_confidence(save_dir,save_stem,raw_confidence,id_remap=None): +def save_raw_confidence(save_dir, save_stem, raw_confidence, id_remap=None): print('Saving raw confidence, might take a while...') if id_remap is not None: - np.savez(save_dir+save_stem+'_raw_confidence.npz',raw_confidence=raw_confidence,id_remap=id_remap) + np.savez( + save_dir + save_stem + '_raw_confidence.npz', + raw_confidence=raw_confidence, + id_remap=id_remap, + ) else: - np.savez(save_dir+save_stem+'_raw_confidence.npz',raw_confidence=raw_confidence) + np.savez( + save_dir + save_stem + '_raw_confidence.npz', + raw_confidence=raw_confidence, + ) + + # %% -def save_nf_data(save_dir,save_stem,grain_map,confidence_map,Xs,Ys,Zs,ori_list,id_remap=None): + +def save_nf_data( + save_dir, + save_stem, + grain_map, + confidence_map, + Xs, + Ys, + Zs, + ori_list, + id_remap=None, +): print('Saving grain map data...') if id_remap is not None: - np.savez(save_dir+save_stem+'_grain_map_data.npz',grain_map=grain_map,confidence_map=confidence_map,Xs=Xs,Ys=Ys,Zs=Zs,ori_list=ori_list,id_remap=id_remap) + np.savez( + save_dir + save_stem + '_grain_map_data.npz', + grain_map=grain_map, + confidence_map=confidence_map, + Xs=Xs, + Ys=Ys, + Zs=Zs, + ori_list=ori_list, + id_remap=id_remap, + ) else: - np.savez(save_dir+save_stem+'_grain_map_data.npz',grain_map=grain_map,confidence_map=confidence_map,Xs=Xs,Ys=Ys,Zs=Zs,ori_list=ori_list) + np.savez( + save_dir + save_stem + '_grain_map_data.npz', + grain_map=grain_map, + confidence_map=confidence_map, + Xs=Xs, + Ys=Ys, + Zs=Zs, + ori_list=ori_list, + ) # %% -def scan_detector_parm(image_stack, experiment,test_crds,controller,parm_to_opt,parm_range,slice_shape,ang='deg'): + +def scan_detector_parm( + image_stack, + experiment, + test_crds, + controller, + parm_to_opt, + parm_range, + slice_shape, + ang='deg', +): # 0-distance # 1-x center # 2-y center @@ -1372,42 +1592,46 @@ def scan_detector_parm(image_stack, experiment,test_crds,controller,parm_to_opt, # 4-ytilt # 5-ztilt - parm_vector=np.arange(parm_range[0],parm_range[1]+1e-6,(parm_range[1]-parm_range[0])/parm_range[2]) + parm_vector = np.arange( + parm_range[0], + parm_range[1] + 1e-6, + (parm_range[1] - parm_range[0]) / parm_range[2], + ) - if 
parm_to_opt>2 and ang=='deg': - parm_vector=parm_vector*np.pi/180. + if parm_to_opt > 2 and ang == 'deg': + parm_vector = parm_vector * np.pi / 180.0 multiprocessing_start_method = 'fork' if hasattr(os, 'fork') else 'spawn' # current detector parameters, note the value for the actively optimized # parameters will be ignored - distance=experiment.detector_params[5]#mm - x_cen=experiment.detector_params[3]#mm - y_cen=experiment.detector_params[4]#mm - xtilt=experiment.detector_params[0] - ytilt=experiment.detector_params[1] - ztilt=experiment.detector_params[2] - ome_range=copy.copy(experiment.ome_range) - ome_period=copy.copy(experiment.ome_period) - ome_edges=copy.copy(experiment.ome_edges) + distance = experiment.detector_params[5] # mm + x_cen = experiment.detector_params[3] # mm + y_cen = experiment.detector_params[4] # mm + xtilt = experiment.detector_params[0] + ytilt = experiment.detector_params[1] + ztilt = experiment.detector_params[2] + ome_range = copy.copy(experiment.ome_range) + ome_period = copy.copy(experiment.ome_period) + ome_edges = copy.copy(experiment.ome_edges) - num_parm_pts=len(parm_vector) + num_parm_pts = len(parm_vector) - trial_data=np.zeros([num_parm_pts,slice_shape[0],slice_shape[1]]) + trial_data = np.zeros([num_parm_pts, slice_shape[0], slice_shape[1]]) - tmp_td=copy.copy(experiment.tVec_d) + tmp_td = copy.copy(experiment.tVec_d) for jj in np.arange(num_parm_pts): - print('cycle %d of %d'%(jj+1,num_parm_pts)) + print('cycle %d of %d' % (jj + 1, num_parm_pts)) # overwrite translation vector components - if parm_to_opt==0: - tmp_td[2]=parm_vector[jj] + if parm_to_opt == 0: + tmp_td[2] = parm_vector[jj] - if parm_to_opt==1: - tmp_td[0]=parm_vector[jj] + if parm_to_opt == 1: + tmp_td[0] = parm_vector[jj] - if parm_to_opt==2: - tmp_td[1]=parm_vector[jj] + if parm_to_opt == 2: + tmp_td[1] = parm_vector[jj] if parm_to_opt == 3: rMat_d_tmp = xfcapi.make_detector_rmat( @@ -1427,7 +1651,7 @@ def scan_detector_parm(image_stack, experiment,test_crds,controller,parm_to_opt, experiment.rMat_d = rMat_d_tmp experiment.tVec_d = tmp_td - if parm_to_opt==6: + if parm_to_opt == 6: experiment.ome_range = [ ( @@ -1447,24 +1671,34 @@ def scan_detector_parm(image_stack, experiment,test_crds,controller,parm_to_opt, # print(experiment.ome_edges) # print(experiment.base) - conf=test_orientations(image_stack, experiment,test_crds,controller, \ - multiprocessing_start_method) + conf = test_orientations( + image_stack, + experiment, + test_crds, + controller, + multiprocessing_start_method, + ) - trial_data[jj]=np.max(conf,axis=0).reshape(slice_shape) + trial_data[jj] = np.max(conf, axis=0).reshape(slice_shape) return trial_data, parm_vector + # %% -def plot_ori_map(grain_map, confidence_map, exp_maps, layer_no,mat,id_remap=None): - grains_plot=np.squeeze(grain_map[layer_no,:,:]) - conf_plot=np.squeeze(confidence_map[layer_no,:,:]) - n_grains=len(exp_maps) +def plot_ori_map( + grain_map, confidence_map, exp_maps, layer_no, mat, id_remap=None +): + + grains_plot = np.squeeze(grain_map[layer_no, :, :]) + conf_plot = np.squeeze(confidence_map[layer_no, :, :]) + n_grains = len(exp_maps) rgb_image = np.zeros( - [grains_plot.shape[0], grains_plot.shape[1], 4], dtype='float32') - rgb_image[:, :, 3] = 1. 
+ [grains_plot.shape[0], grains_plot.shape[1], 4], dtype='float32' + ) + rgb_image[:, :, 3] = 1.0 for ii in np.arange(n_grains): if id_remap is not None: @@ -1477,25 +1711,32 @@ def plot_ori_map(grain_map, confidence_map, exp_maps, layer_no,mat,id_remap=None rmats = rotations.rotMatOfExpMap(ori) rgb = mat.unitcell.color_orientations( - rmats, ref_dir=np.array([0., 1., 0.])) + rmats, ref_dir=np.array([0.0, 1.0, 0.0]) + ) - #color mapping + # color mapping rgb_image[this_grain[0], this_grain[1], 0] = rgb[0][0] rgb_image[this_grain[0], this_grain[1], 1] = rgb[0][1] rgb_image[this_grain[0], this_grain[1], 2] = rgb[0][2] - - fig1 = plt.figure() plt.imshow(rgb_image, interpolation='none') plt.title('Layer %d Grain Map' % layer_no) - #plt.show() + # plt.show() plt.hold(True) - #fig2 = plt.figure() - plt.imshow(conf_plot, vmin=0.0, vmax=1., - interpolation='none', cmap=plt.cm.gray, alpha=0.5) + # fig2 = plt.figure() + plt.imshow( + conf_plot, + vmin=0.0, + vmax=1.0, + interpolation='none', + cmap=plt.cm.gray, + alpha=0.5, + ) plt.title('Layer %d Confidence Map' % layer_no) plt.show() + + # ============================================================================== # %% SCRIPT ENTRY AND PARAMETER HANDLING # ============================================================================== @@ -1550,12 +1791,15 @@ def plot_ori_map(grain_map, confidence_map, exp_maps, layer_no,mat,id_remap=None # return args -def build_controller(check=None,generate=None,ncpus=2,chunk_size=10,limit=None): +def build_controller( + check=None, generate=None, ncpus=2, chunk_size=10, limit=None +): # builds the controller to use based on the args # result handle try: import progressbar + progress_handler = progressbar_progress_observer() except ImportError: progress_handler = null_progress_observer() @@ -1564,7 +1808,8 @@ def build_controller(check=None,generate=None,ncpus=2,chunk_size=10,limit=None): if generate is not None: logging.warn( "generating and checking can not happen at the same time, " - + "going with checking") + + "going with checking" + ) result_handler = checking_result_handler(check) elif generate is not None: @@ -1576,71 +1821,99 @@ def build_controller(check=None,generate=None,ncpus=2,chunk_size=10,limit=None): # logging.warn("Multiprocessing on Windows is disabled for now") # args.ncpus = 1 - controller = ProcessController(result_handler, progress_handler, - ncpus=ncpus, - chunk_size=chunk_size) + controller = ProcessController( + result_handler, progress_handler, ncpus=ncpus, chunk_size=chunk_size + ) if limit is not None: controller.set_limit('coords', lambda x: min(x, limit)) return controller -def output_grain_map(data_location,data_stems,output_stem,vol_spacing,top_down=True,save_type=['npz']): - num_scans=len(data_stems) +def output_grain_map( + data_location, + data_stems, + output_stem, + vol_spacing, + top_down=True, + save_type=['npz'], +): - confidence_maps=[None]*num_scans - grain_maps=[None]*num_scans - Xss=[None]*num_scans - Yss=[None]*num_scans - Zss=[None]*num_scans + num_scans = len(data_stems) - if len(vol_spacing)==1: - vol_shifts=np.arange(0,vol_spacing[0]*num_scans+1e-12,vol_spacing[0]) - else: - vol_shifts=vol_spacing + confidence_maps = [None] * num_scans + grain_maps = [None] * num_scans + Xss = [None] * num_scans + Yss = [None] * num_scans + Zss = [None] * num_scans + if len(vol_spacing) == 1: + vol_shifts = np.arange( + 0, vol_spacing[0] * num_scans + 1e-12, vol_spacing[0] + ) + else: + vol_shifts = vol_spacing for ii in np.arange(num_scans): - print('Loading Volume %d 
....'%(ii)) - conf_data=np.load(os.path.join(data_location,data_stems[ii]+'_grain_map_data.npz')) - - confidence_maps[ii]=conf_data['confidence_map'] - grain_maps[ii]=conf_data['grain_map'] - Xss[ii]=conf_data['Xs'] - Yss[ii]=conf_data['Ys'] - Zss[ii]=conf_data['Zs'] + print('Loading Volume %d ....' % (ii)) + conf_data = np.load( + os.path.join(data_location, data_stems[ii] + '_grain_map_data.npz') + ) - #assumes all volumes to be the same size - num_layers=grain_maps[0].shape[0] + confidence_maps[ii] = conf_data['confidence_map'] + grain_maps[ii] = conf_data['grain_map'] + Xss[ii] = conf_data['Xs'] + Yss[ii] = conf_data['Ys'] + Zss[ii] = conf_data['Zs'] - total_layers=num_layers*num_scans + # assumes all volumes to be the same size + num_layers = grain_maps[0].shape[0] - num_rows=grain_maps[0].shape[1] - num_cols=grain_maps[0].shape[2] + total_layers = num_layers * num_scans - grain_map_stitched=np.zeros((total_layers,num_rows,num_cols)) - confidence_stitched=np.zeros((total_layers,num_rows,num_cols)) - Xs_stitched=np.zeros((total_layers,num_rows,num_cols)) - Ys_stitched=np.zeros((total_layers,num_rows,num_cols)) - Zs_stitched=np.zeros((total_layers,num_rows,num_cols)) + num_rows = grain_maps[0].shape[1] + num_cols = grain_maps[0].shape[2] + grain_map_stitched = np.zeros((total_layers, num_rows, num_cols)) + confidence_stitched = np.zeros((total_layers, num_rows, num_cols)) + Xs_stitched = np.zeros((total_layers, num_rows, num_cols)) + Ys_stitched = np.zeros((total_layers, num_rows, num_cols)) + Zs_stitched = np.zeros((total_layers, num_rows, num_cols)) for ii in np.arange(num_scans): - if top_down==True: - grain_map_stitched[((ii)*num_layers):((ii)*num_layers+num_layers),:,:]=grain_maps[num_scans-1-ii] - confidence_stitched[((ii)*num_layers):((ii)*num_layers+num_layers),:,:]=confidence_maps[num_scans-1-ii] - Xs_stitched[((ii)*num_layers):((ii)*num_layers+num_layers),:,:]=\ - Xss[num_scans-1-ii] - Zs_stitched[((ii)*num_layers):((ii)*num_layers+num_layers),:,:]=\ - Zss[num_scans-1-ii] - Ys_stitched[((ii)*num_layers):((ii)*num_layers+num_layers),:,:]=Yss[num_scans-1-ii]+vol_shifts[ii] + if top_down == True: + grain_map_stitched[ + ((ii) * num_layers) : ((ii) * num_layers + num_layers), :, : + ] = grain_maps[num_scans - 1 - ii] + confidence_stitched[ + ((ii) * num_layers) : ((ii) * num_layers + num_layers), :, : + ] = confidence_maps[num_scans - 1 - ii] + Xs_stitched[ + ((ii) * num_layers) : ((ii) * num_layers + num_layers), :, : + ] = Xss[num_scans - 1 - ii] + Zs_stitched[ + ((ii) * num_layers) : ((ii) * num_layers + num_layers), :, : + ] = Zss[num_scans - 1 - ii] + Ys_stitched[ + ((ii) * num_layers) : ((ii) * num_layers + num_layers), :, : + ] = (Yss[num_scans - 1 - ii] + vol_shifts[ii]) else: - grain_map_stitched[((ii)*num_layers):((ii)*num_layers+num_layers),:,:]=grain_maps[ii] - confidence_stitched[((ii)*num_layers):((ii)*num_layers+num_layers),:,:]=confidence_maps[ii] - Xs_stitched[((ii)*num_layers):((ii)*num_layers+num_layers),:,:]=Xss[ii] - Zs_stitched[((ii)*num_layers):((ii)*num_layers+num_layers),:,:]=Zss[ii] - Ys_stitched[((ii)*num_layers):((ii)*num_layers+num_layers),:,:]=Yss[ii]+vol_shifts[ii] + grain_map_stitched[ + ((ii) * num_layers) : ((ii) * num_layers + num_layers), :, : + ] = grain_maps[ii] + confidence_stitched[ + ((ii) * num_layers) : ((ii) * num_layers + num_layers), :, : + ] = confidence_maps[ii] + Xs_stitched[ + ((ii) * num_layers) : ((ii) * num_layers + num_layers), :, : + ] = Xss[ii] + Zs_stitched[ + ((ii) * num_layers) : ((ii) * num_layers + num_layers), :, : + ] 
= Zss[ii] + Ys_stitched[ + ((ii) * num_layers) : ((ii) * num_layers + num_layers), :, : + ] = (Yss[ii] + vol_shifts[ii]) for ii in np.arange(len(save_type)): @@ -1655,31 +1928,34 @@ def output_grain_map(data_location,data_stems,output_stem,vol_spacing,top_down=T hf.create_dataset('Ys', data=Ys_stitched) hf.create_dataset('Zs', data=Zs_stitched) - elif save_type[ii]=='npz': + elif save_type[ii] == 'npz': print('Writing NPZ data...') - np.savez(output_stem + '_assembled.npz',\ - grain_map=grain_map_stitched,confidence=confidence_stitched, - Xs=Xs_stitched,Ys=Ys_stitched,Zs=Zs_stitched) - - elif save_type[ii]=='vtk': + np.savez( + output_stem + '_assembled.npz', + grain_map=grain_map_stitched, + confidence=confidence_stitched, + Xs=Xs_stitched, + Ys=Ys_stitched, + Zs=Zs_stitched, + ) + elif save_type[ii] == 'vtk': print('Writing VTK data...') # VTK Dump - Xslist=Xs_stitched[:,:,:].ravel() - Yslist=Ys_stitched[:,:,:].ravel() - Zslist=Zs_stitched[:,:,:].ravel() - - grainlist=grain_map_stitched[:,:,:].ravel() - conflist=confidence_stitched[:,:,:].ravel() + Xslist = Xs_stitched[:, :, :].ravel() + Yslist = Ys_stitched[:, :, :].ravel() + Zslist = Zs_stitched[:, :, :].ravel() - num_pts=Xslist.shape[0] - num_cells=(total_layers-1)*(num_rows-1)*(num_cols-1) + grainlist = grain_map_stitched[:, :, :].ravel() + conflist = confidence_stitched[:, :, :].ravel() - f = open(os.path.join(output_stem +'_assembled.vtk'), 'w') + num_pts = Xslist.shape[0] + num_cells = (total_layers - 1) * (num_rows - 1) * (num_cols - 1) + f = open(os.path.join(output_stem + '_assembled.vtk'), 'w') f.write('# vtk DataFile Version 3.0\n') f.write('grainmap Data\n') @@ -1688,28 +1964,29 @@ def output_grain_map(data_location,data_stems,output_stem,vol_spacing,top_down=T f.write('POINTS %d double\n' % (num_pts)) for i in np.arange(num_pts): - f.write('%e %e %e \n' %(Xslist[i],Yslist[i],Zslist[i])) - - scale2=num_cols*num_rows - scale1=num_cols - - f.write('CELLS %d %d\n' % (num_cells, 9*num_cells)) - for k in np.arange(Xs_stitched.shape[0]-1): - for j in np.arange(Xs_stitched.shape[1]-1): - for i in np.arange(Xs_stitched.shape[2]-1): - base=scale2*k+scale1*j+i - p1=base - p2=base+1 - p3=base+1+scale1 - p4=base+scale1 - p5=base+scale2 - p6=base+scale2+1 - p7=base+scale2+scale1+1 - p8=base+scale2+scale1 - - f.write('8 %d %d %d %d %d %d %d %d \n' \ - %(p1,p2,p3,p4,p5,p6,p7,p8)) - + f.write('%e %e %e \n' % (Xslist[i], Yslist[i], Zslist[i])) + + scale2 = num_cols * num_rows + scale1 = num_cols + + f.write('CELLS %d %d\n' % (num_cells, 9 * num_cells)) + for k in np.arange(Xs_stitched.shape[0] - 1): + for j in np.arange(Xs_stitched.shape[1] - 1): + for i in np.arange(Xs_stitched.shape[2] - 1): + base = scale2 * k + scale1 * j + i + p1 = base + p2 = base + 1 + p3 = base + 1 + scale1 + p4 = base + scale1 + p5 = base + scale2 + p6 = base + scale2 + 1 + p7 = base + scale2 + scale1 + 1 + p8 = base + scale2 + scale1 + + f.write( + '8 %d %d %d %d %d %d %d %d \n' + % (p1, p2, p3, p4, p5, p6, p7, p8) + ) f.write('CELL_TYPES %d \n' % (num_cells)) for i in np.arange(num_cells): @@ -1719,21 +1996,25 @@ def output_grain_map(data_location,data_stems,output_stem,vol_spacing,top_down=T f.write('SCALARS grain_id int \n') f.write('LOOKUP_TABLE default \n') for i in np.arange(num_pts): - f.write('%d \n' %(grainlist[i])) + f.write('%d \n' % (grainlist[i])) - f.write('FIELD FieldData 1 \n' ) + f.write('FIELD FieldData 1 \n') f.write('confidence 1 %d float \n' % (num_pts)) for i in np.arange(num_pts): - f.write('%e \n' %(conflist[i])) - + f.write('%e \n' % 
(conflist[i])) f.close() else: print('Not a valid save option, npz, vtk, or hdf5 allowed.') - return grain_map_stitched, confidence_stitched, Xs_stitched, Ys_stitched, \ - Zs_stitched + return ( + grain_map_stitched, + confidence_stitched, + Xs_stitched, + Ys_stitched, + Zs_stitched, + ) # # assume that if os has fork, it will be used by multiprocessing. diff --git a/hexrd/hedm/grainmap/tomoutil.py b/hexrd/hedm/grainmap/tomoutil.py index 1b55c976f..93c83a5ed 100644 --- a/hexrd/hedm/grainmap/tomoutil.py +++ b/hexrd/hedm/grainmap/tomoutil.py @@ -1,178 +1,263 @@ -#%% +# %% import numpy as np -#import scipy as sp + +# import scipy as sp import scipy.ndimage as img + try: import imageio as imgio -except(ImportError): +except ImportError: from skimage import io as imgio import skimage.transform as xformimg +# %% +def gen_bright_field( + tbf_data_folder, + tbf_img_start, + tbf_num_imgs, + nrows, + ncols, + stem='nf_', + num_digits=5, + ext='.tif', +): -#%% - + tbf_img_nums = np.arange(tbf_img_start, tbf_img_start + tbf_num_imgs, 1) -def gen_bright_field(tbf_data_folder,tbf_img_start,tbf_num_imgs,nrows,ncols,stem='nf_',num_digits=5,ext='.tif'): + tbf_stack = np.zeros([tbf_num_imgs, nrows, ncols]) - - tbf_img_nums=np.arange(tbf_img_start,tbf_img_start+tbf_num_imgs,1) - - - tbf_stack=np.zeros([tbf_num_imgs,nrows,ncols]) - print('Loading data for median bright field...') for ii in np.arange(tbf_num_imgs): print('Image #: ' + str(ii)) - tbf_stack[ii,:,:]=imgio.imread(tbf_data_folder+'%s'%(stem)+str(tbf_img_nums[ii]).zfill(num_digits)+ext) - #image_stack[ii,:,:]=np.flipud(tmp_img>threshold) + tbf_stack[ii, :, :] = imgio.imread( + tbf_data_folder + + '%s' % (stem) + + str(tbf_img_nums[ii]).zfill(num_digits) + + ext + ) + # image_stack[ii,:,:]=np.flipud(tmp_img>threshold) print('making median...') - - tbf=np.median(tbf_stack,axis=0) - + + tbf = np.median(tbf_stack, axis=0) + return tbf +def gen_median_image( + data_folder, + img_start, + num_imgs, + nrows, + ncols, + stem='nf_', + num_digits=5, + ext='.tif', +): -def gen_median_image(data_folder,img_start,num_imgs,nrows,ncols,stem='nf_',num_digits=5,ext='.tif'): + img_nums = np.arange(img_start, img_start + num_imgs, 1) + stack = np.zeros([num_imgs, nrows, ncols]) - img_nums=np.arange(img_start,img_start+num_imgs,1) - - - stack=np.zeros([num_imgs,nrows,ncols]) - print('Loading data for median image...') for ii in np.arange(num_imgs): print('Image #: ' + str(ii)) - stack[ii,:,:]=imgio.imread(data_folder+'%s'%(stem)+str(img_nums[ii]).zfill(num_digits)+ext) - #image_stack[ii,:,:]=np.flipud(tmp_img>threshold) + stack[ii, :, :] = imgio.imread( + data_folder + + '%s' % (stem) + + str(img_nums[ii]).zfill(num_digits) + + ext + ) + # image_stack[ii,:,:]=np.flipud(tmp_img>threshold) print('making median...') - - med=np.median(stack,axis=0) - - return med - -def gen_attenuation_rads(tomo_data_folder,tbf,tomo_img_start,tomo_num_imgs,nrows,ncols,stem='nf_',num_digits=5,ext='.tif',tdf=None): - - - - #Reconstructs a single tompgrahy layer to find the extent of the sample - tomo_img_nums=np.arange(tomo_img_start,tomo_img_start+tomo_num_imgs,1) - - #if tdf==None: - if len(tdf) == None: - tdf=np.zeros([nrows,ncols]) - - rad_stack=np.zeros([tomo_num_imgs,nrows,ncols]) - + + med = np.median(stack, axis=0) + + return med + + +def gen_attenuation_rads( + tomo_data_folder, + tbf, + tomo_img_start, + tomo_num_imgs, + nrows, + ncols, + stem='nf_', + num_digits=5, + ext='.tif', + tdf=None, +): + + # Reconstructs a single tompgrahy layer to find the extent of the sample + 
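# A minimal numeric check of the Beer-Lambert relation evaluated below,
# mu*t = -ln((I - D) / (I0 - D)); the 2x2 frames and counts are made-up
# illustrative values, not hexrd data.
import numpy as np

tbf_demo = np.full((2, 2), 1000.0)            # bright field I0
tdf_demo = np.full((2, 2), 20.0)              # dark field D
img_demo = np.array([[990.0, 500.0],
                     [250.0, 980.0]])         # sample image I

rad_demo = -np.log((img_demo - tdf_demo) / (tbf_demo - tdf_demo))
# ~0 where the beam is unattenuated (rad_demo[0, 0] ~ 0.01) and large where
# the sample absorbs strongly (rad_demo[1, 0] ~ 1.45)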
tomo_img_nums = np.arange(
+        tomo_img_start, tomo_img_start + tomo_num_imgs, 1
+    )
+
+    if tdf is None:
+        # the documented tdf=None default cannot be tested with len();
+        # compare identity instead
+        tdf = np.zeros([nrows, ncols])
+
+    rad_stack = np.zeros([tomo_num_imgs, nrows, ncols])
+
     print('Loading and Calculating Absorption Radiographs ...')
     for ii in np.arange(tomo_num_imgs):
         print('Image #: ' + str(ii))
-        tmp_img=imgio.imread(tomo_data_folder+'%s'%(stem)+str(tomo_img_nums[ii]).zfill(num_digits)+ext)
-
-        rad_stack[ii,:,:]=-np.log((tmp_img.astype(float)-tdf)/(tbf.astype(float)-tdf))
-
+        tmp_img = imgio.imread(
+            tomo_data_folder
+            + '%s' % (stem)
+            + str(tomo_img_nums[ii]).zfill(num_digits)
+            + ext
+        )
+
+        rad_stack[ii, :, :] = -np.log(
+            (tmp_img.astype(float) - tdf) / (tbf.astype(float) - tdf)
+        )
+
     return rad_stack
-
-
-def tomo_reconstruct_layer(rad_stack,cross_sectional_dim,layer_row=1024,start_tomo_ang=0., end_tomo_ang=360.,tomo_num_imgs=360, center=0.,pixel_size=0.00148):
-    sinogram=np.squeeze(rad_stack[:,layer_row,:])
-
-    rotation_axis_pos=-int(np.round(center/pixel_size))
-    #rotation_axis_pos=13
-
-    theta = np.linspace(start_tomo_ang, end_tomo_ang, tomo_num_imgs, endpoint=False)
-
-    max_rad=int(cross_sectional_dim/pixel_size/2.*1.1) #10% slack to avoid edge effects
-
-    if rotation_axis_pos>=0:
-        sinogram_cut=sinogram[:,2*rotation_axis_pos:]
+
+
+def tomo_reconstruct_layer(
+    rad_stack,
+    cross_sectional_dim,
+    layer_row=1024,
+    start_tomo_ang=0.0,
+    end_tomo_ang=360.0,
+    tomo_num_imgs=360,
+    center=0.0,
+    pixel_size=0.00148,
+):
+    sinogram = np.squeeze(rad_stack[:, layer_row, :])
+
+    rotation_axis_pos = -int(np.round(center / pixel_size))
+    # rotation_axis_pos=13
+
+    theta = np.linspace(
+        start_tomo_ang, end_tomo_ang, tomo_num_imgs, endpoint=False
+    )
+
+    max_rad = int(
+        cross_sectional_dim / pixel_size / 2.0 * 1.1
+    )  # 10% slack to avoid edge effects
+
+    if rotation_axis_pos >= 0:
+        sinogram_cut = sinogram[:, 2 * rotation_axis_pos :]
     else:
-        sinogram_cut=sinogram[:,:(2*rotation_axis_pos)]
-
-    dist_from_edge=np.round(sinogram_cut.shape[1]/2.).astype(int)-max_rad
-
-    sinogram_cut=sinogram_cut[:,dist_from_edge:-dist_from_edge]
-
+        sinogram_cut = sinogram[:, : (2 * rotation_axis_pos)]
+
+    dist_from_edge = (
+        np.round(sinogram_cut.shape[1] / 2.0).astype(int) - max_rad
+    )
+
+    sinogram_cut = sinogram_cut[:, dist_from_edge:-dist_from_edge]
+
     print('Inverting Sinogram....')
-    reconstruction_fbp = xformimg.iradon(sinogram_cut.T, theta=theta, circle=True)
-
-    reconstruction_fbp=np.rot90(reconstruction_fbp,3)#Rotation to get the result consistent with hexrd, needs to be checked
-
+    reconstruction_fbp = xformimg.iradon(
+        sinogram_cut.T, theta=theta, circle=True
+    )
+
+    reconstruction_fbp = np.rot90(
+        reconstruction_fbp, 3
+    )  # rotation to keep the result consistent with hexrd; needs to be checked
+
     return reconstruction_fbp
-
-def threshold_and_clean_tomo_layer(reconstruction_fbp,recon_thresh, noise_obj_size,min_hole_size,edge_cleaning_iter=None,erosion_iter=1,dilation_iter=4):
-    binary_recon=reconstruction_fbp>recon_thresh
-    #hard coded cleaning, grinding sausage...
-    binary_recon=img.morphology.binary_dilation(binary_recon,iterations=dilation_iter)
-    binary_recon=img.morphology.binary_erosion(binary_recon,iterations=erosion_iter)
-
+def threshold_and_clean_tomo_layer(
+    reconstruction_fbp,
+    recon_thresh,
+    noise_obj_size,
+    min_hole_size,
+    edge_cleaning_iter=None,
+    erosion_iter=1,
+    dilation_iter=4,
+):
+    binary_recon = reconstruction_fbp > recon_thresh
+
+    # hard-coded cleaning sequence: close small gaps, then prune by size
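# A self-contained sketch of the cleaning pattern used here (binary closing,
# then pruning connected components below a size cutoff); the toy image and
# the 4-pixel cutoff are illustrative stand-ins, not hexrd defaults.
import numpy as np
import scipy.ndimage as img

noisy = np.zeros((10, 10), dtype=bool)
noisy[2:6, 2:6] = True                   # the real object
noisy[8, 8] = True                       # a single-pixel speck

cleaned = img.binary_dilation(noisy, iterations=1)
cleaned = img.binary_erosion(cleaned, iterations=1)

labeled, num_labels = img.label(cleaned)
for ii in np.arange(1, num_labels + 1):  # label 0 is background
    obj = np.where(labeled == ii)
    if obj[0].shape[0] < 4:              # stand-in for noise_obj_size
        cleaned[obj] = False

assert cleaned[3, 3] and not cleaned[8, 8]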
+    binary_recon = img.morphology.binary_dilation(
+        binary_recon, iterations=dilation_iter
+    )
+    binary_recon = img.morphology.binary_erosion(
+        binary_recon, iterations=erosion_iter
+    )
+
+    labeled_img, num_labels = img.label(binary_recon)
-
-    labeled_img,num_labels=img.label(binary_recon)
-
     print('Cleaning...')

     print('Removing Noise...')
-    for ii in np.arange(1,num_labels):
-        obj1=np.where(labeled_img==ii)
-        if obj1[0].shape[0]<noise_obj_size:
-            binary_recon[obj1[0],obj1[1]]=0
-
-    print('Closing Holes...')
-    labeled_img,num_labels=img.label(binary_recon!=1)
-    for ii in np.arange(1,num_labels):
-        obj1=np.where(labeled_img==ii)
-        if obj1[0].shape[0]>=1 and obj1[0].shape[0]<min_hole_size:
-            binary_recon[obj1[0],obj1[1]]=1
+    for ii in np.arange(1, num_labels):
+        obj1 = np.where(labeled_img == ii)
+        if obj1[0].shape[0] < noise_obj_size:
+            binary_recon[obj1[0], obj1[1]] = 0
+
+    print('Closing Holes...')
+    labeled_img, num_labels = img.label(binary_recon != 1)
+    for ii in np.arange(1, num_labels):
+        obj1 = np.where(labeled_img == ii)
+        if obj1[0].shape[0] >= 1 and obj1[0].shape[0] < min_hole_size:
+            binary_recon[obj1[0], obj1[1]] = 1

     if edge_cleaning_iter is not None:
-        binary_recon=img.morphology.binary_erosion(binary_recon,iterations=edge_cleaning_iter)
-        binary_recon=img.morphology.binary_dilation(binary_recon,iterations=edge_cleaning_iter)
-
+        binary_recon = img.morphology.binary_erosion(
+            binary_recon, iterations=edge_cleaning_iter
+        )
+        binary_recon = img.morphology.binary_dilation(
+            binary_recon, iterations=edge_cleaning_iter
+        )
+
     return binary_recon
-
-def crop_and_rebin_tomo_layer(binary_recon,recon_thresh,voxel_spacing,pixel_size,cross_sectional_dim,circular_mask_rad=None):
-    scaling=voxel_spacing/pixel_size
-
-    rows=binary_recon.shape[0]
-    cols=binary_recon.shape[1]
-
-    new_rows=np.round(rows/scaling).astype(int)
-    new_cols=np.round(cols/scaling).astype(int)
-
-    tmp_resize=xformimg.resize(binary_recon,[new_rows,new_cols],preserve_range=True)
-    #tmp_resize_norm=tmp_resize/255
-    tmp_resize_norm_force=np.floor(tmp_resize)
-
-    binary_recon_bin=tmp_resize_norm_force.astype(bool)
-
-    cut_edge=int(np.round((binary_recon_bin.shape[0]*voxel_spacing-cross_sectional_dim)/2./voxel_spacing))
-
-    binary_recon_bin=binary_recon_bin[cut_edge:-cut_edge,cut_edge:-cut_edge]
-
-    if circular_mask_rad is not None:
-        center = binary_recon_bin.shape[0]/2
-        radius = np.round(circular_mask_rad/voxel_spacing)
-        nx,ny = binary_recon_bin.shape
-        y,x = np.ogrid[-center:nx-center,-center:ny-center]
-        mask = x*x + y*y > radius*radius
-
-        binary_recon_bin[mask]=0
-    return binary_recon_bin
-
+def crop_and_rebin_tomo_layer(
+    binary_recon,
+    recon_thresh,
+    voxel_spacing,
+    pixel_size,
+    cross_sectional_dim,
+    circular_mask_rad=None,
+):
+    scaling = voxel_spacing / pixel_size
+
+    rows = binary_recon.shape[0]
+    cols = binary_recon.shape[1]
+
+    new_rows = np.round(rows / scaling).astype(int)
+    new_cols = np.round(cols / scaling).astype(int)
+
+    tmp_resize = xformimg.resize(
+        binary_recon, [new_rows, new_cols], preserve_range=True
+    )
+    # tmp_resize_norm=tmp_resize/255
+    tmp_resize_norm_force = np.floor(tmp_resize)
+
+    binary_recon_bin = tmp_resize_norm_force.astype(bool)
+
+    cut_edge = int(
+        np.round(
+            (binary_recon_bin.shape[0] * voxel_spacing - cross_sectional_dim)
+            / 2.0
+            / voxel_spacing
+        )
+    )
+
+    binary_recon_bin = binary_recon_bin[cut_edge:-cut_edge, cut_edge:-cut_edge]
+
+    if circular_mask_rad is not None:
+        center = binary_recon_bin.shape[0] / 2
+        radius = np.round(circular_mask_rad / voxel_spacing)
+        nx, ny = binary_recon_bin.shape
+        y, x = np.ogrid[-center : nx - center, -center : ny - center]
+        mask = x * x + y * y > radius * radius
+
+        binary_recon_bin[mask] = 0
+
+    return binary_recon_bin
diff --git a/hexrd/hedm/grainmap/vtkutil.py b/hexrd/hedm/grainmap/vtkutil.py
index 3af28e407..50c6ba426 100644
--- a/hexrd/hedm/grainmap/vtkutil.py
+++ b/hexrd/hedm/grainmap/vtkutil.py
@@ -3,124 +3,140 @@
 import os

-#%%
+# %%

-def output_grain_map_vtk(data_location,data_stems,output_stem,vol_spacing,top_down=True):
+def output_grain_map_vtk(
+    data_location, data_stems, output_stem, vol_spacing, top_down=True
+):
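# For orientation, the writer below emits a legacy ASCII VTK unstructured
# grid; for a 2 x 2 x 2 point grid (scale1 = 2, scale2 = 4, one 8-node
# hexahedron) the output reduces to the miniature sketched here:
#
#     # vtk DataFile Version 3.0
#     grainmap Data
#     ASCII
#     DATASET UNSTRUCTURED_GRID
#     POINTS 8 double
#     ... eight "x y z" lines ...
#     CELLS 1 9
#     8 0 1 3 2 4 5 7 6
#     CELL_TYPES 1
#     12
#     POINT_DATA 8
#     SCALARS grain_id int
#     LOOKUP_TABLE default
#     ... eight grain ids ...
#
# (cell type 12 is VTK_HEXAHEDRON; the connectivity row follows the
# p1..p8 pattern built from base, scale1, and scale2 in the loops below)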
+ num_scans = len(data_stems) + confidence_maps = [None] * num_scans + grain_maps = [None] * num_scans + Xss = [None] * num_scans + Yss = [None] * num_scans + Zss = [None] * num_scans - - num_scans=len(data_stems) - - confidence_maps=[None]*num_scans - grain_maps=[None]*num_scans - Xss=[None]*num_scans - Yss=[None]*num_scans - Zss=[None]*num_scans - - for ii in np.arange(num_scans): - print('Loading Volume %d ....'%(ii)) - conf_data=np.load(os.path.join(data_location,data_stems[ii]+'_grain_map_data.npz')) - - confidence_maps[ii]=conf_data['confidence_map'] - grain_maps[ii]=conf_data['grain_map'] - Xss[ii]=conf_data['Xs'] - Yss[ii]=conf_data['Ys'] - Zss[ii]=conf_data['Zs'] - - #assumes all volumes to be the same size - num_layers=grain_maps[0].shape[0] - - total_layers=num_layers*num_scans - - num_rows=grain_maps[0].shape[1] - num_cols=grain_maps[0].shape[2] - - grain_map_stitched=np.zeros((total_layers,num_rows,num_cols)) - confidence_stitched=np.zeros((total_layers,num_rows,num_cols)) - Xs_stitched=np.zeros((total_layers,num_rows,num_cols)) - Ys_stitched=np.zeros((total_layers,num_rows,num_cols)) - Zs_stitched=np.zeros((total_layers,num_rows,num_cols)) - - + print('Loading Volume %d ....' % (ii)) + conf_data = np.load( + os.path.join(data_location, data_stems[ii] + '_grain_map_data.npz') + ) + + confidence_maps[ii] = conf_data['confidence_map'] + grain_maps[ii] = conf_data['grain_map'] + Xss[ii] = conf_data['Xs'] + Yss[ii] = conf_data['Ys'] + Zss[ii] = conf_data['Zs'] + + # assumes all volumes to be the same size + num_layers = grain_maps[0].shape[0] + + total_layers = num_layers * num_scans + + num_rows = grain_maps[0].shape[1] + num_cols = grain_maps[0].shape[2] + + grain_map_stitched = np.zeros((total_layers, num_rows, num_cols)) + confidence_stitched = np.zeros((total_layers, num_rows, num_cols)) + Xs_stitched = np.zeros((total_layers, num_rows, num_cols)) + Ys_stitched = np.zeros((total_layers, num_rows, num_cols)) + Zs_stitched = np.zeros((total_layers, num_rows, num_cols)) + for i in np.arange(num_scans): - if top_down==True: - grain_map_stitched[((i)*num_layers):((i)*num_layers+num_layers),:,:]=grain_maps[num_scans-1-i] - confidence_stitched[((i)*num_layers):((i)*num_layers+num_layers),:,:]=confidence_maps[num_scans-1-i] - Xs_stitched[((i)*num_layers):((i)*num_layers+num_layers),:,:]=Xss[num_scans-1-i] - Zs_stitched[((i)*num_layers):((i)*num_layers+num_layers),:,:]=Zss[num_scans-1-i] - Ys_stitched[((i)*num_layers):((i)*num_layers+num_layers),:,:]=Yss[num_scans-1-i]+vol_spacing*i + if top_down == True: + grain_map_stitched[ + ((i) * num_layers) : ((i) * num_layers + num_layers), :, : + ] = grain_maps[num_scans - 1 - i] + confidence_stitched[ + ((i) * num_layers) : ((i) * num_layers + num_layers), :, : + ] = confidence_maps[num_scans - 1 - i] + Xs_stitched[ + ((i) * num_layers) : ((i) * num_layers + num_layers), :, : + ] = Xss[num_scans - 1 - i] + Zs_stitched[ + ((i) * num_layers) : ((i) * num_layers + num_layers), :, : + ] = Zss[num_scans - 1 - i] + Ys_stitched[ + ((i) * num_layers) : ((i) * num_layers + num_layers), :, : + ] = (Yss[num_scans - 1 - i] + vol_spacing * i) else: - - grain_map_stitched[((i)*num_layers):((i)*num_layers+num_layers),:,:]=grain_maps[i] - confidence_stitched[((i)*num_layers):((i)*num_layers+num_layers),:,:]=confidence_maps[i] - Xs_stitched[((i)*num_layers):((i)*num_layers+num_layers),:,:]=Xss[i] - Zs_stitched[((i)*num_layers):((i)*num_layers+num_layers),:,:]=Zss[i] - Ys_stitched[((i)*num_layers):((i)*num_layers+num_layers),:,:]=Yss[i]+vol_spacing*i - - - - 
+ + grain_map_stitched[ + ((i) * num_layers) : ((i) * num_layers + num_layers), :, : + ] = grain_maps[i] + confidence_stitched[ + ((i) * num_layers) : ((i) * num_layers + num_layers), :, : + ] = confidence_maps[i] + Xs_stitched[ + ((i) * num_layers) : ((i) * num_layers + num_layers), :, : + ] = Xss[i] + Zs_stitched[ + ((i) * num_layers) : ((i) * num_layers + num_layers), :, : + ] = Zss[i] + Ys_stitched[ + ((i) * num_layers) : ((i) * num_layers + num_layers), :, : + ] = (Yss[i] + vol_spacing * i) + print('Writing VTK data...') - # VTK Dump - Xslist=Xs_stitched[:,:,:].ravel() - Yslist=Ys_stitched[:,:,:].ravel() - Zslist=Zs_stitched[:,:,:].ravel() - - grainlist=grain_map_stitched[:,:,:].ravel() - conflist=confidence_stitched[:,:,:].ravel() - - num_pts=Xslist.shape[0] - num_cells=(total_layers-1)*(num_rows-1)*(num_cols-1) - - f = open(os.path.join(data_location, output_stem +'_stitch.vtk'), 'w') - - + # VTK Dump + Xslist = Xs_stitched[:, :, :].ravel() + Yslist = Ys_stitched[:, :, :].ravel() + Zslist = Zs_stitched[:, :, :].ravel() + + grainlist = grain_map_stitched[:, :, :].ravel() + conflist = confidence_stitched[:, :, :].ravel() + + num_pts = Xslist.shape[0] + num_cells = (total_layers - 1) * (num_rows - 1) * (num_cols - 1) + + f = open(os.path.join(data_location, output_stem + '_stitch.vtk'), 'w') + f.write('# vtk DataFile Version 3.0\n') f.write('grainmap Data\n') f.write('ASCII\n') f.write('DATASET UNSTRUCTURED_GRID\n') f.write('POINTS %d double\n' % (num_pts)) - + for i in np.arange(num_pts): - f.write('%e %e %e \n' %(Xslist[i],Yslist[i],Zslist[i])) - - scale2=num_cols*num_rows - scale1=num_cols - - f.write('CELLS %d %d\n' % (num_cells, 9*num_cells)) - for k in np.arange(Xs_stitched.shape[0]-1): - for j in np.arange(Xs_stitched.shape[1]-1): - for i in np.arange(Xs_stitched.shape[2]-1): - base=scale2*k+scale1*j+i - p1=base - p2=base+1 - p3=base+1+scale1 - p4=base+scale1 - p5=base+scale2 - p6=base+scale2+1 - p7=base+scale2+scale1+1 - p8=base+scale2+scale1 - - f.write('8 %d %d %d %d %d %d %d %d \n' %(p1,p2,p3,p4,p5,p6,p7,p8)) - - - f.write('CELL_TYPES %d \n' % (num_cells)) + f.write('%e %e %e \n' % (Xslist[i], Yslist[i], Zslist[i])) + + scale2 = num_cols * num_rows + scale1 = num_cols + + f.write('CELLS %d %d\n' % (num_cells, 9 * num_cells)) + for k in np.arange(Xs_stitched.shape[0] - 1): + for j in np.arange(Xs_stitched.shape[1] - 1): + for i in np.arange(Xs_stitched.shape[2] - 1): + base = scale2 * k + scale1 * j + i + p1 = base + p2 = base + 1 + p3 = base + 1 + scale1 + p4 = base + scale1 + p5 = base + scale2 + p6 = base + scale2 + 1 + p7 = base + scale2 + scale1 + 1 + p8 = base + scale2 + scale1 + + f.write( + '8 %d %d %d %d %d %d %d %d \n' + % (p1, p2, p3, p4, p5, p6, p7, p8) + ) + + f.write('CELL_TYPES %d \n' % (num_cells)) for i in np.arange(num_cells): - f.write('12 \n') - + f.write('12 \n') + f.write('POINT_DATA %d \n' % (num_pts)) - f.write('SCALARS grain_id int \n') - f.write('LOOKUP_TABLE default \n') + f.write('SCALARS grain_id int \n') + f.write('LOOKUP_TABLE default \n') for i in np.arange(num_pts): - f.write('%d \n' %(grainlist[i])) - - f.write('FIELD FieldData 1 \n' ) - f.write('confidence 1 %d float \n' % (num_pts)) + f.write('%d \n' % (grainlist[i])) + + f.write('FIELD FieldData 1 \n') + f.write('confidence 1 %d float \n' % (num_pts)) for i in np.arange(num_pts): - f.write('%e \n' %(conflist[i])) - - - f.close() \ No newline at end of file + f.write('%e \n' % (conflist[i])) + + f.close() diff --git a/hexrd/hedm/instrument/__init__.py b/hexrd/hedm/instrument/__init__.py new 
file mode 100644 index 000000000..b5414013c --- /dev/null +++ b/hexrd/hedm/instrument/__init__.py @@ -0,0 +1,13 @@ +from .hedm_instrument import ( + calc_angles_from_beam_vec, + calc_beam_vec, + centers_of_edge_vec, + GenerateEtaOmeMaps, + GrainDataWriter, + HEDMInstrument, + max_tth, + switch_xray_source, + unwrap_dict_to_h5, + unwrap_h5_to_dict, +) +from .detector import Detector diff --git a/hexrd/hedm/instrument/detector.py b/hexrd/hedm/instrument/detector.py index 757d5f44d..858fa21d7 100644 --- a/hexrd/hedm/instrument/detector.py +++ b/hexrd/hedm/instrument/detector.py @@ -3,26 +3,42 @@ import os from typing import Optional -from hexrd.core.instrument.constants import COATING_DEFAULT, FILTER_DEFAULTS, PHOSPHOR_DEFAULT -from hexrd.hedm.instrument.physics_package import AbstractPhysicsPackage +from hexrd.core.instrument.constants import ( + COATING_DEFAULT, + FILTER_DEFAULTS, + PHOSPHOR_DEFAULT, +) +from hexrd.core.instrument.physics_package import AbstractPhysicsPackage import numpy as np import numba from hexrd.core import constants as ct from hexrd.core import distortion as distortion_pkg from hexrd.core import matrixutil as mutil + +# TODO: Resolve extra-core-dependency from hexrd.hedm import xrdutil from hexrd.core.rotations import mapAngle -from hexrd.hedm.material import crystallography -from hexrd.hedm.material.crystallography import PlaneData +from hexrd.core.material import crystallography +from hexrd.core.material.crystallography import PlaneData -from hexrd.core.transforms.xfcapi import xy_to_gvec, gvec_to_xy, make_beam_rmat, make_rmat_of_expmap, oscill_angles_of_hkls, angles_to_dvec +from hexrd.core.transforms.xfcapi import ( + xy_to_gvec, + gvec_to_xy, + make_beam_rmat, + make_rmat_of_expmap, + oscill_angles_of_hkls, + angles_to_dvec, +) from hexrd.core.utils.decorators import memoize from hexrd.core.gridutil import cellIndices from hexrd.core.instrument import detector_coatings -from hexrd.core.material.utils import calculate_linear_absorption_length, calculate_incoherent_scattering +from hexrd.core.material.utils import ( + calculate_linear_absorption_length, + calculate_incoherent_scattering, +) distortion_registry = distortion_pkg.Registry() @@ -279,7 +295,8 @@ def __init__( if detector_filter is None: detector_filter = detector_coatings.Filter( - **FILTER_DEFAULTS.TARDIS) + **FILTER_DEFAULTS.TARDIS + ) self.filter = detector_filter if detector_coating is None: @@ -530,8 +547,9 @@ def pixel_coords(self): # METHODS # ========================================================================= - def pixel_Q(self, energy: np.floating, - origin: np.ndarray = ct.zeros_3) -> np.ndarray: + def pixel_Q( + self, energy: np.floating, origin: np.ndarray = ct.zeros_3 + ) -> np.ndarray: '''get the equivalent momentum transfer for the angles. 
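# Worked check of the relation pixel_Q evaluates in the next hunk,
# Q = 4*pi*sin(tth/2)/lambda, with lambda[A] ~ 12.39842 / E[keV]; the Cu
# K-alpha energy and 30 deg two-theta below are illustrative values only.
import numpy as np

energy = 8.048                       # keV
lam = 12.39842 / energy              # ~1.5406 Angstrom
tth = np.radians(30.0)
q = 4.0 * np.pi * np.sin(tth * 0.5) / lam
# q ~ 2.11 inverse Angstrom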
@@ -550,7 +568,7 @@ def pixel_Q(self, energy: np.floating, ''' lam = ct.keVToAngstrom(energy) tth, _ = self.pixel_angles(origin=origin) - return 4.*np.pi*np.sin(tth*0.5)/lam + return 4.0 * np.pi * np.sin(tth * 0.5) / lam def pixel_compton_energy_loss( self, @@ -577,9 +595,9 @@ def pixel_compton_energy_loss( ''' energy = np.asarray(energy) tth, _ = self.pixel_angles() - ang_fact = (1 - np.cos(tth)) - beta = energy/ct.cRestmasskeV - return energy/(1 + beta*ang_fact) + ang_fact = 1 - np.cos(tth) + beta = energy / ct.cRestmasskeV + return energy / (1 + beta * ang_fact) def pixel_compton_attenuation_length( self, @@ -628,8 +646,7 @@ def compute_compton_scattering_intensity( physics_package: AbstractPhysicsPackage, origin: np.array = ct.zeros_3, ) -> tuple[np.ndarray, np.ndarray, np.ndarray]: - - ''' compute the theoretical compton scattering + '''compute the theoretical compton scattering signal on the detector. this value is corrected for the transmission of compton scattered photons and normlaized before getting subtracting from the @@ -652,18 +669,20 @@ def compute_compton_scattering_intensity( q = self.pixel_Q(energy) inc_s = calculate_incoherent_scattering( - physics_package.sample_material, - q.flatten()).reshape(self.shape) + physics_package.sample_material, q.flatten() + ).reshape(self.shape) inc_w = calculate_incoherent_scattering( - physics_package.window_material, - q.flatten()).reshape(self.shape) + physics_package.window_material, q.flatten() + ).reshape(self.shape) t_s = self.calc_compton_physics_package_transmission( - energy, rMat_s, physics_package) + energy, rMat_s, physics_package + ) t_w = self.calc_compton_window_transmission( - energy, rMat_s, physics_package) + energy, rMat_s, physics_package + ) return inc_s * t_s + inc_w * t_w, t_s, t_w @@ -1087,9 +1106,14 @@ def interpolate_nearest(self, xy, img, pad_with_nans=True): int_xy[on_panel] = int_vals return int_xy - def interpolate_bilinear(self, xy, img, pad_with_nans=True, - clip_to_panel=True, - on_panel: Optional[np.ndarray] = None): + def interpolate_bilinear( + self, + xy, + img, + pad_with_nans=True, + clip_to_panel=True, + on_panel: Optional[np.ndarray] = None, + ): """ Interpolate an image array at the specified cartesian points. 
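# Worked check of the Compton energy-loss relation from the hunk above:
# E' = E / (1 + (E / m_e c^2) * (1 - cos(tth))), with m_e c^2 ~ 510.99895 keV
# (ct.cRestmasskeV); the 90 keV / 90 deg inputs are illustrative only.
import numpy as np

energy = 90.0                                    # keV, incident
tth = np.radians(90.0)
beta = energy / 510.99895
e_out = energy / (1 + beta * (1 - np.cos(tth)))  # ~76.5 keV after scattering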
@@ -1766,19 +1790,23 @@ def increase_memoization_sizes(funcs, min_size): if cache_info['maxsize'] < min_size: f.set_cache_maxsize(min_size) - def calc_physics_package_transmission(self, energy: np.floating, - rMat_s: np.array, - physics_package: AbstractPhysicsPackage) -> np.float64: + def calc_physics_package_transmission( + self, + energy: np.floating, + rMat_s: np.array, + physics_package: AbstractPhysicsPackage, + ) -> np.float64: """get the transmission from the physics package need to consider HED and HEDM samples separately """ bvec = self.bvec - sample_normal = np.dot(rMat_s, [0., 0., np.sign(bvec[2])]) - seca = 1./np.dot(bvec, sample_normal) + sample_normal = np.dot(rMat_s, [0.0, 0.0, np.sign(bvec[2])]) + seca = 1.0 / np.dot(bvec, sample_normal) tth, eta = self.pixel_angles() - angs = np.vstack((tth.flatten(), eta.flatten(), - np.zeros(tth.flatten().shape))).T + angs = np.vstack( + (tth.flatten(), eta.flatten(), np.zeros(tth.flatten().shape)) + ).T dvecs = angles_to_dvec(angs, beam_vec=bvec) @@ -1791,17 +1819,17 @@ def calc_physics_package_transmission(self, energy: np.floating, cosb < 0, np.isclose( cosb, - 0., - atol=5E-2, - ) + 0.0, + atol=5e-2, + ), ) cosb[mask] = np.nan - secb = 1./cosb.reshape(self.shape) + secb = 1.0 / cosb.reshape(self.shape) T_sample = self.calc_transmission_sample( - seca, secb, energy, physics_package) - T_window = self.calc_transmission_window( - secb, energy, physics_package) + seca, secb, energy, physics_package + ) + T_window = self.calc_transmission_window(secb, energy, physics_package) transmission_physics_package = T_sample * T_window return transmission_physics_package @@ -1818,12 +1846,13 @@ def calc_compton_physics_package_transmission( routine than elastically scattered absorption. ''' bvec = self.bvec - sample_normal = np.dot(rMat_s, [0., 0., np.sign(bvec[2])]) - seca = 1./np.dot(bvec, sample_normal) + sample_normal = np.dot(rMat_s, [0.0, 0.0, np.sign(bvec[2])]) + seca = 1.0 / np.dot(bvec, sample_normal) tth, eta = self.pixel_angles() - angs = np.vstack((tth.flatten(), eta.flatten(), - np.zeros(tth.flatten().shape))).T + angs = np.vstack( + (tth.flatten(), eta.flatten(), np.zeros(tth.flatten().shape)) + ).T dvecs = angles_to_dvec(angs, beam_vec=bvec) @@ -1836,18 +1865,19 @@ def calc_compton_physics_package_transmission( cosb < 0, np.isclose( cosb, - 0., - atol=5E-2, - ) + 0.0, + atol=5e-2, + ), ) cosb[mask] = np.nan - secb = 1./cosb.reshape(self.shape) + secb = 1.0 / cosb.reshape(self.shape) T_sample = self.calc_compton_transmission( - seca, secb, energy, - physics_package, 'sample') + seca, secb, energy, physics_package, 'sample' + ) T_window = self.calc_compton_transmission_window( - secb, energy, physics_package) + secb, energy, physics_package + ) return T_sample * T_window @@ -1864,12 +1894,13 @@ def calc_compton_window_transmission( elastically scattered absorption. 
''' bvec = self.bvec - sample_normal = np.dot(rMat_s, [0., 0., np.sign(bvec[2])]) - seca = 1./np.dot(bvec, sample_normal) + sample_normal = np.dot(rMat_s, [0.0, 0.0, np.sign(bvec[2])]) + seca = 1.0 / np.dot(bvec, sample_normal) tth, eta = self.pixel_angles() - angs = np.vstack((tth.flatten(), eta.flatten(), - np.zeros(tth.flatten().shape))).T + angs = np.vstack( + (tth.flatten(), eta.flatten(), np.zeros(tth.flatten().shape)) + ).T dvecs = angles_to_dvec(angs, beam_vec=bvec) @@ -1882,45 +1913,54 @@ def calc_compton_window_transmission( cosb < 0, np.isclose( cosb, - 0., - atol=5E-2, - ) + 0.0, + atol=5e-2, + ), ) cosb[mask] = np.nan - secb = 1./cosb.reshape(self.shape) + secb = 1.0 / cosb.reshape(self.shape) T_window = self.calc_compton_transmission( - seca, secb, energy, - physics_package, 'window') + seca, secb, energy, physics_package, 'window' + ) T_sample = self.calc_compton_transmission_sample( - seca, energy, physics_package) + seca, energy, physics_package + ) return T_sample * T_window - def calc_transmission_sample(self, seca: np.array, - secb: np.array, energy: np.floating, - physics_package: AbstractPhysicsPackage) -> np.array: + def calc_transmission_sample( + self, + seca: np.array, + secb: np.array, + energy: np.floating, + physics_package: AbstractPhysicsPackage, + ) -> np.array: thickness_s = physics_package.sample_thickness # in microns if np.isclose(thickness_s, 0): return np.ones(self.shape) # in microns^-1 - mu_s = 1./physics_package.sample_absorption_length(energy) - x = (mu_s*thickness_s) - pre = 1./x/(secb - seca) - num = np.exp(-x*seca) - np.exp(-x*secb) + mu_s = 1.0 / physics_package.sample_absorption_length(energy) + x = mu_s * thickness_s + pre = 1.0 / x / (secb - seca) + num = np.exp(-x * seca) - np.exp(-x * secb) return pre * num - def calc_transmission_window(self, secb: np.array, energy: np.floating, - physics_package: AbstractPhysicsPackage) -> np.array: + def calc_transmission_window( + self, + secb: np.array, + energy: np.floating, + physics_package: AbstractPhysicsPackage, + ) -> np.array: material_w = physics_package.window_material thickness_w = physics_package.window_thickness # in microns if material_w is None or np.isclose(thickness_w, 0): return np.ones(self.shape) # in microns^-1 - mu_w = 1./physics_package.window_absorption_length(energy) - return np.exp(-thickness_w*mu_w*secb) + mu_w = 1.0 / physics_package.window_absorption_length(energy) + return np.exp(-thickness_w * mu_w * secb) def calc_compton_transmission( self, @@ -1935,9 +1975,11 @@ def calc_compton_transmission( formula = physics_package.sample_material density = physics_package.sample_density thickness = physics_package.sample_thickness - mu = 1./physics_package.sample_absorption_length(energy) - mu_prime = 1. 
/ self.pixel_compton_attenuation_length( - energy, density, formula, + mu = 1.0 / physics_package.sample_absorption_length(energy) + mu_prime = 1.0 / self.pixel_compton_attenuation_length( + energy, + density, + formula, ) elif pp_layer == 'window': formula = physics_package.window_material @@ -1946,17 +1988,18 @@ def calc_compton_transmission( density = physics_package.window_density thickness = physics_package.window_thickness - mu = 1./physics_package.sample_absorption_length(energy) - mu_prime = 1./self.pixel_compton_attenuation_length( - energy, density, formula) + mu = 1.0 / physics_package.sample_absorption_length(energy) + mu_prime = 1.0 / self.pixel_compton_attenuation_length( + energy, density, formula + ) if thickness <= 0: return np.ones(self.shape) - x1 = mu*thickness*seca - x2 = mu_prime*thickness*secb - num = (np.exp(-x1) - np.exp(-x2)) - return -num/(x1 - x2) + x1 = mu * thickness * seca + x2 = mu_prime * thickness * secb + num = np.exp(-x1) - np.exp(-x2) + return -num / (x1 - x2) def calc_compton_transmission_sample( self, @@ -1966,9 +2009,8 @@ def calc_compton_transmission_sample( ) -> np.ndarray: thickness_s = physics_package.sample_thickness # in microns - mu_s = 1./physics_package.sample_absorption_length( - energy) - return np.exp(-mu_s*thickness_s*seca) + mu_s = 1.0 / physics_package.sample_absorption_length(energy) + return np.exp(-mu_s * thickness_s * seca) def calc_compton_transmission_window( self, @@ -1980,60 +2022,71 @@ def calc_compton_transmission_window( if formula is None: return np.ones(self.shape) - density = physics_package.window_density # in g/cc + density = physics_package.window_density # in g/cc thickness_w = physics_package.window_thickness # in microns - mu_w_prime = 1./self.pixel_compton_attenuation_length( - energy, density, formula) - return np.exp(-mu_w_prime*thickness_w*secb) - - def calc_effective_pinhole_area(self, physics_package: AbstractPhysicsPackage) -> np.array: - """get the effective pinhole area correction - """ - if (np.isclose(physics_package.pinhole_diameter, 0) - or np.isclose(physics_package.pinhole_thickness, 0)): + mu_w_prime = 1.0 / self.pixel_compton_attenuation_length( + energy, density, formula + ) + return np.exp(-mu_w_prime * thickness_w * secb) + + def calc_effective_pinhole_area( + self, physics_package: AbstractPhysicsPackage + ) -> np.array: + """get the effective pinhole area correction""" + if np.isclose(physics_package.pinhole_diameter, 0) or np.isclose( + physics_package.pinhole_thickness, 0 + ): return np.ones(self.shape) - hod = (physics_package.pinhole_thickness / - physics_package.pinhole_diameter) + hod = ( + physics_package.pinhole_thickness + / physics_package.pinhole_diameter + ) bvec = self.bvec tth, eta = self.pixel_angles() - angs = np.vstack((tth.flatten(), eta.flatten(), - np.zeros(tth.flatten().shape))).T + angs = np.vstack( + (tth.flatten(), eta.flatten(), np.zeros(tth.flatten().shape)) + ).T dvecs = angles_to_dvec(angs, beam_vec=bvec) cth = -dvecs[:, 2].reshape(self.shape) tanth = np.tan(np.arccos(cth)) - f = hod*tanth - f[np.abs(f) > 1.] 
= np.nan + f = hod * tanth + f[np.abs(f) > 1.0] = np.nan asinf = np.arcsin(f) return 2 / np.pi * cth * (np.pi / 2 - asinf - f * np.cos(asinf)) - def calc_transmission_generic(self, - secb: np.array, - thickness: np.floating, - absorption_length: np.floating) -> np.array: + def calc_transmission_generic( + self, + secb: np.array, + thickness: np.floating, + absorption_length: np.floating, + ) -> np.array: if np.isclose(thickness, 0): return np.ones(self.shape) - mu = 1./absorption_length # in microns^-1 - return np.exp(-thickness*mu*secb) + mu = 1.0 / absorption_length # in microns^-1 + return np.exp(-thickness * mu * secb) - def calc_transmission_phosphor(self, - secb: np.array, - thickness: np.floating, - readout_length: np.floating, - absorption_length: np.floating, - energy: np.floating, - pre_U0: np.floating) -> np.array: + def calc_transmission_phosphor( + self, + secb: np.array, + thickness: np.floating, + readout_length: np.floating, + absorption_length: np.floating, + energy: np.floating, + pre_U0: np.floating, + ) -> np.array: if np.isclose(thickness, 0): return np.ones(self.shape) - f1 = absorption_length*thickness - f2 = absorption_length*readout_length - arg = (secb + 1/f2) - return pre_U0 * energy*((1.0 - np.exp(-f1*arg))/arg) + f1 = absorption_length * thickness + f2 = absorption_length * readout_length + arg = secb + 1 / f2 + return pre_U0 * energy * ((1.0 - np.exp(-f1 * arg)) / arg) + # ============================================================================= # UTILITY METHODS diff --git a/hexrd/hedm/instrument/hedm_instrument.py b/hexrd/hedm/instrument/hedm_instrument.py index d2e95f7f9..5c52f282f 100644 --- a/hexrd/hedm/instrument/hedm_instrument.py +++ b/hexrd/hedm/instrument/hedm_instrument.py @@ -59,7 +59,13 @@ from hexrd.core.fitting.utils import fit_ring from hexrd.core.gridutil import make_tolerance_grid from hexrd.core import matrixutil as mutil -from hexrd.core.transforms.xfcapi import angles_to_gvec, gvec_to_xy, make_sample_rmat, make_rmat_of_expmap, unit_vector +from hexrd.core.transforms.xfcapi import ( + angles_to_gvec, + gvec_to_xy, + make_sample_rmat, + make_rmat_of_expmap, + unit_vector, +) from hexrd.hedm import xrdutil from hexrd.hedm.material.crystallography import PlaneData from hexrd.core import constants as ct @@ -69,6 +75,7 @@ from hexrd.core.utils.hdf5 import unwrap_dict_to_h5, unwrap_h5_to_dict from hexrd.core.utils.yaml import NumpyToNativeDumper from hexrd.core.valunits import valWUnit + # TODO: Resolve extra-workflow-dependency from hexrd.powder.wppf import LeBail @@ -78,14 +85,17 @@ from skimage.draw import polygon from skimage.util import random_noise + # TODO: Resolve extra-workflow-dependency from hexrd.powder.wppf import wppfsupport try: from fast_histogram import histogram1d + fast_histogram = True except ImportError: from numpy import histogram as histogram1d + fast_histogram = False logger = logging.getLogger() @@ -108,9 +118,9 @@ pixel_size_DFLT = (0.2, 0.2) tilt_params_DFLT = np.zeros(3) -t_vec_d_DFLT = np.r_[0., 0., -1000.] +t_vec_d_DFLT = np.r_[0.0, 0.0, -1000.0] -chi_DFLT = 0. +chi_DFLT = 0.0 t_vec_s_DFLT = np.zeros(3) multi_ims_key = ct.shared_ims_key @@ -124,8 +134,9 @@ # ============================================================================= -def generate_chunks(nrows, ncols, base_nrows, base_ncols, - row_gap=0, col_gap=0): +def generate_chunks( + nrows, ncols, base_nrows, base_ncols, row_gap=0, col_gap=0 +): """ Generate chunking data for regularly tiled composite detectors. 
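Note: the transmission helpers above (calc_transmission_generic, calc_transmission_window) are Beer-Lambert attenuation along the slanted exit path, T = exp(-t * mu * sec(beta)) with mu the inverse absorption length. A minimal standalone sketch under those definitions (function name hypothetical; thickness and absorption length in microns, as in the methods above):

import numpy as np

def transmission_sketch(secb, thickness, absorption_length):
    # T = exp(-t * mu * sec(beta)), mu = 1 / absorption_length in microns^-1
    if np.isclose(thickness, 0):
        return np.ones_like(np.asarray(secb, dtype=float))
    return np.exp(-thickness * secb / absorption_length)

# 50 um layer, 100 um absorption length, normal exit (sec(beta) = 1): T ~= 0.61
t = transmission_sketch(1.0, 50.0, 100.0)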
@@ -157,18 +168,15 @@ def generate_chunks(nrows, ncols, base_nrows, base_ncols, [[row_start, row_stop], [col_start, col_stop]] """ - row_starts = np.array([i*(base_nrows + row_gap) for i in range(nrows)]) - col_starts = np.array([i*(base_ncols + col_gap) for i in range(ncols)]) + row_starts = np.array([i * (base_nrows + row_gap) for i in range(nrows)]) + col_starts = np.array([i * (base_ncols + col_gap) for i in range(ncols)]) rr = np.vstack([row_starts, row_starts + base_nrows]) cc = np.vstack([col_starts, col_starts + base_ncols]) rects = [] labels = [] for i in range(nrows): for j in range(ncols): - this_rect = np.array( - [[rr[0, i], rr[1, i]], - [cc[0, j], cc[1, j]]] - ) + this_rect = np.array([[rr[0, i], rr[1, i]], [cc[0, j], cc[1, j]]]) rects.append(this_rect) labels.append('%d_%d' % (i, j)) return rects, labels @@ -194,9 +202,11 @@ def chunk_instrument(instr, rects, labels, use_roi=False): """ icfg_dict = instr.write_config() - new_icfg_dict = dict(beam=icfg_dict['beam'], - oscillation_stage=icfg_dict['oscillation_stage'], - detectors={}) + new_icfg_dict = dict( + beam=icfg_dict['beam'], + oscillation_stage=icfg_dict['oscillation_stage'], + detectors={}, + ) for panel_id, panel in instr.detectors.items(): pcfg_dict = panel.config_dict(instr.chi, instr.tvec)['detector'] @@ -206,7 +216,7 @@ def chunk_instrument(instr, rects, labels, use_roi=False): row_col_dim = np.diff(rect) # (2, 1) shape = tuple(row_col_dim.flatten()) - center = (rect[:, 0].reshape(2, 1) + 0.5*row_col_dim) + center = rect[:, 0].reshape(2, 1) + 0.5 * row_col_dim sp_tvec = np.concatenate( [panel.pixelToCart(center.T).flatten(), np.zeros(1)] @@ -231,7 +241,7 @@ def chunk_instrument(instr, rects, labels, use_roi=False): if panel.panel_buffer is not None: if panel.panel_buffer.ndim == 2: # have a mask array! submask = panel.panel_buffer[ - rect[0, 0]:rect[0, 1], rect[1, 0]:rect[1, 1] + rect[0, 0] : rect[0, 1], rect[1, 0] : rect[1, 1] ] new_icfg_dict['detectors'][panel_name]['buffer'] = submask return new_icfg_dict @@ -275,9 +285,7 @@ def _parse_imgser_dict(imgser_dict, det_key, roi=None): images_in = imgser_dict[multi_ims_key] elif np.any(matched_det_keys): if sum(matched_det_keys) != 1: - raise RuntimeError( - f"multiple entries found for '{det_key}'" - ) + raise RuntimeError(f"multiple entries found for '{det_key}'") # use boolean array to index the proper key # !!! 
these should be in the same order img_keys = np.asarray(list(imgser_dict.keys())) @@ -297,7 +305,12 @@ def _parse_imgser_dict(imgser_dict, det_key, roi=None): if isinstance(images_in, ims_classes): # input is an imageseries of some kind - ims = ProcessedImageSeries(images_in, [('rectangle', roi), ]) + ims = ProcessedImageSeries( + images_in, + [ + ('rectangle', roi), + ], + ) if isinstance(images_in, OmegaImageSeries): # if it was an OmegaImageSeries, must re-cast ims = OmegaImageSeries(ims) @@ -305,16 +318,16 @@ def _parse_imgser_dict(imgser_dict, det_key, roi=None): # 2- or 3-d array of images ndim = images_in.ndim if ndim == 2: - ims = images_in[roi[0][0]:roi[0][1], roi[1][0]:roi[1][1]] + ims = images_in[roi[0][0] : roi[0][1], roi[1][0] : roi[1][1]] elif ndim == 3: nrows = roi[0][1] - roi[0][0] ncols = roi[1][1] - roi[1][0] n_images = len(images_in) - ims = np.empty((n_images, nrows, ncols), - dtype=images_in.dtype) + ims = np.empty((n_images, nrows, ncols), dtype=images_in.dtype) - for i, image in images_in: - ims[i, :, :] = \ - images_in[roi[0][0]:roi[0][1], roi[1][0]:roi[1][1]] + for i, image in enumerate(images_in): + ims[i, :, :] = image[ + roi[0][0] : roi[0][1], roi[1][0] : roi[1][1] + ] else: raise RuntimeError( f"image input dim must be 2 or 3; you gave {ndim}" @@ -332,9 +345,8 @@ def calc_beam_vec(azim, pola): tht = np.radians(azim) phi = np.radians(pola) bv = np.r_[ - np.sin(phi)*np.cos(tht), - np.cos(phi), - np.sin(phi)*np.sin(tht)] + np.sin(phi) * np.cos(tht), np.cos(phi), np.sin(phi) * np.sin(tht) ] return -bv @@ -345,9 +357,7 @@ def calc_angles_from_beam_vec(bvec): """ bvec = np.atleast_1d(bvec).flatten() nvec = unit_vector(-bvec) - azim = float( - np.degrees(np.arctan2(nvec[2], nvec[0])) - ) + azim = float(np.degrees(np.arctan2(nvec[2], nvec[0]))) pola = float(np.degrees(np.arccos(nvec[1]))) return azim, pola @@ -371,9 +381,9 @@ def angle_in_range(angle, ranges, ccw=True, units='degrees'): WARNING: always clockwise; assumes wedges are not overlapping """ - tau = 360. + tau = 360.0 if units.lower() == 'radians': - tau = 2*np.pi + tau = 2 * np.pi w = np.nan for i, wedge in enumerate(ranges): amin = wedge[0] @@ -405,7 +415,7 @@ def max_tth(instr): tth_max : float The maximum observable Bragg angle by the instrument in radians. """ - tth_max = 0.
+ tth_max = 0.0 for det in instr.detectors.values(): ptth, peta = det.pixel_angles() tth_max = max(np.max(ptth), tth_max) @@ -437,10 +447,9 @@ def pixel_resolution(instr): ang_ps_full = [] for panel in instr.detectors.values(): angps = panel.angularPixelSize( - np.stack( - panel.pixel_coords, - axis=0 - ).reshape(2, np.cumprod(panel.shape)[-1]).T + np.stack(panel.pixel_coords, axis=0) + .reshape(2, np.cumprod(panel.shape)[-1]) + .T ) ang_ps_full.append(angps) max_tth = min(max_tth, np.min(angps[:, 0])) @@ -472,10 +481,9 @@ def max_resolution(instr): max_eta = np.inf for panel in instr.detectors.values(): angps = panel.angularPixelSize( - np.stack( - panel.pixel_coords, - axis=0 - ).reshape(2, np.cumprod(panel.shape)[-1]).T + np.stack(panel.pixel_coords, axis=0) + .reshape(2, np.cumprod(panel.shape)[-1]) + .T ) max_tth = min(max_tth, np.min(angps[:, 0])) max_eta = min(max_eta, np.min(angps[:, 1])) @@ -483,16 +491,16 @@ def max_resolution(instr): def _gaussian_dist(x, cen, fwhm): - sigm = fwhm/(2*np.sqrt(2*np.log(2))) - return np.exp(-0.5*(x - cen)**2/sigm**2) + sigm = fwhm / (2 * np.sqrt(2 * np.log(2))) + return np.exp(-0.5 * (x - cen) ** 2 / sigm**2) def _sigma_to_fwhm(sigm): - return sigm*ct.sigma_to_fwhm + return sigm * ct.sigma_to_fwhm def _fwhm_to_sigma(fwhm): - return fwhm/ct.sigma_to_fwhm + return fwhm / ct.sigma_to_fwhm # ============================================================================= @@ -508,12 +516,17 @@ class HEDMInstrument(object): * where should reference eta be defined? currently set to default config """ - def __init__(self, instrument_config=None, - image_series=None, eta_vector=None, - instrument_name=None, tilt_calibration_mapping=None, - max_workers=max_workers_DFLT, - physics_package=None, - active_beam_name: Optional[str] = None): + def __init__( + self, + instrument_config=None, + image_series=None, + eta_vector=None, + instrument_name=None, + tilt_calibration_mapping=None, + max_workers=max_workers_DFLT, + physics_package=None, + active_beam_name: Optional[str] = None, + ): self._id = instrument_name_DFLT self._active_beam_name = active_beam_name @@ -538,7 +551,8 @@ def __init__(self, instrument_config=None, # FIXME: must add cylindrical self._detectors = dict( panel_id_DFLT=PlanarDetector( - rows=nrows_DFLT, cols=ncols_DFLT, + rows=nrows_DFLT, + cols=ncols_DFLT, pixel_size=pixel_size_DFLT, tvec=t_vec_d_DFLT, tilt=tilt_params_DFLT, @@ -546,9 +560,11 @@ def __init__(self, instrument_config=None, xrs_dist=self.source_distance, evec=self._eta_vector, distortion=None, - roi=None, group=None, - max_workers=self.max_workers), - ) + roi=None, + group=None, + max_workers=self.max_workers, + ), + ) self._tvec = t_vec_s_DFLT self._chi = chi_DFLT @@ -575,10 +591,7 @@ def __init__(self, instrument_config=None, self.physics_package = instrument_config['physics_package'] xrs_config = instrument_config['beam'] - is_single_beam = ( - 'energy' in xrs_config and - 'vector' in xrs_config - ) + is_single_beam = 'energy' in xrs_config and 'vector' in xrs_config if is_single_beam: # Assume single beam. 
Load the same way as multibeam self._create_default_beam() @@ -635,7 +648,7 @@ def __init__(self, instrument_config=None, elif isinstance(det_buffer, list): panel_buffer = np.asarray(det_buffer) elif np.isscalar(det_buffer): - panel_buffer = det_buffer*np.ones(2) + panel_buffer = det_buffer * np.ones(2) else: raise RuntimeError( "panel buffer spec invalid for %s" % det_id @@ -712,9 +725,9 @@ def mean_detector_center(self) -> np.ndarray: def mean_group_center(self, group: str) -> np.ndarray: """Return the mean center for detectors belonging to a group""" - centers = np.array([ - x.tvec for x in self.detectors_in_group(group).values() - ]) + centers = np.array( + [x.tvec for x in self.detectors_in_group(group).values()] + ) return centers.sum(axis=0) / len(centers) @property @@ -748,10 +761,11 @@ def detector_parameters(self): pdict = {} for key, panel in self.detectors.items(): pdict[key] = panel.config_dict( - self.chi, self.tvec, + self.chi, + self.tvec, beam_energy=self.beam_energy, beam_vector=self.beam_vector, - style='hdf5' + style='hdf5', ) return pdict @@ -855,8 +869,9 @@ def beam_vector(self) -> np.ndarray: def beam_vector(self, x: np.ndarray): x = np.array(x).flatten() if len(x) == 3: - assert sum(x*x) > 1-ct.sqrt_epsf, \ - 'input must have length = 3 and have unit magnitude' + assert ( + sum(x * x) > 1 - ct.sqrt_epsf + ), 'input must have length = 3 and have unit magnitude' bvec = x elif len(x) == 2: bvec = calc_beam_vec(*x) @@ -873,8 +888,9 @@ def source_distance(self): @source_distance.setter def source_distance(self, x): - assert np.isscalar(x), \ - f"'source_distance' must be a scalar; you input '{x}'" + assert np.isscalar( + x + ), f"'source_distance' must be a scalar; you input '{x}'" self.active_beam['distance'] = x self.beam_dict_modified() @@ -885,8 +901,9 @@ def eta_vector(self): @eta_vector.setter def eta_vector(self, x): x = np.array(x).flatten() - assert len(x) == 3 and sum(x*x) > 1-ct.sqrt_epsf, \ - 'input must have length = 3 and have unit magnitude' + assert ( + len(x) == 3 and sum(x * x) > 1 - ct.sqrt_epsf + ), 'input must have length = 3 and have unit magnitude' self._eta_vector = x # ...maybe change dictionary item behavior for 3.x compatibility? for detector_id in self.detectors: @@ -898,10 +915,11 @@ def eta_vector(self, x): # ========================================================================= def write_config(self, file=None, style='yaml', calibration_dict={}): - """ WRITE OUT YAML FILE """ + """WRITE OUT YAML FILE""" # initialize output dictionary - assert style.lower() in ['yaml', 'hdf5'], \ + assert style.lower() in ['yaml', 'hdf5'], ( "style must be either 'yaml', or 'hdf5'; you gave '%s'" % style + ) par_dict = {} @@ -930,10 +948,7 @@ def write_config(self, file=None, style='yaml', calibration_dict={}): if calibration_dict: par_dict['calibration_crystal'] = calibration_dict - ostage = dict( - chi=self.chi, - translation=self.tvec.tolist() - ) + ostage = dict(chi=self.chi, translation=self.tvec.tolist()) par_dict['oscillation_stage'] = ostage det_dict = dict.fromkeys(self.detectors) @@ -941,10 +956,13 @@ def write_config(self, file=None, style='yaml', calibration_dict={}): # grab panel config # !!! don't need beam or tvec # !!! 
have vetted style - pdict = detector.config_dict(chi=self.chi, tvec=self.tvec, - beam_energy=self.beam_energy, - beam_vector=self.beam_vector, - style=style) + pdict = detector.config_dict( + chi=self.chi, + tvec=self.tvec, + beam_energy=self.beam_energy, + beam_vector=self.beam_vector, + style=style, + ) det_dict[det_name] = pdict['detector'] par_dict['detectors'] = det_dict @@ -954,6 +972,7 @@ def write_config(self, file=None, style='yaml', calibration_dict={}): with open(file, 'w') as f: yaml.dump(par_dict, stream=f, Dumper=NumpyToNativeDumper) else: + def _write_group(file): instr_grp = file.create_group('instrument') unwrap_dict_to_h5(instr_grp, par_dict, asattr=False) @@ -969,9 +988,15 @@ def _write_group(file): return par_dict - def extract_polar_maps(self, plane_data, imgser_dict, - active_hkls=None, threshold=None, - tth_tol=None, eta_tol=0.25): + def extract_polar_maps( + self, + plane_data, + imgser_dict, + active_hkls=None, + threshold=None, + tth_tol=None, + eta_tol=0.25, + ): """ Extract eta-omega maps from an imageseries. @@ -995,23 +1020,25 @@ def extract_polar_maps(self, plane_data, imgser_dict, # detectors, so calculate it once # !!! grab first panel panel = next(iter(self.detectors.values())) - pow_angs, pow_xys, tth_ranges, eta_idx, eta_edges = \ + pow_angs, pow_xys, tth_ranges, eta_idx, eta_edges = ( panel.make_powder_rings( - plane_data, merge_hkls=False, - delta_eta=eta_tol, full_output=True + plane_data, + merge_hkls=False, + delta_eta=eta_tol, + full_output=True, ) + ) if active_hkls is not None: - assert hasattr(active_hkls, '__len__'), \ - "active_hkls must be an iterable with __len__" + assert hasattr( + active_hkls, '__len__' + ), "active_hkls must be an iterable with __len__" # need to re-cast for element-wise operations active_hkls = np.array(active_hkls) # these are all active reflection unique hklIDs - active_hklIDs = plane_data.getHKLID( - plane_data.hkls, master=True - ) + active_hklIDs = plane_data.getHKLID(plane_data.hkls, master=True) # find indices idx = np.zeros_like(active_hkls, dtype=int) @@ -1068,9 +1095,14 @@ def extract_polar_maps(self, plane_data, imgser_dict, # Divide up the images among processes tasks = distribute_tasks(len(ims), self.max_workers) - func = partial(_run_histograms, ims=ims, tth_ranges=tth_ranges, - ring_maps=ring_maps, ring_params=ring_params, - threshold=threshold) + func = partial( + _run_histograms, + ims=ims, + tth_ranges=tth_ranges, + ring_maps=ring_maps, + ring_params=ring_params, + threshold=threshold, + ) max_workers = self.max_workers if max_workers == 1 or len(tasks) == 1: @@ -1088,12 +1120,21 @@ def extract_polar_maps(self, plane_data, imgser_dict, return ring_maps_panel, eta_edges - def extract_line_positions(self, plane_data, imgser_dict, - tth_tol=None, eta_tol=1., npdiv=2, - eta_centers=None, - collapse_eta=True, collapse_tth=False, - do_interpolation=True, do_fitting=False, - tth_distortion=None, fitting_kwargs=None): + def extract_line_positions( + self, + plane_data, + imgser_dict, + tth_tol=None, + eta_tol=1.0, + npdiv=2, + eta_centers=None, + collapse_eta=True, + collapse_tth=False, + do_interpolation=True, + do_fitting=False, + tth_distortion=None, + fitting_kwargs=None, + ): """ Perform annular interpolation on diffraction images. 
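Note: each row of the maps produced by extract_polar_maps is an intensity-weighted eta histogram of the pixels in one two-theta ring, computed frame by frame by _run_histograms with histogram1d (fast_histogram when importable, the numpy fallback otherwise, per the try/except above). A reduced sketch of that one-row reduction using the numpy path (names hypothetical):

import numpy as np

def ring_row_sketch(petas, intensities, eta_edges):
    # one ring, one omega frame: weighted counts per eta bin
    hist, _ = np.histogram(petas, bins=eta_edges, weights=intensities)
    return hist

eta_edges = np.radians(np.arange(-180.0, 180.0 + 0.25, 0.25))  # 0.25 deg bins
row = ring_row_sketch(np.radians([-10.0, -10.1, 45.0]),
                      np.array([1.0, 2.0, 3.0]), eta_edges)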
@@ -1168,8 +1209,12 @@ def extract_line_positions(plane_data, imgser_dict, # LOOP OVER DETECTORS # ===================================================================== logger.info("Interpolating ring data") - pbar_dets = partial(tqdm, total=self.num_panels, desc="Detector", - position=self.num_panels) + pbar_dets = partial( + tqdm, + total=self.num_panels, + desc="Detector", + position=self.num_panels, + ) # Split up the workers among the detectors max_workers_per_detector = max(1, self.max_workers // self.num_panels) @@ -1192,23 +1237,26 @@ def extract_line_positions(plane_data, imgser_dict, def make_instr_cfg(panel): return panel.config_dict( - chi=self.chi, tvec=self.tvec, + chi=self.chi, + tvec=self.tvec, beam_energy=self.beam_energy, beam_vector=self.beam_vector, - style='hdf5' + style='hdf5', ) images = [] for detector_id, panel in self.detectors.items(): - images.append(_parse_imgser_dict(imgser_dict, detector_id, - roi=panel.roi)) + images.append( + _parse_imgser_dict(imgser_dict, detector_id, roi=panel.roi) + ) panels = [self.detectors[k] for k in self.detectors] instr_cfgs = [make_instr_cfg(x) for x in panels] pbp_array = np.arange(self.num_panels) iter_args = zip(panels, instr_cfgs, images, pbp_array) - with ProcessPoolExecutor(mp_context=constants.mp_context, - max_workers=self.num_panels) as executor: + with ProcessPoolExecutor( + mp_context=constants.mp_context, max_workers=self.num_panels + ) as executor: results = list(pbar_dets(executor.map(func, iter_args))) panel_data = {} @@ -1217,12 +1265,9 @@ def make_instr_cfg(panel): return panel_data - def simulate_powder_pattern(self, - mat_list, - params=None, - bkgmethod=None, - origin=None, - noise=None): + def simulate_powder_pattern( + self, mat_list, params=None, bkgmethod=None, origin=None, noise=None ): """ Generate powder diffraction images from specified materials. @@ -1261,8 +1306,7 @@ def simulate_powder_pattern, if origin is None: origin = self.tvec origin = np.asarray(origin).squeeze() - assert len(origin) == 3, \ "origin must be a 3-element sequence" + assert len(origin) == 3, "origin must be a 3-element sequence" if bkgmethod is None: bkgmethod = {'chebyshev': 3} @@ -1302,7 +1346,7 @@ def simulate_powder_pattern, # find min and max tth over all panels tth_mi = np.inf - tth_ma = 0. + tth_ma = 0.0 ptth_dict = dict.fromkeys(self.detectors) for det_key, panel in self.detectors.items(): ptth, peta = panel.pixel_angles(origin=origin) @@ -1324,7 +1368,7 @@ def simulate_powder_pattern, ang_res = max_resolution(self) # !!! calc nsteps by oversampling - nsteps = int(np.ceil(2*(tth_ma - tth_mi)/np.degrees(ang_res[0]))) + nsteps = int(np.ceil(2 * (tth_ma - tth_mi) / np.degrees(ang_res[0]))) # evaluation vector for LeBail tth = np.linspace(tth_mi, tth_ma, nsteps) @@ -1333,7 +1377,7 @@ def simulate_powder_pattern, wavelength = [ valWUnit('lp', 'length', self.beam_wavelength, 'angstrom'), - 1.
+ 1.0, ] ''' @@ -1346,23 +1390,25 @@ def simulate_powder_pattern(self, tth = mat.planeData.getTTh() - LP = (1 + np.cos(tth)**2) / \ - np.cos(0.5*tth)/np.sin(0.5*tth)**2 + LP = ( + (1 + np.cos(tth) ** 2) + / np.cos(0.5 * tth) + / np.sin(0.5 * tth) ** 2 + ) intensity[mat.name] = {} - intensity[mat.name]['synchrotron'] = \ + intensity[mat.name]['synchrotron'] = ( mat.planeData.structFact * LP * multiplicity + ) kwargs = { 'expt_spectrum': expt, 'params': params, 'phases': mat_list, - 'wavelength': { - 'synchrotron': wavelength - }, + 'wavelength': {'synchrotron': wavelength}, 'bkgmethod': bkgmethod, 'intensity_init': intensity, - 'peakshape': 'pvtch' + 'peakshape': 'pvtch', } self.WPPFclass = LeBail(**kwargs) @@ -1380,9 +1426,11 @@ def simulate_powder_pattern(self, for det_key, panel in self.detectors.items(): ptth = ptth_dict[det_key] - img = np.interp(np.degrees(ptth), - self.simulated_spectrum.x, - self.simulated_spectrum.y + self.background.y) + img = np.interp( + np.degrees(ptth), + self.simulated_spectrum.x, + self.simulated_spectrum.y + self.background.y, + ) if noise is None: img_dict[det_key] = img @@ -1393,13 +1441,11 @@ def simulate_powder_pattern(self, img /= prev_max if noise.lower() == 'poisson': - im_noise = random_noise(img, - mode='poisson', - clip=True) + im_noise = random_noise(img, mode='poisson', clip=True) mi = im_noise.min() ma = im_noise.max() if ma > mi: - im_noise = (im_noise - mi)/(ma - mi) + im_noise = (im_noise - mi) / (ma - mi) elif noise.lower() == 'gaussian': im_noise = random_noise(img, mode='gaussian', clip=True) @@ -1421,9 +1467,14 @@ def simulate_powder_pattern(self, return img_dict - def simulate_laue_pattern(self, crystal_data, - minEnergy=5., maxEnergy=35., - rmat_s=None, grain_params=None): + def simulate_laue_pattern( + self, + crystal_data, + minEnergy=5.0, + maxEnergy=35.0, + rmat_s=None, + grain_params=None, + ): """ Simulate Laue diffraction over the instrument. @@ -1453,17 +1504,28 @@ def simulate_laue_pattern(self, crystal_data, for det_key, panel in self.detectors.items(): results[det_key] = panel.simulate_laue_pattern( crystal_data, - minEnergy=minEnergy, maxEnergy=maxEnergy, - rmat_s=rmat_s, tvec_s=self.tvec, + minEnergy=minEnergy, + maxEnergy=maxEnergy, + rmat_s=rmat_s, + tvec_s=self.tvec, grain_params=grain_params, - beam_vec=self.beam_vector) + beam_vec=self.beam_vector, + ) return results - def simulate_rotation_series(self, plane_data, grain_param_list, - eta_ranges=[(-np.pi, np.pi), ], - ome_ranges=[(-np.pi, np.pi), ], - ome_period=(-np.pi, np.pi), - wavelength=None): + def simulate_rotation_series( + self, + plane_data, + grain_param_list, + eta_ranges=[ + (-np.pi, np.pi), + ], + ome_ranges=[ + (-np.pi, np.pi), + ], + ome_period=(-np.pi, np.pi), + wavelength=None, + ): """ Simulate a monochromatic rotation series over the instrument. 
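Note: the intensity block above weights each reflection by structure factor, multiplicity, and the Lorentz-polarization factor LP(tth) = (1 + cos^2(tth)) / (cos(tth/2) * sin^2(tth/2)), with tth the full scattering angle 2*theta. A standalone numeric check of that expression (function name hypothetical):

import numpy as np

def lp_factor_sketch(tth):
    # LP = (1 + cos^2(tth)) / (cos(tth/2) * sin^2(tth/2)), tth in radians
    return (1 + np.cos(tth) ** 2) / np.cos(0.5 * tth) / np.sin(0.5 * tth) ** 2

lp = lp_factor_sketch(np.radians(30.0))  # ~27.0 at tth = 30 deg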
@@ -1492,24 +1554,39 @@ def simulate_rotation_series(self, plane_data, grain_param_list, results = dict.fromkeys(self.detectors) for det_key, panel in self.detectors.items(): results[det_key] = panel.simulate_rotation_series( - plane_data, grain_param_list, + plane_data, + grain_param_list, eta_ranges=eta_ranges, ome_ranges=ome_ranges, ome_period=ome_period, - chi=self.chi, tVec_s=self.tvec, - wavelength=wavelength) + chi=self.chi, + tVec_s=self.tvec, + wavelength=wavelength, + ) return results - def pull_spots(self, plane_data, grain_params, - imgser_dict, - tth_tol=0.25, eta_tol=1., ome_tol=1., - npdiv=2, threshold=10, - eta_ranges=[(-np.pi, np.pi), ], - ome_period=None, - dirname='results', filename=None, output_format='text', - return_spot_list=False, - quiet=True, check_only=False, - interp='nearest'): + def pull_spots( + self, + plane_data, + grain_params, + imgser_dict, + tth_tol=0.25, + eta_tol=1.0, + ome_tol=1.0, + npdiv=2, + threshold=10, + eta_ranges=[ + (-np.pi, np.pi), + ], + ome_period=None, + dirname='results', + filename=None, + output_format='text', + return_spot_list=False, + quiet=True, + check_only=False, + interp='nearest', + ): """ Extract reflection info from a rotation series. @@ -1569,12 +1646,14 @@ def pull_spots(self, plane_data, grain_params, # WARNING: all imageseries AND all wedges within are assumed to have # the same omega values; put in a check that they are all the same??? oims0 = next(iter(imgser_dict.values())) - ome_ranges = [np.radians([i['ostart'], i['ostop']]) - for i in oims0.omegawedges.wedges] + ome_ranges = [ + np.radians([i['ostart'], i['ostop']]) + for i in oims0.omegawedges.wedges + ] if ome_period is None: ims = next(iter(imgser_dict.values())) ostart = ims.omega[0, 0] - ome_period = np.radians(ostart + np.r_[0., 360.]) + ome_period = np.radians(ostart + np.r_[0.0, 360.0]) # delta omega in DEGREES grabbed from first imageseries in the dict delta_ome = oims0.omega[0, 1] - oims0.omega[0, 0] @@ -1582,7 +1661,10 @@ def pull_spots(self, plane_data, grain_params, # make omega grid for frame expansion around reference frame # in DEGREES ndiv_ome, ome_del = make_tolerance_grid( - delta_ome, ome_tol, 1, adjust_window=True, + delta_ome, + ome_tol, + 1, + adjust_window=True, ) # generate structuring element for connected component labeling @@ -1593,24 +1675,37 @@ def pull_spots(self, plane_data, grain_params, # simulate rotation series sim_results = self.simulate_rotation_series( - plane_data, [grain_params, ], + plane_data, + [ + grain_params, + ], eta_ranges=eta_ranges, ome_ranges=ome_ranges, - ome_period=ome_period) + ome_period=ome_period, + ) # patch vertex generator (global for instrument) - tol_vec = 0.5*np.radians( - [-tth_tol, -eta_tol, - -tth_tol, eta_tol, - tth_tol, eta_tol, - tth_tol, -eta_tol]) + tol_vec = 0.5 * np.radians( + [ + -tth_tol, + -eta_tol, + -tth_tol, + eta_tol, + tth_tol, + eta_tol, + tth_tol, + -eta_tol, + ] + ) # prepare output if requested if filename is not None and output_format.lower() == 'hdf5': this_filename = os.path.join(dirname, filename) writer = GrainDataWriter_h5( os.path.join(dirname, filename), - self.write_config(), grain_params) + self.write_config(), + grain_params, + ) # ===================================================================== # LOOP OVER PANELS # ===================================================================== @@ -1622,28 +1717,25 @@ def pull_spots(self, plane_data, grain_params, for detector_id, panel in self.detectors.items(): # initialize text-based output writer if filename is not None and output_format.lower() == 'text': - output_dir = os.path.join(
dirname, detector_id - ) + output_dir = os.path.join(dirname, detector_id) os.makedirs(output_dir, exist_ok=True) - this_filename = os.path.join( - output_dir, filename - ) + this_filename = os.path.join(output_dir, filename) writer = PatchDataWriter(this_filename) # grab panel instr_cfg = panel.config_dict( - self.chi, self.tvec, + self.chi, + self.tvec, beam_energy=self.beam_energy, beam_vector=self.beam_vector, - style='hdf5' + style='hdf5', ) native_area = panel.pixel_area # pixel ref area # pull out the OmegaImageSeries for this panel from input dict - ome_imgser = _parse_imgser_dict(imgser_dict, - detector_id, - roi=panel.roi) + ome_imgser = _parse_imgser_dict( + imgser_dict, detector_id, roi=panel.roi + ) # extract simulation results sim_results_p = sim_results[detector_id] @@ -1659,19 +1751,24 @@ def pull_spots(self, plane_data, grain_params, # patch vertex array from sim nangs = len(ang_centers) patch_vertices = ( - np.tile(ang_centers[:, :2], (1, 4)) + - np.tile(tol_vec, (nangs, 1)) - ).reshape(4*nangs, 2) - ome_dupl = np.tile( - ang_centers[:, 2], (4, 1) - ).T.reshape(len(patch_vertices), 1) + np.tile(ang_centers[:, :2], (1, 4)) + + np.tile(tol_vec, (nangs, 1)) + ).reshape(4 * nangs, 2) + ome_dupl = np.tile(ang_centers[:, 2], (4, 1)).T.reshape( + len(patch_vertices), 1 + ) # find vertices that all fall on the panel det_xy, rmats_s, on_plane = xrdutil._project_on_detector_plane( np.hstack([patch_vertices, ome_dupl]), - panel.rmat, rMat_c, self.chi, - panel.tvec, tVec_c, self.tvec, - panel.distortion) + panel.rmat, + rMat_c, + self.chi, + panel.tvec, + tVec_c, + self.tvec, + panel.distortion, + ) _, on_panel = panel.clip_to_panel(det_xy, buffer_edges=True) # all vertices must be on... @@ -1702,7 +1799,9 @@ def pull_spots(self, plane_data, grain_params, if not quiet: msg = """ window for (%d %d %d) falls outside omega range - """ % tuple(hkls_p[i_pt, :]) + """ % tuple( + hkls_p[i_pt, :] + ) print(msg) continue else: @@ -1720,11 +1819,16 @@ def pull_spots(self, plane_data, grain_params, # make the tth,eta patches for interpolation patches = xrdutil.make_reflection_patches( instr_cfg, - ang_centers[:, :2], ang_pixel_size, + ang_centers[:, :2], + ang_pixel_size, omega=ang_centers[:, 2], - tth_tol=tth_tol, eta_tol=eta_tol, - rmat_c=rMat_c, tvec_c=tVec_c, - npdiv=npdiv, quiet=True) + tth_tol=tth_tol, + eta_tol=eta_tol, + rmat_c=rMat_c, + tvec_c=tVec_c, + npdiv=npdiv, + quiet=True, + ) # GRAND LOOP over reflections for this panel patch_output = [] @@ -1734,7 +1838,7 @@ def pull_spots(self, plane_data, grain_params, vtx_angs, vtx_xy, conn, areas, xy_eval, ijs = patch prows, pcols = areas.shape - nrm_fac = areas/float(native_area) + nrm_fac = areas / float(native_area) nrm_fac = nrm_fac / np.min(nrm_fac) # grab hkl info @@ -1748,8 +1852,9 @@ def pull_spots(self, plane_data, grain_params, delta_eta = eta_edges[1] - eta_edges[0] # need to reshape eval pts for interpolation - xy_eval = np.vstack([xy_eval[0].flatten(), - xy_eval[1].flatten()]).T + xy_eval = np.vstack( + [xy_eval[0].flatten(), xy_eval[1].flatten()] + ).T # the evaluation omegas; # expand about the central value using tol vector @@ -1764,7 +1869,9 @@ def pull_spots(self, plane_data, grain_params, if not quiet: msg = """ window for (%d%d%d) falls outside omega range - """ % tuple(hkl) + """ % tuple( + hkl + ) print(msg) continue else: @@ -1773,8 +1880,8 @@ def pull_spots(self, plane_data, grain_params, peak_id = next_invalid_peak_id sum_int = np.nan max_int = np.nan - meas_angs = np.nan*np.ones(3) - meas_xy = np.nan*np.ones(2) + 
meas_angs = np.nan * np.ones(3) + meas_xy = np.nan * np.ones(2) # quick check for intensity contains_signal = False @@ -1792,19 +1899,23 @@ def pull_spots(self, plane_data, grain_params, # initialize patch data array for intensities if interp.lower() == 'bilinear': patch_data = np.zeros( - (len(frame_indices), prows, pcols)) + (len(frame_indices), prows, pcols) + ) for i, i_frame in enumerate(frame_indices): - patch_data[i] = \ - panel.interpolate_bilinear( - xy_eval, - ome_imgser[i_frame], - pad_with_nans=False - ).reshape(prows, pcols) # * nrm_fac + patch_data[i] = panel.interpolate_bilinear( + xy_eval, + ome_imgser[i_frame], + pad_with_nans=False, + ).reshape( + prows, pcols + ) # * nrm_fac elif interp.lower() == 'nearest': patch_data = patch_data_raw # * nrm_fac else: - msg = "interpolation option " + \ - "'%s' not understood" + msg = ( + "interpolation option " + + "'%s' not understood" + ) raise RuntimeError(msg % interp) # now have interpolated patch data... @@ -1817,9 +1928,10 @@ def pull_spots(self, plane_data, grain_params, peak_id = iRefl props = regionprops(labels, patch_data) coms = np.vstack( - [x.weighted_centroid for x in props]) + [x.weighted_centroid for x in props] + ) if num_peaks > 1: - center = np.r_[patch_data.shape]*0.5 + center = np.r_[patch_data.shape] * 0.5 center_t = np.tile(center, (num_peaks, 1)) com_diff = coms - center_t closest_peak_idx = np.argmin( @@ -1830,15 +1942,17 @@ def pull_spots(self, plane_data, grain_params, coms = coms[closest_peak_idx] # meas_omes = \ # ome_edges[0] + (0.5 + coms[0])*delta_ome - meas_omes = \ - ome_eval[0] + coms[0]*delta_ome + meas_omes = ome_eval[0] + coms[0] * delta_ome meas_angs = np.hstack( - [tth_edges[0] + (0.5 + coms[2])*delta_tth, - eta_edges[0] + (0.5 + coms[1])*delta_eta, - mapAngle( - np.radians(meas_omes), ome_period - ) - ] + [ + tth_edges[0] + + (0.5 + coms[2]) * delta_tth, + eta_edges[0] + + (0.5 + coms[1]) * delta_eta, + mapAngle( + np.radians(meas_omes), ome_period + ), + ] ) # intensities @@ -1865,15 +1979,21 @@ def pull_spots(self, plane_data, grain_params, meas_angs, chi=self.chi, rmat_c=rMat_c, - beam_vec=self.beam_vector) + beam_vec=self.beam_vector, + ) rMat_s = make_sample_rmat( self.chi, meas_angs[2] ) meas_xy = gvec_to_xy( gvec_c, - panel.rmat, rMat_s, rMat_c, - panel.tvec, self.tvec, tVec_c, - beam_vec=self.beam_vector) + panel.rmat, + rMat_s, + rMat_c, + panel.tvec, + self.tvec, + tVec_c, + beam_vec=self.beam_vector, + ) if panel.distortion is not None: meas_xy = panel.distortion.apply_inverse( np.atleast_2d(meas_xy) @@ -1892,19 +2012,38 @@ def pull_spots(self, plane_data, grain_params, if filename is not None: if output_format.lower() == 'text': writer.dump_patch( - peak_id, hkl_id, hkl, sum_int, max_int, - ang_centers[i_pt], meas_angs, - xy_centers[i_pt], meas_xy) + peak_id, + hkl_id, + hkl, + sum_int, + max_int, + ang_centers[i_pt], + meas_angs, + xy_centers[i_pt], + meas_xy, + ) elif output_format.lower() == 'hdf5': xyc_arr = xy_eval.reshape( prows, pcols, 2 ).transpose(2, 0, 1) writer.dump_patch( - detector_id, iRefl, peak_id, hkl_id, hkl, - tth_edges, eta_edges, np.radians(ome_eval), - xyc_arr, ijs, frame_indices, patch_data, - ang_centers[i_pt], xy_centers[i_pt], - meas_angs, meas_xy) + detector_id, + iRefl, + peak_id, + hkl_id, + hkl, + tth_edges, + eta_edges, + np.radians(ome_eval), + xyc_arr, + ijs, + frame_indices, + patch_data, + ang_centers[i_pt], + xy_centers[i_pt], + meas_angs, + meas_xy, + ) if return_spot_list: # Full output @@ -1912,17 +2051,34 @@ def pull_spots(self, plane_data, 
grain_params, prows, pcols, 2 ).transpose(2, 0, 1) _patch_output = [ - detector_id, iRefl, peak_id, hkl_id, hkl, - tth_edges, eta_edges, np.radians(ome_eval), - xyc_arr, ijs, frame_indices, patch_data, - ang_centers[i_pt], xy_centers[i_pt], - meas_angs, meas_xy + detector_id, + iRefl, + peak_id, + hkl_id, + hkl, + tth_edges, + eta_edges, + np.radians(ome_eval), + xyc_arr, + ijs, + frame_indices, + patch_data, + ang_centers[i_pt], + xy_centers[i_pt], + meas_angs, + meas_xy, ] else: # Trimmed output _patch_output = [ - peak_id, hkl_id, hkl, sum_int, max_int, - ang_centers[i_pt], meas_angs, meas_xy + peak_id, + hkl_id, + hkl, + sum_int, + max_int, + ang_centers[i_pt], + meas_angs, + meas_xy, ] patch_output.append(_patch_output) iRefl += 1 @@ -1940,7 +2096,9 @@ def update_memoization_sizes(self): PlanarDetector.update_memoization_sizes(all_panels) CylindricalDetector.update_memoization_sizes(all_panels) - def calc_transmission(self, rMat_s: np.ndarray = None) -> dict[str, np.ndarray]: + def calc_transmission( + self, rMat_s: np.ndarray = None + ) -> dict[str, np.ndarray]: """calculate the transmission from the filter and polymer coating. the inverse of this number is the intensity correction that needs @@ -1954,26 +2112,31 @@ def calc_transmission(self, rMat_s: np.ndarray = None) -> dict[str, np.ndarray]: transmissions = {} for det_name, det in self.detectors.items(): transmission_filter, transmission_phosphor = ( - det.calc_filter_coating_transmission(energy)) + det.calc_filter_coating_transmission(energy) + ) transmission = transmission_filter * transmission_phosphor if self.physics_package is not None: transmission_physics_package = ( det.calc_physics_package_transmission( - energy, rMat_s, self.physics_package)) + energy, rMat_s, self.physics_package + ) + ) effective_pinhole_area = det.calc_effective_pinhole_area( - self.physics_package) + self.physics_package + ) transmission = ( - transmission * - transmission_physics_package * - effective_pinhole_area + transmission + * transmission_physics_package + * effective_pinhole_area ) transmissions[det_name] = transmission return transmissions + # ============================================================================= # UTILITIES # ============================================================================= @@ -1984,6 +2147,7 @@ class PatchDataWriter(object): def __init__(self, filename): self._delim = ' ' + # fmt: off header_items = ( '# ID', 'PID', 'H', 'K', 'L', @@ -1998,6 +2162,7 @@ def __init__(self, filename): self._delim.join(np.tile('{:<12}', 2)).format(*header_items[5:7]), self._delim.join(np.tile('{:<23}', 10)).format(*header_items[7:17]) ]) + # fmt: on if isinstance(filename, IOBase): self.fid = filename else: @@ -2010,30 +2175,34 @@ def __del__(self): def close(self): self.fid.close() - def dump_patch(self, peak_id, hkl_id, - hkl, spot_int, max_int, - pangs, mangs, pxy, mxy): + def dump_patch( + self, peak_id, hkl_id, hkl, spot_int, max_int, pangs, mangs, pxy, mxy + ): """ !!! 
maybe need to check that last four inputs are arrays """ if mangs is None: spot_int = np.nan max_int = np.nan - mangs = np.nan*np.ones(3) - mxy = np.nan*np.ones(2) - - res = [int(peak_id), int(hkl_id)] \ - + np.array(hkl, dtype=int).tolist() \ - + [spot_int, max_int] \ - + pangs.tolist() \ - + mangs.tolist() \ - + pxy.tolist() \ + mangs = np.nan * np.ones(3) + mxy = np.nan * np.ones(2) + + res = ( + [int(peak_id), int(hkl_id)] + + np.array(hkl, dtype=int).tolist() + + [spot_int, max_int] + + pangs.tolist() + + mangs.tolist() + + pxy.tolist() + mxy.tolist() + ) output_str = self._delim.join( - [self._delim.join(np.tile('{:<6d}', 5)).format(*res[:5]), - self._delim.join(np.tile('{:<12e}', 2)).format(*res[5:7]), - self._delim.join(np.tile('{:<23.16e}', 10)).format(*res[7:])] + [ + self._delim.join(np.tile('{:<6d}', 5)).format(*res[:5]), + self._delim.join(np.tile('{:<12e}', 2)).format(*res[5:7]), + self._delim.join(np.tile('{:<23.16e}', 10)).format(*res[7:]), + ] ) print(output_str, file=self.fid) return output_str @@ -2049,20 +2218,23 @@ def __init__(self, filename=None, array=None): """ if filename is None and array is None: raise RuntimeError( - 'GrainDataWriter must be specified with filename or array') + 'GrainDataWriter must be specified with filename or array' + ) self.array = None self.fid = None # array supersedes filename if array is not None: - assert array.shape[1] == 21, \ - f'grain data table must have 21 columns not {array.shape[21]}' + assert ( + array.shape[1] == 21 + ), f'grain data table must have 21 columns not {array.shape[1]}' self.array = array self._array_row = 0 return self._delim = ' ' + # fmt: off header_items = ( '# grain ID', 'completeness', 'chi^2', 'exp_map_c[0]', 'exp_map_c[1]', 'exp_map_c[2]', 't_vec_c[0]', 't_vec_c[1]', 't_vec_c[2]', 'inv(V_s)[0,0]', 'inv(V_s)[1,1]', 'inv(V_s)[2,2]', 'inv(V_s)[1,2]', 'inv(V_s)[0,2]', 'inv(V_s)[0,1]', 'ln(V_s)[0,0]', 'ln(V_s)[1,1]', 'ln(V_s)[2,2]', 'ln(V_s)[1,2]', 'ln(V_s)[0,2]', 'ln(V_s)[0,1]' ) + # fmt: on self._header = self._delim.join( - [self._delim.join( - np.tile('{:<12}', 3) - ).format(*header_items[:3]), - self._delim.join( - np.tile('{:<23}', len(header_items) - 3) - ).format(*header_items[3:])] + [ + self._delim.join(np.tile('{:<12}', 3)).format( + *header_items[:3] + ), + self._delim.join( + np.tile('{:<23}', len(header_items) - 3) + ).format(*header_items[3:]), + ] ) if isinstance(filename, IOBase): self.fid = filename else: @@ -2095,35 +2271,40 @@ def close(self): if self.fid is not None: self.fid.close() - def dump_grain(self, grain_id, completeness, chisq, - grain_params): - assert len(grain_params) == 12, \ - "len(grain_params) must be 12, not %d" % len(grain_params) + def dump_grain(self, grain_id, completeness, chisq, grain_params): + assert ( + len(grain_params) == 12 + ), "len(grain_params) must be 12, not %d" % len(grain_params) # extract strain emat = logm(np.linalg.inv(mutil.vecMVToSymm(grain_params[6:]))) evec = mutil.symmToVecMV(emat, scale=False) - res = [int(grain_id), completeness, chisq] \ - + grain_params.tolist() \ + evec.tolist() + res = ( + [int(grain_id), completeness, chisq] + + grain_params.tolist() + + evec.tolist() + ) if self.array is not None: row = self._array_row - assert row < self.array.shape[0], \ - f'invalid row {row} in array table' + assert ( + row < self.array.shape[0] + ), f'invalid row {row} in array table' self.array[row] = res self._array_row += 1 return res # (else) format and write to file output_str = self._delim.join( - [self._delim.join( - ['{:<12d}', '{:<12f}', '{:<12e}'] - ).format(*res[:3]), - self._delim.join( - np.tile('{:<23.16e}',
len(res) - 3) - ).format(*res[3:])] + [ + self._delim.join(['{:<12d}', '{:<12f}', '{:<12e}']).format( + *res[:3] + ), + self._delim.join(np.tile('{:<23.16e}', len(res) - 3)).format( + *res[3:] + ), + ] ) print(output_str, file=self.fid) return output_str @@ -2153,12 +2334,12 @@ def __init__(self, filename, instr_cfg, grain_params, use_attr=False): vinv_s = np.array(grain_params[6:]).flatten() vmat_s = np.linalg.inv(mutil.vecMVToSymm(vinv_s)) - if use_attr: # attribute version + if use_attr: # attribute version self.grain_grp.attrs.create('rmat_c', rmat_c) self.grain_grp.attrs.create('tvec_c', tvec_c) self.grain_grp.attrs.create('inv(V)_s', vinv_s) self.grain_grp.attrs.create('vmat_s', vmat_s) - else: # dataset version + else: # dataset version self.grain_grp.create_dataset('rmat_c', data=rmat_c) self.grain_grp.create_dataset('tvec_c', data=tvec_c) self.grain_grp.create_dataset('inv(V)_s', data=vinv_s) @@ -2177,11 +2358,26 @@ def __init__(self, filename, instr_cfg, grain_params, use_attr=False): def close(self): self.fid.close() - def dump_patch(self, panel_id, - i_refl, peak_id, hkl_id, hkl, - tth_edges, eta_edges, ome_centers, - xy_centers, ijs, frame_indices, - spot_data, pangs, pxy, mangs, mxy, gzip=1): + def dump_patch( + self, + panel_id, + i_refl, + peak_id, + hkl_id, + hkl, + tth_edges, + eta_edges, + ome_centers, + xy_centers, + ijs, + frame_indices, + spot_data, + pangs, + pxy, + mangs, + mxy, + gzip=1, + ): """ to be called inside loop over patches @@ -2197,10 +2393,10 @@ def dump_patch(self, panel_id, spot_grp.attrs.create('predicted_angles', pangs) spot_grp.attrs.create('predicted_xy', pxy) if mangs is None: - mangs = np.nan*np.ones(3) + mangs = np.nan * np.ones(3) spot_grp.attrs.create('measured_angles', mangs) if mxy is None: - mxy = np.nan*np.ones(3) + mxy = np.nan * np.ones(3) spot_grp.attrs.create('measured_xy', mxy) # get centers crds from edge arrays @@ -2219,27 +2415,55 @@ def dump_patch(self, panel_id, eta_crd = centers_of_edge_vec(eta_edges) shuffle_data = True # reduces size by 20% - spot_grp.create_dataset('tth_crd', data=tth_crd, - compression="gzip", compression_opts=gzip, - shuffle=shuffle_data) - spot_grp.create_dataset('eta_crd', data=eta_crd, - compression="gzip", compression_opts=gzip, - shuffle=shuffle_data) - spot_grp.create_dataset('ome_crd', data=ome_centers, - compression="gzip", compression_opts=gzip, - shuffle=shuffle_data) - spot_grp.create_dataset('xy_centers', data=xy_centers, - compression="gzip", compression_opts=gzip, - shuffle=shuffle_data) - spot_grp.create_dataset('ij_centers', data=ijs, - compression="gzip", compression_opts=gzip, - shuffle=shuffle_data) - spot_grp.create_dataset('frame_indices', data=fi, - compression="gzip", compression_opts=gzip, - shuffle=shuffle_data) - spot_grp.create_dataset('intensities', data=spot_data, - compression="gzip", compression_opts=gzip, - shuffle=shuffle_data) + spot_grp.create_dataset( + 'tth_crd', + data=tth_crd, + compression="gzip", + compression_opts=gzip, + shuffle=shuffle_data, + ) + spot_grp.create_dataset( + 'eta_crd', + data=eta_crd, + compression="gzip", + compression_opts=gzip, + shuffle=shuffle_data, + ) + spot_grp.create_dataset( + 'ome_crd', + data=ome_centers, + compression="gzip", + compression_opts=gzip, + shuffle=shuffle_data, + ) + spot_grp.create_dataset( + 'xy_centers', + data=xy_centers, + compression="gzip", + compression_opts=gzip, + shuffle=shuffle_data, + ) + spot_grp.create_dataset( + 'ij_centers', + data=ijs, + compression="gzip", + compression_opts=gzip, + 
shuffle=shuffle_data, + ) + spot_grp.create_dataset( + 'frame_indices', + data=fi, + compression="gzip", + compression_opts=gzip, + shuffle=shuffle_data, + ) + spot_grp.create_dataset( + 'intensities', + data=spot_data, + compression="gzip", + compression_opts=gzip, + shuffle=shuffle_data, + ) return @@ -2261,9 +2485,16 @@ class GenerateEtaOmeMaps(object): """ - def __init__(self, image_series_dict, instrument, plane_data, - active_hkls=None, eta_step=0.25, threshold=None, - ome_period=(0, 360)): + def __init__( + self, + image_series_dict, + instrument, + plane_data, + active_hkls=None, + eta_step=0.25, + threshold=None, + ome_period=(0, 360), + ): """ image_series must be OmegaImageSeries class instrument_params must be a dict (loaded from yaml spec) @@ -2277,13 +2508,12 @@ def __init__(self, image_series_dict, instrument, plane_data, # ???: change name of iHKLList? # ???: can we change the behavior of iHKLList? if active_hkls is None: - self._iHKLList = plane_data.getHKLID( - plane_data.hkls, master=True - ) + self._iHKLList = plane_data.getHKLID(plane_data.hkls, master=True) n_rings = len(self._iHKLList) else: - assert hasattr(active_hkls, '__len__'), \ - "active_hkls must be an iterable with __len__" + assert hasattr( + active_hkls, '__len__' + ), "active_hkls must be an iterable with __len__" self._iHKLList = active_hkls n_rings = len(active_hkls) @@ -2298,14 +2528,18 @@ def __init__(self, image_series_dict, instrument, plane_data, omegas_array = this_det_ims.metadata['omega'] # !!! DEGREES delta_ome = omegas_array[0][-1] - omegas_array[0][0] frame_mask = None - ome_period = omegas_array[0, 0] + np.r_[0., 360.] # !!! be careful + ome_period = omegas_array[0, 0] + np.r_[0.0, 360.0] # !!! be careful if this_det_ims.omegawedges.nwedges > 1: - delta_omes = [(i['ostop'] - i['ostart'])/i['nsteps'] - for i in this_det_ims.omegawedges.wedges] - check_wedges = mutil.uniqueVectors(np.atleast_2d(delta_omes), - tol=1e-6).squeeze() - assert check_wedges.size == 1, \ - "all wedges must have the same delta omega to 1e-6" + delta_omes = [ + (i['ostop'] - i['ostart']) / i['nsteps'] + for i in this_det_ims.omegawedges.wedges + ] + check_wedges = mutil.uniqueVectors( + np.atleast_2d(delta_omes), tol=1e-6 + ).squeeze() + assert ( + check_wedges.size == 1 + ), "all wedges must have the same delta omega to 1e-6" # grab representative delta ome # !!! assuming positive delta consistent with OmegaImageSeries delta_ome = delta_omes[0] @@ -2321,9 +2555,9 @@ def __init__(self, image_series_dict, instrument, plane_data, ) # compute total nsteps # FIXME: need check for roundoff badness - nsteps = int((ostop - ostart)/delta_ome) + nsteps = int((ostop - ostart) / delta_ome) ome_edges_full = np.linspace( - ostart, ostop, num=nsteps+1, endpoint=True + ostart, ostop, num=nsteps + 1, endpoint=True ) omegas_array = np.vstack( [ome_edges_full[:-1], ome_edges_full[1:]] @@ -2334,15 +2568,21 @@ def __init__(self, image_series_dict, instrument, plane_data, # !!! this array has -1 outside a wedge # !!! again assuming the valid frame order increases monotonically frame_mask = np.array( - [this_det_ims.omega_to_frame(ome)[0] != -1 - for ome in ome_centers] + [ + this_det_ims.omega_to_frame(ome)[0] != -1 + for ome in ome_centers + ] ) # ???: need to pass a threshold? 
eta_mapping, etas = instrument.extract_polar_maps( - plane_data, image_series_dict, - active_hkls=active_hkls, threshold=threshold, - tth_tol=None, eta_tol=eta_step) + plane_data, + image_series_dict, + active_hkls=active_hkls, + threshold=threshold, + tth_tol=None, + eta_tol=eta_step, + ) # for convenience grab map shape from first map_shape = next(iter(eta_mapping.values())).shape[1:] @@ -2369,7 +2609,7 @@ def __init__(self, image_series_dict, instrument, plane_data, if frame_mask is not None: # !!! must expand row dimension to include # skipped omegas - tmp = np.ones((len(frame_mask), map_shape[1]))*np.nan + tmp = np.ones((len(frame_mask), map_shape[1])) * np.nan tmp[frame_mask, :] = full_map full_map = tmp data_store.append(full_map) @@ -2378,11 +2618,11 @@ def __init__(self, image_series_dict, instrument, plane_data, # set required attributes self._omegas = mapAngle( np.radians(np.average(omegas_array, axis=1)), - np.radians(ome_period) + np.radians(ome_period), ) self._omeEdges = mapAngle( np.radians(np.r_[omegas_array[:, 0], omegas_array[-1, 1]]), - np.radians(ome_period) + np.radians(ome_period), ) # !!! must avoid the case where omeEdges[0] = omeEdges[-1] for the @@ -2396,7 +2636,7 @@ def __init__(self, image_series_dict, instrument, plane_data, # WARNING: unlike the omegas in imageseries metadata, # these are in RADIANS and represent bin centers self._etaEdges = etas - self._etas = self._etaEdges[:-1] + 0.5*np.radians(eta_step) + self._etas = self._etaEdges[:-1] + 0.5 * np.radians(eta_step) @property def dataStore(self): @@ -2432,9 +2672,7 @@ def save(self, filename): def _generate_ring_params(tthr, ptth, peta, eta_edges, delta_eta): # mark pixels in the spec'd tth range - pixels_in_tthr = np.logical_and( - ptth >= tthr[0], ptth <= tthr[1] - ) + pixels_in_tthr = np.logical_and(ptth >= tthr[0], ptth <= tthr[1]) # catch case where ring isn't on detector if not np.any(pixels_in_tthr): @@ -2451,8 +2689,7 @@ def _generate_ring_params(tthr, ptth, peta, eta_edges, delta_eta): def run_fast_histogram(x, bins, weights=None): - return histogram1d(x, len(bins) - 1, (bins[0], bins[-1]), - weights=weights) + return histogram1d(x, len(bins) - 1, (bins[0], bins[-1]), weights=weights) def run_numpy_histogram(x, bins, weights=None): @@ -2470,7 +2707,7 @@ def _run_histograms(rows, ims, tth_ranges, ring_maps, ring_params, threshold): if threshold is not None: # !!! NaNs get preserved image = np.array(image) - image[image < threshold] = 0.
+ image[image < threshold] = 0.0 for i_r, tthr in enumerate(tth_ranges): this_map = ring_maps[i_r] @@ -2487,12 +2724,21 @@ def _run_histograms(rows, ims, tth_ranges, ring_maps, ring_params, threshold): this_map[i_row, bins_on_detector] = result[bins_on_detector] -def _extract_detector_line_positions(iter_args, plane_data, tth_tol, - eta_tol, eta_centers, npdiv, - collapse_tth, collapse_eta, - do_interpolation, do_fitting, - fitting_kwargs, tth_distortion, - max_workers): +def _extract_detector_line_positions( + iter_args, + plane_data, + tth_tol, + eta_tol, + eta_centers, + npdiv, + collapse_tth, + collapse_eta, + do_interpolation, + do_fitting, + fitting_kwargs, + tth_distortion, + max_workers, +): panel, instr_cfg, images, pbp = iter_args if images.ndim == 2: @@ -2507,9 +2753,13 @@ def _extract_detector_line_positions(iter_args, plane_data, tth_tol, tth_distr_cls = tth_distortion[panel.name] pow_angs, pow_xys, tth_ranges = panel.make_powder_rings( - plane_data, merge_hkls=True, - delta_tth=tth_tol, delta_eta=eta_tol, - eta_list=eta_centers, tth_distortion=tth_distr_cls) + plane_data, + merge_hkls=True, + delta_tth=tth_tol, + delta_eta=eta_tol, + eta_list=eta_centers, + tth_distortion=tth_distr_cls, + ) tth_tols = np.degrees(np.hstack([i[1] - i[0] for i in tth_ranges])) @@ -2524,8 +2774,9 @@ def _extract_detector_line_positions(iter_args, plane_data, tth_tol, # ================================================================= # LOOP OVER RING SETS # ================================================================= - pbar_rings = partial(tqdm, total=len(pow_angs), desc="Ringset", - position=pbp) + pbar_rings = partial( + tqdm, total=len(pow_angs), desc="Ringset", position=pbp + ) kwargs = { 'instr_cfg': instr_cfg, @@ -2542,15 +2793,26 @@ def _extract_detector_line_positions(iter_args, plane_data, tth_tol, } func = partial(_extract_ring_line_positions, **kwargs) iter_arg = zip(pow_angs, pow_xys, tth_tols, tth0) - with ProcessPoolExecutor(mp_context=constants.mp_context, - max_workers=max_workers) as executor: + with ProcessPoolExecutor( + mp_context=constants.mp_context, max_workers=max_workers + ) as executor: return list(pbar_rings(executor.map(func, iter_arg))) -def _extract_ring_line_positions(iter_args, instr_cfg, panel, eta_tol, npdiv, - collapse_tth, collapse_eta, images, - do_interpolation, do_fitting, fitting_kwargs, - tth_distortion): +def _extract_ring_line_positions( + iter_args, + instr_cfg, + panel, + eta_tol, + npdiv, + collapse_tth, + collapse_eta, + images, + do_interpolation, + do_fitting, + fitting_kwargs, + tth_distortion, +): """ Extracts data for a single Debye-Scherrer ring . 
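The parallel ringset loop above follows a compact pattern: bind the per-call keyword arguments with functools.partial, wrap the executor's map in a tqdm progress bar, and collect the results into a list. A minimal, self-contained sketch of that pattern follows; the worker _work and its arguments are hypothetical stand-ins, not hexrd code:

from concurrent.futures import ProcessPoolExecutor
from functools import partial

from tqdm import tqdm


def _work(args, scale):
    # stand-in for _extract_ring_line_positions; args is one "ringset"
    x, y = args
    return scale * (x + y)


if __name__ == '__main__':
    iter_arg = zip(range(4), range(4, 8))
    pbar_rings = partial(tqdm, total=4, desc="Ringset")
    func = partial(_work, scale=2.0)
    with ProcessPoolExecutor(max_workers=2) as executor:
        print(list(pbar_rings(executor.map(func, iter_arg))))
    # -> [8.0, 12.0, 16.0, 20.0]

The worker must be a module-level function (here bound via partial) so the process pool can pickle it, which is why the patch binds keyword arguments with partial rather than using a closure.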
@@ -2598,16 +2860,22 @@ def _extract_ring_line_positions(iter_args, instr_cfg, panel, eta_tol, npdiv, nan_mask = ~np.logical_or(np.isnan(xys), np.isnan(angs)) nan_mask = np.logical_or.reduce(nan_mask, 1) if angs.ndim > 1 and xys.ndim > 1: - angs = angs[nan_mask,:] - xys = xys[nan_mask, :] + angs = angs[nan_mask, :] + xys = xys[nan_mask, :] n_images = len(images) native_area = panel.pixel_area # make the tth,eta patches for interpolation patches = xrdutil.make_reflection_patches( - instr_cfg, angs, panel.angularPixelSize(xys), - tth_tol=tth_tol, eta_tol=eta_tol, npdiv=npdiv, quiet=True) + instr_cfg, + angs, + panel.angularPixelSize(xys), + tth_tol=tth_tol, + eta_tol=eta_tol, + npdiv=npdiv, + quiet=True, + ) # loop over patches # FIXME: fix initialization @@ -2620,9 +2888,7 @@ def _extract_ring_line_positions(iter_args, instr_cfg, panel, eta_tol, npdiv, vtx_angs, vtx_xys, conn, areas, xys_eval, ijs = patch # need to reshape eval pts for interpolation - xy_eval = np.vstack([ - xys_eval[0].flatten(), - xys_eval[1].flatten()]).T + xy_eval = np.vstack([xys_eval[0].flatten(), xys_eval[1].flatten()]).T _, on_panel = panel.clip_to_panel(xy_eval) @@ -2630,25 +2896,20 @@ def _extract_ring_line_positions(iter_args, instr_cfg, panel, eta_tol, npdiv, continue if collapse_tth: - ang_data = (vtx_angs[0][0, [0, -1]], - vtx_angs[1][[0, -1], 0]) + ang_data = (vtx_angs[0][0, [0, -1]], vtx_angs[1][[0, -1], 0]) elif collapse_eta: # !!! yield the tth bin centers tth_centers = np.average( - np.vstack( - [vtx_angs[0][0, :-1], vtx_angs[0][0, 1:]] - ), - axis=0 + np.vstack([vtx_angs[0][0, :-1], vtx_angs[0][0, 1:]]), axis=0 ) - ang_data = (tth_centers, - angs[i_p][-1]) + ang_data = (tth_centers, angs[i_p][-1]) if do_fitting: fit_data = [] else: ang_data = vtx_angs prows, pcols = areas.shape - area_fac = areas/float(native_area) + area_fac = areas / float(native_area) # interpolate if not collapse_tth: @@ -2657,19 +2918,22 @@ def _extract_ring_line_positions(iter_args, instr_cfg, panel, eta_tol, npdiv, # catch interpolation type image = images[j_p] if do_interpolation: - p_img = panel.interpolate_bilinear( + p_img = ( + panel.interpolate_bilinear( xy_eval, image, - ).reshape(prows, pcols)*area_fac + ).reshape(prows, pcols) + * area_fac + ) else: - p_img = image[ijs[0], ijs[1]]*area_fac + p_img = image[ijs[0], ijs[1]] * area_fac # catch flat spectrum data, which will cause # fitting to fail. # ???: best here, or make fitting handle it? mxval = np.max(p_img) mnval = np.min(p_img) - if mxval == 0 or (1. - mnval/mxval) < 0.01: + if mxval == 0 or (1.0 - mnval / mxval) < 0.01: continue # catch collapsing options @@ -2686,11 +2950,16 @@ def _extract_ring_line_positions(iter_args, instr_cfg, panel, eta_tol, npdiv, tmp = tth_distortion.apply( panel.angles_to_cart( np.vstack( - [np.radians(this_tth0), - np.tile(ang_data[-1], len(this_tth0))] + [ + np.radians(this_tth0), + np.tile( + ang_data[-1], len(this_tth0) + ), + ] ).T ), - return_nominal=True) + return_nominal=True, + ) pk_centers = np.degrees(tmp[:, 0]) else: pk_centers = this_tth0 diff --git a/hexrd/hedm/instrument/physics_package.py b/hexrd/hedm/instrument/physics_package.py index 7b77f5e10..e0af72b8f 100644 --- a/hexrd/hedm/instrument/physics_package.py +++ b/hexrd/hedm/instrument/physics_package.py @@ -43,22 +43,24 @@ class AbstractPhysicsPackage: Readout models for BaFBr0.85I0.15:Eu image plates Rev. Sci. Instrum. 
 89, 063101 (2018)
     """
+
     # Abstract methods that must be redefined in derived classes
     @property
     @abstractmethod
     def type(self):
         pass
 
-    def __init__(self,
-                 sample_material=None,
-                 sample_density=None,
-                 sample_thickness=None,
-                 pinhole_material=None,
-                 pinhole_density=None,
-                 pinhole_thickness=None,
-                 pinhole_diameter=None,
-                 **kwargs
-                 ):
+    def __init__(
+        self,
+        sample_material=None,
+        sample_density=None,
+        sample_thickness=None,
+        pinhole_material=None,
+        pinhole_density=None,
+        pinhole_thickness=None,
+        pinhole_diameter=None,
+        **kwargs,
+    ):
         self._sample_material = sample_material
         self._sample_density = sample_density
         self._sample_thickness = sample_thickness
@@ -164,20 +166,23 @@ def absorption_length(self, energy, flag):
             energy_inp = energy
 
         if flag.lower() == 'sample':
-            args = (self.sample_density,
-                    self.sample_material,
-                    energy_inp,
-                    )
+            args = (
+                self.sample_density,
+                self.sample_material,
+                energy_inp,
+            )
         elif flag.lower() == 'window':
-            args = (self.window_density,
-                    self.window_material,
-                    energy_inp,
-                    )
+            args = (
+                self.window_density,
+                self.window_material,
+                energy_inp,
+            )
         elif flag.lower() == 'pinhole':
-            args = (self.pinhole_density,
-                    self.pinhole_material,
-                    energy_inp,
-                    )
+            args = (
+                self.pinhole_density,
+                self.pinhole_material,
+                energy_inp,
+            )
         abs_length = calculate_linear_absorption_length(*args)
         if abs_length.shape[0] == 1:
             return abs_length[0]
@@ -285,8 +290,10 @@ def sample_diameter(self):
         if self.sample_geometry == 'cylinder':
             return self._sample_thickness
         else:
-            msg = (f'sample geometry does not have diameter '
-                   f'associated with it.')
+            msg = (
+                'sample geometry does not have a diameter '
+                'associated with it.'
+            )
             print(msg)
             return
 
diff --git a/hexrd/hedm/ipfcolor/colorspace.py b/hexrd/hedm/ipfcolor/colorspace.py
index 34fa59408..b48a165c6 100644
--- a/hexrd/hedm/ipfcolor/colorspace.py
+++ b/hexrd/hedm/ipfcolor/colorspace.py
@@ -46,13 +46,15 @@ def hsl2rgb(hsl):
     different components
     '''
     hsl = np.atleast_2d(hsl)
-    hsl[np.abs(hsl) < eps] = 0.
-    hsl[np.abs(hsl - np.ones(hsl.shape)) < eps] = 1.
+    hsl[np.abs(hsl) < eps] = 0.0
+    hsl[np.abs(hsl - np.ones(hsl.shape)) < eps] = 1.0
 
-    if( (hsl.min() < 0.) or (hsl.max() > 1.)):
-        raise RuntimeError("value of not in range [0,1]. normalizing before conversion")
+    if (hsl.min() < 0.0) or (hsl.max() > 1.0):
+        raise RuntimeError(
+            "hsl values not in range [0, 1]; normalize before conversion"
+        )
 
-    if(hsl.ndim != 2):
+    if hsl.ndim != 2:
         raise RuntimeError("hsl_rgb: shape of hsl array is invalid.")
 
     rgb = np.zeros(hsl.shape)
@@ -63,19 +65,19 @@ def hsl2rgb(hsl):
     S = hsl[:, 1]
     L = hsl[:, 2]
 
-    C = (1.0 - np.abs(2.*L - 1.)) * S
-    X = (1.0 - np.abs(np.mod(6*H, 2) - 1.0)) * C
-    m = L - C/2.
+    C = (1.0 - np.abs(2.0 * L - 1.0)) * S
+    X = (1.0 - np.abs(np.mod(6 * H, 2) - 1.0)) * C
+    m = L - C / 2.0
 
-    case = np.floor(6.*H).astype(np.int32)
+    case = np.floor(6.0 * H).astype(np.int32)
 
     '''
     depending on the range of H, the rgb definition
    changes. see https://www.rapidtables.com/convert/color/hsl-to-rgb.html
     for the detailed formula
     '''
-    Cp = np.atleast_2d(C+m).T
-    Xp = np.atleast_2d(X+m).T
+    Cp = np.atleast_2d(C + m).T
+    Xp = np.atleast_2d(X + m).T
     Zp = np.atleast_2d(m).T
 
     mask = np.logical_or((case == 0), (case == 6))
@@ -99,8 +101,8 @@ def hsl2rgb(hsl):
     '''
     catch all cases where rgb values are out of [0,1] bounds
     '''
-    rgb[rgb < 0.] = 0.
-    rgb[rgb > 1.] = 1.
+ rgb[rgb < 0.0] = 0.0 + rgb[rgb > 1.0] = 1.0 return rgb @@ -117,7 +119,7 @@ def rgb2hsl(rgb): different components ''' rgb = np.atleast_2d(rgb) - if(rgb.ndim != 2): + if rgb.ndim != 2: raise RuntimeError("hsl_rgb: shape of hsl array is invalid.") hsl = np.zeros(rgb.shape) @@ -139,27 +141,31 @@ def rgb2hsl(rgb): rmask = rgb[:, 0] == Cmax rmask = np.logical_and(rmask, np.logical_not(zmask)) - hsl[rmask, 0] = np.mod( - (rgb[rmask, 1] - rgb[rmask, 2])/delta[rmask], 6) / 6. + hsl[rmask, 0] = ( + np.mod((rgb[rmask, 1] - rgb[rmask, 2]) / delta[rmask], 6) / 6.0 + ) gmask = rgb[:, 1] == Cmax gmask = np.logical_and(gmask, np.logical_not(zmask)) - hsl[gmask, 0] = np.mod( - (rgb[gmask, 2] - rgb[gmask, 0])/delta[gmask] + 2., 6) / 6. + hsl[gmask, 0] = ( + np.mod((rgb[gmask, 2] - rgb[gmask, 0]) / delta[gmask] + 2.0, 6) / 6.0 + ) bmask = rgb[:, 2] == Cmax bmask = np.logical_and(bmask, np.logical_not(zmask)) - hsl[bmask, 0] = np.mod( - (rgb[bmask, 0] - rgb[bmask, 1])/delta[bmask] + 4., 6) / 6. + hsl[bmask, 0] = ( + np.mod((rgb[bmask, 0] - rgb[bmask, 1]) / delta[bmask] + 4.0, 6) / 6.0 + ) - hsl[np.logical_not(zmask), 1] = delta[np.logical_not( - zmask)] / (1. - np.abs(2 * L[np.logical_not(zmask)] - 1.)) + hsl[np.logical_not(zmask), 1] = delta[np.logical_not(zmask)] / ( + 1.0 - np.abs(2 * L[np.logical_not(zmask)] - 1.0) + ) - hsl[:,2] = L + hsl[:, 2] = L ''' catch cases where hsl is out of [0,1] bounds ''' - hsl[hsl < 0.] = 0. - hsl[hsl > 1.] = 1. + hsl[hsl < 0.0] = 0.0 + hsl[hsl > 1.0] = 1.0 return hsl diff --git a/hexrd/hedm/ipfcolor/sphere_sector.py b/hexrd/hedm/ipfcolor/sphere_sector.py index 99d18e938..c1ab718e8 100644 --- a/hexrd/hedm/ipfcolor/sphere_sector.py +++ b/hexrd/hedm/ipfcolor/sphere_sector.py @@ -62,6 +62,7 @@ there are no triangles for the triclininc cases and needs to be handles differently ''' +# fmt: off pg2vertex = { 'c1': [3, np.array([[0., 0., 1.], [1., 0., 0.], @@ -281,6 +282,7 @@ np.atleast_2d(np.array([0, 1, 2])).T, 'upper'] } +# fmt: on class sector: @@ -290,7 +292,7 @@ class sector: @DETAIL this class is used to store spherical patch for a given point group. the class also has methods to compute the color of a direction by computing the hue, saturation and lightness values in [0,1]. these - values can be converted to rgb for display with the well known + values can be converted to rgb for display with the well known conversion formula. @@ -306,7 +308,7 @@ def __init__(self, pgsym, lauesym, supergroupsym, supergrouplauesym): 11/12/2020 SS 1.1 added lauesym as additional input parameter 11/23/2020 SS 1.2 added supergroupsym as additional parameter - @detail: this routine initializes the data needed for reducing a + @detail: this routine initializes the data needed for reducing a direction to the stereographic fundamental zone (standard stereographic triangle) for the pointgroup/lauegroup symmetry of the crystal. 
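For reference alongside the vectorized colorspace hunks above, here is a scalar HSL-to-RGB conversion following the same rapidtables formula. This is a sanity-check sketch, not hexrd code; all names are local to the example:

import numpy as np


def hsl2rgb_scalar(h, s, l):
    # h, s, l all in [0, 1]; returns (r, g, b) in [0, 1]
    c = (1.0 - abs(2.0 * l - 1.0)) * s          # chroma
    x = c * (1.0 - abs((6.0 * h) % 2.0 - 1.0))  # second-largest component
    m = l - c / 2.0                             # lightness offset
    # sector 0..5 of the hue wheel; % 6 folds h == 1.0 back onto sector 0,
    # matching the case == 0 or case == 6 mask in the vectorized version
    sector = int(np.floor(6.0 * h)) % 6
    r, g, b = [(c, x, 0.0), (x, c, 0.0), (0.0, c, x),
               (0.0, x, c), (x, 0.0, c), (c, 0.0, x)][sector]
    return r + m, g + m, b + m


print(hsl2rgb_scalar(0.0, 1.0, 0.5))        # pure red -> (1.0, 0.0, 0.0)
print(hsl2rgb_scalar(1.0 / 3.0, 1.0, 0.5))  # pure green -> (0.0, 1.0, 0.0)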
@@ -341,37 +343,37 @@ def __init__(self, pgsym, lauesym, supergroupsym, supergrouplauesym):
         self.connectivity['superlaue'] = data[2]
         self.hemisphere['superlaue'] = data[3]
 
-        if(self.ntriangle['pg'] != 0):
+        if self.ntriangle['pg'] != 0:
             # compute the barycenter or the centroid of point group
             b = np.mean(self.vertices['pg'], axis=1)
-            b = b/np.linalg.norm(b)
+            b = b / np.linalg.norm(b)
             self.barycenter['pg'] = b
         else:
-            self.barycenter['pg'] = np.array([0., 0., 1.])
+            self.barycenter['pg'] = np.array([0.0, 0.0, 1.0])
 
-        if(self.ntriangle['laue'] != 0):
+        if self.ntriangle['laue'] != 0:
             # compute the barycenter or the centroid of the laue group triangle
             b = np.mean(self.vertices['laue'], axis=1)
-            b = b/np.linalg.norm(b)
+            b = b / np.linalg.norm(b)
             self.barycenter['laue'] = b
         else:
-            self.barycenter['laue'] = np.array([0., 0., 1.])
+            self.barycenter['laue'] = np.array([0.0, 0.0, 1.0])
 
-        if(self.ntriangle['super'] != 0):
+        if self.ntriangle['super'] != 0:
             # compute the barycenter or the centroid of the supergroup group triangle
             b = np.mean(self.vertices['super'], axis=1)
-            b = b/np.linalg.norm(b)
+            b = b / np.linalg.norm(b)
             self.barycenter['super'] = b
         else:
-            self.barycenter['super'] = np.array([0., 0., 1.])
+            self.barycenter['super'] = np.array([0.0, 0.0, 1.0])
 
-        if(self.ntriangle['superlaue'] != 0):
+        if self.ntriangle['superlaue'] != 0:
             # compute the barycenter or the centroid of the supergroup group triangle
             b = np.mean(self.vertices['superlaue'], axis=1)
-            b = b/np.linalg.norm(b)
+            b = b / np.linalg.norm(b)
             self.barycenter['superlaue'] = b
         else:
-            self.barycenter['superlaue'] = np.array([0., 0., 1.])
+            self.barycenter['superlaue'] = np.array([0.0, 0.0, 1.0])
 
     def check_norm(self, dir3):
         '''
@@ -384,43 +386,49 @@ def check_norm(self, dir3):
         n = np.linalg.norm(dir3, axis=1)
         mask = n > eps
         n = n[mask]
-        dir3[mask, :] = dir3[mask, :]/np.tile(n, [3, 1]).T
+        dir3[mask, :] = dir3[mask, :] / np.tile(n, [3, 1]).T
 
     def check_hemisphere(self):
 
         zcoord = np.array([self.vx[2], self.vy[2], self.vz[2]])
-        if(np.logical_or(np.all(zcoord >= 0.), np.all(zcoord <= 0.))):
+        if np.logical_or(np.all(zcoord >= 0.0), np.all(zcoord <= 0.0)):
             pass
         else:
-            raise RuntimeError("sphere_sector: the vertices of the stereographic \
-                triangle are not in the same hemisphere")
+            raise RuntimeError(
+                "sphere_sector: the vertices of the stereographic "
+                "triangle are not in the same hemisphere"
+            )
 
     def inside_sphericalpatch(self, vertex, dir3):
         '''
-        @AUTHOR Saransh Singh, Lawrence Livermore National Lab, saransh1@llnl.gov
-        @DATE 12/09/2020 SS 1.0 original
-        @PARAM vertex vertices of the spherical triangle
-        dir3 normalized direction vectors
-        switch which group to check. acceptable arguments are 'pg', 'laue', 'supergroup'
-        and 'supergroup_laue'
-        @DETAIL check if direction is inside a spherical patch
-        the logic used as follows:
-        if determinant of [x A B], [x B C] and [x C A] are
-        all same sign, then the sphere is inside the traingle
-        formed by A, B and C
-        returns a mask with inside as True and outside as False
+        @AUTHOR Saransh Singh, Lawrence Livermore National Lab, saransh1@llnl.gov
+        @DATE 12/09/2020 SS 1.0 original
+        @PARAM vertex vertices of the spherical triangle
+        dir3 normalized direction vectors
+        switch which group to check. acceptable arguments are 'pg', 'laue',
+        'supergroup' and 'supergroup_laue'
+        @DETAIL check if direction is inside a spherical patch
+        the logic used is as follows:
+        if the determinants of [x A B], [x B C] and [x C A] all
+        have the same sign, then the point x is inside the triangle
+        formed by A, B and C
+        returns a mask with inside as True and outside as False
         '''
         nn = vertex.shape[1]
         mask = []
-        d = np.zeros([nn, ])
+        d = np.zeros(
+            [
+                nn,
+            ]
+        )
 
        for x in dir3:
             x2 = np.atleast_2d(x).T
             for ii in range(nn):
                 A = np.atleast_2d(vertex[:, np.mod(ii, nn)]).T
-                B = np.atleast_2d(vertex[:, np.mod(ii+1, nn)]).T
+                B = np.atleast_2d(vertex[:, np.mod(ii + 1, nn)]).T
 
                 d[ii] = np.linalg.det(np.hstack((x2, A, B)))
 
                 '''
@@ -428,11 +436,11 @@ def inside_sphericalpatch(self, vertex, dir3):
                 determinant can be very small positive or negative
                 number
                 '''
-                if(np.abs(d[ii]) < eps):
-                    d[ii] = 0.
+                if np.abs(d[ii]) < eps:
+                    d[ii] = 0.0
 
             ss = np.unique(np.sign(d))
-            if(np.all(ss >= 0.)):
+            if np.all(ss >= 0.0):
                 mask.append(True)
             else:
                 mask.append(False)
@@ -455,7 +463,7 @@ def fillet_region(self, dir3, switch):
         returns 1 if its barycenter, vertex 1 and vertex 2
         returns 2 if its barycenter, vertex 2 and vertex 3
 
-        it is implicitly assumed that the point lies inside the
+        it is implicitly assumed that the point lies inside the
         spherical triangle. behavior is unknown if it is not
         the case
 
@@ -463,40 +471,45 @@ def fillet_region(self, dir3, switch):
         '''
         vertex = np.copy(self.vertices[switch])
-        fregion = -np.ones([dir3.shape[0], ]).astype(np.int32)
+        fregion = -np.ones(
+            [
+                dir3.shape[0],
+            ]
+        ).astype(np.int32)
         bar_cen = self.barycenter[switch]
 
         # if barycenter matches one of the vertices, then remove that vertex
-        mask = np.all(bar_cen == vertex.T,axis=1)
-        vertex = vertex[:,~mask]
+        mask = np.all(bar_cen == vertex.T, axis=1)
+        vertex = vertex[:, ~mask]
 
         nn = vertex.shape[1]
         f = np.zeros([nn, 3, 3])
 
         for i in range(nn):
             idx1 = np.mod(i, nn)
-            idx2 = np.mod(i+1, nn)
+            idx2 = np.mod(i + 1, nn)
             A = np.atleast_2d(vertex[:, idx1]).T
             B = np.atleast_2d(vertex[:, idx2]).T
 
             f[i, :, :] = np.hstack((np.atleast_2d(bar_cen).T, A, B))
 
         for i in range(nn):
-            inside = np.logical_and(self.inside_sphericalpatch(
-                np.squeeze(f[i, :, :]), dir3),
-                fregion == -1)
+            inside = np.logical_and(
+                self.inside_sphericalpatch(np.squeeze(f[i, :, :]), dir3),
+                fregion == -1,
+            )
             fregion[inside] = i
 
         return fregion
 
     def point_on_boundary(self, dir3, switch):
         '''
-        @AUTHOR Saransh Singh, Lawrence Livermore National Lab, saransh1@llnl.gov
-        @DATE 12/09/2020 SS 1.0 original
-        @PARAM dir3 direction in fundamental sector. size is nx3
-        switch color using pg or laue group
-        @DETAIL this function figures out the equivalent point on the boundary
-        given that the point is inside the spherical triangle
+        @AUTHOR Saransh Singh, Lawrence Livermore National Lab, saransh1@llnl.gov
+        @DATE 12/09/2020 SS 1.0 original
+        @PARAM dir3 direction in fundamental sector.
size is nx3 + switch color using pg or laue group + @DETAIL this function figures out the equivalent point on the boundary + given that the point is inside the spherical triangle ''' vertex = self.vertices[switch] fregion = self.fillet_region(dir3, switch) @@ -509,18 +522,18 @@ def point_on_boundary(self, dir3, switch): d = dir3[i, :] A = vertex[:, np.mod(f, nn)] - B = vertex[:, np.mod(f+1, nn)] + B = vertex[:, np.mod(f + 1, nn)] nhat = np.cross(B, A) - nhat = nhat/np.linalg.norm(nhat) + nhat = nhat / np.linalg.norm(nhat) lam = np.dot(nhat, d) - deldir = lam*nhat + deldir = lam * nhat dp = d - deldir ndp = np.linalg.norm(dp) - if(ndp > 0.): - dp = dp/ndp + if ndp > 0.0: + dp = dp / ndp else: dp = d @@ -535,16 +548,20 @@ def calculate_rho(self, dir3, switch): @PARAM dir3 direction in fundamental sector. size is nx3 switch color using pg or laue group @DETAIL this function is used to calculate the azimuthal angle - of a bunch of directions. it is assumed all directions + of a bunch of directions. it is assumed all directions are indide the SST ''' vertex = self.vertices[switch] bar_cen = self.barycenter[switch] - rho = np.zeros([dir3.shape[0], ]) + rho = np.zeros( + [ + dir3.shape[0], + ] + ) # handle triclinic and monoclinic cases a little differently - if(np.all(bar_cen == np.array([0., 0., 1.]))): - rho = np.arctan2(dir3[:,1], dir3[:,0]) + np.pi + if np.all(bar_cen == np.array([0.0, 0.0, 1.0])): + rho = np.arctan2(dir3[:, 1], dir3[:, 0]) + np.pi else: dir3_b, fregion = self.point_on_boundary(dir3, switch) @@ -555,25 +572,27 @@ def calculate_rho(self, dir3, switch): d = dir3_b[i, :] A = vertex[:, np.mod(f, nn)] - B = vertex[:, np.mod(f+1, nn)] + B = vertex[:, np.mod(f + 1, nn)] # angle between A and B omega = np.dot(A, B) - if(np.abs(omega) > 1.): + if np.abs(omega) > 1.0: omega = np.sign(omega) # angle between point and A omegap = np.dot(A, d) - if(np.abs(omegap) > 1.): + if np.abs(omegap) > 1.0: omegap = np.sign(omega) omega = np.arccos(omega) omegap = np.arccos(omegap) - if(omegap != 0.): - rho[i] = 2*np.pi*omegap/omega/nn + f*2.*np.pi/nn + if omegap != 0.0: + rho[i] = ( + 2 * np.pi * omegap / omega / nn + f * 2.0 * np.pi / nn + ) else: - rho[i] = f*2.*np.pi/nn + rho[i] = f * 2.0 * np.pi / nn return rho @@ -583,26 +602,30 @@ def calculate_theta(self, dir3, switch): @DATE 12/09/2020 SS 1.0 original @PARAM dir3 direction in fundamental sector. size is nx3 switch color using pg or laue group - @DETAIL this function is used to calculate the polar angle + @DETAIL this function is used to calculate the polar angle of direction vectors. it is assumed that the direction vector lies inside the SST ''' vertex = self.vertices[switch] dir3_b, fregion = self.point_on_boundary(dir3, switch) - theta = np.zeros([dir3.shape[0], ]) + theta = np.zeros( + [ + dir3.shape[0], + ] + ) bar_cen = self.barycenter[switch] # handle triclinic and monoclinic cases a little differently - if(np.all(bar_cen == np.array([0., 0., 1.]))): - dp = np.dot(np.array([0., 0., 1.]), dir3.T) + if np.all(bar_cen == np.array([0.0, 0.0, 1.0])): + dp = np.dot(np.array([0.0, 0.0, 1.0]), dir3.T) # catch some cases where dot product is 1+/-epsilon - mask = np.abs(dp) > 1. 
+ mask = np.abs(dp) > 1.0 dp[mask] = np.sign(dp[mask]) theta = np.arccos(dp) else: - # first calculate the angle the point makes with the barycenter + # first calculate the angle the point makes with the barycenter omega = np.dot(bar_cen, dir3.T) mask = np.abs(omega) > 1.0 omega[mask] = np.sign(omega[mask]) @@ -615,25 +638,28 @@ def calculate_theta(self, dir3, switch): omega = np.arccos(omega) omegap = np.arccos(omegap) - zmask = omegap == 0. + zmask = omegap == 0.0 - theta[~zmask] = np.pi*omega[~zmask]/omegap[~zmask]/2.0 + theta[~zmask] = np.pi * omega[~zmask] / omegap[~zmask] / 2.0 theta[zmask] = 0.0 return theta def hue_speed(self, rho): ''' - @AUTHOR Saransh Singh, Lawrence Livermore National Lab, saransh1@llnl.gov - @DATE 12/09/2020 SS 1.0 original - @PARAM rho azimuthal angle - @DETAIL calculate the hue speed for a vector of azimuthal angles - this is utilized in increasing the area of the red, blue and - green regions + @AUTHOR Saransh Singh, Lawrence Livermore National Lab, saransh1@llnl.gov + @DATE 12/09/2020 SS 1.0 original + @PARAM rho azimuthal angle + @DETAIL calculate the hue speed for a vector of azimuthal angles + this is utilized in increasing the area of the red, blue and + green regions ''' rho = rho - np.pi - v = 0.5 + np.exp(-(4./7.)*rho**2) + \ - np.exp(-(4./7.)*(rho - 2.*np.pi/3.)**2) + \ - np.exp(-(4./7.)*(rho + 2.*np.pi/3.)**2) + v = ( + 0.5 + + np.exp(-(4.0 / 7.0) * rho**2) + + np.exp(-(4.0 / 7.0) * (rho - 2.0 * np.pi / 3.0) ** 2) + + np.exp(-(4.0 / 7.0) * (rho + 2.0 * np.pi / 3.0) ** 2) + ) return v @@ -660,16 +686,16 @@ def calc_hue(self, dir3, switch): ''' rho = self.calculate_rho(dir3, switch) - r = np.linspace(0., 2*np.pi, 1000) + r = np.linspace(0.0, 2 * np.pi, 1000) v = self.hue_speed(r) cons = np.trapz(v, r) h = np.zeros(rho.shape) for i in range(rho.shape[0]): - r = np.linspace(0., rho[i], 1000) + r = np.linspace(0.0, rho[i], 1000) v = self.hue_speed(r) - h[i] = np.trapz(v, r)/cons + h[i] = np.trapz(v, r) / cons return h @@ -689,7 +715,7 @@ def calc_saturation(self, l): @DETAIL calculate saturation. this is always set to 1. ''' - s = 1. - 2.*0.25*np.abs(l - 0.5) + s = 1.0 - 2.0 * 0.25 * np.abs(l - 0.5) return s def calc_lightness(self, dir3, mask, switch): @@ -714,10 +740,10 @@ def calc_lightness(self, dir3, mask, switch): ''' theta = np.pi - self.calculate_theta(dir3, switch) - f1 = theta/np.pi - f2 = np.sin(theta/2.)**2 - l = 0.35*f1 + 0.65*f2 - l[~mask] = 1. - l[~mask] + f1 = theta / np.pi + f2 = np.sin(theta / 2.0) ** 2 + l = 0.35 * f1 + 0.65 * f2 + l[~mask] = 1.0 - l[~mask] return l @@ -729,7 +755,7 @@ def get_color(self, dir3, mask, switch): 11/23/2020 SS 1.2 added mask argument which tell the directions for which the supergroup reductions dont match the point or laue group reductions. mask has size dir3.shape[0] - + @PARAM dir3 direction in fundamental sector. 
behavior is undefined if mask True if symmetry reduction of dir3 using point group does not match the super group and False otherwise diff --git a/hexrd/hedm/material/crystallography.py b/hexrd/hedm/material/crystallography.py index 48eb857e8..2d2dce52f 100644 --- a/hexrd/hedm/material/crystallography.py +++ b/hexrd/hedm/material/crystallography.py @@ -39,7 +39,13 @@ from hexrd.core.deprecation import deprecated from hexrd.core import constants from hexrd.core.matrixutil import unitVector -from hexrd.core.rotations import rotMatOfExpMap, mapAngle, applySym, ltypeOfLaueGroup, quatOfLaueGroup +from hexrd.core.rotations import ( + rotMatOfExpMap, + mapAngle, + applySym, + ltypeOfLaueGroup, + quatOfLaueGroup, +) from hexrd.core.transforms import xfcapi from hexrd.core import valunits from hexrd.core.valunits import toFloat @@ -160,6 +166,7 @@ def processWavelength(arg: Union[valunits.valWUnit, float]) -> float: 'wavelength', 'length', constants.keVToAngstrom(arg), 'angstrom' ).getVal(dUnit) + def latticeParameters(lvec): """ Generates direct and reciprocal lattice vector components in a @@ -187,6 +194,7 @@ def latticeParameters(lvec): return [a, b, c, alfa, beta, gama] + def latticePlanes( hkls: np.ndarray, lparms: np.ndarray, @@ -563,6 +571,7 @@ def latticeVectors( 'rparms': rparms, } + def hexagonalIndicesFromRhombohedral(hkl): """ converts rhombohedral hkl to hexagonal indices @@ -910,7 +919,7 @@ def exclusions(self, new_exclusions: Optional[np.ndarray]) -> None: elif len(exclusions.shape) == 2: # treat exclusions as ranges of indices for r in exclusions: - excl[self.tThSort[r[0]:r[1]]] = True + excl[self.tThSort[r[0] : r[1]]] = True else: raise RuntimeError( f'Unclear behavior for shape {exclusions.shape}' @@ -1845,8 +1854,10 @@ def get_exclusions(self): def set_exclusions(self, exclusions): self.exclusions = exclusions - @deprecated(new_func="rotations.ltypeOfLaueGroup(self.laueGroup)", - removal_date="2025-08-01") + @deprecated( + new_func="rotations.ltypeOfLaueGroup(self.laueGroup)", + removal_date="2025-08-01", + ) def getLatticeType(self): return ltypeOfLaueGroup(self.laueGroup) diff --git a/hexrd/hedm/material/unitcell.py b/hexrd/hedm/material/unitcell.py index 3b7c1e594..117f5b0cf 100644 --- a/hexrd/hedm/material/unitcell.py +++ b/hexrd/hedm/material/unitcell.py @@ -39,7 +39,7 @@ def _calcstar(v, sym, mat): for vec in vsym: vv = vp - vec dist = _calclength(vv, mat) - if dist < 1E-3: + if dist < 1e-3: isnew = False break if isnew: @@ -50,7 +50,6 @@ def _calcstar(v, sym, mat): class unitcell: - ''' >> @AUTHOR: Saransh Singh, Lawrence Livermore National Lab, saransh1@llnl.gov >> @DATE: 10/09/2018 SS 1.0 original @@ -61,11 +60,18 @@ class unitcell: # initialize the unitcell class # need lattice parameters and space group data from HDF5 file - def __init__(self, lp, sgnum, - atomtypes, charge, - atominfo, - U, dmin, beamenergy, - sgsetting=0): + def __init__( + self, + lp, + sgnum, + atomtypes, + charge, + atominfo, + U, + dmin, + beamenergy, + sgsetting=0, + ): self._tstart = time.time() self.pref = 0.4178214 @@ -115,10 +121,12 @@ def GetPgLg(self): def CalcWavelength(self): # wavelength in nm - self.wavelength = constants.cPlanck * \ - constants.cLight / \ - constants.cCharge / \ - self.voltage + self.wavelength = ( + constants.cPlanck + * constants.cLight + / constants.cCharge + / self.voltage + ) self.wavelength *= 1e9 def calcBetaij(self): @@ -126,11 +134,11 @@ def calcBetaij(self): self.betaij = np.zeros([3, 3, self.atom_ntype]) for i in range(self.U.shape[0]): U = self.U[i, :] - 
self.betaij[:, :, i] = np.array([[U[0], U[3], U[4]], - [U[3], U[1], U[5]], - [U[4], U[5], U[2]]]) + self.betaij[:, :, i] = np.array( + [[U[0], U[3], U[4]], [U[3], U[1], U[5]], [U[4], U[5], U[2]]] + ) - self.betaij[:, :, i] *= 2. * np.pi**2 * self._aij + self.betaij[:, :, i] *= 2.0 * np.pi**2 * self._aij def calcmatrices(self): @@ -153,9 +161,13 @@ def calcmatrices(self): ''' direct metric tensor ''' - self._dmt = np.array([[a**2, a*b*cg, a*c*cb], - [a*b*cg, b**2, b*c*ca], - [a*c*cb, b*c*ca, c**2]]) + self._dmt = np.array( + [ + [a**2, a * b * cg, a * c * cb], + [a * b * cg, b**2, b * c * ca], + [a * c * cb, b * c * ca, c**2], + ] + ) self._vol = np.sqrt(np.linalg.det(self.dmt)) if self.vol < 1e-5: @@ -169,30 +181,44 @@ def calcmatrices(self): ''' direct structure matrix ''' - self._dsm = np.array([[a, b*cg, c*cb], - [0., b*sg, -c*(cb*cg - ca)/sg], - [0., 0., self.vol/(a*b*sg)]]) + self._dsm = np.array( + [ + [a, b * cg, c * cb], + [0.0, b * sg, -c * (cb * cg - ca) / sg], + [0.0, 0.0, self.vol / (a * b * sg)], + ] + ) - self._dsm[np.abs(self._dsm) < eps] = 0. + self._dsm[np.abs(self._dsm) < eps] = 0.0 ''' reciprocal structure matrix ''' - self._rsm = np.array([[1./a, 0., 0.], - [-1./(a*tg), 1./(b*sg), 0.], - [b*c*(cg*ca - cb)/(self.vol*sg), - a*c*(cb*cg - ca)/(self.vol*sg), - a*b*sg/self.vol]]) + self._rsm = np.array( + [ + [1.0 / a, 0.0, 0.0], + [-1.0 / (a * tg), 1.0 / (b * sg), 0.0], + [ + b * c * (cg * ca - cb) / (self.vol * sg), + a * c * (cb * cg - ca) / (self.vol * sg), + a * b * sg / self.vol, + ], + ] + ) - self._rsm[np.abs(self._rsm) < eps] = 0. + self._rsm[np.abs(self._rsm) < eps] = 0.0 ast = self.CalcLength([1, 0, 0], 'r') bst = self.CalcLength([0, 1, 0], 'r') cst = self.CalcLength([0, 0, 1], 'r') - self._aij = np.array([[ast**2, ast*bst, ast*cst], - [bst*ast, bst**2, bst*cst], - [cst*ast, cst*bst, cst**2]]) + self._aij = np.array( + [ + [ast**2, ast * bst, ast * cst], + [bst * ast, bst**2, bst * cst], + [cst * ast, cst * bst, cst**2], + ] + ) ''' transform between any crystal space to any other space. 
choices are 'd' (direct), 'r' (reciprocal) and 'c' (cartesian)''' @@ -207,7 +233,8 @@ def TransSpace(self, v_in, inspace, outspace): v_out = np.dot(self.dsm, v_in) else: raise ValueError( - 'inspace in "d" but outspace can\'t be identified') + 'inspace in "d" but outspace can\'t be identified' + ) elif inspace == 'r': if outspace == 'd': @@ -216,7 +243,8 @@ def TransSpace(self, v_in, inspace, outspace): v_out = np.dot(self.rsm, v_in) else: raise ValueError( - 'inspace in "r" but outspace can\'t be identified') + 'inspace in "r" but outspace can\'t be identified' + ) elif inspace == 'c': if outspace == 'r': @@ -225,7 +253,8 @@ def TransSpace(self, v_in, inspace, outspace): v_out = np.dot(v_in, self.rsm) else: raise ValueError( - 'inspace in "c" but outspace can\'t be identified') + 'inspace in "c" but outspace can\'t be identified' + ) else: raise ValueError('incorrect inspace argument') @@ -268,7 +297,7 @@ def CalcLength(self, u, space): def NormVec(self, u, space): ulen = self.CalcLength(u, space) - return u/ulen + return u / ulen ''' calculate angle between two vectors in any space''' @@ -277,7 +306,7 @@ def CalcAngle(self, u, v, space): ulen = self.CalcLength(u, space) vlen = self.CalcLength(v, space) - dot = self.CalcDot(u, v, space)/ulen/vlen + dot = self.CalcDot(u, v, space) / ulen / vlen if np.isclose(np.abs(dot), 1.0): dot = np.sign(dot) angle = np.arccos(dot) @@ -304,9 +333,13 @@ def CalcCross(self, p, q, inspace, outspace, vol_divide=False): else: vol = 1.0 - pxq = np.array([p[1]*q[2]-p[2]*q[1], - p[2]*q[0]-p[0]*q[2], - p[0]*q[1]-p[1]*q[0]]) + pxq = np.array( + [ + p[1] * q[2] - p[2] * q[1], + p[2] * q[0] - p[0] * q[2], + p[0] * q[1] - p[1] * q[0], + ] + ) if inspace == 'd': ''' @@ -323,7 +356,8 @@ def CalcCross(self, p, q, inspace, outspace, vol_divide=False): pxq = self.TransSpace(pxq, 'r', 'c') else: raise ValueError( - 'inspace is ''d'' but outspace is unidentified') + 'inspace is ' 'd' ' but outspace is unidentified' + ) elif inspace == 'r': ''' @@ -339,7 +373,8 @@ def CalcCross(self, p, q, inspace, outspace, vol_divide=False): pxq = self.TransSpace(pxq, 'd', 'c') else: raise ValueError( - 'inspace is ''r'' but outspace is unidentified') + 'inspace is ' 'r' ' but outspace is unidentified' + ) elif inspace == 'c': ''' @@ -355,7 +390,8 @@ def CalcCross(self, p, q, inspace, outspace, vol_divide=False): pass else: raise ValueError( - 'inspace is ''c'' but outspace is unidentified') + 'inspace is ' 'c' ' but outspace is unidentified' + ) else: raise ValueError('inspace is unidentified') @@ -398,16 +434,17 @@ def GenerateCartesianPGSym(self): self.SYM_PG_c.append(np.dot(self.dsm, np.dot(sop, self.rsm.T))) self.SYM_PG_c = np.array(self.SYM_PG_c) - self.SYM_PG_c[np.abs(self.SYM_PG_c) < eps] = 0. + self.SYM_PG_c[np.abs(self.SYM_PG_c) < eps] = 0.0 if self._pointGroup == self._laueGroup: self.SYM_PG_c_laue = self.SYM_PG_c else: for sop in self.SYM_PG_d_laue: self.SYM_PG_c_laue.append( - np.dot(self.dsm, np.dot(sop, self.rsm.T))) + np.dot(self.dsm, np.dot(sop, self.rsm.T)) + ) self.SYM_PG_c_laue = np.array(self.SYM_PG_c_laue) - self.SYM_PG_c_laue[np.abs(self.SYM_PG_c_laue) < eps] = 0. 
+ self.SYM_PG_c_laue[np.abs(self.SYM_PG_c_laue) < eps] = 0.0 ''' use the point group symmetry of the supergroup @@ -440,18 +477,21 @@ def GenerateCartesianPGSym(self): for sop in sym_supergroup: self.SYM_PG_supergroup.append( - np.dot(self.dsm, np.dot(sop, self.rsm.T))) + np.dot(self.dsm, np.dot(sop, self.rsm.T)) + ) self.SYM_PG_supergroup = np.array(self.SYM_PG_supergroup) - self.SYM_PG_supergroup[np.abs(self.SYM_PG_supergroup) < eps] = 0. + self.SYM_PG_supergroup[np.abs(self.SYM_PG_supergroup) < eps] = 0.0 for sop in sym_supergroup_laue: self.SYM_PG_supergroup_laue.append( - np.dot(self.dsm, np.dot(sop, self.rsm.T))) + np.dot(self.dsm, np.dot(sop, self.rsm.T)) + ) self.SYM_PG_supergroup_laue = np.array(self.SYM_PG_supergroup_laue) - self.SYM_PG_supergroup_laue[np.abs( - self.SYM_PG_supergroup_laue) < eps] = 0. + self.SYM_PG_supergroup_laue[ + np.abs(self.SYM_PG_supergroup_laue) < eps + ] = 0.0 ''' the standard setting for the monoclinic system has the b-axis aligned @@ -465,7 +505,7 @@ def GenerateCartesianPGSym(self): ''' if self.latticeType == 'monoclinic': - om = np.array([[1., 0., 0.], [0., 0., 1.], [0., -1., 0.]]) + om = np.array([[1.0, 0.0, 0.0], [0.0, 0.0, 1.0], [0.0, -1.0, 0.0]]) for i, s in enumerate(self.SYM_PG_c): ss = np.dot(om, np.dot(s, om.T)) @@ -482,7 +522,7 @@ def GenerateCartesianPGSym(self): SS 12/10/2020 ''' if self._pointGroup == 'c1': - om = np.array([[1., 0., 0.], [0., 0., 1.], [0., -1., 0.]]) + om = np.array([[1.0, 0.0, 0.0], [0.0, 0.0, 1.0], [0.0, -1.0, 0.0]]) for i, s in enumerate(self.SYM_PG_supergroup): ss = np.dot(om, np.dot(s, om.T)) @@ -511,7 +551,7 @@ def CalcOrbit(self, v, reduceToUC=True): raise RuntimeError("fractional coordinate in not 3-d") r = v # using wigner-sietz notation - r = np.hstack((r, 1.)) + r = np.hstack((r, 1.0)) asym_pos = np.broadcast_to(r[0:3], [1, 3]) @@ -524,15 +564,15 @@ def CalcOrbit(self, v, reduceToUC=True): # reduce to fundamental unitcell with fractional # coordinates between 0-1 rr = np.modf(rr)[0] - rr[rr < 0.] += 1. - rr[np.abs(rr) < 1.0E-6] = 0. 
+ rr[rr < 0.0] += 1.0 + rr[np.abs(rr) < 1.0e-6] = 0.0 # check if this is new isnew = True for j in range(n): v = rr - asym_pos[j] dist = self.CalcLength(v, 'd') - if dist < 1E-3: + if dist < 1e-3: isnew = False break @@ -593,9 +633,7 @@ def CalcPositions(self): self.numat = np.array(numat) self.asym_pos = asym_pos - def remove_duplicate_atoms(self, - atom_pos=None, - tol=1e-3): + def remove_duplicate_atoms(self, atom_pos=None, tol=1e-3): """ @date 03/04/2021 SS 1.0 original @@ -632,12 +670,11 @@ def remove_duplicate_atoms(self, idx.append(i) else: isclose = False - for j, uniqpos in enumerate(atom_pos_fixed): + for j, uniqpos in enumerate(atom_pos_fixed): pos2 = uniqpos[0:3] occ2 = uniqpos[3] # cases with fractional occupancy on same site - if (np.all(np.isclose(pos, pos2)) and - (occ+occ2 <= 1.)): + if np.all(np.isclose(pos, pos2)) and (occ + occ2 <= 1.0): atom_pos_fixed.append(np.hstack([pos, occ])) idx.append(i) isclose = True @@ -651,7 +688,7 @@ def remove_duplicate_atoms(self, for vvv in vv: # check if distance less than tol # the factor of 10 is for A --> nm - if self.CalcLength(vvv, 'd') < tol/10.: + if self.CalcLength(vvv, 'd') < tol / 10.0: # if true then its a repeated atom isclose = True break @@ -706,11 +743,11 @@ def CalcDensity(self): occ = self.atom_pos[i, 3] # -1 due to 0 indexing in python - self.avA += numat * constants.atom_weights[atype-1] * occ + self.avA += numat * constants.atom_weights[atype - 1] * occ self.avZ += numat * atype - self.density = self.avA / (self.vol * 1.0E-21 * constants.cAvogadro) + self.density = self.avA / (self.vol * 1.0e-21 * constants.cAvogadro) av_natom = np.dot(self.numat, self.atom_pos[:, 3]) @@ -732,19 +769,25 @@ def init_max_g_index(self): def CalcMaxGIndex(self): self.init_max_g_index() - while (1.0 / self.CalcLength( - np.array([self.ih, 0, 0], - dtype=np.float64), 'r') > self.dmin): + while ( + 1.0 + / self.CalcLength(np.array([self.ih, 0, 0], dtype=np.float64), 'r') + > self.dmin + ): self.ih = self.ih + 1 - while (1.0 / self.CalcLength( - np.array([0, self.ik, 0], - dtype=np.float64), 'r') > self.dmin): + while ( + 1.0 + / self.CalcLength(np.array([0, self.ik, 0], dtype=np.float64), 'r') + > self.dmin + ): self.ik = self.ik + 1 - while (1.0 / self.CalcLength( - np.array([0, 0, self.il], - dtype=np.float64), 'r') > self.dmin): + while ( + 1.0 + / self.CalcLength(np.array([0, 0, self.il], dtype=np.float64), 'r') + > self.dmin + ): self.il = self.il + 1 def InitializeInterpTable(self): @@ -763,15 +806,16 @@ def InitializeInterpTable(self): elem = constants.ptableinverse[Z] if Z <= 92: - gid = fid.get('/'+elem) + gid = fid.get('/' + elem) data = np.array(gid.get('data')) - self.pe_cs[elem] = interp1d(data[:, WAV_ID], - data[:, MU_ID]+data[:,COH_INCOH_ID]) + self.pe_cs[elem] = interp1d( + data[:, WAV_ID], data[:, MU_ID] + data[:, COH_INCOH_ID] + ) data = data[:, [WAV_ID, REAL_F1_ID, IMAG_F2_ID]] f_anomalous_data.append(data) else: - wav = np.linspace(1.16E2, 2.86399992e-03, 189) - zs = np.ones_like(wav)*Z + wav = np.linspace(1.16e2, 2.86399992e-03, 189) + zs = np.ones_like(wav) * Z zrs = np.zeros_like(wav) data_zs = np.vstack((wav, zs, zrs)).T self.pe_cs[elem] = interp1d(wav, zrs) @@ -780,7 +824,11 @@ def InitializeInterpTable(self): n = max([x.shape[0] for x in f_anomalous_data]) self.f_anomalous_data = np.zeros([self.atom_ntype, n, 3]) self.f_anomalous_data_sizes = np.zeros( - [self.atom_ntype, ], dtype=np.int32) + [ + self.atom_ntype, + ], + dtype=np.int32, + ) for i in range(self.atom_ntype): nd = f_anomalous_data[i].shape[0] @@ 
-789,19 +837,32 @@ def InitializeInterpTable(self): def CalcXRSF(self, hkl): from hexrd.powder.wppf.xtal import _calcxrsf + ''' the 1E-2 is to convert to A^-2 since the fitting is done in those units ''' - fNT = np.zeros([self.atom_ntype, ]) - frel = np.zeros([self.atom_ntype, ]) + fNT = np.zeros( + [ + self.atom_ntype, + ] + ) + frel = np.zeros( + [ + self.atom_ntype, + ] + ) scatfac = np.zeros([self.atom_ntype, 11]) f_anomalous_data = self.f_anomalous_data hkl2d = np.atleast_2d(hkl).astype(np.float64) nref = hkl2d.shape[0] - multiplicity = np.ones([nref, ]) + multiplicity = np.ones( + [ + nref, + ] + ) w_int = 1.0 occ = self.atom_pos[:, 3] @@ -826,23 +887,25 @@ def CalcXRSF(self, hkl): frel[i] = constants.frel[elem] fNT[i] = constants.fNT[elem] - sf, sf_raw = _calcxrsf(hkl2d, - nref, - multiplicity, - w_int, - self.wavelength, - self.rmt.astype(np.float64), - self.atom_type, - self.atom_ntype, - betaij, - occ, - self.asym_pos_arr, - self.numat, - scatfac, - fNT, - frel, - f_anomalous_data, - self.f_anomalous_data_sizes) + sf, sf_raw = _calcxrsf( + hkl2d, + nref, + multiplicity, + w_int, + self.wavelength, + self.rmt.astype(np.float64), + self.atom_type, + self.atom_ntype, + betaij, + occ, + self.asym_pos_arr, + self.numat, + scatfac, + fNT, + frel, + f_anomalous_data, + self.f_anomalous_data_sizes, + ) return sf_raw @@ -853,8 +916,8 @@ def CalcXRSF(self, hkl): """ def calc_unitcell_mass(self): - a_mass = constants.atom_weights[self.atom_type-1] - return np.sum(a_mass*self.numat) + a_mass = constants.atom_weights[self.atom_type - 1] + return np.sum(a_mass * self.numat) """ calculate the number density in 1/micron^3 @@ -870,12 +933,15 @@ def calc_number_density(self): def calc_absorption_cross_sec(self): - abs_cs_total = 0. + abs_cs_total = 0.0 for i in range(self.atom_ntype): Z = self.atom_type[i] elem = constants.ptableinverse[Z] - abs_cs_total += self.pe_cs[elem](self.wavelength) *\ - self.numat[i]/np.sum(self.numat) + abs_cs_total += ( + self.pe_cs[elem](self.wavelength) + * self.numat[i] + / np.sum(self.numat) + ) return abs_cs_total """ @@ -899,7 +965,7 @@ def calc_absorption_length(self): abs_cs_total = self.calc_absorption_cross_sec() # the 1e4 factor converts wavelength from cm -> micron - self.absorption_length = 1e4/(abs_cs_total*self.density) + self.absorption_length = 1e4 / (abs_cs_total * self.density) """ calculate bragg angle for a reflection. 
returns Nan if @@ -926,7 +992,7 @@ def ChooseSymmetric(self, hkllist, InversionSymmetry=True): geqv = self.CalcStar(g, 'r', applyLaue=laue) - for r in geqv[1:, ]: + for r in geqv[1:,]: rid = np.where(np.all(r == hkllist, axis=1)) mask[rid] = False @@ -954,8 +1020,14 @@ def SortHKL(self, hkllist): glen.append(np.round(self.CalcLength(g, 'r'), 8)) # glen = np.atleast_2d(np.array(glen,dtype=float)).T - dtype = [('glen', float), ('max', int), ('sum', int), - ('h', int), ('k', int), ('l', int)] + dtype = [ + ('glen', float), + ('max', int), + ('sum', int), + ('h', int), + ('k', int), + ('l', int), + ] a = [] for i, gl in enumerate(glen): @@ -979,16 +1051,21 @@ def getHKLs(self, dmin): ignore all l < 0 ''' - hmin = -self.ih-1 + hmin = -self.ih - 1 hmax = self.ih - kmin = -self.ik-1 + kmin = -self.ik - 1 kmax = self.ik lmin = -1 lmax = self.il - hkllist = np.array([[ih, ik, il] for ih in np.arange(hmax, hmin, -1) - for ik in np.arange(kmax, kmin, -1) - for il in np.arange(lmax, lmin, -1)]) + hkllist = np.array( + [ + [ih, ik, il] + for ih in np.arange(hmax, hmin, -1) + for ik in np.arange(kmax, kmin, -1) + for il in np.arange(lmax, lmin, -1) + ] + ) hkl_allowed = spacegroup.Allowed_HKLs(self.sgnum, hkllist) @@ -1002,7 +1079,7 @@ def getHKLs(self, dmin): # ignore [0 0 0] as it is the direct beam if np.sum(np.abs(g)) != 0: - dspace = 1./self.CalcLength(g, 'r') + dspace = 1.0 / self.CalcLength(g, 'r') if dspace >= dmin: hkl_dsp.append(g) @@ -1029,6 +1106,7 @@ def getHKLs(self, dmin): self.hkls = self.SortHKL(hkl) return self.hkls + ''' set some properties for the unitcell class. only the lattice parameters, space group and asymmetric positions can change, @@ -1044,8 +1122,10 @@ def Required_C(self, C): def MakeStiffnessMatrix(self, inp_Cvals): if len(inp_Cvals) != len(_StiffnessDict[self._laueGroup][0]): x = len(_StiffnessDict[self._laueGroup][0]) - msg = (f"number of constants entered is not correct." - f" need a total of {x} independent constants.") + msg = ( + f"number of constants entered is not correct." + f" need a total of {x} independent constants." + ) raise IOError(msg) # initialize all zeros and fill the supplied values @@ -1113,15 +1193,15 @@ def inside_spheretriangle(self, conn, dir3, hemisphere, switch): number ''' if np.abs(d1) < eps: - d1 = 0. + d1 = 0.0 if np.abs(d2) < eps: - d2 = 0. + d2 = 0.0 if np.abs(d3) < eps: - d3 = 0. + d3 = 0.0 ss = np.unique(np.sign([d1, d2, d3])) if hemisphere == 'upper': - if np.all(ss >= 0.): + if np.all(ss >= 0.0): mask.append(True) else: mask.append(False) @@ -1182,11 +1262,12 @@ def reduce_dirvector(self, dir3, switch='pg'): dir3n = dir3 else: if np.all(np.linalg.norm(dir3) > eps): - dir3n = dir3/np.tile(np.linalg.norm(dir3, axis=1), [3, 1]).T + dir3n = dir3 / np.tile(np.linalg.norm(dir3, axis=1), [3, 1]).T else: raise RuntimeError( "atleast one of the input direction seems \ - to be a null vector") + to be a null vector" + ) ''' we need both the symmetry reductions for the point group and laue group @@ -1229,18 +1310,19 @@ def reduce_dirvector(self, dir3, switch='pg'): if hemisphere == 'both': mask = np.ones(dir3_sym.shape[0], dtype=bool) elif hemisphere == 'upper': - mask = dir3_sym[:, 2] >= 0. 
+ mask = dir3_sym[:, 2] >= 0.0 else: for ii in range(ntriangle): tmpmask = self.inside_spheretriangle( - connectivity[:, ii], dir3_sym, - hemisphere, switch) + connectivity[:, ii], dir3_sym, hemisphere, switch + ) mask = np.logical_or(mask, tmpmask) if np.sum(mask) > 0: if dir3_reduced.size != 0: dir3_reduced = np.vstack( - (dir3_reduced, dir3_sym[mask, :])) + (dir3_reduced, dir3_sym[mask, :]) + ) idx_red = np.hstack((idx_red, idx[mask])) else: dir3_reduced = np.copy(dir3_sym[mask, :]) @@ -1282,7 +1364,8 @@ class which correctly color the orientations for this crystal class. the ''' dir3_red = self.reduce_dirvector(dir3, switch='laue') dir3_red_supergroup = self.reduce_dirvector( - dir3, switch='superlaue') + dir3, switch='superlaue' + ) switch = 'superlaue' else: @@ -1299,10 +1382,9 @@ class which correctly color the orientations for this crystal class. the rgb = colorspace.hsl2rgb(hsl) return rgb - def color_orientations(self, - rmats, - ref_dir=np.array([0., 0., 1.]), - laueswitch=True): + def color_orientations( + self, rmats, ref_dir=np.array([0.0, 0.0, 1.0]), laueswitch=True + ): ''' @AUTHOR Saransh Singh, Lawrence Livermore National Lab, saransh1@llnl.gov @DATE 11/12/2020 SS 1.0 original @@ -1325,7 +1407,9 @@ def color_orientations(self, if rmats.ndim == 2: rmats = np.atleast_3d(rmats).T else: - assert rmats.ndim == 3, "rotations matrices need to \ + assert ( + rmats.ndim == 3 + ), "rotations matrices need to \ be nx3x3. Please check size." ''' @@ -1364,12 +1448,10 @@ def convert_lp_to_valunits(self, lp): lp_valunit = [] for i in range(6): if i < 3: - lp_valunit.append( - valWUnit('lp', 'length', lp[i], 'nm')) + lp_valunit.append(valWUnit('lp', 'length', lp[i], 'nm')) else: - lp_valunit.append( - valWUnit('lp', 'angle', lp[i], 'degrees')) + lp_valunit.append(valWUnit('lp', 'angle', lp[i], 'degrees')) return lp_valunit @@ -1379,8 +1461,7 @@ def fill_correct_lp_vals(self, lp, val, lp_name): """ index = list(_lpname).index(lp_name) lp[index] = val - lp_red = [lp[i] for i in - _rqpDict[self.latticeType][0]] + lp_red = [lp[i] for i in _rqpDict[self.latticeType][0]] lp = _rqpDict[self.latticeType][1](lp_red) lp_valunit = self.convert_lp_to_valunits(lp) return lp_valunit @@ -1391,20 +1472,18 @@ def compliance(self): if not hasattr(self, 'stiffness'): raise AttributeError('Stiffness not set on unit cell') - return np.linalg.inv(self.stiffness / 1.e3) + return np.linalg.inv(self.stiffness / 1.0e3) @compliance.setter def compliance(self, v): # Compliance in TPa⁻¹. Stiffness is in GPa. 
- self.stiffness = np.linalg.inv(v) * 1.e3 + self.stiffness = np.linalg.inv(v) * 1.0e3 # lattice constants as properties @property def lparms(self): - return [self.a, self.b, - self.c, self.alpha, self.beta, - self.gamma] + return [self.a, self.b, self.c, self.alpha, self.beta, self.gamma] @lparms.setter def lparms(self, lp): @@ -1426,8 +1505,7 @@ def lparms(self, lp): @property def lparms_reduced(self): lp = self.lparms - lp_red = [lp[i] for i in - _rqpDict[self.latticeType][0]] + lp_red = [lp[i] for i in _rqpDict[self.latticeType][0]] return lp_red @property @@ -1438,12 +1516,10 @@ def a(self): def a(self, val): if self.is_editable("a"): lp = self.lparms - lp_valunit = self.fill_correct_lp_vals( - lp, val, "a") + lp_valunit = self.fill_correct_lp_vals(lp, val, "a") self.lparms = lp_valunit else: - msg = (f"not an editable field" - f" for this space group") + msg = f"not an editable field" f" for this space group" raise RuntimeError(msg) @property @@ -1454,12 +1530,10 @@ def b(self): def b(self, val): if self.is_editable("b"): lp = self.lparms - lp_valunit = self.fill_correct_lp_vals( - lp, val, "b") + lp_valunit = self.fill_correct_lp_vals(lp, val, "b") self.lparms = lp_valunit else: - msg = (f"not an editable field" - f" for this space group") + msg = f"not an editable field" f" for this space group" raise RuntimeError(msg) @property @@ -1470,12 +1544,10 @@ def c(self): def c(self, val): if self.is_editable("c"): lp = self.lparms - lp_valunit = self.fill_correct_lp_vals( - lp, val, "c") + lp_valunit = self.fill_correct_lp_vals(lp, val, "c") self.lparms = lp_valunit else: - msg = (f"not an editable field" - f" for this space group") + msg = f"not an editable field" f" for this space group" raise RuntimeError(msg) @property @@ -1486,12 +1558,10 @@ def alpha(self): def alpha(self, val): if self.is_editable("alpha"): lp = self.lparms - lp_valunit = self.fill_correct_lp_vals( - lp, val, "alpha") + lp_valunit = self.fill_correct_lp_vals(lp, val, "alpha") self.lparms = lp_valunit else: - msg = (f"not an editable field" - f" for this space group") + msg = f"not an editable field" f" for this space group" raise RuntimeError(msg) @property @@ -1502,12 +1572,10 @@ def beta(self): def beta(self, val): if self.is_editable("beta"): lp = self.lparms - lp_valunit = self.fill_correct_lp_vals( - lp, val, "beta") + lp_valunit = self.fill_correct_lp_vals(lp, val, "beta") self.lparms = lp_valunit else: - msg = (f"not an editable field" - f" for this space group") + msg = f"not an editable field" f" for this space group" raise RuntimeError(msg) @property @@ -1518,12 +1586,10 @@ def gamma(self): def gamma(self, val): if self.is_editable("gamma"): lp = self.lparms - lp_valunit = self.fill_correct_lp_vals( - lp, val, "gamma") + lp_valunit = self.fill_correct_lp_vals(lp, val, "gamma") self.lparms = lp_valunit else: - msg = (f"not an editable field" - f" for this space group") + msg = f"not an editable field" f" for this space group" raise RuntimeError(msg) @property @@ -1574,17 +1640,21 @@ def sgnum(self): @sgnum.setter def sgnum(self, val): - if not(isinstance(val, int)): + if not (isinstance(val, int)): raise ValueError('space group should be integer') - if not((val >= 1) and (val <= 230)): + if not ((val >= 1) and (val <= 230)): raise ValueError('space group number should be between 1 and 230.') self._sym_sgnum = val - self.sg_hmsymbol = symbols.pstr_spacegroup[val-1].strip() + self.sg_hmsymbol = symbols.pstr_spacegroup[val - 1].strip() - self.SYM_SG, self.SYM_PG_d, self.SYM_PG_d_laue, \ - 
self.centrosymmetric, self.symmorphic = \ - symmetry.GenerateSGSym(self.sgnum, self.sgsetting) + ( + self.SYM_SG, + self.SYM_PG_d, + self.SYM_PG_d_laue, + self.centrosymmetric, + self.symmorphic, + ) = symmetry.GenerateSGSym(self.sgnum, self.sgsetting) self.latticeType = symmetry.latticeType(self.sgnum) @@ -1609,10 +1679,12 @@ def sgnum(self, val): ''' SS 11/11/2020 adding the sphere_sector class initialization here ''' - self.sphere_sector = sphere_sector.sector(self._pointGroup, - self._laueGroup, - self._supergroup, - self._supergroup_laue) + self.sphere_sector = sphere_sector.sector( + self._pointGroup, + self._laueGroup, + self._supergroup, + self._supergroup_laue, + ) self.CalcDensity() self.calc_absorption_length() @@ -1638,10 +1710,12 @@ def atom_pos(self, val): """ if hasattr(self, 'atom_type'): if self.atom_ntype != val.shape[0]: - msg = (f"incorrect number of atom positions." - f" number of atom type = {self.atom_ntype} " - f" and number of" - f" atom positions = {val.shape[0]}.") + msg = ( + f"incorrect number of atom positions." + f" number of atom type = {self.atom_ntype} " + f" and number of" + f" atom positions = {val.shape[0]}." + ) raise ValueError(msg) self._atom_pos = val @@ -1666,8 +1740,9 @@ def asym_pos(self): @asym_pos.setter def asym_pos(self, val): - assert(type(val) == list),\ - 'input type to asymmetric positions should be list' + assert ( + type(val) == list + ), 'input type to asymmetric positions should be list' self._asym_pos = val @property @@ -1676,8 +1751,9 @@ def numat(self): @numat.setter def numat(self, val): - assert(val.shape[0] == - self.atom_ntype), 'shape of numat is not consistent' + assert ( + val.shape[0] == self.atom_ntype + ), 'shape of numat is not consistent' self._numat = val # direct metric tensor is read only @@ -1711,18 +1787,18 @@ def vol(self): @property def vol_per_atom(self): # vol per atom in A^3 - return 1e3*self.vol/self.num_atom + return 1e3 * self.vol / self.num_atom _rqpDict = { 'triclinic': (tuple(range(6)), lambda p: p), # all 6 # note beta 'monoclinic': ((0, 1, 2, 4), lambda p: (p[0], p[1], p[2], 90, p[3], 90)), - 'orthorhombic': ((0, 1, 2), lambda p: (p[0], p[1], p[2], 90, 90, 90)), - 'tetragonal': ((0, 2), lambda p: (p[0], p[0], p[1], 90, 90, 90)), - 'trigonal': ((0, 2), lambda p: (p[0], p[0], p[1], 90, 90, 120)), - 'hexagonal': ((0, 2), lambda p: (p[0], p[0], p[1], 90, 90, 120)), - 'cubic': ((0,), lambda p: (p[0], p[0], p[0], 90, 90, 90)), + 'orthorhombic': ((0, 1, 2), lambda p: (p[0], p[1], p[2], 90, 90, 90)), + 'tetragonal': ((0, 2), lambda p: (p[0], p[0], p[1], 90, 90, 90)), + 'trigonal': ((0, 2), lambda p: (p[0], p[0], p[1], 90, 90, 120)), + 'hexagonal': ((0, 2), lambda p: (p[0], p[0], p[1], 90, 90, 120)), + 'cubic': ((0,), lambda p: (p[0], p[0], p[0], 90, 90, 90)), } _lpname = np.array(['a', 'b', 'c', 'alpha', 'beta', 'gamma']) @@ -1762,78 +1838,68 @@ def vol_per_atom(self): supergroup_11 = 'oh' -def _sgrange(min, max): return tuple(range(min, max + 1)) # inclusive range +def _sgrange(min, max): + return tuple(range(min, max + 1)) # inclusive range + ''' 11/20/2020 SS added supergroup to the list which is used for coloring the fundamental zone IPF ''' _pgDict = { - _sgrange(1, 1): ('c1', laue_1, - supergroup_1, supergroup_00), # Triclinic - _sgrange(2, 2): ('ci', laue_1, \ - supergroup_00, supergroup_00), # laue 1 - _sgrange(3, 5): ('c2', laue_2, \ - supergroup_2, supergroup_3), # Monoclinic - _sgrange(6, 9): ('cs', laue_2, \ - supergroup_1, supergroup_3), - _sgrange(10, 15): ('c2h', laue_2, \ - supergroup_3, 
supergroup_3), # laue 2 - _sgrange(16, 24): ('d2', laue_3, \ - supergroup_3, supergroup_3), # Orthorhombic - _sgrange(25, 46): ('c2v', laue_3, \ - supergroup_2, supergroup_3), - _sgrange(47, 74): ('d2h', laue_3, \ - supergroup_3, supergroup_3), # laue 3 - _sgrange(75, 80): ('c4', laue_4, \ - supergroup_4, supergroup_5), # Tetragonal - _sgrange(81, 82): ('s4', laue_4, \ - supergroup_01, supergroup_5), - _sgrange(83, 88): ('c4h', laue_4, \ - supergroup_5, supergroup_5), # laue 4 - _sgrange(89, 98): ('d4', laue_5, \ - supergroup_5, supergroup_5), - _sgrange(99, 110): ('c4v', laue_5, \ - supergroup_4, supergroup_5), - _sgrange(111, 122): ('d2d', laue_5, \ - supergroup_5, supergroup_5), - _sgrange(123, 142): ('d4h', laue_5, \ - supergroup_5, supergroup_5), # laue 5 + _sgrange(1, 1): ('c1', laue_1, supergroup_1, supergroup_00), # Triclinic + _sgrange(2, 2): ('ci', laue_1, supergroup_00, supergroup_00), # laue 1 + _sgrange(3, 5): ('c2', laue_2, supergroup_2, supergroup_3), # Monoclinic + _sgrange(6, 9): ('cs', laue_2, supergroup_1, supergroup_3), + _sgrange(10, 15): ('c2h', laue_2, supergroup_3, supergroup_3), # laue 2 + _sgrange(16, 24): ( + 'd2', + laue_3, + supergroup_3, + supergroup_3, + ), # Orthorhombic + _sgrange(25, 46): ('c2v', laue_3, supergroup_2, supergroup_3), + _sgrange(47, 74): ('d2h', laue_3, supergroup_3, supergroup_3), # laue 3 + _sgrange(75, 80): ('c4', laue_4, supergroup_4, supergroup_5), # Tetragonal + _sgrange(81, 82): ('s4', laue_4, supergroup_01, supergroup_5), + _sgrange(83, 88): ('c4h', laue_4, supergroup_5, supergroup_5), # laue 4 + _sgrange(89, 98): ('d4', laue_5, supergroup_5, supergroup_5), + _sgrange(99, 110): ('c4v', laue_5, supergroup_4, supergroup_5), + _sgrange(111, 122): ('d2d', laue_5, supergroup_5, supergroup_5), + _sgrange(123, 142): ('d4h', laue_5, supergroup_5, supergroup_5), # laue 5 # Trigonal # laue 6 [also c3i] - _sgrange(143, 146): ('c3', laue_6, \ - supergroup_6, supergroup_02), - _sgrange(147, 148): ('s6', laue_6, \ - supergroup_02, supergroup_02), - _sgrange(149, 155): ('d3', laue_7, \ - supergroup_7, supergroup_9), - _sgrange(156, 161): ('c3v', laue_7, \ - supergroup_6, supergroup_9), - _sgrange(162, 167): ('d3d', laue_7, \ - supergroup_9, supergroup_9), # laue 7 - _sgrange(168, 173): ('c6', laue_8, \ - supergroup_7, supergroup_9), # Hexagonal - _sgrange(174, 174): ('c3h', laue_8, \ - supergroup_7, supergroup_9), - _sgrange(175, 176): ('c6h', laue_8, \ - supergroup_9, supergroup_9), # laue 8 - _sgrange(177, 182): ('d6', laue_9, \ - supergroup_9, supergroup_9), - _sgrange(183, 186): ('c6v', laue_9, \ - supergroup_7, supergroup_9), - _sgrange(187, 190): ('d3h', laue_9, \ - supergroup_9, supergroup_9), - _sgrange(191, 194): ('d6h', laue_9, \ - supergroup_9, supergroup_9), # laue 9 - _sgrange(195, 199): ('t', laue_10, \ - supergroup_10, supergroup_11), # Cubic - _sgrange(200, 206): ('th', laue_10, \ - supergroup_11, supergroup_11), # laue 10 - _sgrange(207, 214): ('o', laue_11, \ - supergroup_11, supergroup_11), - _sgrange(215, 220): ('td', laue_11, \ - supergroup_10, supergroup_11), - _sgrange(221, 230): ('oh', laue_11, \ - supergroup_11, supergroup_11) # laue 11 + _sgrange(143, 146): ('c3', laue_6, supergroup_6, supergroup_02), + _sgrange(147, 148): ('s6', laue_6, supergroup_02, supergroup_02), + _sgrange(149, 155): ('d3', laue_7, supergroup_7, supergroup_9), + _sgrange(156, 161): ('c3v', laue_7, supergroup_6, supergroup_9), + _sgrange(162, 167): ('d3d', laue_7, supergroup_9, supergroup_9), # laue 7 + _sgrange(168, 173): ( + 'c6', + laue_8, + 
supergroup_7, + supergroup_9, + ), # Hexagonal + _sgrange(174, 174): ('c3h', laue_8, supergroup_7, supergroup_9), + _sgrange(175, 176): ('c6h', laue_8, supergroup_9, supergroup_9), # laue 8 + _sgrange(177, 182): ('d6', laue_9, supergroup_9, supergroup_9), + _sgrange(183, 186): ('c6v', laue_9, supergroup_7, supergroup_9), + _sgrange(187, 190): ('d3h', laue_9, supergroup_9, supergroup_9), + _sgrange(191, 194): ('d6h', laue_9, supergroup_9, supergroup_9), # laue 9 + _sgrange(195, 199): ('t', laue_10, supergroup_10, supergroup_11), # Cubic + _sgrange(200, 206): ( + 'th', + laue_10, + supergroup_11, + supergroup_11, + ), # laue 10 + _sgrange(207, 214): ('o', laue_11, supergroup_11, supergroup_11), + _sgrange(215, 220): ('td', laue_11, supergroup_10, supergroup_11), + _sgrange(221, 230): ( + 'oh', + laue_11, + supergroup_11, + supergroup_11, + ), # laue 11 } ''' @@ -1938,7 +2004,8 @@ def _sgrange(min, max): return tuple(range(min, max + 1)) # inclusive range ''' -def identity(x): return x +def identity(x): + return x def C_cyclictet_eq(x): @@ -1957,7 +2024,7 @@ def C_trigonal_eq(x): x[3, 5] = -x[0, 4] x[4, 4] = x[3, 3] x[4, 5] = x[0, 3] - x[5, 5] = 0.5*(x[0, 0]-x[0, 1]) + x[5, 5] = 0.5 * (x[0, 0] - x[0, 1]) return x @@ -1985,5 +2052,5 @@ def C_cubic_eq(x): laue_8: [type8, C_trigonal_eq], # cyclic hexagonal, 5 components needed laue_9: [type8, C_trigonal_eq], # dihedral hexagonal, 5 components laue_10: [type9, C_cubic_eq], # cubic, 3 components - laue_11: [type9, C_cubic_eq] # cubic, 3 components + laue_11: [type9, C_cubic_eq], # cubic, 3 components } diff --git a/hexrd/hedm/preprocess/preprocessors.py b/hexrd/hedm/preprocess/preprocessors.py index 9b09e922e..f934048ed 100644 --- a/hexrd/hedm/preprocess/preprocessors.py +++ b/hexrd/hedm/preprocess/preprocessors.py @@ -1,6 +1,10 @@ from hexrd.core.imageseries.baseclass import ImageSeries from hexrd.core.imageseries.omega import OmegaWedges -from hexrd.hedm.preprocess.profiles import Eiger_Arguments, Dexelas_Arguments, HexrdPPScript_Arguments +from hexrd.hedm.preprocess.profiles import ( + Eiger_Arguments, + Dexelas_Arguments, + HexrdPPScript_Arguments, +) from hexrd.core import imageseries from hexrd.core.imageseries.process import ProcessedImageSeries import os diff --git a/hexrd/hedm/preprocess/profiles.py b/hexrd/hedm/preprocess/profiles.py index 747abfbbc..dff82ca3c 100644 --- a/hexrd/hedm/preprocess/profiles.py +++ b/hexrd/hedm/preprocess/profiles.py @@ -2,7 +2,10 @@ import glob import os import yaml -from hexrd.hedm.preprocess.argument_classes_factory import ArgumentClassesFactory, autoregister +from hexrd.hedm.preprocess.argument_classes_factory import ( + ArgumentClassesFactory, + autoregister, +) from hexrd.hedm.preprocess.yaml_internals import HexrdPPScriptArgumentsDumper from typing import Any, Union, Optional, cast diff --git a/hexrd/hedm/sampleOrientations/conversions.py b/hexrd/hedm/sampleOrientations/conversions.py index f4a85beb0..98cc1212b 100644 --- a/hexrd/hedm/sampleOrientations/conversions.py +++ b/hexrd/hedm/sampleOrientations/conversions.py @@ -51,33 +51,33 @@ def cu2ho(cu): xyz = sXYZ * sc ma = np.max(np.abs(xyz)) - if ma < 1E-8: + if ma < 1e-8: return np.array([0.0, 0.0, 0.0]) ma2 = np.max(np.abs(xyz[0:2])) - if ma2 < 1E-8: + if ma2 < 1e-8: LamXYZ = np.array([0.0, 0.0, constants.pref * xyz[2]]) else: if np.abs(xyz[1]) <= np.abs(xyz[0]): - q = (np.pi/12.0) * xyz[1]/xyz[0] + q = (np.pi / 12.0) * xyz[1] / xyz[0] c = np.cos(q) s = np.sin(q) - q = constants.prek * xyz[0] / np.sqrt(np.sqrt(2.0)-c) + q = constants.prek * xyz[0] / 
np.sqrt(np.sqrt(2.0) - c) T1 = (np.sqrt(2.0) * c - 1.0) * q T2 = np.sqrt(2.0) * s * q else: - q = (np.pi/12.0) * xyz[0]/xyz[1] + q = (np.pi / 12.0) * xyz[0] / xyz[1] c = np.cos(q) s = np.sin(q) - q = constants.prek * xyz[1] / np.sqrt(np.sqrt(2.0)-c) + q = constants.prek * xyz[1] / np.sqrt(np.sqrt(2.0) - c) T1 = np.sqrt(2.0) * s * q T2 = (np.sqrt(2.0) * c - 1.0) * q c = T1**2 + T2**2 - s = np.pi * c / (24.0 * xyz[2]**2) + s = np.pi * c / (24.0 * xyz[2] ** 2) c = np.sqrt(np.pi) * c / np.sqrt(24.0) / xyz[2] - q = np.sqrt( 1.0 - s ) + q = np.sqrt(1.0 - s) LamXYZ = np.array([T1 * q, T2 * q, constants.pref * xyz[2] - c]) if pyd == 1 or pyd == 2: @@ -96,18 +96,18 @@ def ho2ro(ho): @njit(cache=True, nogil=True) def ho2ax(ho): - hmag = np.linalg.norm(ho[:])**2 - if hmag < 1E-8: + hmag = np.linalg.norm(ho[:]) ** 2 + if hmag < 1e-8: return np.array([0.0, 0.0, 1.0, 0.0]) hm = hmag - hn = ho/np.sqrt(hmag) + hn = ho / np.sqrt(hmag) s = constants.tfit[0] + constants.tfit[1] * hmag for ii in range(2, 21): - hm = hm*hmag + hm = hm * hmag s = s + constants.tfit[ii] * hm s = 2.0 * np.arccos(s) diff = np.abs(s - np.pi) - if diff < 1E-8: + if diff < 1e-8: return np.array([hn[0], hn[1], hn[2], np.pi]) else: return np.array([hn[0], hn[1], hn[2], s]) @@ -115,14 +115,14 @@ def ho2ax(ho): @njit(cache=True, nogil=True) def ax2ro(ax): - if np.abs(ax[3]) < 1E-8: + if np.abs(ax[3]) < 1e-8: return np.array([0.0, 0.0, 1.0, 0.0]) - elif np.abs(ax[3] - np.pi) < 1E-8: + elif np.abs(ax[3] - np.pi) < 1e-8: return np.array([ax[0], ax[1], ax[2], np.inf]) else: - return np.array([ax[0], ax[1], ax[2], np.tan(ax[3]*0.5)]) + return np.array([ax[0], ax[1], ax[2], np.tan(ax[3] * 0.5)]) @njit(cache=True, nogil=True) @@ -133,21 +133,21 @@ def ro2qu(ro): @njit(cache=True, nogil=True) def ro2ax(ro): - if np.abs(ro[3]) < 1E-8: + if np.abs(ro[3]) < 1e-8: return np.array([0.0, 0.0, 1.0, 0.0]) elif ro[3] == np.inf: return np.array([ro[0], ro[1], ro[2], np.pi]) else: - ang = 2.0*np.arctan(ro[3]) - mag = 1.0/np.linalg.norm(ro[0:3]) - return np.array([ro[0]*mag, ro[1]*mag, ro[2]*mag, ang]) + ang = 2.0 * np.arctan(ro[3]) + mag = 1.0 / np.linalg.norm(ro[0:3]) + return np.array([ro[0] * mag, ro[1] * mag, ro[2] * mag, ang]) @njit(cache=True, nogil=True) def ax2qu(ro): - if np.abs(ro[3]) < 1E-8: + if np.abs(ro[3]) < 1e-8: return np.array([1.0, 0.0, 0.0, 0.0]) else: - c = np.cos(ro[3]*0.5) - s = np.sin(ro[3]*0.5) - return np.array([c, ro[0]*s, ro[1]*s, ro[2]*s]) + c = np.cos(ro[3] * 0.5) + s = np.sin(ro[3] * 0.5) + return np.array([c, ro[0] * s, ro[1] * s, ro[2] * s]) diff --git a/hexrd/hedm/sampleOrientations/rfz.py b/hexrd/hedm/sampleOrientations/rfz.py index 17fbd833a..04f1df338 100644 --- a/hexrd/hedm/sampleOrientations/rfz.py +++ b/hexrd/hedm/sampleOrientations/rfz.py @@ -7,8 +7,8 @@ @numba.njit(cache=True, nogil=True) def getFZtypeandOrder(pgnum): - FZtype = FZtypeArray[pgnum-1] - FZorder = FZorderArray[pgnum-1] + FZtype = FZtypeArray[pgnum - 1] + FZorder = FZorderArray[pgnum - 1] return np.array([FZtype, FZorder]) @@ -24,9 +24,9 @@ def insideCyclicFZ(ro, FZorder): res = True else: if FZorder == 2: - res = np.abs(ro[1]*ro[3]) <= constants.BP[FZorder-1] + res = np.abs(ro[1] * ro[3]) <= constants.BP[FZorder - 1] else: - res = np.abs(ro[2]*ro[3]) <= constants.BP[FZorder-1] + res = np.abs(ro[2] * ro[3]) <= constants.BP[FZorder - 1] return res @@ -38,41 +38,38 @@ def insideDihedralFZ(ro, FZorder): else: rod = ro[0:3] * ro[3] - c1 = np.abs(rod[2]) <= constants.BP[FZorder-1] + c1 = np.abs(rod[2]) <= constants.BP[FZorder - 1] if c1: - if FZorder == 2: 
- c2 = np.logical_and(np.abs(rod[0]) <= 1.0, - np.abs(rod[1]) <= 1.0) + if FZorder == 2: + c2 = np.logical_and(np.abs(rod[0]) <= 1.0, np.abs(rod[1]) <= 1.0) elif FZorder == 3: - srt = np.sqrt(3.0)/2.0 - c2 = np.abs(srt*rod[0] + 0.5*rod[1]) <= 1.0 - c3 = np.abs(srt*rod[0] - 0.5*rod[1]) <= 1.0 + srt = np.sqrt(3.0) / 2.0 + c2 = np.abs(srt * rod[0] + 0.5 * rod[1]) <= 1.0 + c3 = np.abs(srt * rod[0] - 0.5 * rod[1]) <= 1.0 c4 = np.abs(rod[1]) <= 1.0 - return np.logical_and(c2, - np.logical_and(c3, c4)) + return np.logical_and(c2, np.logical_and(c3, c4)) elif FZorder == 4: - r22 = 1.0/np.sqrt(2.0) - c2 = np.logical_and(np.abs(rod[0]) <= 1.0, - np.abs(rod[1]) <= 1.0) - c3 = np.logical_and(r22*np.abs(rod[0]+rod[1]) <= 1.0, - r22*np.abs(rod[0]-rod[1]) <= 1.0) + r22 = 1.0 / np.sqrt(2.0) + c2 = np.logical_and(np.abs(rod[0]) <= 1.0, np.abs(rod[1]) <= 1.0) + c3 = np.logical_and( + r22 * np.abs(rod[0] + rod[1]) <= 1.0, + r22 * np.abs(rod[0] - rod[1]) <= 1.0, + ) return np.logical_and(c2, c3) elif FZorder == 6: - srt = np.sqrt(3.0)/2.0 - c2 = np.abs(0.5*rod[0] + srt*rod[1]) < 1.0 - c2 = np.logical_and(c2, - np.abs(srt*rod[0] + 0.5*rod[1]) < 1.0) - c2 = np.logical_and(c2, - np.abs(0.5*rod[0] - srt*rod[1]) < 1.0) - c2 = np.logical_and(c2, - np.abs(srt*rod[0] - 0.5*rod[1]) < 1.0) - c2 = np.logical_and(c2, - np.logical_and(np.abs(rod[0]) <= 1.0, - np.abs(rod[1]) <= 1.0)) + srt = np.sqrt(3.0) / 2.0 + c2 = np.abs(0.5 * rod[0] + srt * rod[1]) < 1.0 + c2 = np.logical_and(c2, np.abs(srt * rod[0] + 0.5 * rod[1]) < 1.0) + c2 = np.logical_and(c2, np.abs(0.5 * rod[0] - srt * rod[1]) < 1.0) + c2 = np.logical_and(c2, np.abs(srt * rod[0] - 0.5 * rod[1]) < 1.0) + c2 = np.logical_and( + c2, + np.logical_and(np.abs(rod[0]) <= 1.0, np.abs(rod[1]) <= 1.0), + ) return c2 else: return False @@ -83,11 +80,11 @@ def insideCubicFZ(ro, kwrd): rod = np.abs(ro[0:3] * ro[3]) if kwrd == 'oct': - c1 = (np.max(rod) - constants.BP[3]) <= 1E-8 + c1 = (np.max(rod) - constants.BP[3]) <= 1e-8 else: c1 = True - c2 = (rod[0]+rod[1]+rod[2] - 1.0) <= 1E-8 + c2 = (rod[0] + rod[1] + rod[2] - 1.0) <= 1e-8 res = np.logical_and(c1, c2) return res @@ -95,7 +92,7 @@ def insideCubicFZ(ro, kwrd): @numba.njit(cache=True, nogil=True) def insideFZ(ro, pgnum): res = getFZtypeandOrder(pgnum) - FZtype = res[0] + FZtype = res[0] FZorder = res[1] if FZtype == 0: diff --git a/hexrd/hedm/sampleOrientations/sampleRFZ.py b/hexrd/hedm/sampleOrientations/sampleRFZ.py index 2168b490d..0cd7dbf70 100644 --- a/hexrd/hedm/sampleOrientations/sampleRFZ.py +++ b/hexrd/hedm/sampleOrientations/sampleRFZ.py @@ -8,20 +8,16 @@ @numba.njit(cache=True, nogil=True, parallel=True) -def _sample(pgnum, - N, - delta, - shift, - ap_2): +def _sample(pgnum, N, delta, shift, ap_2): - N3 = (2*N+1)**3 + N3 = (2 * N + 1) ** 3 res = np.full((N3, 4), np.nan, dtype=np.float64) - for ii in prange(-N, N+1): + for ii in prange(-N, N + 1): xx = (ii + shift) * delta - for jj in prange(-N, N+1): + for jj in prange(-N, N + 1): yy = (jj + shift) * delta - for kk in prange(-N, N+1): + for kk in prange(-N, N + 1): zz = (kk + shift) * delta cu = np.array([xx, yy, zz]) ma = np.max(np.abs(cu)) @@ -29,13 +25,17 @@ def _sample(pgnum, if ma <= ap_2: ro = cu2ro(cu) if insideFZ(ro, pgnum): - idx = (ii+N)*(2*N+1)**2 + (jj+N)*(2*N+1) + (kk+N) - res[idx,:] = ro2qu(ro) + idx = ( + (ii + N) * (2 * N + 1) ** 2 + + (jj + N) * (2 * N + 1) + + (kk + N) + ) + res[idx, :] = ro2qu(ro) return res -class sampleRFZ: +class sampleRFZ: """This class samples the rodrigues fundamental zone of a point group uniformly in the density 
sense and returns a list of orientations which are spaced, @@ -46,8 +46,8 @@ class sampleRFZ: Note ---- Details can be found in: - S. Singh and M. De Graef, "Orientation sampling for - dictionary-based diffraction pattern indexing methods". + S. Singh and M. De Graef, "Orientation sampling for + dictionary-based diffraction pattern indexing methods". MSMSE 24, 085013 (2016) Attributes @@ -59,10 +59,9 @@ class sampleRFZ: """ - def __init__(self, - pgnum, - sampling_type='default', - average_angular_spacing=3.0): + def __init__( + self, pgnum, sampling_type='default', average_angular_spacing=3.0 + ): """__init__ method of the sampleRFZ class. @@ -92,19 +91,20 @@ def sampling_N(self): """ if self.sampling_type.lower() == 'default': - return np.rint(131.97049 / (self.avg_ang_spacing - 0.03732)).astype(np.int32) + return np.rint( + 131.97049 / (self.avg_ang_spacing - 0.03732) + ).astype(np.int32) elif self.sampling_type.lower() == 'special': - return np.rint(125.70471 / (self.avg_ang_spacing - 0.07127)).astype(np.int32) + return np.rint( + 125.70471 / (self.avg_ang_spacing - 0.07127) + ).astype(np.int32) def sample(self): - res = _sample(self.pgnum, - self.cubN, - self.delta, - self.shift, - self.ap_2) - mask = ~np.isnan(res[:,0]) - res = res[mask,:] + res = _sample(self.pgnum, self.cubN, self.delta, self.shift, self.ap_2) + mask = ~np.isnan(res[:, 0]) + res = res[mask, :] self.orientations = res + def sample_if_possible(self): required_attributes = ('pgnum', 'avg_ang_spacing', 'sampling_type') if not all(hasattr(self, x) for x in required_attributes): @@ -134,7 +134,6 @@ def sampling_type(self, stype): def avg_ang_spacing(self): return self._avg_ang_spacing - @avg_ang_spacing.setter def avg_ang_spacing(self, ang): self._avg_ang_spacing = ang diff --git a/hexrd/hedm/xrdutil/utils.py b/hexrd/hedm/xrdutil/utils.py index ae21ed756..d8150ded1 100644 --- a/hexrd/hedm/xrdutil/utils.py +++ b/hexrd/hedm/xrdutil/utils.py @@ -508,9 +508,8 @@ def _filter_hkls_eta_ome( angMask_eta = np.zeros(len(angles), dtype=bool) for etas in eta_range: angMask_eta = np.logical_or( - angMask_eta, xfcapi.validate_angle_ranges( - angles[:, 1], etas[0], etas[1] - ) + angMask_eta, + xfcapi.validate_angle_ranges(angles[:, 1], etas[0], etas[1]), ) ccw = True @@ -991,8 +990,13 @@ def simulateGVecs( # first find valid G-vectors angList = np.vstack( xfcapi.oscill_angles_of_hkls( - full_hkls[:, 1:], chi, rMat_c, bMat, wlen, v_inv=vInv_s, - beam_vec=beam_vector + full_hkls[:, 1:], + chi, + rMat_c, + bMat, + wlen, + v_inv=vInv_s, + beam_vec=beam_vector, ) ) allAngs, allHKLs = _filter_hkls_eta_ome( @@ -1008,8 +1012,15 @@ def simulateGVecs( else: # ??? preallocate for speed? det_xy, rMat_ss, _ = _project_on_detector_plane( - allAngs, rMat_d, rMat_c, chi, tVec_d, tVec_c, tVec_s, distortion, - beamVec=beam_vector + allAngs, + rMat_d, + rMat_c, + chi, + tVec_d, + tVec_c, + tVec_s, + distortion, + beamVec=beam_vector, ) on_panel = np.logical_and( @@ -1472,7 +1483,7 @@ def make_reflection_patches( def extract_detector_transformation( - detector_params: Union[dict[str, Any], np.ndarray] + detector_params: Union[dict[str, Any], np.ndarray], ) -> tuple[np.ndarray, np.ndarray, float, np.ndarray]: """ Construct arrays from detector parameters. 
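The sampleOrientations hunks above are formatting-only: the njit helpers still compose into the cubochoric to homochoric to Rodrigues to quaternion chain (cu2ho, ho2ro/ho2ax, ax2ro, ro2ax/ro2qu/ax2qu). A minimal smoke test of that chain, assuming the module path introduced by this patch; the input point is an arbitrary illustrative value, not taken from the patch:

    import numpy as np
    from hexrd.hedm.sampleOrientations.conversions import cu2ho, ho2ro, ro2qu

    cu = np.array([0.1, -0.05, 0.2])  # arbitrary point inside the cubochoric cube
    qu = ro2qu(ho2ro(cu2ho(cu)))      # cubochoric -> homochoric -> Rodrigues -> quaternion
    assert np.isclose(np.linalg.norm(qu), 1.0)  # a unit quaternion is expected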
diff --git a/hexrd/laue/fitting/calibration/laue.py b/hexrd/laue/fitting/calibration/laue.py index 972b25d26..3b0b9645e 100644 --- a/hexrd/laue/fitting/calibration/laue.py +++ b/hexrd/laue/fitting/calibration/laue.py @@ -19,7 +19,11 @@ # TODO: Resolve extra-workflow-dependency from ....core.fitting.calibration.calibrator import Calibrator from ....core.fitting.calibration.abstract_grain import AbstractGrainCalibrator -from ....core.fitting.calibration.lmfit_param_handling import create_grain_params, DEFAULT_EULER_CONVENTION, rename_to_avoid_collision +from ....core.fitting.calibration.lmfit_param_handling import ( + create_grain_params, + DEFAULT_EULER_CONVENTION, + rename_to_avoid_collision, +) class LaueCalibrator(AbstractGrainCalibrator): @@ -32,16 +36,29 @@ class LaueCalibrator(AbstractGrainCalibrator): varying energy range rather than a constant energy value. Also, we do not utilize any omega periods. """ + type = 'laue' - def __init__(self, instr, material, grain_params, default_refinements=None, - min_energy=5, max_energy=25, tth_distortion=None, - calibration_picks=None, - euler_convention=DEFAULT_EULER_CONVENTION, - xray_source: Optional[str] = None): + def __init__( + self, + instr, + material, + grain_params, + default_refinements=None, + min_energy=5, + max_energy=25, + tth_distortion=None, + calibration_picks=None, + euler_convention=DEFAULT_EULER_CONVENTION, + xray_source: Optional[str] = None, + ): super().__init__( - instr, material, grain_params, default_refinements, - calibration_picks, euler_convention, + instr, + material, + grain_params, + default_refinements, + calibration_picks, + euler_convention, ) self.energy_cutoffs = [min_energy, max_energy] self.xray_source = xray_source @@ -85,10 +102,20 @@ def energy_cutoffs(self, x): self.plane_data.wavelength = self.energy_cutoffs[-1] self.plane_data.exclusions = None - def autopick_points(self, raw_img_dict, tth_tol=5., eta_tol=5., - npdiv=2, do_smoothing=True, smoothing_sigma=2, - use_blob_detection=True, blob_threshold=0.25, - fit_peaks=True, min_peak_int=1., fit_tth_tol=0.1): + def autopick_points( + self, + raw_img_dict, + tth_tol=5.0, + eta_tol=5.0, + npdiv=2, + do_smoothing=True, + smoothing_sigma=2, + use_blob_detection=True, + blob_threshold=0.25, + fit_peaks=True, + min_peak_int=1.0, + fit_tth_tol=0.1, + ): """ Parameters ---------- @@ -132,13 +159,23 @@ def autopick_points(self, raw_img_dict, tth_tol=5., eta_tol=5., fit_tth_tol=fit_tth_tol, ) - def _autopick_points(self, raw_img_dict, tth_tol=5., eta_tol=5., - npdiv=2, do_smoothing=True, smoothing_sigma=2, - use_blob_detection=True, blob_threshold=0.25, - fit_peaks=True, min_peak_int=1., fit_tth_tol=0.1): + def _autopick_points( + self, + raw_img_dict, + tth_tol=5.0, + eta_tol=5.0, + npdiv=2, + do_smoothing=True, + smoothing_sigma=2, + use_blob_detection=True, + blob_threshold=0.25, + fit_peaks=True, + min_peak_int=1.0, + fit_tth_tol=0.1, + ): labelStructure = ndimage.generate_binary_structure(2, 1) rmat_s = np.eye(3) # !!! forcing to identity - omega = 0. # !!! same ^^^ + omega = 0.0 # !!! 
same ^^^ rmat_c = xfcapi.make_rmat_of_expmap(self.grain_params[:3]) tvec_c = self.grain_params[3:6] @@ -150,7 +187,8 @@ def _autopick_points(self, raw_img_dict, tth_tol=5., eta_tol=5., self.plane_data, minEnergy=self.energy_cutoffs[0], maxEnergy=self.energy_cutoffs[1], - rmat_s=None, grain_params=np.atleast_2d(self.grain_params), + rmat_s=None, + grain_params=np.atleast_2d(self.grain_params), ) # loop over detectors for results @@ -159,7 +197,7 @@ def _autopick_points(self, raw_img_dict, tth_tol=5., eta_tol=5., det_config = det.config_dict( chi=self.instr.chi, tvec=self.instr.tvec, - beam_vector=self.instr.beam_vector + beam_vector=self.instr.beam_vector, ) xy_det, hkls, angles, dspacing, energy = laue_sim[det_key] @@ -182,57 +220,66 @@ def _autopick_points(self, raw_img_dict, tth_tol=5., eta_tol=5., # make patches refl_patches = xrdutil.make_reflection_patches( det_config, - valid_angs, det.angularPixelSize(valid_xy), - rmat_c=rmat_c, tvec_c=tvec_c, - tth_tol=tth_tol, eta_tol=eta_tol, - npdiv=npdiv, quiet=True) + valid_angs, + det.angularPixelSize(valid_xy), + rmat_c=rmat_c, + tvec_c=tvec_c, + tth_tol=tth_tol, + eta_tol=eta_tol, + npdiv=npdiv, + quiet=True, + ) reflInfoList = [] img = raw_img_dict[det_key] native_area = det.pixel_area num_patches = len(valid_angs) - meas_xy = np.nan*np.ones((num_patches, 2)) - meas_angs = np.nan*np.ones((num_patches, 2)) + meas_xy = np.nan * np.ones((num_patches, 2)) + meas_angs = np.nan * np.ones((num_patches, 2)) for iRefl, patch in enumerate(refl_patches): # check for overrun irow = patch[-1][0] jcol = patch[-1][1] - if np.any([irow < 0, irow >= det.rows, - jcol < 0, jcol >= det.cols]): + if np.any( + [irow < 0, irow >= det.rows, jcol < 0, jcol >= det.cols] + ): continue if not np.all( - det.clip_to_panel( - np.vstack([patch[1][0].flatten(), - patch[1][1].flatten()]).T - )[1] - ): + det.clip_to_panel( + np.vstack( + [patch[1][0].flatten(), patch[1][1].flatten()] + ).T + )[1] + ): continue # use nearest interpolation spot_data = img[irow, jcol] * patch[3] * npdiv**2 / native_area spot_data -= np.amin(spot_data) patch_size = spot_data.shape - sigmax = 0.25*np.min(spot_data.shape) * fwhm_to_sigma + sigmax = 0.25 * np.min(spot_data.shape) * fwhm_to_sigma # optional gaussian smoothing if do_smoothing: spot_data = filters.gaussian(spot_data, smoothing_sigma) if use_blob_detection: - spot_data_scl = 2.*spot_data/np.max(spot_data) - 1. + spot_data_scl = 2.0 * spot_data / np.max(spot_data) - 1.0 # Compute radii in the 3rd column. 
- blobs_log = blob_log(spot_data_scl, - min_sigma=2, - max_sigma=min(sigmax, 20), - num_sigma=10, - threshold=blob_threshold, - overlap=0.1) + blobs_log = blob_log( + spot_data_scl, + min_sigma=2, + max_sigma=min(sigmax, 20), + num_sigma=10, + threshold=blob_threshold, + overlap=0.1, + ) numPeaks = len(blobs_log) else: labels, numPeaks = ndimage.label( spot_data > np.percentile(spot_data, 99), - structure=labelStructure + structure=labelStructure, ) slabels = np.arange(1, numPeaks + 1) tth_edges = patch[0][0][0, :] @@ -247,11 +294,11 @@ def _autopick_points(self, raw_img_dict, tth_tol=5., eta_tol=5., coms = np.array( ndimage.center_of_mass( spot_data, labels=labels, index=slabels - ) ) + ) if numPeaks > 1: # - center = np.r_[spot_data.shape]*0.5 + center = np.r_[spot_data.shape] * 0.5 com_diff = coms - np.tile(center, (numPeaks, 1)) closest_peak_idx = np.argmin( np.sum(com_diff**2, axis=1) @@ -263,20 +310,28 @@ def _autopick_points(self, raw_img_dict, tth_tol=5., eta_tol=5., coms = coms[closest_peak_idx] # if fit_peaks: - sigm = 0.2*np.min(spot_data.shape) + sigm = 0.2 * np.min(spot_data.shape) if use_blob_detection: sigm = min(blobs_log[closest_peak_idx, 2], sigm) y0, x0 = coms.flatten() ampl = float(spot_data[int(y0), int(x0)]) # y0, x0 = 0.5*np.array(spot_data.shape) # ampl = np.max(spot_data) - a_par = c_par = 0.5/float(sigm**2) - b_par = 0. - bgx = bgy = 0. + a_par = c_par = 0.5 / float(sigm**2) + b_par = 0.0 + bgx = bgy = 0.0 bkg = np.min(spot_data) - params = [ampl, - a_par, b_par, c_par, - x0, y0, bgx, bgy, bkg] + params = [ + ampl, + a_par, + b_par, + c_par, + x0, + y0, + bgx, + bgy, + bkg, + ] # result = leastsq(gaussian_2d, params, args=(spot_data)) # @@ -291,24 +346,29 @@ def _autopick_points(self, raw_img_dict, tth_tol=5., eta_tol=5., row_cen = fit_tth_tol * patch_size[0] col_cen = fit_tth_tol * patch_size[1] if np.any( - [coms[0] < row_cen, - coms[0] >= patch_size[0] - row_cen, - coms[1] < col_cen, - coms[1] >= patch_size[1] - col_cen] + [ + coms[0] < row_cen, + coms[0] >= patch_size[0] - row_cen, + coms[1] < col_cen, + coms[1] >= patch_size[1] - col_cen, + ] ): continue - if (fit_par[0] < min_peak_int): + if fit_par[0] < min_peak_int: continue # intensities spot_intensity, int_err = nquad( gaussian_2d_int, - [[0., 2.*y0], [0., 2.*x0]], - args=fit_par) - com_angs = np.hstack([ - tth_edges[0] + (0.5 + coms[1])*delta_tth, - eta_edges[0] + (0.5 + coms[0])*delta_eta - ]) + [[0.0, 2.0 * y0], [0.0, 2.0 * x0]], + args=fit_par, + ) + com_angs = np.hstack( + [ + tth_edges[0] + (0.5 + coms[1]) * delta_tth, + eta_edges[0] + (0.5 + coms[0]) * delta_eta, + ] + ) # grab intensities if not fit_peaks: @@ -331,12 +391,18 @@ def _autopick_points(self, raw_img_dict, tth_tol=5., eta_tol=5., cmv, chi=self.instr.chi, rmat_c=rmat_c, - beam_vec=self.instr.beam_vector) + beam_vec=self.instr.beam_vector, + ) new_xy = xfcapi.gvec_to_xy( gvec_c, - det.rmat, rmat_s, rmat_c, - det.tvec, self.instr.tvec, tvec_c, - beam_vec=self.instr.beam_vector) + det.rmat, + rmat_s, + rmat_c, + det.tvec, + self.instr.tvec, + tvec_c, + beam_vec=self.instr.beam_vector, + ) meas_xy[iRefl, :] = new_xy if det.distortion is not None: meas_xy[iRefl, :] = det.distortion.apply_inverse( @@ -348,15 +414,20 @@ def _autopick_points(self, raw_img_dict, tth_tol=5., eta_tol=5., # spot_intensity = np.nan max_intensity = np.nan - reflInfoList.append([peakId, valid_hkls[:, iRefl], - (spot_intensity, max_intensity), - valid_energy[iRefl], - valid_angs[iRefl, :], - meas_angs[iRefl, :], - meas_xy[iRefl, :]]) + reflInfoList.append( + [ + 
peakId, + valid_hkls[:, iRefl], + (spot_intensity, max_intensity), + valid_energy[iRefl], + valid_angs[iRefl, :], + meas_angs[iRefl, :], + meas_xy[iRefl, :], + ] + ) reflInfo = np.array( - [tuple(i) for i in reflInfoList], - dtype=reflInfo_dtype) + [tuple(i) for i in reflInfoList], dtype=reflInfo_dtype + ) refl_dict[det_key] = reflInfo # Convert to our data_dict format @@ -407,8 +478,12 @@ def _residual(self): energy_cutoffs = np.r_[0.5, 1.5] * np.asarray(self.energy_cutoffs) return sxcal_obj_func( - [self.grain_params], self.instr, pick_xys_dict, pick_hkls_dict, - self.bmatx, energy_cutoffs + [self.grain_params], + self.instr, + pick_xys_dict, + pick_hkls_dict, + self.bmatx, + energy_cutoffs, ) def model(self): @@ -420,14 +495,26 @@ def _model(self): pick_hkls_dict, pick_xys_dict = self._evaluate() return sxcal_obj_func( - [self.grain_params], self.instr, pick_xys_dict, pick_hkls_dict, - self.bmatx, self.energy_cutoffs, sim_only=True + [self.grain_params], + self.instr, + pick_xys_dict, + pick_hkls_dict, + self.bmatx, + self.energy_cutoffs, + sim_only=True, ) # Objective function for Laue fitting -def sxcal_obj_func(grain_params, instr, meas_xy, hkls_idx, - bmat, energy_cutoffs, sim_only=False): +def sxcal_obj_func( + grain_params, + instr, + meas_xy, + hkls_idx, + bmat, + energy_cutoffs, + sim_only=False, +): """ Objective function for Laue-based fitting. @@ -444,9 +531,10 @@ def sxcal_obj_func(grain_params, instr, meas_xy, hkls_idx, # returns xy_det, hkls_in, angles, dspacing, energy sim_results = panel.simulate_laue_pattern( [hkls_idx[det_key], bmat], - minEnergy=energy_cutoffs[0], maxEnergy=energy_cutoffs[1], + minEnergy=energy_cutoffs[0], + maxEnergy=energy_cutoffs[1], grain_params=grain_params, - beam_vec=instr.beam_vector + beam_vec=instr.beam_vector, ) calc_xy_tmp = sim_results[0][0] @@ -474,20 +562,30 @@ def sxcal_obj_func(grain_params, instr, meas_xy, hkls_idx, def gaussian_2d(p, data): shape = data.shape x, y = np.meshgrid(range(shape[1]), range(shape[0])) - func = p[0]*np.exp( - -(p[1]*(x-p[4])*(x-p[4]) - + p[2]*(x-p[4])*(y-p[5]) - + p[3]*(y-p[5])*(y-p[5])) - ) + p[6]*(x-p[4]) + p[7]*(y-p[5]) + p[8] + func = ( + p[0] + * np.exp( + -( + p[1] * (x - p[4]) * (x - p[4]) + + p[2] * (x - p[4]) * (y - p[5]) + + p[3] * (y - p[5]) * (y - p[5]) + ) + ) + + p[6] * (x - p[4]) + + p[7] * (y - p[5]) + + p[8] + ) return func.flatten() - data.flatten() def gaussian_2d_int(y, x, *p): - func = p[0]*np.exp( - -(p[1]*(x-p[4])*(x-p[4]) - + p[2]*(x-p[4])*(y-p[5]) - + p[3]*(y-p[5])*(y-p[5])) + func = p[0] * np.exp( + -( + p[1] * (x - p[4]) * (x - p[4]) + + p[2] * (x - p[4]) * (y - p[5]) + + p[3] * (y - p[5]) * (y - p[5]) ) + ) return func.flatten() diff --git a/hexrd/laue/instrument/__init__.py b/hexrd/laue/instrument/__init__.py new file mode 100644 index 000000000..b5414013c --- /dev/null +++ b/hexrd/laue/instrument/__init__.py @@ -0,0 +1,13 @@ +from .hedm_instrument import ( + calc_angles_from_beam_vec, + calc_beam_vec, + centers_of_edge_vec, + GenerateEtaOmeMaps, + GrainDataWriter, + HEDMInstrument, + max_tth, + switch_xray_source, + unwrap_dict_to_h5, + unwrap_h5_to_dict, +) +from .detector import Detector diff --git a/hexrd/laue/instrument/detector.py b/hexrd/laue/instrument/detector.py index adf6ef82f..858fa21d7 100644 --- a/hexrd/laue/instrument/detector.py +++ b/hexrd/laue/instrument/detector.py @@ -3,7 +3,11 @@ import os from typing import Optional -from hexrd.core.instrument.constants import COATING_DEFAULT, FILTER_DEFAULTS, PHOSPHOR_DEFAULT +from hexrd.core.instrument.constants import ( 
+ COATING_DEFAULT, + FILTER_DEFAULTS, + PHOSPHOR_DEFAULT, +) from hexrd.core.instrument.physics_package import AbstractPhysicsPackage import numpy as np import numba @@ -11,18 +15,30 @@ from hexrd.core import constants as ct from hexrd.core import distortion as distortion_pkg from hexrd.core import matrixutil as mutil + +# TODO: Resolve extra-core-dependency from hexrd.hedm import xrdutil from hexrd.core.rotations import mapAngle -from hexrd.laue.material import crystallography -from hexrd.laue.material.crystallography import PlaneData +from hexrd.core.material import crystallography +from hexrd.core.material.crystallography import PlaneData -from hexrd.core.transforms.xfcapi import xy_to_gvec, gvec_to_xy, make_beam_rmat, make_rmat_of_expmap, oscill_angles_of_hkls, angles_to_dvec +from hexrd.core.transforms.xfcapi import ( + xy_to_gvec, + gvec_to_xy, + make_beam_rmat, + make_rmat_of_expmap, + oscill_angles_of_hkls, + angles_to_dvec, +) from hexrd.core.utils.decorators import memoize from hexrd.core.gridutil import cellIndices from hexrd.core.instrument import detector_coatings -from hexrd.core.material.utils import calculate_linear_absorption_length, calculate_incoherent_scattering +from hexrd.core.material.utils import ( + calculate_linear_absorption_length, + calculate_incoherent_scattering, +) distortion_registry = distortion_pkg.Registry() @@ -279,7 +295,8 @@ def __init__( if detector_filter is None: detector_filter = detector_coatings.Filter( - **FILTER_DEFAULTS.TARDIS) + **FILTER_DEFAULTS.TARDIS + ) self.filter = detector_filter if detector_coating is None: @@ -530,8 +547,9 @@ def pixel_coords(self): # METHODS # ========================================================================= - def pixel_Q(self, energy: np.floating, - origin: np.ndarray = ct.zeros_3) -> np.ndarray: + def pixel_Q( + self, energy: np.floating, origin: np.ndarray = ct.zeros_3 + ) -> np.ndarray: '''get the equivalent momentum transfer for the angles. @@ -550,7 +568,7 @@ def pixel_Q(self, energy: np.floating, ''' lam = ct.keVToAngstrom(energy) tth, _ = self.pixel_angles(origin=origin) - return 4.*np.pi*np.sin(tth*0.5)/lam + return 4.0 * np.pi * np.sin(tth * 0.5) / lam def pixel_compton_energy_loss( self, @@ -577,9 +595,9 @@ def pixel_compton_energy_loss( ''' energy = np.asarray(energy) tth, _ = self.pixel_angles() - ang_fact = (1 - np.cos(tth)) - beta = energy/ct.cRestmasskeV - return energy/(1 + beta*ang_fact) + ang_fact = 1 - np.cos(tth) + beta = energy / ct.cRestmasskeV + return energy / (1 + beta * ang_fact) def pixel_compton_attenuation_length( self, @@ -628,8 +646,7 @@ def compute_compton_scattering_intensity( physics_package: AbstractPhysicsPackage, origin: np.array = ct.zeros_3, ) -> tuple[np.ndarray, np.ndarray, np.ndarray]: - - ''' compute the theoretical compton scattering + '''compute the theoretical compton scattering signal on the detector. 
this value is corrected for the transmission of compton scattered photons and normalized before being subtracted from the @@ -652,18 +669,20 @@ def compute_compton_scattering_intensity( q = self.pixel_Q(energy) inc_s = calculate_incoherent_scattering( - physics_package.sample_material, - q.flatten()).reshape(self.shape) + physics_package.sample_material, q.flatten() + ).reshape(self.shape) inc_w = calculate_incoherent_scattering( - physics_package.window_material, - q.flatten()).reshape(self.shape) + physics_package.window_material, q.flatten() + ).reshape(self.shape) t_s = self.calc_compton_physics_package_transmission( - energy, rMat_s, physics_package) + energy, rMat_s, physics_package + ) t_w = self.calc_compton_window_transmission( - energy, rMat_s, physics_package) + energy, rMat_s, physics_package + ) return inc_s * t_s + inc_w * t_w, t_s, t_w @@ -1087,9 +1106,14 @@ def interpolate_nearest(self, xy, img, pad_with_nans=True): int_xy[on_panel] = int_vals return int_xy - def interpolate_bilinear(self, xy, img, pad_with_nans=True, - clip_to_panel=True, - on_panel: Optional[np.ndarray] = None): + def interpolate_bilinear( + self, + xy, + img, + pad_with_nans=True, + clip_to_panel=True, + on_panel: Optional[np.ndarray] = None, + ): """ Interpolate an image array at the specified cartesian points. @@ -1766,19 +1790,23 @@ def increase_memoization_sizes(funcs, min_size): if cache_info['maxsize'] < min_size: f.set_cache_maxsize(min_size) - def calc_physics_package_transmission(self, energy: np.floating, - rMat_s: np.array, - physics_package: AbstractPhysicsPackage) -> np.float64: + def calc_physics_package_transmission( + self, + energy: np.floating, + rMat_s: np.array, + physics_package: AbstractPhysicsPackage, + ) -> np.float64: """get the transmission from the physics package need to consider HED and HEDM samples separately """ bvec = self.bvec - sample_normal = np.dot(rMat_s, [0., 0., np.sign(bvec[2])]) - seca = 1./np.dot(bvec, sample_normal) + sample_normal = np.dot(rMat_s, [0.0, 0.0, np.sign(bvec[2])]) + seca = 1.0 / np.dot(bvec, sample_normal) tth, eta = self.pixel_angles() - angs = np.vstack((tth.flatten(), eta.flatten(), - np.zeros(tth.flatten().shape))).T + angs = np.vstack( + (tth.flatten(), eta.flatten(), np.zeros(tth.flatten().shape)) + ).T dvecs = angles_to_dvec(angs, beam_vec=bvec) @@ -1791,17 +1819,17 @@ def calc_physics_package_transmission(self, energy: np.floating, cosb < 0, np.isclose( cosb, - 0., - atol=5E-2, - ) + 0.0, + atol=5e-2, + ), ) cosb[mask] = np.nan - secb = 1./cosb.reshape(self.shape) + secb = 1.0 / cosb.reshape(self.shape) T_sample = self.calc_transmission_sample( - seca, secb, energy, physics_package) - T_window = self.calc_transmission_window( - secb, energy, physics_package) + seca, secb, energy, physics_package + ) + T_window = self.calc_transmission_window(secb, energy, physics_package) transmission_physics_package = T_sample * T_window return transmission_physics_package @@ -1818,12 +1846,13 @@ def calc_compton_physics_package_transmission( routine than elastically scattered absorption.
''' bvec = self.bvec - sample_normal = np.dot(rMat_s, [0., 0., np.sign(bvec[2])]) - seca = 1./np.dot(bvec, sample_normal) + sample_normal = np.dot(rMat_s, [0.0, 0.0, np.sign(bvec[2])]) + seca = 1.0 / np.dot(bvec, sample_normal) tth, eta = self.pixel_angles() - angs = np.vstack((tth.flatten(), eta.flatten(), - np.zeros(tth.flatten().shape))).T + angs = np.vstack( + (tth.flatten(), eta.flatten(), np.zeros(tth.flatten().shape)) + ).T dvecs = angles_to_dvec(angs, beam_vec=bvec) @@ -1836,18 +1865,19 @@ def calc_compton_physics_package_transmission( cosb < 0, np.isclose( cosb, - 0., - atol=5E-2, - ) + 0.0, + atol=5e-2, + ), ) cosb[mask] = np.nan - secb = 1./cosb.reshape(self.shape) + secb = 1.0 / cosb.reshape(self.shape) T_sample = self.calc_compton_transmission( - seca, secb, energy, - physics_package, 'sample') + seca, secb, energy, physics_package, 'sample' + ) T_window = self.calc_compton_transmission_window( - secb, energy, physics_package) + secb, energy, physics_package + ) return T_sample * T_window @@ -1864,12 +1894,13 @@ def calc_compton_window_transmission( elastically scattered absorption. ''' bvec = self.bvec - sample_normal = np.dot(rMat_s, [0., 0., np.sign(bvec[2])]) - seca = 1./np.dot(bvec, sample_normal) + sample_normal = np.dot(rMat_s, [0.0, 0.0, np.sign(bvec[2])]) + seca = 1.0 / np.dot(bvec, sample_normal) tth, eta = self.pixel_angles() - angs = np.vstack((tth.flatten(), eta.flatten(), - np.zeros(tth.flatten().shape))).T + angs = np.vstack( + (tth.flatten(), eta.flatten(), np.zeros(tth.flatten().shape)) + ).T dvecs = angles_to_dvec(angs, beam_vec=bvec) @@ -1882,45 +1913,54 @@ def calc_compton_window_transmission( cosb < 0, np.isclose( cosb, - 0., - atol=5E-2, - ) + 0.0, + atol=5e-2, + ), ) cosb[mask] = np.nan - secb = 1./cosb.reshape(self.shape) + secb = 1.0 / cosb.reshape(self.shape) T_window = self.calc_compton_transmission( - seca, secb, energy, - physics_package, 'window') + seca, secb, energy, physics_package, 'window' + ) T_sample = self.calc_compton_transmission_sample( - seca, energy, physics_package) + seca, energy, physics_package + ) return T_sample * T_window - def calc_transmission_sample(self, seca: np.array, - secb: np.array, energy: np.floating, - physics_package: AbstractPhysicsPackage) -> np.array: + def calc_transmission_sample( + self, + seca: np.array, + secb: np.array, + energy: np.floating, + physics_package: AbstractPhysicsPackage, + ) -> np.array: thickness_s = physics_package.sample_thickness # in microns if np.isclose(thickness_s, 0): return np.ones(self.shape) # in microns^-1 - mu_s = 1./physics_package.sample_absorption_length(energy) - x = (mu_s*thickness_s) - pre = 1./x/(secb - seca) - num = np.exp(-x*seca) - np.exp(-x*secb) + mu_s = 1.0 / physics_package.sample_absorption_length(energy) + x = mu_s * thickness_s + pre = 1.0 / x / (secb - seca) + num = np.exp(-x * seca) - np.exp(-x * secb) return pre * num - def calc_transmission_window(self, secb: np.array, energy: np.floating, - physics_package: AbstractPhysicsPackage) -> np.array: + def calc_transmission_window( + self, + secb: np.array, + energy: np.floating, + physics_package: AbstractPhysicsPackage, + ) -> np.array: material_w = physics_package.window_material thickness_w = physics_package.window_thickness # in microns if material_w is None or np.isclose(thickness_w, 0): return np.ones(self.shape) # in microns^-1 - mu_w = 1./physics_package.window_absorption_length(energy) - return np.exp(-thickness_w*mu_w*secb) + mu_w = 1.0 / physics_package.window_absorption_length(energy) + return 
np.exp(-thickness_w * mu_w * secb) def calc_compton_transmission( self, @@ -1935,9 +1975,11 @@ def calc_compton_transmission( formula = physics_package.sample_material density = physics_package.sample_density thickness = physics_package.sample_thickness - mu = 1./physics_package.sample_absorption_length(energy) - mu_prime = 1. / self.pixel_compton_attenuation_length( - energy, density, formula, + mu = 1.0 / physics_package.sample_absorption_length(energy) + mu_prime = 1.0 / self.pixel_compton_attenuation_length( + energy, + density, + formula, ) elif pp_layer == 'window': formula = physics_package.window_material @@ -1946,17 +1988,18 @@ def calc_compton_transmission( density = physics_package.window_density thickness = physics_package.window_thickness - mu = 1./physics_package.sample_absorption_length(energy) - mu_prime = 1./self.pixel_compton_attenuation_length( - energy, density, formula) + mu = 1.0 / physics_package.sample_absorption_length(energy) + mu_prime = 1.0 / self.pixel_compton_attenuation_length( + energy, density, formula + ) if thickness <= 0: return np.ones(self.shape) - x1 = mu*thickness*seca - x2 = mu_prime*thickness*secb - num = (np.exp(-x1) - np.exp(-x2)) - return -num/(x1 - x2) + x1 = mu * thickness * seca + x2 = mu_prime * thickness * secb + num = np.exp(-x1) - np.exp(-x2) + return -num / (x1 - x2) def calc_compton_transmission_sample( self, @@ -1966,9 +2009,8 @@ def calc_compton_transmission_sample( ) -> np.ndarray: thickness_s = physics_package.sample_thickness # in microns - mu_s = 1./physics_package.sample_absorption_length( - energy) - return np.exp(-mu_s*thickness_s*seca) + mu_s = 1.0 / physics_package.sample_absorption_length(energy) + return np.exp(-mu_s * thickness_s * seca) def calc_compton_transmission_window( self, @@ -1980,60 +2022,71 @@ def calc_compton_transmission_window( if formula is None: return np.ones(self.shape) - density = physics_package.window_density # in g/cc + density = physics_package.window_density # in g/cc thickness_w = physics_package.window_thickness # in microns - mu_w_prime = 1./self.pixel_compton_attenuation_length( - energy, density, formula) - return np.exp(-mu_w_prime*thickness_w*secb) - - def calc_effective_pinhole_area(self, physics_package: AbstractPhysicsPackage) -> np.array: - """get the effective pinhole area correction - """ - if (np.isclose(physics_package.pinhole_diameter, 0) - or np.isclose(physics_package.pinhole_thickness, 0)): + mu_w_prime = 1.0 / self.pixel_compton_attenuation_length( + energy, density, formula + ) + return np.exp(-mu_w_prime * thickness_w * secb) + + def calc_effective_pinhole_area( + self, physics_package: AbstractPhysicsPackage + ) -> np.array: + """get the effective pinhole area correction""" + if np.isclose(physics_package.pinhole_diameter, 0) or np.isclose( + physics_package.pinhole_thickness, 0 + ): return np.ones(self.shape) - hod = (physics_package.pinhole_thickness / - physics_package.pinhole_diameter) + hod = ( + physics_package.pinhole_thickness + / physics_package.pinhole_diameter + ) bvec = self.bvec tth, eta = self.pixel_angles() - angs = np.vstack((tth.flatten(), eta.flatten(), - np.zeros(tth.flatten().shape))).T + angs = np.vstack( + (tth.flatten(), eta.flatten(), np.zeros(tth.flatten().shape)) + ).T dvecs = angles_to_dvec(angs, beam_vec=bvec) cth = -dvecs[:, 2].reshape(self.shape) tanth = np.tan(np.arccos(cth)) - f = hod*tanth - f[np.abs(f) > 1.] 
= np.nan + f = hod * tanth + f[np.abs(f) > 1.0] = np.nan asinf = np.arcsin(f) return 2 / np.pi * cth * (np.pi / 2 - asinf - f * np.cos(asinf)) - def calc_transmission_generic(self, - secb: np.array, - thickness: np.floating, - absorption_length: np.floating) -> np.array: + def calc_transmission_generic( + self, + secb: np.array, + thickness: np.floating, + absorption_length: np.floating, + ) -> np.array: if np.isclose(thickness, 0): return np.ones(self.shape) - mu = 1./absorption_length # in microns^-1 - return np.exp(-thickness*mu*secb) + mu = 1.0 / absorption_length # in microns^-1 + return np.exp(-thickness * mu * secb) - def calc_transmission_phosphor(self, - secb: np.array, - thickness: np.floating, - readout_length: np.floating, - absorption_length: np.floating, - energy: np.floating, - pre_U0: np.floating) -> np.array: + def calc_transmission_phosphor( + self, + secb: np.array, + thickness: np.floating, + readout_length: np.floating, + absorption_length: np.floating, + energy: np.floating, + pre_U0: np.floating, + ) -> np.array: if np.isclose(thickness, 0): return np.ones(self.shape) - f1 = absorption_length*thickness - f2 = absorption_length*readout_length - arg = (secb + 1/f2) - return pre_U0 * energy*((1.0 - np.exp(-f1*arg))/arg) + f1 = absorption_length * thickness + f2 = absorption_length * readout_length + arg = secb + 1 / f2 + return pre_U0 * energy * ((1.0 - np.exp(-f1 * arg)) / arg) + # ============================================================================= # UTILITY METHODS diff --git a/hexrd/laue/instrument/hedm_instrument.py b/hexrd/laue/instrument/hedm_instrument.py index cefdd4ae3..44915c515 100644 --- a/hexrd/laue/instrument/hedm_instrument.py +++ b/hexrd/laue/instrument/hedm_instrument.py @@ -59,10 +59,17 @@ from hexrd.core.fitting.utils import fit_ring from hexrd.core.gridutil import make_tolerance_grid from hexrd.core import matrixutil as mutil -from hexrd.core.transforms.xfcapi import angles_to_gvec, gvec_to_xy, make_sample_rmat, make_rmat_of_expmap, unit_vector +from hexrd.core.transforms.xfcapi import ( + angles_to_gvec, + gvec_to_xy, + make_sample_rmat, + make_rmat_of_expmap, + unit_vector, +) + # TODO: Resolve extra-workflow dependency from hexrd.hedm import xrdutil -from hexrd.laue.material.crystallography import PlaneData +from hexrd.powder.material.crystallography import PlaneData from hexrd.core import constants as ct from hexrd.core.rotations import angleAxisOfRotMat, RotMatEuler, mapAngle from hexrd.core import distortion as distortion_pkg @@ -70,7 +77,6 @@ from hexrd.core.utils.hdf5 import unwrap_dict_to_h5, unwrap_h5_to_dict from hexrd.core.utils.yaml import NumpyToNativeDumper from hexrd.core.valunits import valWUnit -# TODO: Resolve extra-workflow-dependency from hexrd.powder.wppf import LeBail from hexrd.core.instrument.cylindrical_detector import CylindricalDetector @@ -79,14 +85,15 @@ from skimage.draw import polygon from skimage.util import random_noise -# TODO: Resolve extra-workflow-dependency from hexrd.powder.wppf import wppfsupport try: from fast_histogram import histogram1d + fast_histogram = True except ImportError: from numpy import histogram as histogram1d + fast_histogram = False logger = logging.getLogger() @@ -109,9 +116,9 @@ pixel_size_DFLT = (0.2, 0.2) tilt_params_DFLT = np.zeros(3) -t_vec_d_DFLT = np.r_[0., 0., -1000.] +t_vec_d_DFLT = np.r_[0.0, 0.0, -1000.0] -chi_DFLT = 0. 
+chi_DFLT = 0.0 t_vec_s_DFLT = np.zeros(3) multi_ims_key = ct.shared_ims_key @@ -125,8 +132,9 @@ # ============================================================================= -def generate_chunks(nrows, ncols, base_nrows, base_ncols, - row_gap=0, col_gap=0): +def generate_chunks( + nrows, ncols, base_nrows, base_ncols, row_gap=0, col_gap=0 +): """ Generate chunking data for regularly tiled composite detectors. @@ -158,18 +166,15 @@ def generate_chunks(nrows, ncols, base_nrows, base_ncols, [[row_start, row_stop], [col_start, col_stop]] """ - row_starts = np.array([i*(base_nrows + row_gap) for i in range(nrows)]) - col_starts = np.array([i*(base_ncols + col_gap) for i in range(ncols)]) + row_starts = np.array([i * (base_nrows + row_gap) for i in range(nrows)]) + col_starts = np.array([i * (base_ncols + col_gap) for i in range(ncols)]) rr = np.vstack([row_starts, row_starts + base_nrows]) cc = np.vstack([col_starts, col_starts + base_ncols]) rects = [] labels = [] for i in range(nrows): for j in range(ncols): - this_rect = np.array( - [[rr[0, i], rr[1, i]], - [cc[0, j], cc[1, j]]] - ) + this_rect = np.array([[rr[0, i], rr[1, i]], [cc[0, j], cc[1, j]]]) rects.append(this_rect) labels.append('%d_%d' % (i, j)) return rects, labels @@ -195,9 +200,11 @@ def chunk_instrument(instr, rects, labels, use_roi=False): """ icfg_dict = instr.write_config() - new_icfg_dict = dict(beam=icfg_dict['beam'], - oscillation_stage=icfg_dict['oscillation_stage'], - detectors={}) + new_icfg_dict = dict( + beam=icfg_dict['beam'], + oscillation_stage=icfg_dict['oscillation_stage'], + detectors={}, + ) for panel_id, panel in instr.detectors.items(): pcfg_dict = panel.config_dict(instr.chi, instr.tvec)['detector'] @@ -207,7 +214,7 @@ def chunk_instrument(instr, rects, labels, use_roi=False): row_col_dim = np.diff(rect) # (2, 1) shape = tuple(row_col_dim.flatten()) - center = (rect[:, 0].reshape(2, 1) + 0.5*row_col_dim) + center = rect[:, 0].reshape(2, 1) + 0.5 * row_col_dim sp_tvec = np.concatenate( [panel.pixelToCart(center.T).flatten(), np.zeros(1)] @@ -232,7 +239,7 @@ def chunk_instrument(instr, rects, labels, use_roi=False): if panel.panel_buffer is not None: if panel.panel_buffer.ndim == 2: # have a mask array! submask = panel.panel_buffer[ - rect[0, 0]:rect[0, 1], rect[1, 0]:rect[1, 1] + rect[0, 0] : rect[0, 1], rect[1, 0] : rect[1, 1] ] new_icfg_dict['detectors'][panel_name]['buffer'] = submask return new_icfg_dict @@ -276,9 +283,7 @@ def _parse_imgser_dict(imgser_dict, det_key, roi=None): images_in = imgser_dict[multi_ims_key] elif np.any(matched_det_keys): if sum(matched_det_keys) != 1: - raise RuntimeError( - f"multiple entries found for '{det_key}'" - ) + raise RuntimeError(f"multiple entries found for '{det_key}'") # use boolean array to index the proper key # !!! 
these should be in the same order img_keys = img_keys = np.asarray(list(imgser_dict.keys())) @@ -298,7 +303,12 @@ def _parse_imgser_dict(imgser_dict, det_key, roi=None): if isinstance(images_in, ims_classes): # input is an imageseries of some kind - ims = ProcessedImageSeries(images_in, [('rectangle', roi), ]) + ims = ProcessedImageSeries( + images_in, + [ + ('rectangle', roi), + ], + ) if isinstance(images_in, OmegaImageSeries): # if it was an OmegaImageSeries, must re-cast ims = OmegaImageSeries(ims) @@ -306,16 +316,16 @@ def _parse_imgser_dict(imgser_dict, det_key, roi=None): # 2- or 3-d array of images ndim = images_in.ndim if ndim == 2: - ims = images_in[roi[0][0]:roi[0][1], roi[1][0]:roi[1][1]] + ims = images_in[roi[0][0] : roi[0][1], roi[1][0] : roi[1][1]] elif ndim == 3: nrows = roi[0][1] - roi[0][0] ncols = roi[1][1] - roi[1][0] n_images = len(images_in) - ims = np.empty((n_images, nrows, ncols), - dtype=images_in.dtype) + ims = np.empty((n_images, nrows, ncols), dtype=images_in.dtype) for i, image in images_in: - ims[i, :, :] = \ - images_in[roi[0][0]:roi[0][1], roi[1][0]:roi[1][1]] + ims[i, :, :] = images_in[ + roi[0][0] : roi[0][1], roi[1][0] : roi[1][1] + ] else: raise RuntimeError( f"image input dim must be 2 or 3; you gave {ndim}" @@ -333,9 +343,8 @@ def calc_beam_vec(azim, pola): tht = np.radians(azim) phi = np.radians(pola) bv = np.r_[ - np.sin(phi)*np.cos(tht), - np.cos(phi), - np.sin(phi)*np.sin(tht)] + np.sin(phi) * np.cos(tht), np.cos(phi), np.sin(phi) * np.sin(tht) + ] return -bv @@ -346,9 +355,7 @@ def calc_angles_from_beam_vec(bvec): """ bvec = np.atleast_1d(bvec).flatten() nvec = unit_vector(-bvec) - azim = float( - np.degrees(np.arctan2(nvec[2], nvec[0])) - ) + azim = float(np.degrees(np.arctan2(nvec[2], nvec[0]))) pola = float(np.degrees(np.arccos(nvec[1]))) return azim, pola @@ -372,9 +379,9 @@ def angle_in_range(angle, ranges, ccw=True, units='degrees'): WARNING: always clockwise; assumes wedges are not overlapping """ - tau = 360. + tau = 360.0 if units.lower() == 'radians': - tau = 2*np.pi + tau = 2 * np.pi w = np.nan for i, wedge in enumerate(ranges): amin = wedge[0] @@ -406,7 +413,7 @@ def max_tth(instr): tth_max : float The maximum observable Bragg angle by the instrument in radians. """ - tth_max = 0. 
+ tth_max = 0.0 for det in instr.detectors.values(): ptth, peta = det.pixel_angles() tth_max = max(np.max(ptth), tth_max) @@ -438,10 +445,9 @@ def pixel_resolution(instr): ang_ps_full = [] for panel in instr.detectors.values(): angps = panel.angularPixelSize( - np.stack( - panel.pixel_coords, - axis=0 - ).reshape(2, np.cumprod(panel.shape)[-1]).T + np.stack(panel.pixel_coords, axis=0) + .reshape(2, np.cumprod(panel.shape)[-1]) + .T ) ang_ps_full.append(angps) max_tth = min(max_tth, np.min(angps[:, 0])) @@ -473,10 +479,9 @@ def max_resolution(instr): max_eta = np.inf for panel in instr.detectors.values(): angps = panel.angularPixelSize( - np.stack( - panel.pixel_coords, - axis=0 - ).reshape(2, np.cumprod(panel.shape)[-1]).T + np.stack(panel.pixel_coords, axis=0) + .reshape(2, np.cumprod(panel.shape)[-1]) + .T ) max_tth = min(max_tth, np.min(angps[:, 0])) max_eta = min(max_eta, np.min(angps[:, 1])) @@ -484,16 +489,16 @@ def max_resolution(instr): def _gaussian_dist(x, cen, fwhm): - sigm = fwhm/(2*np.sqrt(2*np.log(2))) - return np.exp(-0.5*(x - cen)**2/sigm**2) + sigm = fwhm / (2 * np.sqrt(2 * np.log(2))) + return np.exp(-0.5 * (x - cen) ** 2 / sigm**2) def _sigma_to_fwhm(sigm): - return sigm*ct.sigma_to_fwhm + return sigm * ct.sigma_to_fwhm def _fwhm_to_sigma(fwhm): - return fwhm/ct.sigma_to_fwhm + return fwhm / ct.sigma_to_fwhm # ============================================================================= @@ -509,12 +514,17 @@ class HEDMInstrument(object): * where should reference eta be defined? currently set to default config """ - def __init__(self, instrument_config=None, - image_series=None, eta_vector=None, - instrument_name=None, tilt_calibration_mapping=None, - max_workers=max_workers_DFLT, - physics_package=None, - active_beam_name: Optional[str] = None): + def __init__( + self, + instrument_config=None, + image_series=None, + eta_vector=None, + instrument_name=None, + tilt_calibration_mapping=None, + max_workers=max_workers_DFLT, + physics_package=None, + active_beam_name: Optional[str] = None, + ): self._id = instrument_name_DFLT self._active_beam_name = active_beam_name @@ -539,7 +549,8 @@ def __init__(self, instrument_config=None, # FIXME: must add cylindrical self._detectors = dict( panel_id_DFLT=PlanarDetector( - rows=nrows_DFLT, cols=ncols_DFLT, + rows=nrows_DFLT, + cols=ncols_DFLT, pixel_size=pixel_size_DFLT, tvec=t_vec_d_DFLT, tilt=tilt_params_DFLT, @@ -547,9 +558,11 @@ def __init__(self, instrument_config=None, xrs_dist=self.source_distance, evec=self._eta_vector, distortion=None, - roi=None, group=None, - max_workers=self.max_workers), - ) + roi=None, + group=None, + max_workers=self.max_workers, + ), + ) self._tvec = t_vec_s_DFLT self._chi = chi_DFLT @@ -576,10 +589,7 @@ def __init__(self, instrument_config=None, self.physics_package = instrument_config['physics_package'] xrs_config = instrument_config['beam'] - is_single_beam = ( - 'energy' in xrs_config and - 'vector' in xrs_config - ) + is_single_beam = 'energy' in xrs_config and 'vector' in xrs_config if is_single_beam: # Assume single beam. 
Load the same way as multibeam self._create_default_beam() @@ -636,7 +646,7 @@ def __init__(self, instrument_config=None, elif isinstance(det_buffer, list): panel_buffer = np.asarray(det_buffer) elif np.isscalar(det_buffer): - panel_buffer = det_buffer*np.ones(2) + panel_buffer = det_buffer * np.ones(2) else: raise RuntimeError( "panel buffer spec invalid for %s" % det_id @@ -713,9 +723,9 @@ def mean_detector_center(self) -> np.ndarray: def mean_group_center(self, group: str) -> np.ndarray: """Return the mean center for detectors belonging to a group""" - centers = np.array([ - x.tvec for x in self.detectors_in_group(group).values() - ]) + centers = np.array( + [x.tvec for x in self.detectors_in_group(group).values()] + ) return centers.sum(axis=0) / len(centers) @property @@ -749,10 +759,11 @@ def detector_parameters(self): pdict = {} for key, panel in self.detectors.items(): pdict[key] = panel.config_dict( - self.chi, self.tvec, + self.chi, + self.tvec, beam_energy=self.beam_energy, beam_vector=self.beam_vector, - style='hdf5' + style='hdf5', ) return pdict @@ -856,8 +867,9 @@ def beam_vector(self) -> np.ndarray: def beam_vector(self, x: np.ndarray): x = np.array(x).flatten() if len(x) == 3: - assert sum(x*x) > 1-ct.sqrt_epsf, \ - 'input must have length = 3 and have unit magnitude' + assert ( + sum(x * x) > 1 - ct.sqrt_epsf + ), 'input must have length = 3 and have unit magnitude' bvec = x elif len(x) == 2: bvec = calc_beam_vec(*x) @@ -874,8 +886,9 @@ def source_distance(self): @source_distance.setter def source_distance(self, x): - assert np.isscalar(x), \ - f"'source_distance' must be a scalar; you input '{x}'" + assert np.isscalar( + x + ), f"'source_distance' must be a scalar; you input '{x}'" self.active_beam['distance'] = x self.beam_dict_modified() @@ -886,8 +899,9 @@ def eta_vector(self): @eta_vector.setter def eta_vector(self, x): x = np.array(x).flatten() - assert len(x) == 3 and sum(x*x) > 1-ct.sqrt_epsf, \ - 'input must have length = 3 and have unit magnitude' + assert ( + len(x) == 3 and sum(x * x) > 1 - ct.sqrt_epsf + ), 'input must have length = 3 and have unit magnitude' self._eta_vector = x # ...maybe change dictionary item behavior for 3.x compatibility? for detector_id in self.detectors: @@ -899,10 +913,11 @@ def eta_vector(self, x): # ========================================================================= def write_config(self, file=None, style='yaml', calibration_dict={}): - """ WRITE OUT YAML FILE """ + """WRITE OUT YAML FILE""" # initialize output dictionary - assert style.lower() in ['yaml', 'hdf5'], \ + assert style.lower() in ['yaml', 'hdf5'], ( "style must be either 'yaml', or 'hdf5'; you gave '%s'" % style + ) par_dict = {} @@ -931,10 +946,7 @@ def write_config(self, file=None, style='yaml', calibration_dict={}): if calibration_dict: par_dict['calibration_crystal'] = calibration_dict - ostage = dict( - chi=self.chi, - translation=self.tvec.tolist() - ) + ostage = dict(chi=self.chi, translation=self.tvec.tolist()) par_dict['oscillation_stage'] = ostage det_dict = dict.fromkeys(self.detectors) @@ -942,10 +954,13 @@ def write_config(self, file=None, style='yaml', calibration_dict={}): # grab panel config # !!! don't need beam or tvec # !!! 
have vetted style - pdict = detector.config_dict(chi=self.chi, tvec=self.tvec, - beam_energy=self.beam_energy, - beam_vector=self.beam_vector, - style=style) + pdict = detector.config_dict( + chi=self.chi, + tvec=self.tvec, + beam_energy=self.beam_energy, + beam_vector=self.beam_vector, + style=style, + ) det_dict[det_name] = pdict['detector'] par_dict['detectors'] = det_dict @@ -955,6 +970,7 @@ def write_config(self, file=None, style='yaml', calibration_dict={}): with open(file, 'w') as f: yaml.dump(par_dict, stream=f, Dumper=NumpyToNativeDumper) else: + def _write_group(file): instr_grp = file.create_group('instrument') unwrap_dict_to_h5(instr_grp, par_dict, asattr=False) @@ -970,9 +986,15 @@ def _write_group(file): return par_dict - def extract_polar_maps(self, plane_data, imgser_dict, - active_hkls=None, threshold=None, - tth_tol=None, eta_tol=0.25): + def extract_polar_maps( + self, + plane_data, + imgser_dict, + active_hkls=None, + threshold=None, + tth_tol=None, + eta_tol=0.25, + ): """ Extract eta-omega maps from an imageseries. @@ -996,23 +1018,25 @@ def extract_polar_maps(self, plane_data, imgser_dict, # detectors, so calculate it once # !!! grab first panel panel = next(iter(self.detectors.values())) - pow_angs, pow_xys, tth_ranges, eta_idx, eta_edges = \ + pow_angs, pow_xys, tth_ranges, eta_idx, eta_edges = ( panel.make_powder_rings( - plane_data, merge_hkls=False, - delta_eta=eta_tol, full_output=True + plane_data, + merge_hkls=False, + delta_eta=eta_tol, + full_output=True, ) + ) if active_hkls is not None: - assert hasattr(active_hkls, '__len__'), \ - "active_hkls must be an iterable with __len__" + assert hasattr( + active_hkls, '__len__' + ), "active_hkls must be an iterable with __len__" # need to re-cast for element-wise operations active_hkls = np.array(active_hkls) # these are all active reflection unique hklIDs - active_hklIDs = plane_data.getHKLID( - plane_data.hkls, master=True - ) + active_hklIDs = plane_data.getHKLID(plane_data.hkls, master=True) # find indices idx = np.zeros_like(active_hkls, dtype=int) @@ -1069,9 +1093,14 @@ def extract_polar_maps(self, plane_data, imgser_dict, # Divide up the images among processes tasks = distribute_tasks(len(ims), self.max_workers) - func = partial(_run_histograms, ims=ims, tth_ranges=tth_ranges, - ring_maps=ring_maps, ring_params=ring_params, - threshold=threshold) + func = partial( + _run_histograms, + ims=ims, + tth_ranges=tth_ranges, + ring_maps=ring_maps, + ring_params=ring_params, + threshold=threshold, + ) max_workers = self.max_workers if max_workers == 1 or len(tasks) == 1: @@ -1089,12 +1118,21 @@ def extract_polar_maps(self, plane_data, imgser_dict, return ring_maps_panel, eta_edges - def extract_line_positions(self, plane_data, imgser_dict, - tth_tol=None, eta_tol=1., npdiv=2, - eta_centers=None, - collapse_eta=True, collapse_tth=False, - do_interpolation=True, do_fitting=False, - tth_distortion=None, fitting_kwargs=None): + def extract_line_positions( + self, + plane_data, + imgser_dict, + tth_tol=None, + eta_tol=1.0, + npdiv=2, + eta_centers=None, + collapse_eta=True, + collapse_tth=False, + do_interpolation=True, + do_fitting=False, + tth_distortion=None, + fitting_kwargs=None, + ): """ Perform annular interpolation on diffraction images. 
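For reference while reviewing the signature reflow above, a hypothetical call of extract_line_positions; instr, plane_data, and imgser_dict are assumed to be a configured HEDMInstrument, a PlaneData instance, and a detector-keyed dict of imageseries (none of these names come from the patch itself):

    line_data = instr.extract_line_positions(
        plane_data,
        imgser_dict,
        tth_tol=None,       # assumed to fall back to the plane_data defaults
        eta_tol=1.0,        # azimuthal bin size in degrees
        npdiv=2,            # subpixel oversampling
        collapse_eta=True,  # collapse each patch along eta
        do_fitting=False,
    )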
@@ -1169,8 +1207,12 @@ def extract_line_positions(self, plane_data, imgser_dict, # LOOP OVER DETECTORS # ===================================================================== logger.info("Interpolating ring data") - pbar_dets = partial(tqdm, total=self.num_panels, desc="Detector", - position=self.num_panels) + pbar_dets = partial( + tqdm, + total=self.num_panels, + desc="Detector", + position=self.num_panels, + ) # Split up the workers among the detectors max_workers_per_detector = max(1, self.max_workers // self.num_panels) @@ -1193,23 +1235,26 @@ def extract_line_positions(self, plane_data, imgser_dict, def make_instr_cfg(panel): return panel.config_dict( - chi=self.chi, tvec=self.tvec, + chi=self.chi, + tvec=self.tvec, beam_energy=self.beam_energy, beam_vector=self.beam_vector, - style='hdf5' + style='hdf5', ) images = [] for detector_id, panel in self.detectors.items(): - images.append(_parse_imgser_dict(imgser_dict, detector_id, - roi=panel.roi)) + images.append( + _parse_imgser_dict(imgser_dict, detector_id, roi=panel.roi) + ) panels = [self.detectors[k] for k in self.detectors] instr_cfgs = [make_instr_cfg(x) for x in panels] pbp_array = np.arange(self.num_panels) iter_args = zip(panels, instr_cfgs, images, pbp_array) - with ProcessPoolExecutor(mp_context=constants.mp_context, - max_workers=self.num_panels) as executor: + with ProcessPoolExecutor( + mp_context=constants.mp_context, max_workers=self.num_panels + ) as executor: results = list(pbar_dets(executor.map(func, iter_args))) panel_data = {} @@ -1218,12 +1263,9 @@ def make_instr_cfg(panel): return panel_data - def simulate_powder_pattern(self, - mat_list, - params=None, - bkgmethod=None, - origin=None, - noise=None): + def simulate_powder_pattern( + self, mat_list, params=None, bkgmethod=None, origin=None, noise=None + ): """ Generate powder diffraction iamges from specified materials. @@ -1262,8 +1304,7 @@ def simulate_powder_pattern(self, if origin is None: origin = self.tvec origin = np.asarray(origin).squeeze() - assert len(origin) == 3, \ - "origin must be a 3-element sequence" + assert len(origin) == 3, "origin must be a 3-element sequence" if bkgmethod is None: bkgmethod = {'chebyshev': 3} @@ -1303,7 +1344,7 @@ def simulate_powder_pattern(self, # find min and max tth over all panels tth_mi = np.inf - tth_ma = 0. + tth_ma = 0.0 ptth_dict = dict.fromkeys(self.detectors) for det_key, panel in self.detectors.items(): ptth, peta = panel.pixel_angles(origin=origin) @@ -1325,7 +1366,7 @@ def simulate_powder_pattern(self, ang_res = max_resolution(self) # !!! calc nsteps by oversampling - nsteps = int(np.ceil(2*(tth_ma - tth_mi)/np.degrees(ang_res[0]))) + nsteps = int(np.ceil(2 * (tth_ma - tth_mi) / np.degrees(ang_res[0]))) # evaulation vector for LeBail tth = np.linspace(tth_mi, tth_ma, nsteps) @@ -1334,7 +1375,7 @@ def simulate_powder_pattern(self, wavelength = [ valWUnit('lp', 'length', self.beam_wavelength, 'angstrom'), - 1. 
+ 1.0, ] ''' @@ -1347,23 +1388,25 @@ def simulate_powder_pattern(self, tth = mat.planeData.getTTh() - LP = (1 + np.cos(tth)**2) / \ - np.cos(0.5*tth)/np.sin(0.5*tth)**2 + LP = ( + (1 + np.cos(tth) ** 2) + / np.cos(0.5 * tth) + / np.sin(0.5 * tth) ** 2 + ) intensity[mat.name] = {} - intensity[mat.name]['synchrotron'] = \ + intensity[mat.name]['synchrotron'] = ( mat.planeData.structFact * LP * multiplicity + ) kwargs = { 'expt_spectrum': expt, 'params': params, 'phases': mat_list, - 'wavelength': { - 'synchrotron': wavelength - }, + 'wavelength': {'synchrotron': wavelength}, 'bkgmethod': bkgmethod, 'intensity_init': intensity, - 'peakshape': 'pvtch' + 'peakshape': 'pvtch', } self.WPPFclass = LeBail(**kwargs) @@ -1381,9 +1424,11 @@ def simulate_powder_pattern(self, for det_key, panel in self.detectors.items(): ptth = ptth_dict[det_key] - img = np.interp(np.degrees(ptth), - self.simulated_spectrum.x, - self.simulated_spectrum.y + self.background.y) + img = np.interp( + np.degrees(ptth), + self.simulated_spectrum.x, + self.simulated_spectrum.y + self.background.y, + ) if noise is None: img_dict[det_key] = img @@ -1394,13 +1439,11 @@ def simulate_powder_pattern(self, img /= prev_max if noise.lower() == 'poisson': - im_noise = random_noise(img, - mode='poisson', - clip=True) + im_noise = random_noise(img, mode='poisson', clip=True) mi = im_noise.min() ma = im_noise.max() if ma > mi: - im_noise = (im_noise - mi)/(ma - mi) + im_noise = (im_noise - mi) / (ma - mi) elif noise.lower() == 'gaussian': im_noise = random_noise(img, mode='gaussian', clip=True) @@ -1422,9 +1465,14 @@ def simulate_powder_pattern(self, return img_dict - def simulate_laue_pattern(self, crystal_data, - minEnergy=5., maxEnergy=35., - rmat_s=None, grain_params=None): + def simulate_laue_pattern( + self, + crystal_data, + minEnergy=5.0, + maxEnergy=35.0, + rmat_s=None, + grain_params=None, + ): """ Simulate Laue diffraction over the instrument. @@ -1454,17 +1502,28 @@ def simulate_laue_pattern(self, crystal_data, for det_key, panel in self.detectors.items(): results[det_key] = panel.simulate_laue_pattern( crystal_data, - minEnergy=minEnergy, maxEnergy=maxEnergy, - rmat_s=rmat_s, tvec_s=self.tvec, + minEnergy=minEnergy, + maxEnergy=maxEnergy, + rmat_s=rmat_s, + tvec_s=self.tvec, grain_params=grain_params, - beam_vec=self.beam_vector) + beam_vec=self.beam_vector, + ) return results - def simulate_rotation_series(self, plane_data, grain_param_list, - eta_ranges=[(-np.pi, np.pi), ], - ome_ranges=[(-np.pi, np.pi), ], - ome_period=(-np.pi, np.pi), - wavelength=None): + def simulate_rotation_series( + self, + plane_data, + grain_param_list, + eta_ranges=[ + (-np.pi, np.pi), + ], + ome_ranges=[ + (-np.pi, np.pi), + ], + ome_period=(-np.pi, np.pi), + wavelength=None, + ): """ Simulate a monochromatic rotation series over the instrument. 
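The LP term assembled above (just before the LeBail kwargs) is the standard Lorentz-polarization factor that scales structure factors and multiplicities into simulated ring intensities. A standalone restatement of the same arithmetic, handy for sanity-checking values:

import numpy as np

def lorentz_polarization(tth):
    """Lorentz-polarization factor as used in simulate_powder_pattern;
    tth is the full scattering angle 2-theta in radians."""
    return (1 + np.cos(tth) ** 2) / (np.cos(0.5 * tth) * np.sin(0.5 * tth) ** 2)

# diverges as tth -> 0 and falls off toward higher angles
print(lorentz_polarization(np.radians([10.0, 30.0, 60.0])))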
@@ -1493,24 +1552,39 @@ def simulate_rotation_series(self, plane_data, grain_param_list, results = dict.fromkeys(self.detectors) for det_key, panel in self.detectors.items(): results[det_key] = panel.simulate_rotation_series( - plane_data, grain_param_list, + plane_data, + grain_param_list, eta_ranges=eta_ranges, ome_ranges=ome_ranges, ome_period=ome_period, - chi=self.chi, tVec_s=self.tvec, - wavelength=wavelength) + chi=self.chi, + tVec_s=self.tvec, + wavelength=wavelength, + ) return results - def pull_spots(self, plane_data, grain_params, - imgser_dict, - tth_tol=0.25, eta_tol=1., ome_tol=1., - npdiv=2, threshold=10, - eta_ranges=[(-np.pi, np.pi), ], - ome_period=None, - dirname='results', filename=None, output_format='text', - return_spot_list=False, - quiet=True, check_only=False, - interp='nearest'): + def pull_spots( + self, + plane_data, + grain_params, + imgser_dict, + tth_tol=0.25, + eta_tol=1.0, + ome_tol=1.0, + npdiv=2, + threshold=10, + eta_ranges=[ + (-np.pi, np.pi), + ], + ome_period=None, + dirname='results', + filename=None, + output_format='text', + return_spot_list=False, + quiet=True, + check_only=False, + interp='nearest', + ): """ Exctract reflection info from a rotation series. @@ -1570,12 +1644,14 @@ def pull_spots(self, plane_data, grain_params, # WARNING: all imageseries AND all wedges within are assumed to have # the same omega values; put in a check that they are all the same??? oims0 = next(iter(imgser_dict.values())) - ome_ranges = [np.radians([i['ostart'], i['ostop']]) - for i in oims0.omegawedges.wedges] + ome_ranges = [ + np.radians([i['ostart'], i['ostop']]) + for i in oims0.omegawedges.wedges + ] if ome_period is None: ims = next(iter(imgser_dict.values())) ostart = ims.omega[0, 0] - ome_period = np.radians(ostart + np.r_[0., 360.]) + ome_period = np.radians(ostart + np.r_[0.0, 360.0]) # delta omega in DEGREES grabbed from first imageseries in the dict delta_ome = oims0.omega[0, 1] - oims0.omega[0, 0] @@ -1583,7 +1659,10 @@ def pull_spots(self, plane_data, grain_params, # make omega grid for frame expansion around reference frame # in DEGREES ndiv_ome, ome_del = make_tolerance_grid( - delta_ome, ome_tol, 1, adjust_window=True, + delta_ome, + ome_tol, + 1, + adjust_window=True, ) # generate structuring element for connected component labeling @@ -1594,24 +1673,37 @@ def pull_spots(self, plane_data, grain_params, # simulate rotation series sim_results = self.simulate_rotation_series( - plane_data, [grain_params, ], + plane_data, + [ + grain_params, + ], eta_ranges=eta_ranges, ome_ranges=ome_ranges, - ome_period=ome_period) + ome_period=ome_period, + ) # patch vertex generator (global for instrument) - tol_vec = 0.5*np.radians( - [-tth_tol, -eta_tol, - -tth_tol, eta_tol, - tth_tol, eta_tol, - tth_tol, -eta_tol]) + tol_vec = 0.5 * np.radians( + [ + -tth_tol, + -eta_tol, + -tth_tol, + eta_tol, + tth_tol, + eta_tol, + tth_tol, + -eta_tol, + ] + ) # prepare output if requested if filename is not None and output_format.lower() == 'hdf5': this_filename = os.path.join(dirname, filename) writer = GrainDataWriter_h5( os.path.join(dirname, filename), - self.write_config(), grain_params) + self.write_config(), + grain_params, + ) # ===================================================================== # LOOP OVER PANELS @@ -1623,28 +1715,25 @@ def pull_spots(self, plane_data, grain_params, for detector_id, panel in self.detectors.items(): # initialize text-based output writer if filename is not None and output_format.lower() == 'text': - output_dir = os.path.join( - 
dirname, detector_id - ) + output_dir = os.path.join(dirname, detector_id) os.makedirs(output_dir, exist_ok=True) - this_filename = os.path.join( - output_dir, filename - ) + this_filename = os.path.join(output_dir, filename) writer = PatchDataWriter(this_filename) # grab panel instr_cfg = panel.config_dict( - self.chi, self.tvec, + self.chi, + self.tvec, beam_energy=self.beam_energy, beam_vector=self.beam_vector, - style='hdf5' + style='hdf5', ) native_area = panel.pixel_area # pixel ref area # pull out the OmegaImageSeries for this panel from input dict - ome_imgser = _parse_imgser_dict(imgser_dict, - detector_id, - roi=panel.roi) + ome_imgser = _parse_imgser_dict( + imgser_dict, detector_id, roi=panel.roi + ) # extract simulation results sim_results_p = sim_results[detector_id] @@ -1660,19 +1749,24 @@ def pull_spots(self, plane_data, grain_params, # patch vertex array from sim nangs = len(ang_centers) patch_vertices = ( - np.tile(ang_centers[:, :2], (1, 4)) + - np.tile(tol_vec, (nangs, 1)) - ).reshape(4*nangs, 2) - ome_dupl = np.tile( - ang_centers[:, 2], (4, 1) - ).T.reshape(len(patch_vertices), 1) + np.tile(ang_centers[:, :2], (1, 4)) + + np.tile(tol_vec, (nangs, 1)) + ).reshape(4 * nangs, 2) + ome_dupl = np.tile(ang_centers[:, 2], (4, 1)).T.reshape( + len(patch_vertices), 1 + ) # find vertices that all fall on the panel det_xy, rmats_s, on_plane = xrdutil._project_on_detector_plane( np.hstack([patch_vertices, ome_dupl]), - panel.rmat, rMat_c, self.chi, - panel.tvec, tVec_c, self.tvec, - panel.distortion) + panel.rmat, + rMat_c, + self.chi, + panel.tvec, + tVec_c, + self.tvec, + panel.distortion, + ) _, on_panel = panel.clip_to_panel(det_xy, buffer_edges=True) # all vertices must be on... @@ -1703,7 +1797,9 @@ def pull_spots(self, plane_data, grain_params, if not quiet: msg = """ window for (%d %d %d) falls outside omega range - """ % tuple(hkls_p[i_pt, :]) + """ % tuple( + hkls_p[i_pt, :] + ) print(msg) continue else: @@ -1721,11 +1817,16 @@ def pull_spots(self, plane_data, grain_params, # make the tth,eta patches for interpolation patches = xrdutil.make_reflection_patches( instr_cfg, - ang_centers[:, :2], ang_pixel_size, + ang_centers[:, :2], + ang_pixel_size, omega=ang_centers[:, 2], - tth_tol=tth_tol, eta_tol=eta_tol, - rmat_c=rMat_c, tvec_c=tVec_c, - npdiv=npdiv, quiet=True) + tth_tol=tth_tol, + eta_tol=eta_tol, + rmat_c=rMat_c, + tvec_c=tVec_c, + npdiv=npdiv, + quiet=True, + ) # GRAND LOOP over reflections for this panel patch_output = [] @@ -1735,7 +1836,7 @@ def pull_spots(self, plane_data, grain_params, vtx_angs, vtx_xy, conn, areas, xy_eval, ijs = patch prows, pcols = areas.shape - nrm_fac = areas/float(native_area) + nrm_fac = areas / float(native_area) nrm_fac = nrm_fac / np.min(nrm_fac) # grab hkl info @@ -1749,8 +1850,9 @@ def pull_spots(self, plane_data, grain_params, delta_eta = eta_edges[1] - eta_edges[0] # need to reshape eval pts for interpolation - xy_eval = np.vstack([xy_eval[0].flatten(), - xy_eval[1].flatten()]).T + xy_eval = np.vstack( + [xy_eval[0].flatten(), xy_eval[1].flatten()] + ).T # the evaluation omegas; # expand about the central value using tol vector @@ -1765,7 +1867,9 @@ def pull_spots(self, plane_data, grain_params, if not quiet: msg = """ window for (%d%d%d) falls outside omega range - """ % tuple(hkl) + """ % tuple( + hkl + ) print(msg) continue else: @@ -1774,8 +1878,8 @@ def pull_spots(self, plane_data, grain_params, peak_id = next_invalid_peak_id sum_int = np.nan max_int = np.nan - meas_angs = np.nan*np.ones(3) - meas_xy = np.nan*np.ones(2) + 
meas_angs = np.nan * np.ones(3) + meas_xy = np.nan * np.ones(2) # quick check for intensity contains_signal = False @@ -1793,19 +1897,23 @@ def pull_spots(self, plane_data, grain_params, # initialize patch data array for intensities if interp.lower() == 'bilinear': patch_data = np.zeros( - (len(frame_indices), prows, pcols)) + (len(frame_indices), prows, pcols) + ) for i, i_frame in enumerate(frame_indices): - patch_data[i] = \ - panel.interpolate_bilinear( - xy_eval, - ome_imgser[i_frame], - pad_with_nans=False - ).reshape(prows, pcols) # * nrm_fac + patch_data[i] = panel.interpolate_bilinear( + xy_eval, + ome_imgser[i_frame], + pad_with_nans=False, + ).reshape( + prows, pcols + ) # * nrm_fac elif interp.lower() == 'nearest': patch_data = patch_data_raw # * nrm_fac else: - msg = "interpolation option " + \ - "'%s' not understood" + msg = ( + "interpolation option " + + "'%s' not understood" + ) raise RuntimeError(msg % interp) # now have interpolated patch data... @@ -1818,9 +1926,10 @@ def pull_spots(self, plane_data, grain_params, peak_id = iRefl props = regionprops(labels, patch_data) coms = np.vstack( - [x.weighted_centroid for x in props]) + [x.weighted_centroid for x in props] + ) if num_peaks > 1: - center = np.r_[patch_data.shape]*0.5 + center = np.r_[patch_data.shape] * 0.5 center_t = np.tile(center, (num_peaks, 1)) com_diff = coms - center_t closest_peak_idx = np.argmin( @@ -1831,15 +1940,17 @@ def pull_spots(self, plane_data, grain_params, coms = coms[closest_peak_idx] # meas_omes = \ # ome_edges[0] + (0.5 + coms[0])*delta_ome - meas_omes = \ - ome_eval[0] + coms[0]*delta_ome + meas_omes = ome_eval[0] + coms[0] * delta_ome meas_angs = np.hstack( - [tth_edges[0] + (0.5 + coms[2])*delta_tth, - eta_edges[0] + (0.5 + coms[1])*delta_eta, - mapAngle( - np.radians(meas_omes), ome_period - ) - ] + [ + tth_edges[0] + + (0.5 + coms[2]) * delta_tth, + eta_edges[0] + + (0.5 + coms[1]) * delta_eta, + mapAngle( + np.radians(meas_omes), ome_period + ), + ] ) # intensities @@ -1866,15 +1977,21 @@ def pull_spots(self, plane_data, grain_params, meas_angs, chi=self.chi, rmat_c=rMat_c, - beam_vec=self.beam_vector) + beam_vec=self.beam_vector, + ) rMat_s = make_sample_rmat( self.chi, meas_angs[2] ) meas_xy = gvec_to_xy( gvec_c, - panel.rmat, rMat_s, rMat_c, - panel.tvec, self.tvec, tVec_c, - beam_vec=self.beam_vector) + panel.rmat, + rMat_s, + rMat_c, + panel.tvec, + self.tvec, + tVec_c, + beam_vec=self.beam_vector, + ) if panel.distortion is not None: meas_xy = panel.distortion.apply_inverse( np.atleast_2d(meas_xy) @@ -1893,19 +2010,38 @@ def pull_spots(self, plane_data, grain_params, if filename is not None: if output_format.lower() == 'text': writer.dump_patch( - peak_id, hkl_id, hkl, sum_int, max_int, - ang_centers[i_pt], meas_angs, - xy_centers[i_pt], meas_xy) + peak_id, + hkl_id, + hkl, + sum_int, + max_int, + ang_centers[i_pt], + meas_angs, + xy_centers[i_pt], + meas_xy, + ) elif output_format.lower() == 'hdf5': xyc_arr = xy_eval.reshape( prows, pcols, 2 ).transpose(2, 0, 1) writer.dump_patch( - detector_id, iRefl, peak_id, hkl_id, hkl, - tth_edges, eta_edges, np.radians(ome_eval), - xyc_arr, ijs, frame_indices, patch_data, - ang_centers[i_pt], xy_centers[i_pt], - meas_angs, meas_xy) + detector_id, + iRefl, + peak_id, + hkl_id, + hkl, + tth_edges, + eta_edges, + np.radians(ome_eval), + xyc_arr, + ijs, + frame_indices, + patch_data, + ang_centers[i_pt], + xy_centers[i_pt], + meas_angs, + meas_xy, + ) if return_spot_list: # Full output @@ -1913,17 +2049,34 @@ def pull_spots(self, plane_data, 
grain_params, prows, pcols, 2 ).transpose(2, 0, 1) _patch_output = [ - detector_id, iRefl, peak_id, hkl_id, hkl, - tth_edges, eta_edges, np.radians(ome_eval), - xyc_arr, ijs, frame_indices, patch_data, - ang_centers[i_pt], xy_centers[i_pt], - meas_angs, meas_xy + detector_id, + iRefl, + peak_id, + hkl_id, + hkl, + tth_edges, + eta_edges, + np.radians(ome_eval), + xyc_arr, + ijs, + frame_indices, + patch_data, + ang_centers[i_pt], + xy_centers[i_pt], + meas_angs, + meas_xy, ] else: # Trimmed output _patch_output = [ - peak_id, hkl_id, hkl, sum_int, max_int, - ang_centers[i_pt], meas_angs, meas_xy + peak_id, + hkl_id, + hkl, + sum_int, + max_int, + ang_centers[i_pt], + meas_angs, + meas_xy, ] patch_output.append(_patch_output) iRefl += 1 @@ -1941,7 +2094,9 @@ def update_memoization_sizes(self): PlanarDetector.update_memoization_sizes(all_panels) CylindricalDetector.update_memoization_sizes(all_panels) - def calc_transmission(self, rMat_s: np.ndarray = None) -> dict[str, np.ndarray]: + def calc_transmission( + self, rMat_s: np.ndarray = None + ) -> dict[str, np.ndarray]: """calculate the transmission from the filter and polymer coating. the inverse of this number is the intensity correction that needs @@ -1955,26 +2110,31 @@ def calc_transmission(self, rMat_s: np.ndarray = None) -> dict[str, np.ndarray]: transmissions = {} for det_name, det in self.detectors.items(): transmission_filter, transmission_phosphor = ( - det.calc_filter_coating_transmission(energy)) + det.calc_filter_coating_transmission(energy) + ) transmission = transmission_filter * transmission_phosphor if self.physics_package is not None: transmission_physics_package = ( det.calc_physics_package_transmission( - energy, rMat_s, self.physics_package)) + energy, rMat_s, self.physics_package + ) + ) effective_pinhole_area = det.calc_effective_pinhole_area( - self.physics_package) + self.physics_package + ) transmission = ( - transmission * - transmission_physics_package * - effective_pinhole_area + transmission + * transmission_physics_package + * effective_pinhole_area ) transmissions[det_name] = transmission return transmissions + # ============================================================================= # UTILITIES # ============================================================================= @@ -1985,6 +2145,7 @@ class PatchDataWriter(object): def __init__(self, filename): self._delim = ' ' + # fmt: off header_items = ( '# ID', 'PID', 'H', 'K', 'L', @@ -1999,6 +2160,7 @@ def __init__(self, filename): self._delim.join(np.tile('{:<12}', 2)).format(*header_items[5:7]), self._delim.join(np.tile('{:<23}', 10)).format(*header_items[7:17]) ]) + # fmt: on if isinstance(filename, IOBase): self.fid = filename else: @@ -2011,30 +2173,34 @@ def __del__(self): def close(self): self.fid.close() - def dump_patch(self, peak_id, hkl_id, - hkl, spot_int, max_int, - pangs, mangs, pxy, mxy): + def dump_patch( + self, peak_id, hkl_id, hkl, spot_int, max_int, pangs, mangs, pxy, mxy + ): """ !!! 
maybe need to check that last four inputs are arrays """ if mangs is None: spot_int = np.nan max_int = np.nan - mangs = np.nan*np.ones(3) - mxy = np.nan*np.ones(2) - - res = [int(peak_id), int(hkl_id)] \ - + np.array(hkl, dtype=int).tolist() \ - + [spot_int, max_int] \ - + pangs.tolist() \ - + mangs.tolist() \ - + pxy.tolist() \ + mangs = np.nan * np.ones(3) + mxy = np.nan * np.ones(2) + + res = ( + [int(peak_id), int(hkl_id)] + + np.array(hkl, dtype=int).tolist() + + [spot_int, max_int] + + pangs.tolist() + + mangs.tolist() + + pxy.tolist() + mxy.tolist() + ) output_str = self._delim.join( - [self._delim.join(np.tile('{:<6d}', 5)).format(*res[:5]), - self._delim.join(np.tile('{:<12e}', 2)).format(*res[5:7]), - self._delim.join(np.tile('{:<23.16e}', 10)).format(*res[7:])] + [ + self._delim.join(np.tile('{:<6d}', 5)).format(*res[:5]), + self._delim.join(np.tile('{:<12e}', 2)).format(*res[5:7]), + self._delim.join(np.tile('{:<23.16e}', 10)).format(*res[7:]), + ] ) print(output_str, file=self.fid) return output_str @@ -2050,20 +2216,23 @@ def __init__(self, filename=None, array=None): """ if filename is None and array is None: raise RuntimeError( - 'GrainDataWriter must be specified with filename or array') + 'GrainDataWriter must be specified with filename or array' + ) self.array = None self.fid = None # array supersedes filename if array is not None: - assert array.shape[1] == 21, \ - f'grain data table must have 21 columns not {array.shape[21]}' + assert ( + array.shape[1] == 21 + ), f'grain data table must have 21 columns not {array.shape[21]}' self.array = array self._array_row = 0 return self._delim = ' ' + # fmt: off header_items = ( '# grain ID', 'completeness', 'chi^2', 'exp_map_c[0]', 'exp_map_c[1]', 'exp_map_c[2]', @@ -2083,6 +2252,7 @@ def __init__(self, filename=None, array=None): np.tile('{:<23}', len(header_items) - 3) ).format(*header_items[3:])] ) + # fmt: on if isinstance(filename, IOBase): self.fid = filename else: @@ -2096,35 +2266,40 @@ def close(self): if self.fid is not None: self.fid.close() - def dump_grain(self, grain_id, completeness, chisq, - grain_params): - assert len(grain_params) == 12, \ - "len(grain_params) must be 12, not %d" % len(grain_params) + def dump_grain(self, grain_id, completeness, chisq, grain_params): + assert ( + len(grain_params) == 12 + ), "len(grain_params) must be 12, not %d" % len(grain_params) # extract strain emat = logm(np.linalg.inv(mutil.vecMVToSymm(grain_params[6:]))) evec = mutil.symmToVecMV(emat, scale=False) - res = [int(grain_id), completeness, chisq] \ - + grain_params.tolist() \ + res = ( + [int(grain_id), completeness, chisq] + + grain_params.tolist() + evec.tolist() + ) if self.array is not None: row = self._array_row - assert row < self.array.shape[0], \ - f'invalid row {row} in array table' + assert ( + row < self.array.shape[0] + ), f'invalid row {row} in array table' self.array[row] = res self._array_row += 1 return res # (else) format and write to file output_str = self._delim.join( - [self._delim.join( - ['{:<12d}', '{:<12f}', '{:<12e}'] - ).format(*res[:3]), - self._delim.join( - np.tile('{:<23.16e}', len(res) - 3) - ).format(*res[3:])] + [ + self._delim.join(['{:<12d}', '{:<12f}', '{:<12e}']).format( + *res[:3] + ), + self._delim.join(np.tile('{:<23.16e}', len(res) - 3)).format( + *res[3:] + ), + ] ) print(output_str, file=self.fid) return output_str @@ -2154,12 +2329,12 @@ def __init__(self, filename, instr_cfg, grain_params, use_attr=False): vinv_s = np.array(grain_params[6:]).flatten() vmat_s = 
np.linalg.inv(mutil.vecMVToSymm(vinv_s)) - if use_attr: # attribute version + if use_attr: # attribute version self.grain_grp.attrs.create('rmat_c', rmat_c) self.grain_grp.attrs.create('tvec_c', tvec_c) self.grain_grp.attrs.create('inv(V)_s', vinv_s) self.grain_grp.attrs.create('vmat_s', vmat_s) - else: # dataset version + else: # dataset version self.grain_grp.create_dataset('rmat_c', data=rmat_c) self.grain_grp.create_dataset('tvec_c', data=tvec_c) self.grain_grp.create_dataset('inv(V)_s', data=vinv_s) @@ -2178,11 +2353,26 @@ def __init__(self, filename, instr_cfg, grain_params, use_attr=False): def close(self): self.fid.close() - def dump_patch(self, panel_id, - i_refl, peak_id, hkl_id, hkl, - tth_edges, eta_edges, ome_centers, - xy_centers, ijs, frame_indices, - spot_data, pangs, pxy, mangs, mxy, gzip=1): + def dump_patch( + self, + panel_id, + i_refl, + peak_id, + hkl_id, + hkl, + tth_edges, + eta_edges, + ome_centers, + xy_centers, + ijs, + frame_indices, + spot_data, + pangs, + pxy, + mangs, + mxy, + gzip=1, + ): """ to be called inside loop over patches @@ -2198,10 +2388,10 @@ def dump_patch(self, panel_id, spot_grp.attrs.create('predicted_angles', pangs) spot_grp.attrs.create('predicted_xy', pxy) if mangs is None: - mangs = np.nan*np.ones(3) + mangs = np.nan * np.ones(3) spot_grp.attrs.create('measured_angles', mangs) if mxy is None: - mxy = np.nan*np.ones(3) + mxy = np.nan * np.ones(3) spot_grp.attrs.create('measured_xy', mxy) # get centers crds from edge arrays @@ -2220,27 +2410,55 @@ def dump_patch(self, panel_id, eta_crd = centers_of_edge_vec(eta_edges) shuffle_data = True # reduces size by 20% - spot_grp.create_dataset('tth_crd', data=tth_crd, - compression="gzip", compression_opts=gzip, - shuffle=shuffle_data) - spot_grp.create_dataset('eta_crd', data=eta_crd, - compression="gzip", compression_opts=gzip, - shuffle=shuffle_data) - spot_grp.create_dataset('ome_crd', data=ome_centers, - compression="gzip", compression_opts=gzip, - shuffle=shuffle_data) - spot_grp.create_dataset('xy_centers', data=xy_centers, - compression="gzip", compression_opts=gzip, - shuffle=shuffle_data) - spot_grp.create_dataset('ij_centers', data=ijs, - compression="gzip", compression_opts=gzip, - shuffle=shuffle_data) - spot_grp.create_dataset('frame_indices', data=fi, - compression="gzip", compression_opts=gzip, - shuffle=shuffle_data) - spot_grp.create_dataset('intensities', data=spot_data, - compression="gzip", compression_opts=gzip, - shuffle=shuffle_data) + spot_grp.create_dataset( + 'tth_crd', + data=tth_crd, + compression="gzip", + compression_opts=gzip, + shuffle=shuffle_data, + ) + spot_grp.create_dataset( + 'eta_crd', + data=eta_crd, + compression="gzip", + compression_opts=gzip, + shuffle=shuffle_data, + ) + spot_grp.create_dataset( + 'ome_crd', + data=ome_centers, + compression="gzip", + compression_opts=gzip, + shuffle=shuffle_data, + ) + spot_grp.create_dataset( + 'xy_centers', + data=xy_centers, + compression="gzip", + compression_opts=gzip, + shuffle=shuffle_data, + ) + spot_grp.create_dataset( + 'ij_centers', + data=ijs, + compression="gzip", + compression_opts=gzip, + shuffle=shuffle_data, + ) + spot_grp.create_dataset( + 'frame_indices', + data=fi, + compression="gzip", + compression_opts=gzip, + shuffle=shuffle_data, + ) + spot_grp.create_dataset( + 'intensities', + data=spot_data, + compression="gzip", + compression_opts=gzip, + shuffle=shuffle_data, + ) return @@ -2262,9 +2480,16 @@ class GenerateEtaOmeMaps(object): """ - def __init__(self, image_series_dict, instrument, 
plane_data, - active_hkls=None, eta_step=0.25, threshold=None, - ome_period=(0, 360)): + def __init__( + self, + image_series_dict, + instrument, + plane_data, + active_hkls=None, + eta_step=0.25, + threshold=None, + ome_period=(0, 360), + ): """ image_series must be OmegaImageSeries class instrument_params must be a dict (loaded from yaml spec) @@ -2278,13 +2503,12 @@ def __init__(self, image_series_dict, instrument, plane_data, # ???: change name of iHKLList? # ???: can we change the behavior of iHKLList? if active_hkls is None: - self._iHKLList = plane_data.getHKLID( - plane_data.hkls, master=True - ) + self._iHKLList = plane_data.getHKLID(plane_data.hkls, master=True) n_rings = len(self._iHKLList) else: - assert hasattr(active_hkls, '__len__'), \ - "active_hkls must be an iterable with __len__" + assert hasattr( + active_hkls, '__len__' + ), "active_hkls must be an iterable with __len__" self._iHKLList = active_hkls n_rings = len(active_hkls) @@ -2299,14 +2523,18 @@ def __init__(self, image_series_dict, instrument, plane_data, omegas_array = this_det_ims.metadata['omega'] # !!! DEGREES delta_ome = omegas_array[0][-1] - omegas_array[0][0] frame_mask = None - ome_period = omegas_array[0, 0] + np.r_[0., 360.] # !!! be careful + ome_period = omegas_array[0, 0] + np.r_[0.0, 360.0] # !!! be careful if this_det_ims.omegawedges.nwedges > 1: - delta_omes = [(i['ostop'] - i['ostart'])/i['nsteps'] - for i in this_det_ims.omegawedges.wedges] - check_wedges = mutil.uniqueVectors(np.atleast_2d(delta_omes), - tol=1e-6).squeeze() - assert check_wedges.size == 1, \ - "all wedges must have the same delta omega to 1e-6" + delta_omes = [ + (i['ostop'] - i['ostart']) / i['nsteps'] + for i in this_det_ims.omegawedges.wedges + ] + check_wedges = mutil.uniqueVectors( + np.atleast_2d(delta_omes), tol=1e-6 + ).squeeze() + assert ( + check_wedges.size == 1 + ), "all wedges must have the same delta omega to 1e-6" # grab representative delta ome # !!! assuming positive delta consistent with OmegaImageSeries delta_ome = delta_omes[0] @@ -2322,9 +2550,9 @@ def __init__(self, image_series_dict, instrument, plane_data, ) # compute total nsteps # FIXME: need check for roundoff badness - nsteps = int((ostop - ostart)/delta_ome) + nsteps = int((ostop - ostart) / delta_ome) ome_edges_full = np.linspace( - ostart, ostop, num=nsteps+1, endpoint=True + ostart, ostop, num=nsteps + 1, endpoint=True ) omegas_array = np.vstack( [ome_edges_full[:-1], ome_edges_full[1:]] @@ -2335,15 +2563,21 @@ def __init__(self, image_series_dict, instrument, plane_data, # !!! this array has -1 outside a wedge # !!! again assuming the valid frame order increases monotonically frame_mask = np.array( - [this_det_ims.omega_to_frame(ome)[0] != -1 - for ome in ome_centers] + [ + this_det_ims.omega_to_frame(ome)[0] != -1 + for ome in ome_centers + ] ) # ???: need to pass a threshold? eta_mapping, etas = instrument.extract_polar_maps( - plane_data, image_series_dict, - active_hkls=active_hkls, threshold=threshold, - tth_tol=None, eta_tol=eta_step) + plane_data, + image_series_dict, + active_hkls=active_hkls, + threshold=threshold, + tth_tol=None, + eta_tol=eta_step, + ) # for convenience grab map shape from first map_shape = next(iter(eta_mapping.values())).shape[1:] @@ -2370,7 +2604,7 @@ def __init__(self, image_series_dict, instrument, plane_data, if frame_mask is not None: # !!! 
must expand row dimension to include # skipped omegas - tmp = np.ones((len(frame_mask), map_shape[1]))*np.nan + tmp = np.ones((len(frame_mask), map_shape[1])) * np.nan tmp[frame_mask, :] = full_map full_map = tmp data_store.append(full_map) @@ -2379,11 +2613,11 @@ def __init__(self, image_series_dict, instrument, plane_data, # set required attributes self._omegas = mapAngle( np.radians(np.average(omegas_array, axis=1)), - np.radians(ome_period) + np.radians(ome_period), ) self._omeEdges = mapAngle( np.radians(np.r_[omegas_array[:, 0], omegas_array[-1, 1]]), - np.radians(ome_period) + np.radians(ome_period), ) # !!! must avoid the case where omeEdges[0] = omeEdges[-1] for the @@ -2397,7 +2631,7 @@ def __init__(self, image_series_dict, instrument, plane_data, # WARNING: unlinke the omegas in imageseries metadata, # these are in RADIANS and represent bin centers self._etaEdges = etas - self._etas = self._etaEdges[:-1] + 0.5*np.radians(eta_step) + self._etas = self._etaEdges[:-1] + 0.5 * np.radians(eta_step) @property def dataStore(self): @@ -2433,9 +2667,7 @@ def save(self, filename): def _generate_ring_params(tthr, ptth, peta, eta_edges, delta_eta): # mark pixels in the spec'd tth range - pixels_in_tthr = np.logical_and( - ptth >= tthr[0], ptth <= tthr[1] - ) + pixels_in_tthr = np.logical_and(ptth >= tthr[0], ptth <= tthr[1]) # catch case where ring isn't on detector if not np.any(pixels_in_tthr): @@ -2452,8 +2684,7 @@ def _generate_ring_params(tthr, ptth, peta, eta_edges, delta_eta): def run_fast_histogram(x, bins, weights=None): - return histogram1d(x, len(bins) - 1, (bins[0], bins[-1]), - weights=weights) + return histogram1d(x, len(bins) - 1, (bins[0], bins[-1]), weights=weights) def run_numpy_histogram(x, bins, weights=None): @@ -2471,7 +2702,7 @@ def _run_histograms(rows, ims, tth_ranges, ring_maps, ring_params, threshold): if threshold is not None: # !!! NaNs get preserved image = np.array(image) - image[image < threshold] = 0. 
+ image[image < threshold] = 0.0 for i_r, tthr in enumerate(tth_ranges): this_map = ring_maps[i_r] @@ -2488,12 +2719,21 @@ def _run_histograms(rows, ims, tth_ranges, ring_maps, ring_params, threshold): this_map[i_row, bins_on_detector] = result[bins_on_detector] -def _extract_detector_line_positions(iter_args, plane_data, tth_tol, - eta_tol, eta_centers, npdiv, - collapse_tth, collapse_eta, - do_interpolation, do_fitting, - fitting_kwargs, tth_distortion, - max_workers): +def _extract_detector_line_positions( + iter_args, + plane_data, + tth_tol, + eta_tol, + eta_centers, + npdiv, + collapse_tth, + collapse_eta, + do_interpolation, + do_fitting, + fitting_kwargs, + tth_distortion, + max_workers, +): panel, instr_cfg, images, pbp = iter_args if images.ndim == 2: @@ -2508,9 +2748,13 @@ def _extract_detector_line_positions(iter_args, plane_data, tth_tol, tth_distr_cls = tth_distortion[panel.name] pow_angs, pow_xys, tth_ranges = panel.make_powder_rings( - plane_data, merge_hkls=True, - delta_tth=tth_tol, delta_eta=eta_tol, - eta_list=eta_centers, tth_distortion=tth_distr_cls) + plane_data, + merge_hkls=True, + delta_tth=tth_tol, + delta_eta=eta_tol, + eta_list=eta_centers, + tth_distortion=tth_distr_cls, + ) tth_tols = np.degrees(np.hstack([i[1] - i[0] for i in tth_ranges])) @@ -2525,8 +2769,9 @@ def _extract_detector_line_positions(iter_args, plane_data, tth_tol, # ================================================================= # LOOP OVER RING SETS # ================================================================= - pbar_rings = partial(tqdm, total=len(pow_angs), desc="Ringset", - position=pbp) + pbar_rings = partial( + tqdm, total=len(pow_angs), desc="Ringset", position=pbp + ) kwargs = { 'instr_cfg': instr_cfg, @@ -2543,15 +2788,26 @@ def _extract_detector_line_positions(iter_args, plane_data, tth_tol, } func = partial(_extract_ring_line_positions, **kwargs) iter_arg = zip(pow_angs, pow_xys, tth_tols, tth0) - with ProcessPoolExecutor(mp_context=constants.mp_context, - max_workers=max_workers) as executor: + with ProcessPoolExecutor( + mp_context=constants.mp_context, max_workers=max_workers + ) as executor: return list(pbar_rings(executor.map(func, iter_arg))) -def _extract_ring_line_positions(iter_args, instr_cfg, panel, eta_tol, npdiv, - collapse_tth, collapse_eta, images, - do_interpolation, do_fitting, fitting_kwargs, - tth_distortion): +def _extract_ring_line_positions( + iter_args, + instr_cfg, + panel, + eta_tol, + npdiv, + collapse_tth, + collapse_eta, + images, + do_interpolation, + do_fitting, + fitting_kwargs, + tth_distortion, +): """ Extracts data for a single Debye-Scherrer ring . 
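run_fast_histogram above is a drop-in for numpy's histogram restricted to uniform bins, which is exactly the eta-binning case here; a quick equivalence check (random data and a made-up 1-degree bin layout, not taken from the patch):

import numpy as np
from fast_histogram import histogram1d

rng = np.random.default_rng(42)
x = rng.uniform(-np.pi, np.pi, 10_000)
w = rng.random(x.size)
bins = np.linspace(-np.pi, np.pi, 361)  # uniform eta bins

fast = histogram1d(x, len(bins) - 1, (bins[0], bins[-1]), weights=w)
ref, _ = np.histogram(x, bins=bins, weights=w)
assert np.allclose(fast, ref)

The uniform-bin restriction is what buys the speedup; numpy's histogram stays available through run_numpy_histogram for the general case.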
@@ -2599,16 +2855,22 @@ def _extract_ring_line_positions(iter_args, instr_cfg, panel, eta_tol, npdiv, nan_mask = ~np.logical_or(np.isnan(xys), np.isnan(angs)) nan_mask = np.logical_or.reduce(nan_mask, 1) if angs.ndim > 1 and xys.ndim > 1: - angs = angs[nan_mask,:] - xys = xys[nan_mask, :] + angs = angs[nan_mask, :] + xys = xys[nan_mask, :] n_images = len(images) native_area = panel.pixel_area # make the tth,eta patches for interpolation patches = xrdutil.make_reflection_patches( - instr_cfg, angs, panel.angularPixelSize(xys), - tth_tol=tth_tol, eta_tol=eta_tol, npdiv=npdiv, quiet=True) + instr_cfg, + angs, + panel.angularPixelSize(xys), + tth_tol=tth_tol, + eta_tol=eta_tol, + npdiv=npdiv, + quiet=True, + ) # loop over patches # FIXME: fix initialization @@ -2621,9 +2883,7 @@ def _extract_ring_line_positions(iter_args, instr_cfg, panel, eta_tol, npdiv, vtx_angs, vtx_xys, conn, areas, xys_eval, ijs = patch # need to reshape eval pts for interpolation - xy_eval = np.vstack([ - xys_eval[0].flatten(), - xys_eval[1].flatten()]).T + xy_eval = np.vstack([xys_eval[0].flatten(), xys_eval[1].flatten()]).T _, on_panel = panel.clip_to_panel(xy_eval) @@ -2631,25 +2891,20 @@ def _extract_ring_line_positions(iter_args, instr_cfg, panel, eta_tol, npdiv, continue if collapse_tth: - ang_data = (vtx_angs[0][0, [0, -1]], - vtx_angs[1][[0, -1], 0]) + ang_data = (vtx_angs[0][0, [0, -1]], vtx_angs[1][[0, -1], 0]) elif collapse_eta: # !!! yield the tth bin centers tth_centers = np.average( - np.vstack( - [vtx_angs[0][0, :-1], vtx_angs[0][0, 1:]] - ), - axis=0 + np.vstack([vtx_angs[0][0, :-1], vtx_angs[0][0, 1:]]), axis=0 ) - ang_data = (tth_centers, - angs[i_p][-1]) + ang_data = (tth_centers, angs[i_p][-1]) if do_fitting: fit_data = [] else: ang_data = vtx_angs prows, pcols = areas.shape - area_fac = areas/float(native_area) + area_fac = areas / float(native_area) # interpolate if not collapse_tth: @@ -2658,19 +2913,22 @@ def _extract_ring_line_positions(iter_args, instr_cfg, panel, eta_tol, npdiv, # catch interpolation type image = images[j_p] if do_interpolation: - p_img = panel.interpolate_bilinear( + p_img = ( + panel.interpolate_bilinear( xy_eval, image, - ).reshape(prows, pcols)*area_fac + ).reshape(prows, pcols) + * area_fac + ) else: - p_img = image[ijs[0], ijs[1]]*area_fac + p_img = image[ijs[0], ijs[1]] * area_fac # catch flat spectrum data, which will cause # fitting to fail. # ???: best here, or make fitting handle it? mxval = np.max(p_img) mnval = np.min(p_img) - if mxval == 0 or (1. 
- mnval/mxval) < 0.01: + if mxval == 0 or (1.0 - mnval / mxval) < 0.01: continue # catch collapsing options @@ -2687,11 +2945,16 @@ def _extract_ring_line_positions(iter_args, instr_cfg, panel, eta_tol, npdiv, tmp = tth_distortion.apply( panel.angles_to_cart( np.vstack( - [np.radians(this_tth0), - np.tile(ang_data[-1], len(this_tth0))] + [ + np.radians(this_tth0), + np.tile( + ang_data[-1], len(this_tth0) + ), + ] ).T ), - return_nominal=True) + return_nominal=True, + ) pk_centers = np.degrees(tmp[:, 0]) else: pk_centers = this_tth0 diff --git a/hexrd/laue/material/crystallography.py b/hexrd/laue/material/crystallography.py index 482c625d9..29e621972 100644 --- a/hexrd/laue/material/crystallography.py +++ b/hexrd/laue/material/crystallography.py @@ -38,7 +38,13 @@ from hexrd.core.deprecation import deprecated from hexrd.core import constants from hexrd.core.matrixutil import unitVector -from hexrd.core.rotations import rotMatOfExpMap, mapAngle, applySym, ltypeOfLaueGroup, quatOfLaueGroup +from hexrd.core.rotations import ( + rotMatOfExpMap, + mapAngle, + applySym, + ltypeOfLaueGroup, + quatOfLaueGroup, +) from hexrd.core.transforms import xfcapi from hexrd.core import valunits from hexrd.core.valunits import toFloat @@ -159,6 +165,7 @@ def processWavelength(arg: Union[valunits.valWUnit, float]) -> float: 'wavelength', 'length', constants.keVToAngstrom(arg), 'angstrom' ).getVal(dUnit) + def latticeParameters(lvec): """ Generates direct and reciprocal lattice vector components in a @@ -186,6 +193,7 @@ def latticeParameters(lvec): return [a, b, c, alfa, beta, gama] + def latticePlanes( hkls: np.ndarray, lparms: np.ndarray, @@ -562,6 +570,7 @@ def latticeVectors( 'rparms': rparms, } + def hexagonalIndicesFromRhombohedral(hkl): """ converts rhombohedral hkl to hexagonal indices @@ -909,7 +918,7 @@ def exclusions(self, new_exclusions: Optional[np.ndarray]) -> None: elif len(exclusions.shape) == 2: # treat exclusions as ranges of indices for r in exclusions: - excl[self.tThSort[r[0]:r[1]]] = True + excl[self.tThSort[r[0] : r[1]]] = True else: raise RuntimeError( f'Unclear behavior for shape {exclusions.shape}' @@ -1844,8 +1853,10 @@ def get_exclusions(self): def set_exclusions(self, exclusions): self.exclusions = exclusions - @deprecated(new_func="rotations.ltypeOfLaueGroup(self.laueGroup)", - removal_date="2025-08-01") + @deprecated( + new_func="rotations.ltypeOfLaueGroup(self.laueGroup)", + removal_date="2025-08-01", + ) def getLatticeType(self): return ltypeOfLaueGroup(self.laueGroup) diff --git a/hexrd/laue/xrdutil/utils.py b/hexrd/laue/xrdutil/utils.py index fbc929901..1528403ec 100644 --- a/hexrd/laue/xrdutil/utils.py +++ b/hexrd/laue/xrdutil/utils.py @@ -508,9 +508,8 @@ def _filter_hkls_eta_ome( angMask_eta = np.zeros(len(angles), dtype=bool) for etas in eta_range: angMask_eta = np.logical_or( - angMask_eta, xfcapi.validate_angle_ranges( - angles[:, 1], etas[0], etas[1] - ) + angMask_eta, + xfcapi.validate_angle_ranges(angles[:, 1], etas[0], etas[1]), ) ccw = True @@ -991,8 +990,13 @@ def simulateGVecs( # first find valid G-vectors angList = np.vstack( xfcapi.oscill_angles_of_hkls( - full_hkls[:, 1:], chi, rMat_c, bMat, wlen, v_inv=vInv_s, - beam_vec=beam_vector + full_hkls[:, 1:], + chi, + rMat_c, + bMat, + wlen, + v_inv=vInv_s, + beam_vec=beam_vector, ) ) allAngs, allHKLs = _filter_hkls_eta_ome( @@ -1008,8 +1012,15 @@ def simulateGVecs( else: # ??? preallocate for speed? 
det_xy, rMat_ss, _ = _project_on_detector_plane( - allAngs, rMat_d, rMat_c, chi, tVec_d, tVec_c, tVec_s, distortion, - beamVec=beam_vector + allAngs, + rMat_d, + rMat_c, + chi, + tVec_d, + tVec_c, + tVec_s, + distortion, + beamVec=beam_vector, ) on_panel = np.logical_and( @@ -1472,7 +1483,7 @@ def make_reflection_patches( def extract_detector_transformation( - detector_params: Union[dict[str, Any], np.ndarray] + detector_params: Union[dict[str, Any], np.ndarray], ) -> tuple[np.ndarray, np.ndarray, float, np.ndarray]: """ Construct arrays from detector parameters. diff --git a/hexrd/module_map.py b/hexrd/module_map.py index 4f1e92b01..d5efdbd7e 100644 --- a/hexrd/module_map.py +++ b/hexrd/module_map.py @@ -44,7 +44,7 @@ def path_to_module(path: Path) -> str: module_map: dict[str, tuple[str, Path]] = {} for old_path, new_paths in file_map.items(): - if old_path.suffix not in ("", ".py") or not "hexrd" in old_path.parts: + if old_path.suffix not in ("", ".py") or not "hexrd" in old_path.parts: continue old_module_path = path_to_module(old_path) # TODO: This just picks one. We should probably pick the right one? We should know the right one after @@ -94,15 +94,18 @@ def get(alias: str) -> ModuleAlias | str | None: return None - class ModuleSpecWithParent(importlib.machinery.ModuleSpec): - def __init__(self, name, loader, *, origin=None, parent=None, is_package=False): + def __init__( + self, name, loader, *, origin=None, parent=None, is_package=False + ): super().__init__(name, loader, origin=origin, is_package=is_package) self._parent = parent @property def parent(self): return self._parent + + class ModuleAliasFinder(importlib.abc.MetaPathFinder): def find_spec(self, fullname, path, target=None): if fullname in module_map: @@ -113,7 +116,6 @@ def find_spec(self, fullname, path, target=None): else: parent = mapped_module - # Need to set these to be the exact same module so that class comparison # works correctly if you are comparing classes that are imported one way with classes # that are imported the mapped way. 
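The finder in module_map.py hinges on the point spelled out in that comment: the old and new dotted paths must resolve to the same module object, or class-identity and isinstance checks across the two import styles break. A self-contained toy version of the same meta-path idiom; the package names are invented, and this is a sketch of the mechanism, not hexrd's actual finder:

import importlib
import importlib.abc
import importlib.machinery
import sys

ALIASES = {'oldpkg.util': 'newpkg.util'}  # hypothetical old -> new map

class _AliasLoader(importlib.abc.Loader):
    def __init__(self, new_name):
        self.new_name = new_name

    def create_module(self, spec):
        # Import (or fetch) the module under its new name and hand that
        # same object to the import machinery, which then registers it in
        # sys.modules under the old name as well.
        return importlib.import_module(self.new_name)

    def exec_module(self, module):
        pass  # already executed under its new name

class _AliasFinder(importlib.abc.MetaPathFinder):
    def find_spec(self, fullname, path, target=None):
        if fullname in ALIASES:
            return importlib.machinery.ModuleSpec(
                fullname, _AliasLoader(ALIASES[fullname])
            )
        return None

sys.meta_path.insert(0, _AliasFinder())
# after this, `import oldpkg.util` and `import newpkg.util`
# yield the identical module object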
@@ -138,12 +140,12 @@ def find_spec(self, fullname, path, target=None): spec = ModuleSpecWithParent( mapped_module, importlib.machinery.NamespaceLoader( - mapped_module, - list(mapped_fp.parts), - path_finder=importlib.machinery.PathFinder.find_spec, # type: ignore + mapped_module, + list(mapped_fp.parts), + path_finder=importlib.machinery.PathFinder.find_spec, # type: ignore ), parent=parent, - is_package=True + is_package=True, ) return spec return None diff --git a/hexrd/powder/fitting/calibration/instrument.py b/hexrd/powder/fitting/calibration/instrument.py index 790eee365..a9326ac20 100644 --- a/hexrd/powder/fitting/calibration/instrument.py +++ b/hexrd/powder/fitting/calibration/instrument.py @@ -4,8 +4,18 @@ import lmfit import numpy as np -from ....core.fitting.calibration.lmfit_param_handling import add_engineering_constraints, create_instr_params, DEFAULT_EULER_CONVENTION, update_instrument_from_params, validate_params_list -from ....core.fitting.calibration.relative_constraints import create_relative_constraints, RelativeConstraints, RelativeConstraintsType +from ....core.fitting.calibration.lmfit_param_handling import ( + add_engineering_constraints, + create_instr_params, + DEFAULT_EULER_CONVENTION, + update_instrument_from_params, + validate_params_list, +) +from ....core.fitting.calibration.relative_constraints import ( + create_relative_constraints, + RelativeConstraints, + RelativeConstraintsType, +) logger = logging.getLogger() logger.setLevel('INFO') @@ -16,9 +26,13 @@ def _normalized_ssqr(resd): class InstrumentCalibrator: - def __init__(self, *args, engineering_constraints=None, - euler_convention=DEFAULT_EULER_CONVENTION, - relative_constraints_type=RelativeConstraintsType.none): + def __init__( + self, + *args, + engineering_constraints=None, + euler_convention=DEFAULT_EULER_CONVENTION, + relative_constraints_type=RelativeConstraintsType.none, + ): """ Model for instrument calibration class as a function of @@ -38,17 +52,19 @@ def __init__(self, *args, engineering_constraints=None, assert len(args) > 0, "must have at least one calibrator" self.calibrators = args for calib in self.calibrators: - assert calib.instr is self.instr, \ - "all calibrators must refer to the same instrument" + assert ( + calib.instr is self.instr + ), "all calibrators must refer to the same instrument" self._engineering_constraints = engineering_constraints self._relative_constraints = create_relative_constraints( - relative_constraints_type, self.instr) + relative_constraints_type, self.instr + ) self.euler_convention = euler_convention self.params = self.make_lmfit_params() - self.fitter = lmfit.Minimizer(self.minimizer_function, - self.params, - nan_policy='omit') + self.fitter = lmfit.Minimizer( + self.minimizer_function, self.params, nan_policy='omit' + ) def make_lmfit_params(self): params = create_instr_params( @@ -124,10 +140,9 @@ def minimize(self, method='least_squares', odict=None): result = self.fitter.least_squares(self.params, **odict) else: - result = self.fitter.scalar_minimize(method=method, - params=self.params, - max_nfev=50000, - **odict) + result = self.fitter.scalar_minimize( + method=method, params=self.params, max_nfev=50000, **odict + ) return result @@ -167,7 +182,8 @@ def relative_constraints_type(self, v: Optional[RelativeConstraintsType]): current = getattr(self, '_relative_constraints', None) if current is None or current.type != v: self.relative_constraints = create_relative_constraints( - v, self.instr) + v, self.instr + ) @property def 
relative_constraints(self) -> RelativeConstraints: @@ -195,7 +211,7 @@ def run_calibration(self, odict): nrm_ssr_1 = _normalized_ssqr(resd1) - delta_r = 1. - nrm_ssr_1/nrm_ssr_0 + delta_r = 1.0 - nrm_ssr_1 / nrm_ssr_0 if delta_r > 0: logger.info('OPTIMIZATION SUCCESSFUL') diff --git a/hexrd/powder/fitting/calibration/powder.py b/hexrd/powder/fitting/calibration/powder.py index 6820433db..ed061be17 100644 --- a/hexrd/powder/fitting/calibration/powder.py +++ b/hexrd/powder/fitting/calibration/powder.py @@ -8,7 +8,10 @@ from hexrd.core.utils.hkl import hkl_to_str, str_to_hkl from ....core.fitting.calibration.calibrator import Calibrator -from ....core.fitting.calibration.lmfit_param_handling import create_material_params, update_material_from_params +from ....core.fitting.calibration.lmfit_param_handling import ( + create_material_params, + update_material_from_params, +) nfields_powder_data = 8 @@ -16,14 +19,26 @@ class PowderCalibrator(Calibrator): type = 'powder' - def __init__(self, instr, material, img_dict, default_refinements=None, - tth_tol=None, eta_tol=0.25, - fwhm_estimate=None, min_pk_sep=1e-3, min_ampl=0., - pktype='pvoigt', bgtype='linear', - tth_distortion=None, calibration_picks=None, - xray_source: Optional[str] = None): - assert list(instr.detectors.keys()) == list(img_dict.keys()), \ - "instrument and image dict must have the same keys" + def __init__( + self, + instr, + material, + img_dict, + default_refinements=None, + tth_tol=None, + eta_tol=0.25, + fwhm_estimate=None, + min_pk_sep=1e-3, + min_ampl=0.0, + pktype='pvoigt', + bgtype='linear', + tth_distortion=None, + calibration_picks=None, + xray_source: Optional[str] = None, + ): + assert list(instr.detectors.keys()) == list( + img_dict.keys() + ), "instrument and image dict must have the same keys" self.instr = instr self.material = material @@ -79,8 +94,9 @@ def _update_tth_distortion_panels(self): def create_lmfit_params(self, current_params): # There shouldn't be more than one calibrator for a given material, so # just assume we have a unique name... - params = create_material_params(self.material, - self.default_refinements) + params = create_material_params( + self.material, self.default_refinements + ) # If multiple powder calibrators were used for the same material (such # as in 2XRS), then don't add params again. 
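The minimize/run_calibration plumbing above is plain lmfit: build a Parameters set, wrap the residual in a Minimizer with nan_policy='omit', then dispatch to least_squares or scalar_minimize. A stripped-down sketch of that pattern with a toy residual (the linear model is invented; only the lmfit calls mirror the calibrator):

import lmfit
import numpy as np

def residual(params, x, y):
    # toy linear model y = a*x + b; the calibrators instead compare
    # measured and calculated diffraction positions
    return y - (params['a'].value * x + params['b'].value)

rng = np.random.default_rng(0)
x = np.linspace(0.0, 1.0, 50)
y = 3.0 * x + 1.0 + 0.01 * rng.standard_normal(x.size)

params = lmfit.Parameters()
params.add('a', value=1.0)
params.add('b', value=0.0)

fitter = lmfit.Minimizer(residual, params, fcn_args=(x, y), nan_policy='omit')
result = fitter.least_squares(params)
print(result.params['a'].value, result.params['b'].value)  # ~3.0, ~1.0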
@@ -110,11 +126,13 @@ def tth_tol(self, x): @property def spectrum_kwargs(self): - return dict(pktype=self.pktype, - bgtype=self.bgtype, - fwhm_init=self.fwhm_estimate, - min_ampl=self.min_ampl, - min_pk_sep=self.min_pk_sep) + return dict( + pktype=self.pktype, + bgtype=self.bgtype, + fwhm_init=self.fwhm_estimate, + min_ampl=self.min_ampl, + min_pk_sep=self.min_pk_sep, + ) @property def calibration_picks(self): @@ -152,7 +170,7 @@ def calibration_picks(self, v): self.data_dict = data_dict - def autopick_points(self, fit_tth_tol=5., int_cutoff=1e-4): + def autopick_points(self, fit_tth_tol=5.0, int_cutoff=1e-4): """ return the RHS for the instrument DOF and image dict @@ -169,7 +187,7 @@ def autopick_points(self, fit_tth_tol=5., int_cutoff=1e-4): with switch_xray_source(self.instr, self.xray_source): return self._autopick_points(fit_tth_tol, int_cutoff) - def _autopick_points(self, fit_tth_tol=5., int_cutoff=1e-4): + def _autopick_points(self, fit_tth_tol=5.0, int_cutoff=1e-4): # ideal tth dsp_ideal = np.atleast_1d(self.plane_data.getPlaneSpacings()) hkls_ref = self.plane_data.hkls.T @@ -244,13 +262,15 @@ def _autopick_points(self, fit_tth_tol=5., int_cutoff=1e-4): ) # cat results - output = np.hstack([ - xy_meas, - tth_meas.reshape(npeaks, 1), - this_hkl, - this_dsp0.reshape(npeaks, 1), - eta_ref_tile.reshape(npeaks, 1), - ]) + output = np.hstack( + [ + xy_meas, + tth_meas.reshape(npeaks, 1), + this_hkl, + this_dsp0.reshape(npeaks, 1), + eta_ref_tile.reshape(npeaks, 1), + ] + ) ret.append(output) if not ret: @@ -311,18 +331,16 @@ def _evaluate(self, output='residual'): # to (tth, eta) meas_xy = pdata[:, :2] updated_angles, _ = panel.cart_to_angles( - meas_xy, - tvec_s=self.instr.tvec, - apply_distortion=True + meas_xy, tvec_s=self.instr.tvec, apply_distortion=True ) # derive ideal tth positions from additional ring point info hkls = pdata[:, 3:6] gvecs = np.dot(hkls, bmat.T) - dsp0 = 1./np.sqrt(np.sum(gvecs*gvecs, axis=1)) + dsp0 = 1.0 / np.sqrt(np.sum(gvecs * gvecs, axis=1)) # updated reference Bragg angles - tth0 = 2.*np.arcsin(0.5*wlen/dsp0) + tth0 = 2.0 * np.arcsin(0.5 * wlen / dsp0) # !!! 
get eta from mapped markers rather than ref # eta0 = pdata[:, -1] @@ -346,23 +364,16 @@ def _evaluate(self, output='residual'): # meas_xy.flatten() - calc_xy.flatten() # ) retval = np.append( - retval, - updated_angles[:, 0].flatten() - tth0.flatten() + retval, updated_angles[:, 0].flatten() - tth0.flatten() ) elif output == 'model': calc_xy = panel.angles_to_cart( - tth_eta, - tvec_s=self.instr.tvec, - apply_distortion=True - ) - retval = np.append( - retval, - calc_xy.flatten() + tth_eta, tvec_s=self.instr.tvec, apply_distortion=True ) + retval = np.append(retval, calc_xy.flatten()) else: raise RuntimeError( - "unrecognized output flag '%s'" - % output + "unrecognized output flag '%s'" % output ) return retval diff --git a/hexrd/powder/fitting/calibration/structureless.py b/hexrd/powder/fitting/calibration/structureless.py index 6c5724c0c..a64ae70a9 100644 --- a/hexrd/powder/fitting/calibration/structureless.py +++ b/hexrd/powder/fitting/calibration/structureless.py @@ -6,8 +6,19 @@ from hexrd.core.instrument import switch_xray_source -from ....core.fitting.calibration.lmfit_param_handling import add_engineering_constraints, create_instr_params, create_tth_parameters, DEFAULT_EULER_CONVENTION, tth_parameter_prefixes, update_instrument_from_params -from ....core.fitting.calibration.relative_constraints import create_relative_constraints, RelativeConstraints, RelativeConstraintsType +from ....core.fitting.calibration.lmfit_param_handling import ( + add_engineering_constraints, + create_instr_params, + create_tth_parameters, + DEFAULT_EULER_CONVENTION, + tth_parameter_prefixes, + update_instrument_from_params, +) +from ....core.fitting.calibration.relative_constraints import ( + create_relative_constraints, + RelativeConstraints, + RelativeConstraintsType, +) class StructurelessCalibrator: @@ -29,20 +40,24 @@ class StructurelessCalibrator: 22.83 mm <= |IMAGE-PLATE-2 tvec[1]| + |IMAGE-PLATE-2 tvec[1]| <= 23.43 mm """ - def __init__(self, - instr, - data, - tth_distortion=None, - engineering_constraints=None, - relative_constraints_type=RelativeConstraintsType.none, - euler_convention=DEFAULT_EULER_CONVENTION): + + def __init__( + self, + instr, + data, + tth_distortion=None, + engineering_constraints=None, + relative_constraints_type=RelativeConstraintsType.none, + euler_convention=DEFAULT_EULER_CONVENTION, + ): self._instr = instr self._data = data self._tth_distortion = tth_distortion self._engineering_constraints = engineering_constraints self._relative_constraints = create_relative_constraints( - relative_constraints_type, self.instr) + relative_constraints_type, self.instr + ) self.euler_convention = euler_convention self._update_tth_distortion_panels() self.make_lmfit_params() @@ -80,10 +95,9 @@ def calc_residual(self, params): prefixes = tth_parameter_prefixes(self.instr) for xray_source in self.data: prefix = prefixes[xray_source] - for ii, (rng, corr_rng) in enumerate(zip( - meas_angles[xray_source], - tth_correction[xray_source] - )): + for ii, (rng, corr_rng) in enumerate( + zip(meas_angles[xray_source], tth_correction[xray_source]) + ): for det_name, panel in self.instr.detectors.items(): if rng[det_name] is None or rng[det_name].size == 0: continue @@ -98,13 +112,11 @@ def calc_residual(self, params): return np.hstack(residual) def set_minimizer(self): - self.fitter = lmfit.Minimizer(self.calc_residual, - self.params, - nan_policy='omit') + self.fitter = lmfit.Minimizer( + self.calc_residual, self.params, nan_policy='omit' + ) - def run_calibration(self, - 
method='least_squares', - odict=None): + def run_calibration(self, method='least_squares', odict=None): """ odict is the options dictionary """ @@ -124,14 +136,12 @@ def run_calibration(self, } fdict.update(odict) - self.res = self.fitter.least_squares(self.params, - **fdict) + self.res = self.fitter.least_squares(self.params, **fdict) else: fdict = odict - self.res = self.fitter.scalar_minimize(method=method, - params=self.params, - max_nfev=50000, - **fdict) + self.res = self.fitter.scalar_minimize( + method=method, params=self.params, max_nfev=50000, **fdict + ) self.params = self.res.params # res = self.fitter.least_squares(**fdict) @@ -169,7 +179,8 @@ def relative_constraints_type(self, v: Optional[RelativeConstraintsType]): current = getattr(self, '_relative_constraints', None) if current is None or current.type != v: self.relative_constraints = create_relative_constraints( - v, self.instr) + v, self.instr + ) @property def relative_constraints(self) -> RelativeConstraints: @@ -245,9 +256,10 @@ def meas_angles(self) -> dict: panel = self.instr.detectors[det_name] angles, _ = panel.cart_to_angles( - meas_xy, - tvec_s=self.instr.tvec, - apply_distortion=True) + meas_xy, + tvec_s=self.instr.tvec, + apply_distortion=True, + ) ang_dict[det_name] = angles ang_list.append(ang_dict) diff --git a/hexrd/powder/instrument/__init__.py b/hexrd/powder/instrument/__init__.py new file mode 100644 index 000000000..b5414013c --- /dev/null +++ b/hexrd/powder/instrument/__init__.py @@ -0,0 +1,13 @@ +from .hedm_instrument import ( + calc_angles_from_beam_vec, + calc_beam_vec, + centers_of_edge_vec, + GenerateEtaOmeMaps, + GrainDataWriter, + HEDMInstrument, + max_tth, + switch_xray_source, + unwrap_dict_to_h5, + unwrap_h5_to_dict, +) +from .detector import Detector diff --git a/hexrd/powder/instrument/detector.py b/hexrd/powder/instrument/detector.py index 0ebdd9c81..37cb8fcfe 100644 --- a/hexrd/powder/instrument/detector.py +++ b/hexrd/powder/instrument/detector.py @@ -3,7 +3,11 @@ import os from typing import Optional -from hexrd.core.instrument.constants import COATING_DEFAULT, FILTER_DEFAULTS, PHOSPHOR_DEFAULT +from hexrd.core.instrument.constants import ( + COATING_DEFAULT, + FILTER_DEFAULTS, + PHOSPHOR_DEFAULT, +) from hexrd.core.instrument.physics_package import AbstractPhysicsPackage import numpy as np import numba @@ -11,18 +15,29 @@ from hexrd.core import constants as ct from hexrd.core import distortion as distortion_pkg from hexrd.core import matrixutil as mutil + from hexrd.hedm import xrdutil from hexrd.core.rotations import mapAngle -from hexrd.powder.material import crystallography -from hexrd.powder.material.crystallography import PlaneData +from hexrd.core.material import crystallography +from hexrd.core.material.crystallography import PlaneData -from hexrd.core.transforms.xfcapi import xy_to_gvec, gvec_to_xy, make_beam_rmat, make_rmat_of_expmap, oscill_angles_of_hkls, angles_to_dvec +from hexrd.core.transforms.xfcapi import ( + xy_to_gvec, + gvec_to_xy, + make_beam_rmat, + make_rmat_of_expmap, + oscill_angles_of_hkls, + angles_to_dvec, +) from hexrd.core.utils.decorators import memoize from hexrd.core.gridutil import cellIndices from hexrd.core.instrument import detector_coatings -from hexrd.core.material.utils import calculate_linear_absorption_length, calculate_incoherent_scattering +from hexrd.core.material.utils import ( + calculate_linear_absorption_length, + calculate_incoherent_scattering, +) distortion_registry = distortion_pkg.Registry() @@ -279,7 +294,8 @@ def __init__( if 
detector_filter is None: detector_filter = detector_coatings.Filter( - **FILTER_DEFAULTS.TARDIS) + **FILTER_DEFAULTS.TARDIS + ) self.filter = detector_filter if detector_coating is None: @@ -530,8 +546,9 @@ def pixel_coords(self): # METHODS # ========================================================================= - def pixel_Q(self, energy: np.floating, - origin: np.ndarray = ct.zeros_3) -> np.ndarray: + def pixel_Q( + self, energy: np.floating, origin: np.ndarray = ct.zeros_3 + ) -> np.ndarray: '''get the equivalent momentum transfer for the angles. @@ -550,7 +567,7 @@ def pixel_Q(self, energy: np.floating, ''' lam = ct.keVToAngstrom(energy) tth, _ = self.pixel_angles(origin=origin) - return 4.*np.pi*np.sin(tth*0.5)/lam + return 4.0 * np.pi * np.sin(tth * 0.5) / lam def pixel_compton_energy_loss( self, @@ -577,9 +594,9 @@ def pixel_compton_energy_loss( ''' energy = np.asarray(energy) tth, _ = self.pixel_angles() - ang_fact = (1 - np.cos(tth)) - beta = energy/ct.cRestmasskeV - return energy/(1 + beta*ang_fact) + ang_fact = 1 - np.cos(tth) + beta = energy / ct.cRestmasskeV + return energy / (1 + beta * ang_fact) def pixel_compton_attenuation_length( self, @@ -628,8 +645,7 @@ def compute_compton_scattering_intensity( physics_package: AbstractPhysicsPackage, origin: np.array = ct.zeros_3, ) -> tuple[np.ndarray, np.ndarray, np.ndarray]: - - ''' compute the theoretical compton scattering + '''compute the theoretical compton scattering signal on the detector. this value is corrected for the transmission of compton scattered photons and normalized before being subtracted from the @@ -652,18 +668,20 @@ q = self.pixel_Q(energy) inc_s = calculate_incoherent_scattering( - physics_package.sample_material, - q.flatten()).reshape(self.shape) + physics_package.sample_material, q.flatten() + ).reshape(self.shape) inc_w = calculate_incoherent_scattering( - physics_package.window_material, - q.flatten()).reshape(self.shape) + physics_package.window_material, q.flatten() + ).reshape(self.shape) t_s = self.calc_compton_physics_package_transmission( - energy, rMat_s, physics_package) + energy, rMat_s, physics_package + ) t_w = self.calc_compton_window_transmission( - energy, rMat_s, physics_package) + energy, rMat_s, physics_package + ) return inc_s * t_s + inc_w * t_w, t_s, t_w @@ -1087,9 +1105,14 @@ def interpolate_nearest(self, xy, img, pad_with_nans=True): int_xy[on_panel] = int_vals return int_xy - def interpolate_bilinear(self, xy, img, pad_with_nans=True, - clip_to_panel=True, - on_panel: Optional[np.ndarray] = None): + def interpolate_bilinear( + self, + xy, + img, + pad_with_nans=True, + clip_to_panel=True, + on_panel: Optional[np.ndarray] = None, + ): """ Interpolate an image array at the specified cartesian points.
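The two relations touched above (pixel_Q and pixel_compton_energy_loss) are compact enough to sanity-check outside the class. A minimal standalone sketch, assuming approximate values for the hexrd.core.constants conversions (keVToAngstrom via hc ~ 12.3984 keV*Angstrom, cRestmasskeV ~ 510.999 keV); names here are illustrative, not the hexrd API:

import numpy as np

HC_KEV_ANGSTROM = 12.398419843320026  # hc in keV * Angstrom (approx.)
ELECTRON_REST_KEV = 510.99895         # m_e c^2 in keV (approx.)

def momentum_transfer(energy_kev, tth_rad):
    # Q = 4*pi*sin(tth/2)/lambda for scattering angle tth (= 2theta)
    lam = HC_KEV_ANGSTROM / energy_kev  # wavelength in Angstrom
    return 4.0 * np.pi * np.sin(0.5 * tth_rad) / lam

def compton_energy(energy_kev, tth_rad):
    # energy of an inelastically (Compton) scattered photon
    beta = energy_kev / ELECTRON_REST_KEV
    return energy_kev / (1.0 + beta * (1.0 - np.cos(tth_rad)))

# e.g. a 30 keV photon scattered through 90 degrees:
#   momentum_transfer(30.0, np.pi / 2)  -> ~21.5 inverse Angstrom
#   compton_energy(30.0, np.pi / 2)     -> ~28.3 keV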
@@ -1766,19 +1789,23 @@ def increase_memoization_sizes(funcs, min_size): if cache_info['maxsize'] < min_size: f.set_cache_maxsize(min_size) - def calc_physics_package_transmission(self, energy: np.floating, - rMat_s: np.array, - physics_package: AbstractPhysicsPackage) -> np.float64: + def calc_physics_package_transmission( + self, + energy: np.floating, + rMat_s: np.array, + physics_package: AbstractPhysicsPackage, + ) -> np.float64: """get the transmission from the physics package need to consider HED and HEDM samples separately """ bvec = self.bvec - sample_normal = np.dot(rMat_s, [0., 0., np.sign(bvec[2])]) - seca = 1./np.dot(bvec, sample_normal) + sample_normal = np.dot(rMat_s, [0.0, 0.0, np.sign(bvec[2])]) + seca = 1.0 / np.dot(bvec, sample_normal) tth, eta = self.pixel_angles() - angs = np.vstack((tth.flatten(), eta.flatten(), - np.zeros(tth.flatten().shape))).T + angs = np.vstack( + (tth.flatten(), eta.flatten(), np.zeros(tth.flatten().shape)) + ).T dvecs = angles_to_dvec(angs, beam_vec=bvec) @@ -1791,17 +1818,17 @@ def calc_physics_package_transmission(self, energy: np.floating, cosb < 0, np.isclose( cosb, - 0., - atol=5E-2, - ) + 0.0, + atol=5e-2, + ), ) cosb[mask] = np.nan - secb = 1./cosb.reshape(self.shape) + secb = 1.0 / cosb.reshape(self.shape) T_sample = self.calc_transmission_sample( - seca, secb, energy, physics_package) - T_window = self.calc_transmission_window( - secb, energy, physics_package) + seca, secb, energy, physics_package + ) + T_window = self.calc_transmission_window(secb, energy, physics_package) transmission_physics_package = T_sample * T_window return transmission_physics_package @@ -1818,12 +1845,13 @@ def calc_compton_physics_package_transmission( routine than elastically scattered absorption. ''' bvec = self.bvec - sample_normal = np.dot(rMat_s, [0., 0., np.sign(bvec[2])]) - seca = 1./np.dot(bvec, sample_normal) + sample_normal = np.dot(rMat_s, [0.0, 0.0, np.sign(bvec[2])]) + seca = 1.0 / np.dot(bvec, sample_normal) tth, eta = self.pixel_angles() - angs = np.vstack((tth.flatten(), eta.flatten(), - np.zeros(tth.flatten().shape))).T + angs = np.vstack( + (tth.flatten(), eta.flatten(), np.zeros(tth.flatten().shape)) + ).T dvecs = angles_to_dvec(angs, beam_vec=bvec) @@ -1836,18 +1864,19 @@ def calc_compton_physics_package_transmission( cosb < 0, np.isclose( cosb, - 0., - atol=5E-2, - ) + 0.0, + atol=5e-2, + ), ) cosb[mask] = np.nan - secb = 1./cosb.reshape(self.shape) + secb = 1.0 / cosb.reshape(self.shape) T_sample = self.calc_compton_transmission( - seca, secb, energy, - physics_package, 'sample') + seca, secb, energy, physics_package, 'sample' + ) T_window = self.calc_compton_transmission_window( - secb, energy, physics_package) + secb, energy, physics_package + ) return T_sample * T_window @@ -1864,12 +1893,13 @@ def calc_compton_window_transmission( elastically scattered absorption. 
''' bvec = self.bvec - sample_normal = np.dot(rMat_s, [0., 0., np.sign(bvec[2])]) - seca = 1./np.dot(bvec, sample_normal) + sample_normal = np.dot(rMat_s, [0.0, 0.0, np.sign(bvec[2])]) + seca = 1.0 / np.dot(bvec, sample_normal) tth, eta = self.pixel_angles() - angs = np.vstack((tth.flatten(), eta.flatten(), - np.zeros(tth.flatten().shape))).T + angs = np.vstack( + (tth.flatten(), eta.flatten(), np.zeros(tth.flatten().shape)) + ).T dvecs = angles_to_dvec(angs, beam_vec=bvec) @@ -1882,45 +1912,54 @@ def calc_compton_window_transmission( cosb < 0, np.isclose( cosb, - 0., - atol=5E-2, - ) + 0.0, + atol=5e-2, + ), ) cosb[mask] = np.nan - secb = 1./cosb.reshape(self.shape) + secb = 1.0 / cosb.reshape(self.shape) T_window = self.calc_compton_transmission( - seca, secb, energy, - physics_package, 'window') + seca, secb, energy, physics_package, 'window' + ) T_sample = self.calc_compton_transmission_sample( - seca, energy, physics_package) + seca, energy, physics_package + ) return T_sample * T_window - def calc_transmission_sample(self, seca: np.array, - secb: np.array, energy: np.floating, - physics_package: AbstractPhysicsPackage) -> np.array: + def calc_transmission_sample( + self, + seca: np.array, + secb: np.array, + energy: np.floating, + physics_package: AbstractPhysicsPackage, + ) -> np.array: thickness_s = physics_package.sample_thickness # in microns if np.isclose(thickness_s, 0): return np.ones(self.shape) # in microns^-1 - mu_s = 1./physics_package.sample_absorption_length(energy) - x = (mu_s*thickness_s) - pre = 1./x/(secb - seca) - num = np.exp(-x*seca) - np.exp(-x*secb) + mu_s = 1.0 / physics_package.sample_absorption_length(energy) + x = mu_s * thickness_s + pre = 1.0 / x / (secb - seca) + num = np.exp(-x * seca) - np.exp(-x * secb) return pre * num - def calc_transmission_window(self, secb: np.array, energy: np.floating, - physics_package: AbstractPhysicsPackage) -> np.array: + def calc_transmission_window( + self, + secb: np.array, + energy: np.floating, + physics_package: AbstractPhysicsPackage, + ) -> np.array: material_w = physics_package.window_material thickness_w = physics_package.window_thickness # in microns if material_w is None or np.isclose(thickness_w, 0): return np.ones(self.shape) # in microns^-1 - mu_w = 1./physics_package.window_absorption_length(energy) - return np.exp(-thickness_w*mu_w*secb) + mu_w = 1.0 / physics_package.window_absorption_length(energy) + return np.exp(-thickness_w * mu_w * secb) def calc_compton_transmission( self, @@ -1935,9 +1974,11 @@ def calc_compton_transmission( formula = physics_package.sample_material density = physics_package.sample_density thickness = physics_package.sample_thickness - mu = 1./physics_package.sample_absorption_length(energy) - mu_prime = 1. 
/ self.pixel_compton_attenuation_length( - energy, density, formula, + mu = 1.0 / physics_package.sample_absorption_length(energy) + mu_prime = 1.0 / self.pixel_compton_attenuation_length( + energy, + density, + formula, ) elif pp_layer == 'window': formula = physics_package.window_material @@ -1946,17 +1987,18 @@ def calc_compton_transmission( density = physics_package.window_density thickness = physics_package.window_thickness - mu = 1./physics_package.sample_absorption_length(energy) - mu_prime = 1./self.pixel_compton_attenuation_length( - energy, density, formula) + mu = 1.0 / physics_package.sample_absorption_length(energy) + mu_prime = 1.0 / self.pixel_compton_attenuation_length( + energy, density, formula + ) if thickness <= 0: return np.ones(self.shape) - x1 = mu*thickness*seca - x2 = mu_prime*thickness*secb - num = (np.exp(-x1) - np.exp(-x2)) - return -num/(x1 - x2) + x1 = mu * thickness * seca + x2 = mu_prime * thickness * secb + num = np.exp(-x1) - np.exp(-x2) + return -num / (x1 - x2) def calc_compton_transmission_sample( self, @@ -1966,9 +2008,8 @@ def calc_compton_transmission_sample( ) -> np.ndarray: thickness_s = physics_package.sample_thickness # in microns - mu_s = 1./physics_package.sample_absorption_length( - energy) - return np.exp(-mu_s*thickness_s*seca) + mu_s = 1.0 / physics_package.sample_absorption_length(energy) + return np.exp(-mu_s * thickness_s * seca) def calc_compton_transmission_window( self, @@ -1980,60 +2021,71 @@ def calc_compton_transmission_window( if formula is None: return np.ones(self.shape) - density = physics_package.window_density # in g/cc + density = physics_package.window_density # in g/cc thickness_w = physics_package.window_thickness # in microns - mu_w_prime = 1./self.pixel_compton_attenuation_length( - energy, density, formula) - return np.exp(-mu_w_prime*thickness_w*secb) - - def calc_effective_pinhole_area(self, physics_package: AbstractPhysicsPackage) -> np.array: - """get the effective pinhole area correction - """ - if (np.isclose(physics_package.pinhole_diameter, 0) - or np.isclose(physics_package.pinhole_thickness, 0)): + mu_w_prime = 1.0 / self.pixel_compton_attenuation_length( + energy, density, formula + ) + return np.exp(-mu_w_prime * thickness_w * secb) + + def calc_effective_pinhole_area( + self, physics_package: AbstractPhysicsPackage + ) -> np.array: + """get the effective pinhole area correction""" + if np.isclose(physics_package.pinhole_diameter, 0) or np.isclose( + physics_package.pinhole_thickness, 0 + ): return np.ones(self.shape) - hod = (physics_package.pinhole_thickness / - physics_package.pinhole_diameter) + hod = ( + physics_package.pinhole_thickness + / physics_package.pinhole_diameter + ) bvec = self.bvec tth, eta = self.pixel_angles() - angs = np.vstack((tth.flatten(), eta.flatten(), - np.zeros(tth.flatten().shape))).T + angs = np.vstack( + (tth.flatten(), eta.flatten(), np.zeros(tth.flatten().shape)) + ).T dvecs = angles_to_dvec(angs, beam_vec=bvec) cth = -dvecs[:, 2].reshape(self.shape) tanth = np.tan(np.arccos(cth)) - f = hod*tanth - f[np.abs(f) > 1.] 
= np.nan + f = hod * tanth + f[np.abs(f) > 1.0] = np.nan asinf = np.arcsin(f) return 2 / np.pi * cth * (np.pi / 2 - asinf - f * np.cos(asinf)) - def calc_transmission_generic(self, - secb: np.array, - thickness: np.floating, - absorption_length: np.floating) -> np.array: + def calc_transmission_generic( + self, + secb: np.array, + thickness: np.floating, + absorption_length: np.floating, + ) -> np.array: if np.isclose(thickness, 0): return np.ones(self.shape) - mu = 1./absorption_length # in microns^-1 - return np.exp(-thickness*mu*secb) + mu = 1.0 / absorption_length # in microns^-1 + return np.exp(-thickness * mu * secb) - def calc_transmission_phosphor(self, - secb: np.array, - thickness: np.floating, - readout_length: np.floating, - absorption_length: np.floating, - energy: np.floating, - pre_U0: np.floating) -> np.array: + def calc_transmission_phosphor( + self, + secb: np.array, + thickness: np.floating, + readout_length: np.floating, + absorption_length: np.floating, + energy: np.floating, + pre_U0: np.floating, + ) -> np.array: if np.isclose(thickness, 0): return np.ones(self.shape) - f1 = absorption_length*thickness - f2 = absorption_length*readout_length - arg = (secb + 1/f2) - return pre_U0 * energy*((1.0 - np.exp(-f1*arg))/arg) + f1 = absorption_length * thickness + f2 = absorption_length * readout_length + arg = secb + 1 / f2 + return pre_U0 * energy * ((1.0 - np.exp(-f1 * arg)) / arg) + # ============================================================================= # UTILITY METHODS diff --git a/hexrd/powder/instrument/hedm_instrument.py b/hexrd/powder/instrument/hedm_instrument.py index 373114e1b..44915c515 100644 --- a/hexrd/powder/instrument/hedm_instrument.py +++ b/hexrd/powder/instrument/hedm_instrument.py @@ -59,7 +59,14 @@ from hexrd.core.fitting.utils import fit_ring from hexrd.core.gridutil import make_tolerance_grid from hexrd.core import matrixutil as mutil -from hexrd.core.transforms.xfcapi import angles_to_gvec, gvec_to_xy, make_sample_rmat, make_rmat_of_expmap, unit_vector +from hexrd.core.transforms.xfcapi import ( + angles_to_gvec, + gvec_to_xy, + make_sample_rmat, + make_rmat_of_expmap, + unit_vector, +) + # TODO: Resolve extra-workflow dependency from hexrd.hedm import xrdutil from hexrd.powder.material.crystallography import PlaneData @@ -82,9 +89,11 @@ try: from fast_histogram import histogram1d + fast_histogram = True except ImportError: from numpy import histogram as histogram1d + fast_histogram = False logger = logging.getLogger() @@ -107,9 +116,9 @@ pixel_size_DFLT = (0.2, 0.2) tilt_params_DFLT = np.zeros(3) -t_vec_d_DFLT = np.r_[0., 0., -1000.] +t_vec_d_DFLT = np.r_[0.0, 0.0, -1000.0] -chi_DFLT = 0. +chi_DFLT = 0.0 t_vec_s_DFLT = np.zeros(3) multi_ims_key = ct.shared_ims_key @@ -123,8 +132,9 @@ # ============================================================================= -def generate_chunks(nrows, ncols, base_nrows, base_ncols, - row_gap=0, col_gap=0): +def generate_chunks( + nrows, ncols, base_nrows, base_ncols, row_gap=0, col_gap=0 +): """ Generate chunking data for regularly tiled composite detectors. 
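For reference, the slab-absorption and pinhole-geometry corrections reformatted above (calc_transmission_sample, calc_transmission_window, calc_effective_pinhole_area) reduce to a few lines of numpy. A sketch under the same conventions (mu is the inverse absorption length in inverse microns, thicknesses in microns, sec_a/sec_b the secants of the in- and out-going path angles); note the detector code derives cos_tth from the diffracted-beam direction, whereas here it is passed in directly, and these function names are illustrative only:

import numpy as np

def transmission_window(sec_b, thickness, mu):
    # straight Beer-Lambert attenuation along the exit path
    return np.exp(-thickness * mu * sec_b)

def transmission_sample(sec_a, sec_b, thickness, mu):
    # Beer-Lambert absorption integrated over the depth at which the
    # photon scatters; not guarded here for the sec_a == sec_b limit
    x = mu * thickness
    return (np.exp(-x * sec_a) - np.exp(-x * sec_b)) / (x * (sec_b - sec_a))

def effective_pinhole_area(cos_tth, thickness, diameter):
    # fractional open area of a cylindrical pinhole viewed obliquely,
    # mirroring the expression in calc_effective_pinhole_area
    f = (thickness / diameter) * np.tan(np.arccos(cos_tth))
    f = np.where(np.abs(f) > 1.0, np.nan, f)  # fully vignetted
    asinf = np.arcsin(f)
    return 2 / np.pi * cos_tth * (np.pi / 2 - asinf - f * np.cos(asinf))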
@@ -156,18 +166,15 @@ def generate_chunks(nrows, ncols, base_nrows, base_ncols, [[row_start, row_stop], [col_start, col_stop]] """ - row_starts = np.array([i*(base_nrows + row_gap) for i in range(nrows)]) - col_starts = np.array([i*(base_ncols + col_gap) for i in range(ncols)]) + row_starts = np.array([i * (base_nrows + row_gap) for i in range(nrows)]) + col_starts = np.array([i * (base_ncols + col_gap) for i in range(ncols)]) rr = np.vstack([row_starts, row_starts + base_nrows]) cc = np.vstack([col_starts, col_starts + base_ncols]) rects = [] labels = [] for i in range(nrows): for j in range(ncols): - this_rect = np.array( - [[rr[0, i], rr[1, i]], - [cc[0, j], cc[1, j]]] - ) + this_rect = np.array([[rr[0, i], rr[1, i]], [cc[0, j], cc[1, j]]]) rects.append(this_rect) labels.append('%d_%d' % (i, j)) return rects, labels @@ -193,9 +200,11 @@ def chunk_instrument(instr, rects, labels, use_roi=False): """ icfg_dict = instr.write_config() - new_icfg_dict = dict(beam=icfg_dict['beam'], - oscillation_stage=icfg_dict['oscillation_stage'], - detectors={}) + new_icfg_dict = dict( + beam=icfg_dict['beam'], + oscillation_stage=icfg_dict['oscillation_stage'], + detectors={}, + ) for panel_id, panel in instr.detectors.items(): pcfg_dict = panel.config_dict(instr.chi, instr.tvec)['detector'] @@ -205,7 +214,7 @@ def chunk_instrument(instr, rects, labels, use_roi=False): row_col_dim = np.diff(rect) # (2, 1) shape = tuple(row_col_dim.flatten()) - center = (rect[:, 0].reshape(2, 1) + 0.5*row_col_dim) + center = rect[:, 0].reshape(2, 1) + 0.5 * row_col_dim sp_tvec = np.concatenate( [panel.pixelToCart(center.T).flatten(), np.zeros(1)] @@ -230,7 +239,7 @@ def chunk_instrument(instr, rects, labels, use_roi=False): if panel.panel_buffer is not None: if panel.panel_buffer.ndim == 2: # have a mask array! submask = panel.panel_buffer[ - rect[0, 0]:rect[0, 1], rect[1, 0]:rect[1, 1] + rect[0, 0] : rect[0, 1], rect[1, 0] : rect[1, 1] ] new_icfg_dict['detectors'][panel_name]['buffer'] = submask return new_icfg_dict @@ -274,9 +283,7 @@ def _parse_imgser_dict(imgser_dict, det_key, roi=None): images_in = imgser_dict[multi_ims_key] elif np.any(matched_det_keys): if sum(matched_det_keys) != 1: - raise RuntimeError( - f"multiple entries found for '{det_key}'" - ) + raise RuntimeError(f"multiple entries found for '{det_key}'") # use boolean array to index the proper key # !!! 
these should be in the same order img_keys = img_keys = np.asarray(list(imgser_dict.keys())) @@ -296,7 +303,12 @@ def _parse_imgser_dict(imgser_dict, det_key, roi=None): if isinstance(images_in, ims_classes): # input is an imageseries of some kind - ims = ProcessedImageSeries(images_in, [('rectangle', roi), ]) + ims = ProcessedImageSeries( + images_in, + [ + ('rectangle', roi), + ], + ) if isinstance(images_in, OmegaImageSeries): # if it was an OmegaImageSeries, must re-cast ims = OmegaImageSeries(ims) @@ -304,16 +316,16 @@ def _parse_imgser_dict(imgser_dict, det_key, roi=None): # 2- or 3-d array of images ndim = images_in.ndim if ndim == 2: - ims = images_in[roi[0][0]:roi[0][1], roi[1][0]:roi[1][1]] + ims = images_in[roi[0][0] : roi[0][1], roi[1][0] : roi[1][1]] elif ndim == 3: nrows = roi[0][1] - roi[0][0] ncols = roi[1][1] - roi[1][0] n_images = len(images_in) - ims = np.empty((n_images, nrows, ncols), - dtype=images_in.dtype) + ims = np.empty((n_images, nrows, ncols), dtype=images_in.dtype) for i, image in images_in: - ims[i, :, :] = \ - images_in[roi[0][0]:roi[0][1], roi[1][0]:roi[1][1]] + ims[i, :, :] = images_in[ + roi[0][0] : roi[0][1], roi[1][0] : roi[1][1] + ] else: raise RuntimeError( f"image input dim must be 2 or 3; you gave {ndim}" @@ -331,9 +343,8 @@ def calc_beam_vec(azim, pola): tht = np.radians(azim) phi = np.radians(pola) bv = np.r_[ - np.sin(phi)*np.cos(tht), - np.cos(phi), - np.sin(phi)*np.sin(tht)] + np.sin(phi) * np.cos(tht), np.cos(phi), np.sin(phi) * np.sin(tht) + ] return -bv @@ -344,9 +355,7 @@ def calc_angles_from_beam_vec(bvec): """ bvec = np.atleast_1d(bvec).flatten() nvec = unit_vector(-bvec) - azim = float( - np.degrees(np.arctan2(nvec[2], nvec[0])) - ) + azim = float(np.degrees(np.arctan2(nvec[2], nvec[0]))) pola = float(np.degrees(np.arccos(nvec[1]))) return azim, pola @@ -370,9 +379,9 @@ def angle_in_range(angle, ranges, ccw=True, units='degrees'): WARNING: always clockwise; assumes wedges are not overlapping """ - tau = 360. + tau = 360.0 if units.lower() == 'radians': - tau = 2*np.pi + tau = 2 * np.pi w = np.nan for i, wedge in enumerate(ranges): amin = wedge[0] @@ -404,7 +413,7 @@ def max_tth(instr): tth_max : float The maximum observable Bragg angle by the instrument in radians. """ - tth_max = 0. 
+ tth_max = 0.0 for det in instr.detectors.values(): ptth, peta = det.pixel_angles() tth_max = max(np.max(ptth), tth_max) @@ -436,10 +445,9 @@ def pixel_resolution(instr): ang_ps_full = [] for panel in instr.detectors.values(): angps = panel.angularPixelSize( - np.stack( - panel.pixel_coords, - axis=0 - ).reshape(2, np.cumprod(panel.shape)[-1]).T + np.stack(panel.pixel_coords, axis=0) + .reshape(2, np.cumprod(panel.shape)[-1]) + .T ) ang_ps_full.append(angps) max_tth = min(max_tth, np.min(angps[:, 0])) @@ -471,10 +479,9 @@ def max_resolution(instr): max_eta = np.inf for panel in instr.detectors.values(): angps = panel.angularPixelSize( - np.stack( - panel.pixel_coords, - axis=0 - ).reshape(2, np.cumprod(panel.shape)[-1]).T + np.stack(panel.pixel_coords, axis=0) + .reshape(2, np.cumprod(panel.shape)[-1]) + .T ) max_tth = min(max_tth, np.min(angps[:, 0])) max_eta = min(max_eta, np.min(angps[:, 1])) @@ -482,16 +489,16 @@ def max_resolution(instr): def _gaussian_dist(x, cen, fwhm): - sigm = fwhm/(2*np.sqrt(2*np.log(2))) - return np.exp(-0.5*(x - cen)**2/sigm**2) + sigm = fwhm / (2 * np.sqrt(2 * np.log(2))) + return np.exp(-0.5 * (x - cen) ** 2 / sigm**2) def _sigma_to_fwhm(sigm): - return sigm*ct.sigma_to_fwhm + return sigm * ct.sigma_to_fwhm def _fwhm_to_sigma(fwhm): - return fwhm/ct.sigma_to_fwhm + return fwhm / ct.sigma_to_fwhm # ============================================================================= @@ -507,12 +514,17 @@ class HEDMInstrument(object): * where should reference eta be defined? currently set to default config """ - def __init__(self, instrument_config=None, - image_series=None, eta_vector=None, - instrument_name=None, tilt_calibration_mapping=None, - max_workers=max_workers_DFLT, - physics_package=None, - active_beam_name: Optional[str] = None): + def __init__( + self, + instrument_config=None, + image_series=None, + eta_vector=None, + instrument_name=None, + tilt_calibration_mapping=None, + max_workers=max_workers_DFLT, + physics_package=None, + active_beam_name: Optional[str] = None, + ): self._id = instrument_name_DFLT self._active_beam_name = active_beam_name @@ -537,7 +549,8 @@ def __init__(self, instrument_config=None, # FIXME: must add cylindrical self._detectors = dict( panel_id_DFLT=PlanarDetector( - rows=nrows_DFLT, cols=ncols_DFLT, + rows=nrows_DFLT, + cols=ncols_DFLT, pixel_size=pixel_size_DFLT, tvec=t_vec_d_DFLT, tilt=tilt_params_DFLT, @@ -545,9 +558,11 @@ def __init__(self, instrument_config=None, xrs_dist=self.source_distance, evec=self._eta_vector, distortion=None, - roi=None, group=None, - max_workers=self.max_workers), - ) + roi=None, + group=None, + max_workers=self.max_workers, + ), + ) self._tvec = t_vec_s_DFLT self._chi = chi_DFLT @@ -574,10 +589,7 @@ def __init__(self, instrument_config=None, self.physics_package = instrument_config['physics_package'] xrs_config = instrument_config['beam'] - is_single_beam = ( - 'energy' in xrs_config and - 'vector' in xrs_config - ) + is_single_beam = 'energy' in xrs_config and 'vector' in xrs_config if is_single_beam: # Assume single beam. 
Load the same way as multibeam self._create_default_beam() @@ -634,7 +646,7 @@ def __init__(self, instrument_config=None, elif isinstance(det_buffer, list): panel_buffer = np.asarray(det_buffer) elif np.isscalar(det_buffer): - panel_buffer = det_buffer*np.ones(2) + panel_buffer = det_buffer * np.ones(2) else: raise RuntimeError( "panel buffer spec invalid for %s" % det_id @@ -711,9 +723,9 @@ def mean_detector_center(self) -> np.ndarray: def mean_group_center(self, group: str) -> np.ndarray: """Return the mean center for detectors belonging to a group""" - centers = np.array([ - x.tvec for x in self.detectors_in_group(group).values() - ]) + centers = np.array( + [x.tvec for x in self.detectors_in_group(group).values()] + ) return centers.sum(axis=0) / len(centers) @property @@ -747,10 +759,11 @@ def detector_parameters(self): pdict = {} for key, panel in self.detectors.items(): pdict[key] = panel.config_dict( - self.chi, self.tvec, + self.chi, + self.tvec, beam_energy=self.beam_energy, beam_vector=self.beam_vector, - style='hdf5' + style='hdf5', ) return pdict @@ -854,8 +867,9 @@ def beam_vector(self) -> np.ndarray: def beam_vector(self, x: np.ndarray): x = np.array(x).flatten() if len(x) == 3: - assert sum(x*x) > 1-ct.sqrt_epsf, \ - 'input must have length = 3 and have unit magnitude' + assert ( + sum(x * x) > 1 - ct.sqrt_epsf + ), 'input must have length = 3 and have unit magnitude' bvec = x elif len(x) == 2: bvec = calc_beam_vec(*x) @@ -872,8 +886,9 @@ def source_distance(self): @source_distance.setter def source_distance(self, x): - assert np.isscalar(x), \ - f"'source_distance' must be a scalar; you input '{x}'" + assert np.isscalar( + x + ), f"'source_distance' must be a scalar; you input '{x}'" self.active_beam['distance'] = x self.beam_dict_modified() @@ -884,8 +899,9 @@ def eta_vector(self): @eta_vector.setter def eta_vector(self, x): x = np.array(x).flatten() - assert len(x) == 3 and sum(x*x) > 1-ct.sqrt_epsf, \ - 'input must have length = 3 and have unit magnitude' + assert ( + len(x) == 3 and sum(x * x) > 1 - ct.sqrt_epsf + ), 'input must have length = 3 and have unit magnitude' self._eta_vector = x # ...maybe change dictionary item behavior for 3.x compatibility? for detector_id in self.detectors: @@ -897,10 +913,11 @@ def eta_vector(self, x): # ========================================================================= def write_config(self, file=None, style='yaml', calibration_dict={}): - """ WRITE OUT YAML FILE """ + """WRITE OUT YAML FILE""" # initialize output dictionary - assert style.lower() in ['yaml', 'hdf5'], \ + assert style.lower() in ['yaml', 'hdf5'], ( "style must be either 'yaml', or 'hdf5'; you gave '%s'" % style + ) par_dict = {} @@ -929,10 +946,7 @@ def write_config(self, file=None, style='yaml', calibration_dict={}): if calibration_dict: par_dict['calibration_crystal'] = calibration_dict - ostage = dict( - chi=self.chi, - translation=self.tvec.tolist() - ) + ostage = dict(chi=self.chi, translation=self.tvec.tolist()) par_dict['oscillation_stage'] = ostage det_dict = dict.fromkeys(self.detectors) @@ -940,10 +954,13 @@ def write_config(self, file=None, style='yaml', calibration_dict={}): # grab panel config # !!! don't need beam or tvec # !!! 
have vetted style - pdict = detector.config_dict(chi=self.chi, tvec=self.tvec, - beam_energy=self.beam_energy, - beam_vector=self.beam_vector, - style=style) + pdict = detector.config_dict( + chi=self.chi, + tvec=self.tvec, + beam_energy=self.beam_energy, + beam_vector=self.beam_vector, + style=style, + ) det_dict[det_name] = pdict['detector'] par_dict['detectors'] = det_dict @@ -953,6 +970,7 @@ def write_config(self, file=None, style='yaml', calibration_dict={}): with open(file, 'w') as f: yaml.dump(par_dict, stream=f, Dumper=NumpyToNativeDumper) else: + def _write_group(file): instr_grp = file.create_group('instrument') unwrap_dict_to_h5(instr_grp, par_dict, asattr=False) @@ -968,9 +986,15 @@ def _write_group(file): return par_dict - def extract_polar_maps(self, plane_data, imgser_dict, - active_hkls=None, threshold=None, - tth_tol=None, eta_tol=0.25): + def extract_polar_maps( + self, + plane_data, + imgser_dict, + active_hkls=None, + threshold=None, + tth_tol=None, + eta_tol=0.25, + ): """ Extract eta-omega maps from an imageseries. @@ -994,23 +1018,25 @@ def extract_polar_maps(self, plane_data, imgser_dict, # detectors, so calculate it once # !!! grab first panel panel = next(iter(self.detectors.values())) - pow_angs, pow_xys, tth_ranges, eta_idx, eta_edges = \ + pow_angs, pow_xys, tth_ranges, eta_idx, eta_edges = ( panel.make_powder_rings( - plane_data, merge_hkls=False, - delta_eta=eta_tol, full_output=True + plane_data, + merge_hkls=False, + delta_eta=eta_tol, + full_output=True, ) + ) if active_hkls is not None: - assert hasattr(active_hkls, '__len__'), \ - "active_hkls must be an iterable with __len__" + assert hasattr( + active_hkls, '__len__' + ), "active_hkls must be an iterable with __len__" # need to re-cast for element-wise operations active_hkls = np.array(active_hkls) # these are all active reflection unique hklIDs - active_hklIDs = plane_data.getHKLID( - plane_data.hkls, master=True - ) + active_hklIDs = plane_data.getHKLID(plane_data.hkls, master=True) # find indices idx = np.zeros_like(active_hkls, dtype=int) @@ -1067,9 +1093,14 @@ def extract_polar_maps(self, plane_data, imgser_dict, # Divide up the images among processes tasks = distribute_tasks(len(ims), self.max_workers) - func = partial(_run_histograms, ims=ims, tth_ranges=tth_ranges, - ring_maps=ring_maps, ring_params=ring_params, - threshold=threshold) + func = partial( + _run_histograms, + ims=ims, + tth_ranges=tth_ranges, + ring_maps=ring_maps, + ring_params=ring_params, + threshold=threshold, + ) max_workers = self.max_workers if max_workers == 1 or len(tasks) == 1: @@ -1087,12 +1118,21 @@ def extract_polar_maps(self, plane_data, imgser_dict, return ring_maps_panel, eta_edges - def extract_line_positions(self, plane_data, imgser_dict, - tth_tol=None, eta_tol=1., npdiv=2, - eta_centers=None, - collapse_eta=True, collapse_tth=False, - do_interpolation=True, do_fitting=False, - tth_distortion=None, fitting_kwargs=None): + def extract_line_positions( + self, + plane_data, + imgser_dict, + tth_tol=None, + eta_tol=1.0, + npdiv=2, + eta_centers=None, + collapse_eta=True, + collapse_tth=False, + do_interpolation=True, + do_fitting=False, + tth_distortion=None, + fitting_kwargs=None, + ): """ Perform annular interpolation on diffraction images. 
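calc_beam_vec and calc_angles_from_beam_vec, shown in full earlier in this file's hunks, are mutual inverses: (azimuth, polar angle) in degrees maps to a unit propagation vector and back. A self-contained round-trip check, assuming the usual hexrd convention that the default beam propagates along -Z (consistent with the t_vec_d default of [0, 0, -1000] above):

import numpy as np

def beam_vec(azim_deg, pola_deg):
    # same parameterization as calc_beam_vec
    t, p = np.radians(azim_deg), np.radians(pola_deg)
    return -np.array([np.sin(p) * np.cos(t), np.cos(p), np.sin(p) * np.sin(t)])

def beam_angles(bvec):
    # inverse map, as in calc_angles_from_beam_vec
    n = -np.asarray(bvec, dtype=float)
    n /= np.linalg.norm(n)
    return (float(np.degrees(np.arctan2(n[2], n[0]))),
            float(np.degrees(np.arccos(n[1]))))

# round trip: a beam along -Z corresponds to azimuth 90, polar angle 90
assert np.allclose(beam_angles([0.0, 0.0, -1.0]), (90.0, 90.0))
assert np.allclose(beam_vec(90.0, 90.0), [0.0, 0.0, -1.0])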
@@ -1167,8 +1207,12 @@ def extract_line_positions(self, plane_data, imgser_dict, # LOOP OVER DETECTORS # ===================================================================== logger.info("Interpolating ring data") - pbar_dets = partial(tqdm, total=self.num_panels, desc="Detector", - position=self.num_panels) + pbar_dets = partial( + tqdm, + total=self.num_panels, + desc="Detector", + position=self.num_panels, + ) # Split up the workers among the detectors max_workers_per_detector = max(1, self.max_workers // self.num_panels) @@ -1191,23 +1235,26 @@ def extract_line_positions(self, plane_data, imgser_dict, def make_instr_cfg(panel): return panel.config_dict( - chi=self.chi, tvec=self.tvec, + chi=self.chi, + tvec=self.tvec, beam_energy=self.beam_energy, beam_vector=self.beam_vector, - style='hdf5' + style='hdf5', ) images = [] for detector_id, panel in self.detectors.items(): - images.append(_parse_imgser_dict(imgser_dict, detector_id, - roi=panel.roi)) + images.append( + _parse_imgser_dict(imgser_dict, detector_id, roi=panel.roi) + ) panels = [self.detectors[k] for k in self.detectors] instr_cfgs = [make_instr_cfg(x) for x in panels] pbp_array = np.arange(self.num_panels) iter_args = zip(panels, instr_cfgs, images, pbp_array) - with ProcessPoolExecutor(mp_context=constants.mp_context, - max_workers=self.num_panels) as executor: + with ProcessPoolExecutor( + mp_context=constants.mp_context, max_workers=self.num_panels + ) as executor: results = list(pbar_dets(executor.map(func, iter_args))) panel_data = {} @@ -1216,12 +1263,9 @@ def make_instr_cfg(panel): return panel_data - def simulate_powder_pattern(self, - mat_list, - params=None, - bkgmethod=None, - origin=None, - noise=None): + def simulate_powder_pattern( + self, mat_list, params=None, bkgmethod=None, origin=None, noise=None ): """ Generate powder diffraction images from specified materials. @@ -1260,8 +1304,7 @@ def simulate_powder_pattern, if origin is None: origin = self.tvec origin = np.asarray(origin).squeeze() - assert len(origin) == 3, \ "origin must be a 3-element sequence" + assert len(origin) == 3, "origin must be a 3-element sequence" if bkgmethod is None: bkgmethod = {'chebyshev': 3} @@ -1301,7 +1344,7 @@ def simulate_powder_pattern, # find min and max tth over all panels tth_mi = np.inf - tth_ma = 0. + tth_ma = 0.0 ptth_dict = dict.fromkeys(self.detectors) for det_key, panel in self.detectors.items(): ptth, peta = panel.pixel_angles(origin=origin) @@ -1323,7 +1366,7 @@ ang_res = max_resolution(self) # !!! calc nsteps by oversampling - nsteps = int(np.ceil(2*(tth_ma - tth_mi)/np.degrees(ang_res[0]))) + nsteps = int(np.ceil(2 * (tth_ma - tth_mi) / np.degrees(ang_res[0]))) # evaluation vector for LeBail tth = np.linspace(tth_mi, tth_ma, nsteps) @@ -1332,7 +1375,7 @@ wavelength = [ valWUnit('lp', 'length', self.beam_wavelength, 'angstrom'), - 1.
+ 1.0, ] ''' @@ -1345,23 +1388,25 @@ def simulate_powder_pattern(self, tth = mat.planeData.getTTh() - LP = (1 + np.cos(tth)**2) / \ - np.cos(0.5*tth)/np.sin(0.5*tth)**2 + LP = ( + (1 + np.cos(tth) ** 2) + / np.cos(0.5 * tth) + / np.sin(0.5 * tth) ** 2 + ) intensity[mat.name] = {} - intensity[mat.name]['synchrotron'] = \ + intensity[mat.name]['synchrotron'] = ( mat.planeData.structFact * LP * multiplicity + ) kwargs = { 'expt_spectrum': expt, 'params': params, 'phases': mat_list, - 'wavelength': { - 'synchrotron': wavelength - }, + 'wavelength': {'synchrotron': wavelength}, 'bkgmethod': bkgmethod, 'intensity_init': intensity, - 'peakshape': 'pvtch' + 'peakshape': 'pvtch', } self.WPPFclass = LeBail(**kwargs) @@ -1379,9 +1424,11 @@ def simulate_powder_pattern(self, for det_key, panel in self.detectors.items(): ptth = ptth_dict[det_key] - img = np.interp(np.degrees(ptth), - self.simulated_spectrum.x, - self.simulated_spectrum.y + self.background.y) + img = np.interp( + np.degrees(ptth), + self.simulated_spectrum.x, + self.simulated_spectrum.y + self.background.y, + ) if noise is None: img_dict[det_key] = img @@ -1392,13 +1439,11 @@ def simulate_powder_pattern(self, img /= prev_max if noise.lower() == 'poisson': - im_noise = random_noise(img, - mode='poisson', - clip=True) + im_noise = random_noise(img, mode='poisson', clip=True) mi = im_noise.min() ma = im_noise.max() if ma > mi: - im_noise = (im_noise - mi)/(ma - mi) + im_noise = (im_noise - mi) / (ma - mi) elif noise.lower() == 'gaussian': im_noise = random_noise(img, mode='gaussian', clip=True) @@ -1420,9 +1465,14 @@ def simulate_powder_pattern(self, return img_dict - def simulate_laue_pattern(self, crystal_data, - minEnergy=5., maxEnergy=35., - rmat_s=None, grain_params=None): + def simulate_laue_pattern( + self, + crystal_data, + minEnergy=5.0, + maxEnergy=35.0, + rmat_s=None, + grain_params=None, + ): """ Simulate Laue diffraction over the instrument. @@ -1452,17 +1502,28 @@ def simulate_laue_pattern(self, crystal_data, for det_key, panel in self.detectors.items(): results[det_key] = panel.simulate_laue_pattern( crystal_data, - minEnergy=minEnergy, maxEnergy=maxEnergy, - rmat_s=rmat_s, tvec_s=self.tvec, + minEnergy=minEnergy, + maxEnergy=maxEnergy, + rmat_s=rmat_s, + tvec_s=self.tvec, grain_params=grain_params, - beam_vec=self.beam_vector) + beam_vec=self.beam_vector, + ) return results - def simulate_rotation_series(self, plane_data, grain_param_list, - eta_ranges=[(-np.pi, np.pi), ], - ome_ranges=[(-np.pi, np.pi), ], - ome_period=(-np.pi, np.pi), - wavelength=None): + def simulate_rotation_series( + self, + plane_data, + grain_param_list, + eta_ranges=[ + (-np.pi, np.pi), + ], + ome_ranges=[ + (-np.pi, np.pi), + ], + ome_period=(-np.pi, np.pi), + wavelength=None, + ): """ Simulate a monochromatic rotation series over the instrument. 
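The intensity model reformatted in the hunk above weights each ring's structure factor by its multiplicity and the Lorentz-polarization factor. A hedged, standalone restatement of that LP expression (tth is the full scattering angle 2theta in radians; the function name is illustrative):

import numpy as np

def lorentz_polarization(tth):
    # LP = (1 + cos^2(2theta)) / (cos(theta) * sin^2(theta)),
    # matching the expression used for the 'synchrotron' intensities
    theta = 0.5 * tth
    return (1.0 + np.cos(tth) ** 2) / (np.cos(theta) * np.sin(theta) ** 2)

# ring intensity before background and peak-shape convolution, as in
# simulate_powder_pattern:
#     I_hkl is proportional to |F_hkl|^2 * multiplicity * LP(2theta)
# e.g. lorentz_polarization(np.radians(20.0)) -> ~63.4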
@@ -1491,24 +1552,39 @@ def simulate_rotation_series(self, plane_data, grain_param_list, results = dict.fromkeys(self.detectors) for det_key, panel in self.detectors.items(): results[det_key] = panel.simulate_rotation_series( - plane_data, grain_param_list, + plane_data, + grain_param_list, eta_ranges=eta_ranges, ome_ranges=ome_ranges, ome_period=ome_period, - chi=self.chi, tVec_s=self.tvec, - wavelength=wavelength) + chi=self.chi, + tVec_s=self.tvec, + wavelength=wavelength, + ) return results - def pull_spots(self, plane_data, grain_params, - imgser_dict, - tth_tol=0.25, eta_tol=1., ome_tol=1., - npdiv=2, threshold=10, - eta_ranges=[(-np.pi, np.pi), ], - ome_period=None, - dirname='results', filename=None, output_format='text', - return_spot_list=False, - quiet=True, check_only=False, - interp='nearest'): + def pull_spots( + self, + plane_data, + grain_params, + imgser_dict, + tth_tol=0.25, + eta_tol=1.0, + ome_tol=1.0, + npdiv=2, + threshold=10, + eta_ranges=[ + (-np.pi, np.pi), + ], + ome_period=None, + dirname='results', + filename=None, + output_format='text', + return_spot_list=False, + quiet=True, + check_only=False, + interp='nearest', + ): """ Extract reflection info from a rotation series. @@ -1568,12 +1644,14 @@ def pull_spots(self, plane_data, grain_params, # WARNING: all imageseries AND all wedges within are assumed to have # the same omega values; put in a check that they are all the same??? oims0 = next(iter(imgser_dict.values())) - ome_ranges = [np.radians([i['ostart'], i['ostop']]) - for i in oims0.omegawedges.wedges] + ome_ranges = [ + np.radians([i['ostart'], i['ostop']]) + for i in oims0.omegawedges.wedges + ] if ome_period is None: ims = next(iter(imgser_dict.values())) ostart = ims.omega[0, 0] - ome_period = np.radians(ostart + np.r_[0., 360.]) + ome_period = np.radians(ostart + np.r_[0.0, 360.0]) # delta omega in DEGREES grabbed from first imageseries in the dict delta_ome = oims0.omega[0, 1] - oims0.omega[0, 0] @@ -1581,7 +1659,10 @@ # make omega grid for frame expansion around reference frame # in DEGREES ndiv_ome, ome_del = make_tolerance_grid( - delta_ome, ome_tol, 1, adjust_window=True, + delta_ome, + ome_tol, + 1, + adjust_window=True, ) # generate structuring element for connected component labeling @@ -1592,24 +1673,37 @@ # simulate rotation series sim_results = self.simulate_rotation_series( - plane_data, [grain_params, ], + plane_data, + [ + grain_params, + ], eta_ranges=eta_ranges, ome_ranges=ome_ranges, - ome_period=ome_period) + ome_period=ome_period, + ) # patch vertex generator (global for instrument) - tol_vec = 0.5*np.radians( - [-tth_tol, -eta_tol, - -tth_tol, eta_tol, - tth_tol, eta_tol, - tth_tol, -eta_tol]) + tol_vec = 0.5 * np.radians( + [ + -tth_tol, + -eta_tol, + -tth_tol, + eta_tol, + tth_tol, + eta_tol, + tth_tol, + -eta_tol, + ] + ) # prepare output if requested if filename is not None and output_format.lower() == 'hdf5': this_filename = os.path.join(dirname, filename) writer = GrainDataWriter_h5( os.path.join(dirname, filename), - self.write_config(), grain_params) + self.write_config(), + grain_params, + ) # ===================================================================== # LOOP OVER PANELS # ===================================================================== @@ -1621,28 +1715,25 @@ for detector_id, panel in self.detectors.items(): # initialize text-based output writer if filename is not None and output_format.lower() == 'text': - output_dir = os.path.join( -
dirname, detector_id - ) + output_dir = os.path.join(dirname, detector_id) os.makedirs(output_dir, exist_ok=True) - this_filename = os.path.join( - output_dir, filename - ) + this_filename = os.path.join(output_dir, filename) writer = PatchDataWriter(this_filename) # grab panel instr_cfg = panel.config_dict( - self.chi, self.tvec, + self.chi, + self.tvec, beam_energy=self.beam_energy, beam_vector=self.beam_vector, - style='hdf5' + style='hdf5', ) native_area = panel.pixel_area # pixel ref area # pull out the OmegaImageSeries for this panel from input dict - ome_imgser = _parse_imgser_dict(imgser_dict, - detector_id, - roi=panel.roi) + ome_imgser = _parse_imgser_dict( + imgser_dict, detector_id, roi=panel.roi + ) # extract simulation results sim_results_p = sim_results[detector_id] @@ -1658,19 +1749,24 @@ def pull_spots(self, plane_data, grain_params, # patch vertex array from sim nangs = len(ang_centers) patch_vertices = ( - np.tile(ang_centers[:, :2], (1, 4)) + - np.tile(tol_vec, (nangs, 1)) - ).reshape(4*nangs, 2) - ome_dupl = np.tile( - ang_centers[:, 2], (4, 1) - ).T.reshape(len(patch_vertices), 1) + np.tile(ang_centers[:, :2], (1, 4)) + + np.tile(tol_vec, (nangs, 1)) + ).reshape(4 * nangs, 2) + ome_dupl = np.tile(ang_centers[:, 2], (4, 1)).T.reshape( + len(patch_vertices), 1 + ) # find vertices that all fall on the panel det_xy, rmats_s, on_plane = xrdutil._project_on_detector_plane( np.hstack([patch_vertices, ome_dupl]), - panel.rmat, rMat_c, self.chi, - panel.tvec, tVec_c, self.tvec, - panel.distortion) + panel.rmat, + rMat_c, + self.chi, + panel.tvec, + tVec_c, + self.tvec, + panel.distortion, + ) _, on_panel = panel.clip_to_panel(det_xy, buffer_edges=True) # all vertices must be on... @@ -1701,7 +1797,9 @@ def pull_spots(self, plane_data, grain_params, if not quiet: msg = """ window for (%d %d %d) falls outside omega range - """ % tuple(hkls_p[i_pt, :]) + """ % tuple( + hkls_p[i_pt, :] + ) print(msg) continue else: @@ -1719,11 +1817,16 @@ def pull_spots(self, plane_data, grain_params, # make the tth,eta patches for interpolation patches = xrdutil.make_reflection_patches( instr_cfg, - ang_centers[:, :2], ang_pixel_size, + ang_centers[:, :2], + ang_pixel_size, omega=ang_centers[:, 2], - tth_tol=tth_tol, eta_tol=eta_tol, - rmat_c=rMat_c, tvec_c=tVec_c, - npdiv=npdiv, quiet=True) + tth_tol=tth_tol, + eta_tol=eta_tol, + rmat_c=rMat_c, + tvec_c=tVec_c, + npdiv=npdiv, + quiet=True, + ) # GRAND LOOP over reflections for this panel patch_output = [] @@ -1733,7 +1836,7 @@ def pull_spots(self, plane_data, grain_params, vtx_angs, vtx_xy, conn, areas, xy_eval, ijs = patch prows, pcols = areas.shape - nrm_fac = areas/float(native_area) + nrm_fac = areas / float(native_area) nrm_fac = nrm_fac / np.min(nrm_fac) # grab hkl info @@ -1747,8 +1850,9 @@ def pull_spots(self, plane_data, grain_params, delta_eta = eta_edges[1] - eta_edges[0] # need to reshape eval pts for interpolation - xy_eval = np.vstack([xy_eval[0].flatten(), - xy_eval[1].flatten()]).T + xy_eval = np.vstack( + [xy_eval[0].flatten(), xy_eval[1].flatten()] + ).T # the evaluation omegas; # expand about the central value using tol vector @@ -1763,7 +1867,9 @@ def pull_spots(self, plane_data, grain_params, if not quiet: msg = """ window for (%d%d%d) falls outside omega range - """ % tuple(hkl) + """ % tuple( + hkl + ) print(msg) continue else: @@ -1772,8 +1878,8 @@ def pull_spots(self, plane_data, grain_params, peak_id = next_invalid_peak_id sum_int = np.nan max_int = np.nan - meas_angs = np.nan*np.ones(3) - meas_xy = np.nan*np.ones(2) + 
meas_angs = np.nan * np.ones(3) + meas_xy = np.nan * np.ones(2) # quick check for intensity contains_signal = False @@ -1791,19 +1897,23 @@ def pull_spots(self, plane_data, grain_params, # initialize patch data array for intensities if interp.lower() == 'bilinear': patch_data = np.zeros( - (len(frame_indices), prows, pcols)) + (len(frame_indices), prows, pcols) + ) for i, i_frame in enumerate(frame_indices): - patch_data[i] = \ - panel.interpolate_bilinear( - xy_eval, - ome_imgser[i_frame], - pad_with_nans=False - ).reshape(prows, pcols) # * nrm_fac + patch_data[i] = panel.interpolate_bilinear( + xy_eval, + ome_imgser[i_frame], + pad_with_nans=False, + ).reshape( + prows, pcols + ) # * nrm_fac elif interp.lower() == 'nearest': patch_data = patch_data_raw # * nrm_fac else: - msg = "interpolation option " + \ - "'%s' not understood" + msg = ( + "interpolation option " + + "'%s' not understood" + ) raise RuntimeError(msg % interp) # now have interpolated patch data... @@ -1816,9 +1926,10 @@ def pull_spots(self, plane_data, grain_params, peak_id = iRefl props = regionprops(labels, patch_data) coms = np.vstack( - [x.weighted_centroid for x in props]) + [x.weighted_centroid for x in props] + ) if num_peaks > 1: - center = np.r_[patch_data.shape]*0.5 + center = np.r_[patch_data.shape] * 0.5 center_t = np.tile(center, (num_peaks, 1)) com_diff = coms - center_t closest_peak_idx = np.argmin( @@ -1829,15 +1940,17 @@ def pull_spots(self, plane_data, grain_params, coms = coms[closest_peak_idx] # meas_omes = \ # ome_edges[0] + (0.5 + coms[0])*delta_ome - meas_omes = \ - ome_eval[0] + coms[0]*delta_ome + meas_omes = ome_eval[0] + coms[0] * delta_ome meas_angs = np.hstack( - [tth_edges[0] + (0.5 + coms[2])*delta_tth, - eta_edges[0] + (0.5 + coms[1])*delta_eta, - mapAngle( - np.radians(meas_omes), ome_period - ) - ] + [ + tth_edges[0] + + (0.5 + coms[2]) * delta_tth, + eta_edges[0] + + (0.5 + coms[1]) * delta_eta, + mapAngle( + np.radians(meas_omes), ome_period + ), + ] ) # intensities @@ -1864,15 +1977,21 @@ def pull_spots(self, plane_data, grain_params, meas_angs, chi=self.chi, rmat_c=rMat_c, - beam_vec=self.beam_vector) + beam_vec=self.beam_vector, + ) rMat_s = make_sample_rmat( self.chi, meas_angs[2] ) meas_xy = gvec_to_xy( gvec_c, - panel.rmat, rMat_s, rMat_c, - panel.tvec, self.tvec, tVec_c, - beam_vec=self.beam_vector) + panel.rmat, + rMat_s, + rMat_c, + panel.tvec, + self.tvec, + tVec_c, + beam_vec=self.beam_vector, + ) if panel.distortion is not None: meas_xy = panel.distortion.apply_inverse( np.atleast_2d(meas_xy) @@ -1891,19 +2010,38 @@ def pull_spots(self, plane_data, grain_params, if filename is not None: if output_format.lower() == 'text': writer.dump_patch( - peak_id, hkl_id, hkl, sum_int, max_int, - ang_centers[i_pt], meas_angs, - xy_centers[i_pt], meas_xy) + peak_id, + hkl_id, + hkl, + sum_int, + max_int, + ang_centers[i_pt], + meas_angs, + xy_centers[i_pt], + meas_xy, + ) elif output_format.lower() == 'hdf5': xyc_arr = xy_eval.reshape( prows, pcols, 2 ).transpose(2, 0, 1) writer.dump_patch( - detector_id, iRefl, peak_id, hkl_id, hkl, - tth_edges, eta_edges, np.radians(ome_eval), - xyc_arr, ijs, frame_indices, patch_data, - ang_centers[i_pt], xy_centers[i_pt], - meas_angs, meas_xy) + detector_id, + iRefl, + peak_id, + hkl_id, + hkl, + tth_edges, + eta_edges, + np.radians(ome_eval), + xyc_arr, + ijs, + frame_indices, + patch_data, + ang_centers[i_pt], + xy_centers[i_pt], + meas_angs, + meas_xy, + ) if return_spot_list: # Full output @@ -1911,17 +2049,34 @@ def pull_spots(self, plane_data, 
grain_params, prows, pcols, 2 ).transpose(2, 0, 1) _patch_output = [ - detector_id, iRefl, peak_id, hkl_id, hkl, - tth_edges, eta_edges, np.radians(ome_eval), - xyc_arr, ijs, frame_indices, patch_data, - ang_centers[i_pt], xy_centers[i_pt], - meas_angs, meas_xy + detector_id, + iRefl, + peak_id, + hkl_id, + hkl, + tth_edges, + eta_edges, + np.radians(ome_eval), + xyc_arr, + ijs, + frame_indices, + patch_data, + ang_centers[i_pt], + xy_centers[i_pt], + meas_angs, + meas_xy, ] else: # Trimmed output _patch_output = [ - peak_id, hkl_id, hkl, sum_int, max_int, - ang_centers[i_pt], meas_angs, meas_xy + peak_id, + hkl_id, + hkl, + sum_int, + max_int, + ang_centers[i_pt], + meas_angs, + meas_xy, ] patch_output.append(_patch_output) iRefl += 1 @@ -1939,7 +2094,9 @@ def update_memoization_sizes(self): PlanarDetector.update_memoization_sizes(all_panels) CylindricalDetector.update_memoization_sizes(all_panels) - def calc_transmission(self, rMat_s: np.ndarray = None) -> dict[str, np.ndarray]: + def calc_transmission( + self, rMat_s: np.ndarray = None + ) -> dict[str, np.ndarray]: """calculate the transmission from the filter and polymer coating. the inverse of this number is the intensity correction that needs @@ -1953,26 +2110,31 @@ def calc_transmission(self, rMat_s: np.ndarray = None) -> dict[str, np.ndarray]: transmissions = {} for det_name, det in self.detectors.items(): transmission_filter, transmission_phosphor = ( - det.calc_filter_coating_transmission(energy)) + det.calc_filter_coating_transmission(energy) + ) transmission = transmission_filter * transmission_phosphor if self.physics_package is not None: transmission_physics_package = ( det.calc_physics_package_transmission( - energy, rMat_s, self.physics_package)) + energy, rMat_s, self.physics_package + ) + ) effective_pinhole_area = det.calc_effective_pinhole_area( - self.physics_package) + self.physics_package + ) transmission = ( - transmission * - transmission_physics_package * - effective_pinhole_area + transmission + * transmission_physics_package + * effective_pinhole_area ) transmissions[det_name] = transmission return transmissions + # ============================================================================= # UTILITIES # ============================================================================= @@ -1983,6 +2145,7 @@ class PatchDataWriter(object): def __init__(self, filename): self._delim = ' ' + # fmt: off header_items = ( '# ID', 'PID', 'H', 'K', 'L', @@ -1997,6 +2160,7 @@ def __init__(self, filename): self._delim.join(np.tile('{:<12}', 2)).format(*header_items[5:7]), self._delim.join(np.tile('{:<23}', 10)).format(*header_items[7:17]) ]) + # fmt: on if isinstance(filename, IOBase): self.fid = filename else: @@ -2009,30 +2173,34 @@ def __del__(self): def close(self): self.fid.close() - def dump_patch(self, peak_id, hkl_id, - hkl, spot_int, max_int, - pangs, mangs, pxy, mxy): + def dump_patch( + self, peak_id, hkl_id, hkl, spot_int, max_int, pangs, mangs, pxy, mxy + ): """ !!! 
maybe need to check that last four inputs are arrays """ if mangs is None: spot_int = np.nan max_int = np.nan - mangs = np.nan*np.ones(3) - mxy = np.nan*np.ones(2) - - res = [int(peak_id), int(hkl_id)] \ - + np.array(hkl, dtype=int).tolist() \ - + [spot_int, max_int] \ - + pangs.tolist() \ - + mangs.tolist() \ - + pxy.tolist() \ + mangs = np.nan * np.ones(3) + mxy = np.nan * np.ones(2) + + res = ( + [int(peak_id), int(hkl_id)] + + np.array(hkl, dtype=int).tolist() + + [spot_int, max_int] + + pangs.tolist() + + mangs.tolist() + + pxy.tolist() + mxy.tolist() + ) output_str = self._delim.join( - [self._delim.join(np.tile('{:<6d}', 5)).format(*res[:5]), - self._delim.join(np.tile('{:<12e}', 2)).format(*res[5:7]), - self._delim.join(np.tile('{:<23.16e}', 10)).format(*res[7:])] + [ + self._delim.join(np.tile('{:<6d}', 5)).format(*res[:5]), + self._delim.join(np.tile('{:<12e}', 2)).format(*res[5:7]), + self._delim.join(np.tile('{:<23.16e}', 10)).format(*res[7:]), + ] ) print(output_str, file=self.fid) return output_str @@ -2048,20 +2216,23 @@ def __init__(self, filename=None, array=None): """ if filename is None and array is None: raise RuntimeError( - 'GrainDataWriter must be specified with filename or array') + 'GrainDataWriter must be specified with filename or array' + ) self.array = None self.fid = None # array supersedes filename if array is not None: - assert array.shape[1] == 21, \ - f'grain data table must have 21 columns not {array.shape[21]}' + assert ( + array.shape[1] == 21 + ), f'grain data table must have 21 columns not {array.shape[21]}' self.array = array self._array_row = 0 return self._delim = ' ' + # fmt: off header_items = ( '# grain ID', 'completeness', 'chi^2', 'exp_map_c[0]', 'exp_map_c[1]', 'exp_map_c[2]', @@ -2081,6 +2252,7 @@ def __init__(self, filename=None, array=None): np.tile('{:<23}', len(header_items) - 3) ).format(*header_items[3:])] ) + # fmt: on if isinstance(filename, IOBase): self.fid = filename else: @@ -2094,35 +2266,40 @@ def close(self): if self.fid is not None: self.fid.close() - def dump_grain(self, grain_id, completeness, chisq, - grain_params): - assert len(grain_params) == 12, \ - "len(grain_params) must be 12, not %d" % len(grain_params) + def dump_grain(self, grain_id, completeness, chisq, grain_params): + assert ( + len(grain_params) == 12 + ), "len(grain_params) must be 12, not %d" % len(grain_params) # extract strain emat = logm(np.linalg.inv(mutil.vecMVToSymm(grain_params[6:]))) evec = mutil.symmToVecMV(emat, scale=False) - res = [int(grain_id), completeness, chisq] \ - + grain_params.tolist() \ + res = ( + [int(grain_id), completeness, chisq] + + grain_params.tolist() + evec.tolist() + ) if self.array is not None: row = self._array_row - assert row < self.array.shape[0], \ - f'invalid row {row} in array table' + assert ( + row < self.array.shape[0] + ), f'invalid row {row} in array table' self.array[row] = res self._array_row += 1 return res # (else) format and write to file output_str = self._delim.join( - [self._delim.join( - ['{:<12d}', '{:<12f}', '{:<12e}'] - ).format(*res[:3]), - self._delim.join( - np.tile('{:<23.16e}', len(res) - 3) - ).format(*res[3:])] + [ + self._delim.join(['{:<12d}', '{:<12f}', '{:<12e}']).format( + *res[:3] + ), + self._delim.join(np.tile('{:<23.16e}', len(res) - 3)).format( + *res[3:] + ), + ] ) print(output_str, file=self.fid) return output_str @@ -2152,12 +2329,12 @@ def __init__(self, filename, instr_cfg, grain_params, use_attr=False): vinv_s = np.array(grain_params[6:]).flatten() vmat_s = 
np.linalg.inv(mutil.vecMVToSymm(vinv_s)) - if use_attr: # attribute version + if use_attr: # attribute version self.grain_grp.attrs.create('rmat_c', rmat_c) self.grain_grp.attrs.create('tvec_c', tvec_c) self.grain_grp.attrs.create('inv(V)_s', vinv_s) self.grain_grp.attrs.create('vmat_s', vmat_s) - else: # dataset version + else: # dataset version self.grain_grp.create_dataset('rmat_c', data=rmat_c) self.grain_grp.create_dataset('tvec_c', data=tvec_c) self.grain_grp.create_dataset('inv(V)_s', data=vinv_s) @@ -2176,11 +2353,26 @@ def __init__(self, filename, instr_cfg, grain_params, use_attr=False): def close(self): self.fid.close() - def dump_patch(self, panel_id, - i_refl, peak_id, hkl_id, hkl, - tth_edges, eta_edges, ome_centers, - xy_centers, ijs, frame_indices, - spot_data, pangs, pxy, mangs, mxy, gzip=1): + def dump_patch( + self, + panel_id, + i_refl, + peak_id, + hkl_id, + hkl, + tth_edges, + eta_edges, + ome_centers, + xy_centers, + ijs, + frame_indices, + spot_data, + pangs, + pxy, + mangs, + mxy, + gzip=1, + ): """ to be called inside loop over patches @@ -2196,10 +2388,10 @@ def dump_patch(self, panel_id, spot_grp.attrs.create('predicted_angles', pangs) spot_grp.attrs.create('predicted_xy', pxy) if mangs is None: - mangs = np.nan*np.ones(3) + mangs = np.nan * np.ones(3) spot_grp.attrs.create('measured_angles', mangs) if mxy is None: - mxy = np.nan*np.ones(3) + mxy = np.nan * np.ones(3) spot_grp.attrs.create('measured_xy', mxy) # get centers crds from edge arrays @@ -2218,27 +2410,55 @@ def dump_patch(self, panel_id, eta_crd = centers_of_edge_vec(eta_edges) shuffle_data = True # reduces size by 20% - spot_grp.create_dataset('tth_crd', data=tth_crd, - compression="gzip", compression_opts=gzip, - shuffle=shuffle_data) - spot_grp.create_dataset('eta_crd', data=eta_crd, - compression="gzip", compression_opts=gzip, - shuffle=shuffle_data) - spot_grp.create_dataset('ome_crd', data=ome_centers, - compression="gzip", compression_opts=gzip, - shuffle=shuffle_data) - spot_grp.create_dataset('xy_centers', data=xy_centers, - compression="gzip", compression_opts=gzip, - shuffle=shuffle_data) - spot_grp.create_dataset('ij_centers', data=ijs, - compression="gzip", compression_opts=gzip, - shuffle=shuffle_data) - spot_grp.create_dataset('frame_indices', data=fi, - compression="gzip", compression_opts=gzip, - shuffle=shuffle_data) - spot_grp.create_dataset('intensities', data=spot_data, - compression="gzip", compression_opts=gzip, - shuffle=shuffle_data) + spot_grp.create_dataset( + 'tth_crd', + data=tth_crd, + compression="gzip", + compression_opts=gzip, + shuffle=shuffle_data, + ) + spot_grp.create_dataset( + 'eta_crd', + data=eta_crd, + compression="gzip", + compression_opts=gzip, + shuffle=shuffle_data, + ) + spot_grp.create_dataset( + 'ome_crd', + data=ome_centers, + compression="gzip", + compression_opts=gzip, + shuffle=shuffle_data, + ) + spot_grp.create_dataset( + 'xy_centers', + data=xy_centers, + compression="gzip", + compression_opts=gzip, + shuffle=shuffle_data, + ) + spot_grp.create_dataset( + 'ij_centers', + data=ijs, + compression="gzip", + compression_opts=gzip, + shuffle=shuffle_data, + ) + spot_grp.create_dataset( + 'frame_indices', + data=fi, + compression="gzip", + compression_opts=gzip, + shuffle=shuffle_data, + ) + spot_grp.create_dataset( + 'intensities', + data=spot_data, + compression="gzip", + compression_opts=gzip, + shuffle=shuffle_data, + ) return @@ -2260,9 +2480,16 @@ class GenerateEtaOmeMaps(object): """ - def __init__(self, image_series_dict, instrument, 
plane_data, - active_hkls=None, eta_step=0.25, threshold=None, - ome_period=(0, 360)): + def __init__( + self, + image_series_dict, + instrument, + plane_data, + active_hkls=None, + eta_step=0.25, + threshold=None, + ome_period=(0, 360), + ): """ image_series must be OmegaImageSeries class instrument_params must be a dict (loaded from yaml spec) @@ -2276,13 +2503,12 @@ def __init__(self, image_series_dict, instrument, plane_data, # ???: change name of iHKLList? # ???: can we change the behavior of iHKLList? if active_hkls is None: - self._iHKLList = plane_data.getHKLID( - plane_data.hkls, master=True - ) + self._iHKLList = plane_data.getHKLID(plane_data.hkls, master=True) n_rings = len(self._iHKLList) else: - assert hasattr(active_hkls, '__len__'), \ - "active_hkls must be an iterable with __len__" + assert hasattr( + active_hkls, '__len__' + ), "active_hkls must be an iterable with __len__" self._iHKLList = active_hkls n_rings = len(active_hkls) @@ -2297,14 +2523,18 @@ def __init__(self, image_series_dict, instrument, plane_data, omegas_array = this_det_ims.metadata['omega'] # !!! DEGREES delta_ome = omegas_array[0][-1] - omegas_array[0][0] frame_mask = None - ome_period = omegas_array[0, 0] + np.r_[0., 360.] # !!! be careful + ome_period = omegas_array[0, 0] + np.r_[0.0, 360.0] # !!! be careful if this_det_ims.omegawedges.nwedges > 1: - delta_omes = [(i['ostop'] - i['ostart'])/i['nsteps'] - for i in this_det_ims.omegawedges.wedges] - check_wedges = mutil.uniqueVectors(np.atleast_2d(delta_omes), - tol=1e-6).squeeze() - assert check_wedges.size == 1, \ - "all wedges must have the same delta omega to 1e-6" + delta_omes = [ + (i['ostop'] - i['ostart']) / i['nsteps'] + for i in this_det_ims.omegawedges.wedges + ] + check_wedges = mutil.uniqueVectors( + np.atleast_2d(delta_omes), tol=1e-6 + ).squeeze() + assert ( + check_wedges.size == 1 + ), "all wedges must have the same delta omega to 1e-6" # grab representative delta ome # !!! assuming positive delta consistent with OmegaImageSeries delta_ome = delta_omes[0] @@ -2320,9 +2550,9 @@ def __init__(self, image_series_dict, instrument, plane_data, ) # compute total nsteps # FIXME: need check for roundoff badness - nsteps = int((ostop - ostart)/delta_ome) + nsteps = int((ostop - ostart) / delta_ome) ome_edges_full = np.linspace( - ostart, ostop, num=nsteps+1, endpoint=True + ostart, ostop, num=nsteps + 1, endpoint=True ) omegas_array = np.vstack( [ome_edges_full[:-1], ome_edges_full[1:]] @@ -2333,15 +2563,21 @@ def __init__(self, image_series_dict, instrument, plane_data, # !!! this array has -1 outside a wedge # !!! again assuming the valid frame order increases monotonically frame_mask = np.array( - [this_det_ims.omega_to_frame(ome)[0] != -1 - for ome in ome_centers] + [ + this_det_ims.omega_to_frame(ome)[0] != -1 + for ome in ome_centers + ] ) # ???: need to pass a threshold? eta_mapping, etas = instrument.extract_polar_maps( - plane_data, image_series_dict, - active_hkls=active_hkls, threshold=threshold, - tth_tol=None, eta_tol=eta_step) + plane_data, + image_series_dict, + active_hkls=active_hkls, + threshold=threshold, + tth_tol=None, + eta_tol=eta_step, + ) # for convenience grab map shape from first map_shape = next(iter(eta_mapping.values())).shape[1:] @@ -2368,7 +2604,7 @@ def __init__(self, image_series_dict, instrument, plane_data, if frame_mask is not None: # !!! 
must expand row dimension to include # skipped omegas - tmp = np.ones((len(frame_mask), map_shape[1]))*np.nan + tmp = np.ones((len(frame_mask), map_shape[1])) * np.nan tmp[frame_mask, :] = full_map full_map = tmp data_store.append(full_map) @@ -2377,11 +2613,11 @@ def __init__(self, image_series_dict, instrument, plane_data, # set required attributes self._omegas = mapAngle( np.radians(np.average(omegas_array, axis=1)), - np.radians(ome_period) + np.radians(ome_period), ) self._omeEdges = mapAngle( np.radians(np.r_[omegas_array[:, 0], omegas_array[-1, 1]]), - np.radians(ome_period) + np.radians(ome_period), ) # !!! must avoid the case where omeEdges[0] = omeEdges[-1] for the @@ -2395,7 +2631,7 @@ def __init__(self, image_series_dict, instrument, plane_data, # WARNING: unlinke the omegas in imageseries metadata, # these are in RADIANS and represent bin centers self._etaEdges = etas - self._etas = self._etaEdges[:-1] + 0.5*np.radians(eta_step) + self._etas = self._etaEdges[:-1] + 0.5 * np.radians(eta_step) @property def dataStore(self): @@ -2431,9 +2667,7 @@ def save(self, filename): def _generate_ring_params(tthr, ptth, peta, eta_edges, delta_eta): # mark pixels in the spec'd tth range - pixels_in_tthr = np.logical_and( - ptth >= tthr[0], ptth <= tthr[1] - ) + pixels_in_tthr = np.logical_and(ptth >= tthr[0], ptth <= tthr[1]) # catch case where ring isn't on detector if not np.any(pixels_in_tthr): @@ -2450,8 +2684,7 @@ def _generate_ring_params(tthr, ptth, peta, eta_edges, delta_eta): def run_fast_histogram(x, bins, weights=None): - return histogram1d(x, len(bins) - 1, (bins[0], bins[-1]), - weights=weights) + return histogram1d(x, len(bins) - 1, (bins[0], bins[-1]), weights=weights) def run_numpy_histogram(x, bins, weights=None): @@ -2469,7 +2702,7 @@ def _run_histograms(rows, ims, tth_ranges, ring_maps, ring_params, threshold): if threshold is not None: # !!! NaNs get preserved image = np.array(image) - image[image < threshold] = 0. 
+ image[image < threshold] = 0.0 for i_r, tthr in enumerate(tth_ranges): this_map = ring_maps[i_r] @@ -2486,12 +2719,21 @@ def _run_histograms(rows, ims, tth_ranges, ring_maps, ring_params, threshold): this_map[i_row, bins_on_detector] = result[bins_on_detector] -def _extract_detector_line_positions(iter_args, plane_data, tth_tol, - eta_tol, eta_centers, npdiv, - collapse_tth, collapse_eta, - do_interpolation, do_fitting, - fitting_kwargs, tth_distortion, - max_workers): +def _extract_detector_line_positions( + iter_args, + plane_data, + tth_tol, + eta_tol, + eta_centers, + npdiv, + collapse_tth, + collapse_eta, + do_interpolation, + do_fitting, + fitting_kwargs, + tth_distortion, + max_workers, +): panel, instr_cfg, images, pbp = iter_args if images.ndim == 2: @@ -2506,9 +2748,13 @@ def _extract_detector_line_positions(iter_args, plane_data, tth_tol, tth_distr_cls = tth_distortion[panel.name] pow_angs, pow_xys, tth_ranges = panel.make_powder_rings( - plane_data, merge_hkls=True, - delta_tth=tth_tol, delta_eta=eta_tol, - eta_list=eta_centers, tth_distortion=tth_distr_cls) + plane_data, + merge_hkls=True, + delta_tth=tth_tol, + delta_eta=eta_tol, + eta_list=eta_centers, + tth_distortion=tth_distr_cls, + ) tth_tols = np.degrees(np.hstack([i[1] - i[0] for i in tth_ranges])) @@ -2523,8 +2769,9 @@ def _extract_detector_line_positions(iter_args, plane_data, tth_tol, # ================================================================= # LOOP OVER RING SETS # ================================================================= - pbar_rings = partial(tqdm, total=len(pow_angs), desc="Ringset", - position=pbp) + pbar_rings = partial( + tqdm, total=len(pow_angs), desc="Ringset", position=pbp + ) kwargs = { 'instr_cfg': instr_cfg, @@ -2541,15 +2788,26 @@ def _extract_detector_line_positions(iter_args, plane_data, tth_tol, } func = partial(_extract_ring_line_positions, **kwargs) iter_arg = zip(pow_angs, pow_xys, tth_tols, tth0) - with ProcessPoolExecutor(mp_context=constants.mp_context, - max_workers=max_workers) as executor: + with ProcessPoolExecutor( + mp_context=constants.mp_context, max_workers=max_workers + ) as executor: return list(pbar_rings(executor.map(func, iter_arg))) -def _extract_ring_line_positions(iter_args, instr_cfg, panel, eta_tol, npdiv, - collapse_tth, collapse_eta, images, - do_interpolation, do_fitting, fitting_kwargs, - tth_distortion): +def _extract_ring_line_positions( + iter_args, + instr_cfg, + panel, + eta_tol, + npdiv, + collapse_tth, + collapse_eta, + images, + do_interpolation, + do_fitting, + fitting_kwargs, + tth_distortion, +): """ Extracts data for a single Debye-Scherrer ring . 
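Editor's note: the hunk above reorganizes how _extract_detector_line_positions fans the per-ring work out to worker processes, and the pattern is easy to miss in the reflow. Below is a minimal, self-contained sketch of that same pattern (functools.partial to freeze shared arguments, ProcessPoolExecutor.map for the fan-out, tqdm wrapping the result iterator for progress); process_ring and its argument names are hypothetical stand-ins, not hexrd functions.

# Editor's sketch (not part of the patch): the fan-out pattern used by
# _extract_detector_line_positions, reduced to its essentials.
from concurrent.futures import ProcessPoolExecutor
from functools import partial

from tqdm import tqdm


def process_ring(ring_args, eta_tol, npdiv):
    # stand-in for _extract_ring_line_positions: one ring set in,
    # one result out
    angs, xys, tth_tol = ring_args
    return len(angs) * npdiv  # placeholder work


def extract_all(pow_angs, pow_xys, tth_tols, eta_tol=1.0, npdiv=2,
                max_workers=4):
    # freeze the shared keyword arguments so executor.map only has to
    # iterate over the per-ring tuples, mirroring `func = partial(...)`
    func = partial(process_ring, eta_tol=eta_tol, npdiv=npdiv)
    # partial(tqdm, ...) builds a progress-bar wrapper, exactly as
    # pbar_rings is built in the hunk above
    pbar = partial(tqdm, total=len(pow_angs), desc="Ringset")
    iter_arg = zip(pow_angs, pow_xys, tth_tols)
    with ProcessPoolExecutor(max_workers=max_workers) as executor:
        # consuming the mapped iterator through tqdm keeps the bar live
        # while results stream back in submission order
        return list(pbar(executor.map(func, iter_arg)))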
@@ -2597,16 +2855,22 @@ def _extract_ring_line_positions(iter_args, instr_cfg, panel, eta_tol, npdiv, nan_mask = ~np.logical_or(np.isnan(xys), np.isnan(angs)) nan_mask = np.logical_or.reduce(nan_mask, 1) if angs.ndim > 1 and xys.ndim > 1: - angs = angs[nan_mask,:] - xys = xys[nan_mask, :] + angs = angs[nan_mask, :] + xys = xys[nan_mask, :] n_images = len(images) native_area = panel.pixel_area # make the tth,eta patches for interpolation patches = xrdutil.make_reflection_patches( - instr_cfg, angs, panel.angularPixelSize(xys), - tth_tol=tth_tol, eta_tol=eta_tol, npdiv=npdiv, quiet=True) + instr_cfg, + angs, + panel.angularPixelSize(xys), + tth_tol=tth_tol, + eta_tol=eta_tol, + npdiv=npdiv, + quiet=True, + ) # loop over patches # FIXME: fix initialization @@ -2619,9 +2883,7 @@ def _extract_ring_line_positions(iter_args, instr_cfg, panel, eta_tol, npdiv, vtx_angs, vtx_xys, conn, areas, xys_eval, ijs = patch # need to reshape eval pts for interpolation - xy_eval = np.vstack([ - xys_eval[0].flatten(), - xys_eval[1].flatten()]).T + xy_eval = np.vstack([xys_eval[0].flatten(), xys_eval[1].flatten()]).T _, on_panel = panel.clip_to_panel(xy_eval) @@ -2629,25 +2891,20 @@ def _extract_ring_line_positions(iter_args, instr_cfg, panel, eta_tol, npdiv, continue if collapse_tth: - ang_data = (vtx_angs[0][0, [0, -1]], - vtx_angs[1][[0, -1], 0]) + ang_data = (vtx_angs[0][0, [0, -1]], vtx_angs[1][[0, -1], 0]) elif collapse_eta: # !!! yield the tth bin centers tth_centers = np.average( - np.vstack( - [vtx_angs[0][0, :-1], vtx_angs[0][0, 1:]] - ), - axis=0 + np.vstack([vtx_angs[0][0, :-1], vtx_angs[0][0, 1:]]), axis=0 ) - ang_data = (tth_centers, - angs[i_p][-1]) + ang_data = (tth_centers, angs[i_p][-1]) if do_fitting: fit_data = [] else: ang_data = vtx_angs prows, pcols = areas.shape - area_fac = areas/float(native_area) + area_fac = areas / float(native_area) # interpolate if not collapse_tth: @@ -2656,19 +2913,22 @@ def _extract_ring_line_positions(iter_args, instr_cfg, panel, eta_tol, npdiv, # catch interpolation type image = images[j_p] if do_interpolation: - p_img = panel.interpolate_bilinear( + p_img = ( + panel.interpolate_bilinear( xy_eval, image, - ).reshape(prows, pcols)*area_fac + ).reshape(prows, pcols) + * area_fac + ) else: - p_img = image[ijs[0], ijs[1]]*area_fac + p_img = image[ijs[0], ijs[1]] * area_fac # catch flat spectrum data, which will cause # fitting to fail. # ???: best here, or make fitting handle it? mxval = np.max(p_img) mnval = np.min(p_img) - if mxval == 0 or (1. 
- mnval/mxval) < 0.01: + if mxval == 0 or (1.0 - mnval / mxval) < 0.01: continue # catch collapsing options @@ -2685,11 +2945,16 @@ def _extract_ring_line_positions(iter_args, instr_cfg, panel, eta_tol, npdiv, tmp = tth_distortion.apply( panel.angles_to_cart( np.vstack( - [np.radians(this_tth0), - np.tile(ang_data[-1], len(this_tth0))] + [ + np.radians(this_tth0), + np.tile( + ang_data[-1], len(this_tth0) + ), + ] ).T ), - return_nominal=True) + return_nominal=True, + ) pk_centers = np.degrees(tmp[:, 0]) else: pk_centers = this_tth0 diff --git a/hexrd/powder/material/crystallography.py b/hexrd/powder/material/crystallography.py index 482c625d9..29e621972 100644 --- a/hexrd/powder/material/crystallography.py +++ b/hexrd/powder/material/crystallography.py @@ -38,7 +38,13 @@ from hexrd.core.deprecation import deprecated from hexrd.core import constants from hexrd.core.matrixutil import unitVector -from hexrd.core.rotations import rotMatOfExpMap, mapAngle, applySym, ltypeOfLaueGroup, quatOfLaueGroup +from hexrd.core.rotations import ( + rotMatOfExpMap, + mapAngle, + applySym, + ltypeOfLaueGroup, + quatOfLaueGroup, +) from hexrd.core.transforms import xfcapi from hexrd.core import valunits from hexrd.core.valunits import toFloat @@ -159,6 +165,7 @@ def processWavelength(arg: Union[valunits.valWUnit, float]) -> float: 'wavelength', 'length', constants.keVToAngstrom(arg), 'angstrom' ).getVal(dUnit) + def latticeParameters(lvec): """ Generates direct and reciprocal lattice vector components in a @@ -186,6 +193,7 @@ def latticeParameters(lvec): return [a, b, c, alfa, beta, gama] + def latticePlanes( hkls: np.ndarray, lparms: np.ndarray, @@ -562,6 +570,7 @@ def latticeVectors( 'rparms': rparms, } + def hexagonalIndicesFromRhombohedral(hkl): """ converts rhombohedral hkl to hexagonal indices @@ -909,7 +918,7 @@ def exclusions(self, new_exclusions: Optional[np.ndarray]) -> None: elif len(exclusions.shape) == 2: # treat exclusions as ranges of indices for r in exclusions: - excl[self.tThSort[r[0]:r[1]]] = True + excl[self.tThSort[r[0] : r[1]]] = True else: raise RuntimeError( f'Unclear behavior for shape {exclusions.shape}' @@ -1844,8 +1853,10 @@ def get_exclusions(self): def set_exclusions(self, exclusions): self.exclusions = exclusions - @deprecated(new_func="rotations.ltypeOfLaueGroup(self.laueGroup)", - removal_date="2025-08-01") + @deprecated( + new_func="rotations.ltypeOfLaueGroup(self.laueGroup)", + removal_date="2025-08-01", + ) def getLatticeType(self): return ltypeOfLaueGroup(self.laueGroup) diff --git a/hexrd/powder/wppf/LeBailCalibration.py b/hexrd/powder/wppf/LeBailCalibration.py index 794f3005d..ca2c8ed77 100644 --- a/hexrd/powder/wppf/LeBailCalibration.py +++ b/hexrd/powder/wppf/LeBailCalibration.py @@ -3,7 +3,15 @@ from numpy.polynomial.chebyshev import Chebyshev import lmfit import warnings -from hexrd.powder.wppf.peakfunctions import calc_rwp, computespectrum_pvfcj, computespectrum_pvtch, computespectrum_pvpink, calc_Iobs_pvfcj, calc_Iobs_pvtch, calc_Iobs_pvpink +from hexrd.powder.wppf.peakfunctions import ( + calc_rwp, + computespectrum_pvfcj, + computespectrum_pvtch, + computespectrum_pvpink, + calc_Iobs_pvfcj, + calc_Iobs_pvtch, + calc_Iobs_pvpink, +) from hexrd.powder.wppf.spectrum import Spectrum from hexrd.powder.wppf import wppfsupport, LeBail from hexrd.powder.wppf.parameters import Parameters @@ -20,6 +28,7 @@ from hexrd.core.projections.polar import PolarView import time + class LeBailCalibrator: """ ====================================================================== @@ 
-43,20 +52,23 @@ class LeBailCalibrator: ====================================================================== ====================================================================== """ - def __init__(self, - instrument, - img_dict, - extent=(0.,90.,0.,360.), - pixel_size=(0.1, 1.0), - params=None, - phases=None, - azimuthal_step=5.0, - bkgmethod={'chebyshev': 3}, - peakshape="pvtch", - intensity_init=None, - apply_solid_angle_correction=False, - apply_lp_correction=False, - polarization=None): + + def __init__( + self, + instrument, + img_dict, + extent=(0.0, 90.0, 0.0, 360.0), + pixel_size=(0.1, 1.0), + params=None, + phases=None, + azimuthal_step=5.0, + bkgmethod={'chebyshev': 3}, + peakshape="pvtch", + intensity_init=None, + apply_solid_angle_correction=False, + apply_lp_correction=False, + polarization=None, + ): self.bkgmethod = bkgmethod self.peakshape = peakshape @@ -111,8 +123,7 @@ def calctth(self): dsp = self.phases[p].dsp[allowed] tth_min = self.tth_min tth_max = self.tth_max - limit = np.logical_and(t >= tth_min, - t <= tth_max) + limit = np.logical_and(t >= tth_min, t <= tth_max) self.tth[p][k] = t[limit] self.hkls[p][k] = hkl[limit, :] self.dsp[p][k] = dsp[limit] @@ -130,8 +141,8 @@ def initialize_Icalc(self): Icalc = {} g = {} prefix = f"azpos{ii}" - lo = self.lineouts[prefix].data[:,1] - if(self.intensity_init is None): + lo = self.lineouts[prefix].data[:, 1] + if self.intensity_init is None: if np.nanmax(lo) > 0: n10 = np.floor(np.log10(np.nanmax(lo))) - 2 else: @@ -140,7 +151,7 @@ def initialize_Icalc(self): for p in self.phases: Icalc[p] = {} for k, l in self.phases.wavelength.items(): - Icalc[p][k] = (10**n10)*np.ones(self.tth[p][k].shape) + Icalc[p][k] = (10**n10) * np.ones(self.tth[p][k].shape) self.Icalc[prefix] = Icalc @@ -148,41 +159,39 @@ def initialize_Icalc(self): self.refine_instrument = False def prepare_polarview(self): - self.masked = self.pv.warp_image(self.img_dict, \ - pad_with_nans=True, \ - do_interpolation=True) + self.masked = self.pv.warp_image( + self.img_dict, pad_with_nans=True, do_interpolation=True + ) lo = self.masked.sum(axis=0) / np.sum(~self.masked.mask, axis=0) - self.fulllineout = np.vstack((self.tth_list,lo)).T + self.fulllineout = np.vstack((self.tth_list, lo)).T self.prepare_lineouts() def prepare_lineouts(self): self.lineouts = {} if hasattr(self, 'masked'): - azch = self.azimuthal_chunks - tth = self.tth_list - for ii in range(azch.shape[0]-1): + azch = self.azimuthal_chunks + tth = self.tth_list + for ii in range(azch.shape[0] - 1): istr = azch[ii] - istp = azch[ii+1] - lo = self.masked[istr:istp,:].sum(axis=0) / \ - np.sum(~self.masked[istr:istp,:].mask, axis=0) - data = np.ma.vstack((tth,lo)).T + istp = azch[ii + 1] + lo = self.masked[istr:istp, :].sum(axis=0) / np.sum( + ~self.masked[istr:istp, :].mask, axis=0 + ) + data = np.ma.vstack((tth, lo)).T key = f"azpos{ii}" self.lineouts[key] = data - - def computespectrum(self, - instr_updated, - lp_updated): + def computespectrum(self, instr_updated, lp_updated): """ this function calls the computespectrum function in the lebaillight class for all the azimuthal positions and accumulates the error vector from each of those lineouts. 
this is more or less a book keeping function rather """ - errvec = np.empty([0,]) + errvec = np.empty([0]) rwp = [] - for k,v in self.lineouts_sim.items(): + for k, v in self.lineouts_sim.items(): v.params = self.params if instr_updated: v.lineout = self.lineouts[k] @@ -195,17 +204,16 @@ def computespectrum(self, v.computespectrum() ww = v.weights - evec = ww*(v.spectrum_expt._y - - v.spectrum_sim._y)**2 + evec = ww * (v.spectrum_expt._y - v.spectrum_sim._y) ** 2 evec = np.sqrt(evec) evec = np.nan_to_num(evec) - errvec = np.concatenate((errvec,evec)) + errvec = np.concatenate((errvec, evec)) - weighted_expt = np.nan_to_num(ww*v.spectrum_expt._y**2) + weighted_expt = np.nan_to_num(ww * v.spectrum_expt._y**2) wss = np.trapz(evec, v.tth_list) den = np.trapz(weighted_expt, v.tth_list) - r = np.sqrt(wss/den)*100. + r = np.sqrt(wss / den) * 100.0 if ~np.isnan(r): rwp.append(r) @@ -224,15 +232,16 @@ def calcrwp(self, params): lp_updated = self.update_param_vals(params) self.update_shkl(params) instr_updated = self.update_instrument(params) - errvec, rwp = self.computespectrum(instr_updated, - lp_updated) + errvec, rwp = self.computespectrum(instr_updated, lp_updated) self.Rwp = np.mean(rwp) self.nfev += 1 self.Rwplist = np.append(self.Rwplist, self.Rwp) if np.mod(self.nfev, 10) == 0: - msg = (f"refinement ongoing... \n weighted residual at " - f"iteration # {self.nfev} = {self.Rwp}\n") + msg = ( + f"refinement ongoing... \n weighted residual at " + f"iteration # {self.nfev} = {self.Rwp}\n" + ) print(msg) return errvec @@ -244,8 +253,10 @@ def initialize_lmfit_parameters(self): for p in self.params: par = self.params[p] - if(par.vary): - params.add(p, value=par.value, min=par.min, max=par.max, vary=True) + if par.vary: + params.add( + p, value=par.value, min=par.min, max=par.max, vary=True + ) return params def Refine(self): @@ -267,16 +278,19 @@ def Refine(self): self.res = res if self.res.success: - msg = (f"\n \n optimization successful: {self.res.message}. \n" - f"weighted residual error = {self.Rwp}") + msg = ( + f"\n \n optimization successful: {self.res.message}. \n" + f"weighted residual error = {self.Rwp}" + ) else: - msg = (f"\n \n optimization unsuccessful: {self.res.message}. \n" - f"weighted residual error = {self.Rwp}") + msg = ( + f"\n \n optimization unsuccessful: {self.res.message}. 
\n" + f"weighted residual error = {self.Rwp}" + ) print(msg) - def update_param_vals(self, - params): + def update_param_vals(self, params): """ @date 03/12/2021 SS 1.0 original take values in parameters and set the @@ -309,7 +323,7 @@ def update_param_vals(self, elif nn in self.params: lp.append(self.params[nn].value) - if(not lpvary): + if not lpvary: pass else: lp = self.phases[p].Required_lp(lp) @@ -335,18 +349,17 @@ def update_shkl(self, params): eq_const = self.phases[p].eq_constraints mname = self.phases[p].name key = [f"{mname}_{s}" for s in shkl_name] - for s,k in zip(shkl_name,key): + for s, k in zip(shkl_name, key): if k in params: shkl_dict[s] = params[k].value else: shkl_dict[s] = self.params[k].value - self.phases[p].shkl = wppfsupport._fill_shkl(\ - shkl_dict, eq_const) + self.phases[p].shkl = wppfsupport._fill_shkl(shkl_dict, eq_const) def update_instrument(self, params): instr_updated = False - for key,det in self._instrument.detectors.items(): + for key, det in self._instrument.detectors.items(): for ii in range(3): pname = f"{key}_tvec{ii}" if pname in params: @@ -368,7 +381,6 @@ def bkgdegree(self): if "chebyshev" in self.bkgmethod.keys(): return self.bkgmethod["chebyshev"] - @property def instrument(self): return self._instrument @@ -377,11 +389,13 @@ def instrument(self): def instrument(self, ins): if isinstance(ins, instrument.HEDMInstrument): self._instrument = ins - self.pv = PolarView(self.extent[0:2], - ins, - eta_min=self.extent[2], - eta_max=self.extent[3], - pixel_size=self.pixel_size) + self.pv = PolarView( + self.extent[0:2], + ins, + eta_min=self.extent[2], + eta_max=self.extent[3], + pixel_size=self.pixel_size, + ) self.prepare_polarview() else: @@ -414,17 +428,20 @@ def extent(self, ext): if hasattr(self, "instrument"): if hasattr(self, "pixel_size"): - self.pv = PolarView(ext[0:2], - self.instrument, - eta_min=ext[2], - eta_max=ext[3], - pixel_size=self.pixel_size) + self.pv = PolarView( + ext[0:2], + self.instrument, + eta_min=ext[2], + eta_max=ext[3], + pixel_size=self.pixel_size, + ) self.prepare_polarview() """ this property returns a azimuthal range over which the summation is performed to get the lineouts """ + @property def azimuthal_chunks(self): extent = self.extent @@ -432,21 +449,20 @@ def azimuthal_chunks(self): azlim = extent[2:] pxsz = self.pixel_size[1] shp = self.masked.shape[0] - npix = int(np.round(step/pxsz)) - return np.r_[np.arange(0,shp,npix),shp] + npix = int(np.round(step / pxsz)) + return np.r_[np.arange(0, shp, npix), shp] @property def tth_list(self): - return np.squeeze(np.degrees(self.pv.angular_grid[1][0,:])) + return np.squeeze(np.degrees(self.pv.angular_grid[1][0, :])) @property def wavelength(self): lam = keVToAngstrom(self.instrument.beam_energy) - return {"lam1": - [valWUnit('lp', 'length', lam, 'angstrom'),1.0]} + return {"lam1": [valWUnit('lp', 'length', lam, 'angstrom'), 1.0]} def striphkl(self, g): - return str(g)[1:-1].replace(" ","") + return str(g)[1:-1].replace(" ", "") @property def refine_background(self): @@ -459,7 +475,10 @@ def refine_background(self, val): self._refine_background = val prefix = "azpos" for ii in range(len(self.lineouts)): - pname = [f"{prefix}{ii}_bkg_C{jj}" for jj in range(self.bkgdegree)] + pname = [ + f"{prefix}{ii}_bkg_C{jj}" + for jj in range(self.bkgdegree) + ] for p in pname: self.params[p].vary = val else: @@ -480,7 +499,7 @@ def refine_instrument(self, val): for key in self.instrument.detectors: pnametvec = [f"{key}_tvec{i}" for i in range(3)] pnametilt = [f"{key}_tilt{i}" for i 
in range(3)]
-            for ptv,pti in zip(pnametvec,pnametilt):
+            for ptv, pti in zip(pnametvec, pnametilt):
                 self.params[ptv].vary = val
                 self.params[pti].vary = val
         else:
@@ -529,11 +548,13 @@ def pixel_size(self, px_sz):
 
         if hasattr(self, "instrument"):
             if hasattr(self, "extent"):
-                self.pv = PolarView(self.extent[0:2],
-                                    ins,
-                                    eta_min=self.extent[2],
-                                    eta_max=self.extent[3],
-                                    pixel_size=px_sz)
+                self.pv = PolarView(
+                    self.extent[0:2],
+                    self.instrument,
+                    eta_min=self.extent[2],
+                    eta_max=self.extent[3],
+                    pixel_size=px_sz,
+                )
                 self.prepare_polarview()
 
     @property
@@ -564,7 +585,6 @@ def lpcorrection(self, val):
             msg = "only boolean values accepted"
             raise ValueError(msg)
 
-
     @property
     def img_dict(self):
 
@@ -578,8 +598,7 @@ def img_dict(self):
         if self.lpcorrection:
             hpol, vpol = self.polarization
             for dname, det in self.instrument.detectors.items():
-                lp = det.polarization_factor(hpol, vpol) *\
-                    det.lorentz_factor()
+                lp = det.polarization_factor(hpol, vpol) * det.lorentz_factor()
                 imd[dname] = imd[dname] / lp
 
         return imd
@@ -612,11 +631,11 @@ def azimuthal_step(self, val):
 
     @property
     def tth_min(self):
-        return self.extent[0]+self.pixel_size[0]*0.5
+        return self.extent[0] + self.pixel_size[0] * 0.5
 
     @property
     def tth_max(self):
-        return self.extent[1]-+self.pixel_size[0]*0.5
+        return self.extent[1] - self.pixel_size[0] * 0.5
 
     @property
     def peakshape(self):
@@ -636,21 +655,25 @@ def peakshape(self, val):
         elif val == "pvpink":
             self._peakshape = 2
         else:
-            msg = (f"invalid peak shape string. "
+            msg = (
+                f"invalid peak shape string. "
                 f"must be: \n"
                 f"1. pvfcj: pseudo voight (Finger, Cox, Jephcoat)\n"
                 f"2. pvtch: pseudo voight (Thompson, Cox, Hastings)\n"
-                f"3. pvpink: Pink beam (Von Dreele)")
+                f"3. pvpink: Pink beam (Von Dreele)"
+            )
             raise ValueError(msg)
         elif isinstance(val, int):
-            if val >=0 and val <=2:
+            if val >= 0 and val <= 2:
                 self._peakshape = val
             else:
-                msg = (f"invalid peak shape int. "
+                msg = (
+                    f"invalid peak shape int. "
                     f"must be: \n"
                     f"1. 0: pseudo voight (Finger, Cox, Jephcoat)\n"
                     f"2. 1: pseudo voight (Thompson, Cox, Hastings)\n"
-                    f"3. 
2: Pink beam (Von Dreele)" + ) raise ValueError(msg) """ @@ -658,13 +681,13 @@ def peakshape(self, val): """ if hasattr(self, 'params'): params = wppfsupport._generate_default_parameters_Rietveld( - self.phases, self.peakshape) + self.phases, self.peakshape + ) for p in params: if p in self.params: params[p] = self.params[p] self._params = params - @property def phases(self): return self._phases @@ -680,8 +703,8 @@ def phases(self, phase_info): >> @DETAILS: load the phases for the LeBail fits """ - if(phase_info is not None): - if(isinstance(phase_info, Phases_LeBail)): + if phase_info is not None: + if isinstance(phase_info, Phases_LeBail): """ directly passing the phase class """ @@ -689,62 +712,62 @@ def phases(self, phase_info): else: - if(hasattr(self, 'wavelength')): - if(self.wavelength is not None): + if hasattr(self, 'wavelength'): + if self.wavelength is not None: p = Phases_LeBail(wavelength=self.wavelength) else: p = Phases_LeBail() - if(isinstance(phase_info, dict)): + if isinstance(phase_info, dict): """ initialize class using a dictionary with key as material file and values as the name of each phase """ for material_file in phase_info: material_names = phase_info[material_file] - if(not isinstance(material_names, list)): + if not isinstance(material_names, list): material_names = [material_names] p.add_many(material_file, material_names) - elif(isinstance(phase_info, str)): + elif isinstance(phase_info, str): """ load from a yaml file """ - if(path.exists(phase_info)): + if path.exists(phase_info): p.load(phase_info) else: raise FileError('phase file doesn\'t exist.') - elif(isinstance(phase_info, Material)): + elif isinstance(phase_info, Material): p[phase_info.name] = Material_LeBail( fhdf=None, xtal=None, dmin=None, - material_obj=phase_info) + material_obj=phase_info, + ) - elif(isinstance(phase_info, list)): + elif isinstance(phase_info, list): for mat in phase_info: p[mat.name] = Material_LeBail( - fhdf=None, - xtal=None, - dmin=None, - material_obj=mat) + fhdf=None, xtal=None, dmin=None, material_obj=mat + ) p.num_phases += 1 for mat in p: - p[mat].pf = 1.0/p.num_phases + p[mat].pf = 1.0 / p.num_phases self._phases = p self.calctth() for p in self.phases: - self.phases[p].valid_shkl, \ - self.phases[p].eq_constraints, \ - self.phases[p].rqd_index, \ - self.phases[p].trig_ptype = \ - wppfsupport._required_shkl_names(self.phases[p]) + ( + self.phases[p].valid_shkl, + self.phases[p].eq_constraints, + self.phases[p].rqd_index, + self.phases[p].trig_ptype, + ) = wppfsupport._required_shkl_names(self.phases[p]) @property def params(self): @@ -761,15 +784,17 @@ def params(self, param_info): to some default values (lattice constants are for CeO2) """ from scipy.special import roots_legendre + xn, wn = roots_legendre(16) self.xn = xn[8:] self.wn = wn[8:] - if(param_info is not None): + if param_info is not None: pl = wppfsupport._generate_default_parameters_LeBail( - self.phases, self.peakshape, ptype="lmfit") + self.phases, self.peakshape, ptype="lmfit" + ) self.lebail_param_list = [p for p in pl] - if(isinstance(param_info, Parameters_lmfit)): + if isinstance(param_info, Parameters_lmfit): """ directly passing the parameter class """ @@ -778,21 +803,25 @@ def params(self, param_info): else: params = Parameters_lmfit() - if(isinstance(param_info, dict)): + if isinstance(param_info, dict): """ initialize class using dictionary read from the yaml file """ for k in param_info: v = param_info[k] - params.add(k, value=float(v[0]), - min=float(v[1]), max=float(v[2]), - 
vary=bool(v[3])) - - elif(isinstance(param_info, str)): + params.add( + k, + value=float(v[0]), + min=float(v[1]), + max=float(v[2]), + vary=bool(v[3]), + ) + + elif isinstance(param_info, str): """ load from a yaml file """ - if(path.exists(param_info)): + if path.exists(param_info): params.load(param_info) else: raise FileError('input spectrum file doesn\'t exist.') @@ -801,18 +830,20 @@ def params(self, param_info): this part initializes the lattice parameters in the """ for p in self.phases: - wppfsupport._add_lp_to_params( - params, self.phases[p]) + wppfsupport._add_lp_to_params(params, self.phases[p]) self._params = params else: params = wppfsupport._generate_default_parameters_LeBail( - self.phases, self.peakshape, ptype="lmfit") + self.phases, self.peakshape, ptype="lmfit" + ) self.lebail_param_list = [p for p in params] wppfsupport._add_detector_geometry(params, self.instrument) if "chebyshev" in self.bkgmethod.keys(): - wppfsupport._add_background(params, self.lineouts, self.bkgdegree) + wppfsupport._add_background( + params, self.lineouts, self.bkgdegree + ) self._params = params @property @@ -822,21 +853,23 @@ def shkl(self): shkl[p] = self.phases[p].shkl return shkl - def calc_simulated(self): self.lineouts_sim = {} for key, lo in self.lineouts.items(): - self.lineouts_sim[key] = LeBaillight(key, - lo, - self.Icalc[key], - self.tth, - self.hkls, - self.dsp, - self.shkl, - self.lebail_param_list, - self.params, - self.peakshape, - self.bkgmethod) + self.lineouts_sim[key] = LeBaillight( + key, + lo, + self.Icalc[key], + self.tth, + self.hkls, + self.dsp, + self.shkl, + self.lebail_param_list, + self.params, + self.peakshape, + self.bkgmethod, + ) + class LeBaillight: """ @@ -844,18 +877,21 @@ class LeBaillight: simple computation of diffraction spectrum given the parameters and intensity values """ - def __init__(self, - name, - lineout, - Icalc, - tth, - hkls, - dsp, - shkl, - lebail_param_list, - params, - peakshape, - bkgmethod): + + def __init__( + self, + name, + lineout, + Icalc, + tth, + hkls, + dsp, + shkl, + lebail_param_list, + params, + peakshape, + bkgmethod, + ): self.name = name self.lebail_param_list = lebail_param_list @@ -883,12 +919,20 @@ def computespectrum(self): Ic = self.Icalc[p][k] - shft_c = np.cos(0.5*np.radians(self.tth[p][k]))*self.params["shft"].value - trns_c = np.sin(np.radians(self.tth[p][k]))*self.params["trns"].value - tth = self.tth[p][k] + \ - self.params["zero_error"].value + \ - shft_c + \ - trns_c + shft_c = ( + np.cos(0.5 * np.radians(self.tth[p][k])) + * self.params["shft"].value + ) + trns_c = ( + np.sin(np.radians(self.tth[p][k])) + * self.params["trns"].value + ) + tth = ( + self.tth[p][k] + + self.params["zero_error"].value + + shft_c + + trns_c + ) dsp = self.dsp[p][k] hkls = self.hkls[p][k] @@ -897,74 +941,97 @@ def computespectrum(self): name = p eta_n = f"{name}_eta_fwhm" eta_fwhm = self.params[eta_n].value - strain_direction_dot_product = 0. 
+ strain_direction_dot_product = 0.0 is_in_sublattice = False - cag = np.array([self.params["U"].value, - self.params["V"].value, - self.params["W"].value]) + cag = np.array( + [ + self.params["U"].value, + self.params["V"].value, + self.params["W"].value, + ] + ) gaussschrerr = self.params["P"].value - lorbroad = np.array([self.params["X"].value, - self.params["Y"].value]) - anisbroad = np.array([self.params["Xe"].value, - self.params["Ye"].value, - self.params["Xs"].value]) + lorbroad = np.array( + [self.params["X"].value, self.params["Y"].value] + ) + anisbroad = np.array( + [ + self.params["Xe"].value, + self.params["Ye"].value, + self.params["Xs"].value, + ] + ) if self.peakshape == 0: HL = self.params["HL"].value SL = self.params["SL"].value - args = (cag, - gaussschrerr, - lorbroad, - anisbroad, - shkl, - eta_fwhm, - HL, - SL, - tth, - dsp, - hkls, - strain_direction_dot_product, - is_in_sublattice, - tth_list, - Ic, - self.xn, - self.wn) + args = ( + cag, + gaussschrerr, + lorbroad, + anisbroad, + shkl, + eta_fwhm, + HL, + SL, + tth, + dsp, + hkls, + strain_direction_dot_product, + is_in_sublattice, + tth_list, + Ic, + self.xn, + self.wn, + ) elif self.peakshape == 1: - args = (cag, - gaussschrerr, - lorbroad, - anisbroad, - shkl, - eta_fwhm, - tth, - dsp, - hkls, - strain_direction_dot_product, - is_in_sublattice, - tth_list, - Ic) + args = ( + cag, + gaussschrerr, + lorbroad, + anisbroad, + shkl, + eta_fwhm, + tth, + dsp, + hkls, + strain_direction_dot_product, + is_in_sublattice, + tth_list, + Ic, + ) elif self.peakshape == 2: - alpha = np.array([self.params["alpha0"].value, - self.params["alpha1"].value]) - beta = np.array([self.params["beta0"].value, - self.params["beta1"].value]) - args = (alpha, - beta, - cag, - gaussschrerr, - lorbroad, - anisbroad, - shkl, - eta_fwhm, - tth, - dsp, - hkls, - strain_direction_dot_product, - is_in_sublattice, - tth_list, - Ic) + alpha = np.array( + [ + self.params["alpha0"].value, + self.params["alpha1"].value, + ] + ) + beta = np.array( + [ + self.params["beta0"].value, + self.params["beta1"].value, + ] + ) + args = ( + alpha, + beta, + cag, + gaussschrerr, + lorbroad, + anisbroad, + shkl, + eta_fwhm, + tth, + dsp, + hkls, + strain_direction_dot_product, + is_in_sublattice, + tth_list, + Ic, + ) y += self.computespectrum_fcn(*args) @@ -980,7 +1047,7 @@ def CalcIobs(self): self.Iobs = {} spec_expt = self.spectrum_expt.data_array - spec_sim = self.spectrum_sim.data_array + spec_sim = self.spectrum_sim.data_array tth_list = np.ascontiguousarray(self.tth_list) for p in self.tth: @@ -989,12 +1056,20 @@ def CalcIobs(self): Ic = self.Icalc[p][k] - shft_c = np.cos(0.5*np.radians(self.tth[p][k]))*self.params["shft"].value - trns_c = np.sin(np.radians(self.tth[p][k]))*self.params["trns"].value - tth = self.tth[p][k] + \ - self.params["zero_error"].value + \ - shft_c + \ - trns_c + shft_c = ( + np.cos(0.5 * np.radians(self.tth[p][k])) + * self.params["shft"].value + ) + trns_c = ( + np.sin(np.radians(self.tth[p][k])) + * self.params["trns"].value + ) + tth = ( + self.tth[p][k] + + self.params["zero_error"].value + + shft_c + + trns_c + ) dsp = self.dsp[p][k] hkls = self.hkls[p][k] @@ -1003,81 +1078,104 @@ def CalcIobs(self): name = p eta_n = f"{name}_eta_fwhm" eta_fwhm = self.params[eta_n].value - strain_direction_dot_product = 0. 
+ strain_direction_dot_product = 0.0 is_in_sublattice = False - cag = np.array([self.params["U"].value, - self.params["V"].value, - self.params["W"].value]) + cag = np.array( + [ + self.params["U"].value, + self.params["V"].value, + self.params["W"].value, + ] + ) gaussschrerr = self.params["P"].value - lorbroad = np.array([self.params["X"].value, - self.params["Y"].value]) - anisbroad = np.array([self.params["Xe"].value, - self.params["Ye"].value, - self.params["Xs"].value]) + lorbroad = np.array( + [self.params["X"].value, self.params["Y"].value] + ) + anisbroad = np.array( + [ + self.params["Xe"].value, + self.params["Ye"].value, + self.params["Xs"].value, + ] + ) if self.peakshape == 0: HL = self.params["HL"].value SL = self.params["SL"].value - args = (cag, - gaussschrerr, - lorbroad, - anisbroad, - shkl, - eta_fwhm, - HL, - SL, - self.xn, - self.wn, - tth, - dsp, - hkls, - strain_direction_dot_product, - is_in_sublattice, - tth_list, - Ic, - spec_expt, - spec_sim) + args = ( + cag, + gaussschrerr, + lorbroad, + anisbroad, + shkl, + eta_fwhm, + HL, + SL, + self.xn, + self.wn, + tth, + dsp, + hkls, + strain_direction_dot_product, + is_in_sublattice, + tth_list, + Ic, + spec_expt, + spec_sim, + ) elif self.peakshape == 1: - args = (cag, - gaussschrerr, - lorbroad, - anisbroad, - shkl, - eta_fwhm, - tth, - dsp, - hkls, - strain_direction_dot_product, - is_in_sublattice, - tth_list, - Ic, - spec_expt, - spec_sim) + args = ( + cag, + gaussschrerr, + lorbroad, + anisbroad, + shkl, + eta_fwhm, + tth, + dsp, + hkls, + strain_direction_dot_product, + is_in_sublattice, + tth_list, + Ic, + spec_expt, + spec_sim, + ) elif self.peakshape == 2: - alpha = np.array([self.params["alpha0"].value, - self.params["alpha1"].value]) - beta = np.array([self.params["beta0"].value, - self.params["beta1"].value]) - args = (alpha, - beta, - cag, - gaussschrerr, - lorbroad, - anisbroad, - shkl, - eta_fwhm, - tth, - dsp, - hkls, - strain_direction_dot_product, - is_in_sublattice, - tth_list, - Ic, - spec_expt, - spec_sim) + alpha = np.array( + [ + self.params["alpha0"].value, + self.params["alpha1"].value, + ] + ) + beta = np.array( + [ + self.params["beta0"].value, + self.params["beta1"].value, + ] + ) + args = ( + alpha, + beta, + cag, + gaussschrerr, + lorbroad, + anisbroad, + shkl, + eta_fwhm, + tth, + dsp, + hkls, + strain_direction_dot_product, + is_in_sublattice, + tth_list, + Ic, + spec_expt, + spec_sim, + ) self.Iobs[p][k] = self.calc_Iobs_fcn(*args) self.Icalc = self.Iobs @@ -1085,12 +1183,11 @@ def CalcIobs(self): @property def weights(self): lo = self.lineout - weights = np.divide(1., np.sqrt(lo.data[:,1])) + weights = np.divide(1.0, np.sqrt(lo.data[:, 1])) weights[np.isinf(weights)] = 0.0 return weights - @property def bkgdegree(self): if "chebyshev" in self.bkgmethod.keys(): @@ -1098,37 +1195,38 @@ def bkgdegree(self): @property def tth_step(self): - return (self.lineout.data[1,0]-self.lineout.data[0,0]) + return self.lineout.data[1, 0] - self.lineout.data[0, 0] @property def background(self): tth, I = self.spectrum_expt.data - mask = self.mask[:,1] + mask = self.mask[:, 1] if "chebyshev" in self.bkgmethod.keys(): - pname = [f"{self.name}_bkg_C{ii}" - for ii in range(self.bkgdegree)] + pname = [f"{self.name}_bkg_C{ii}" for ii in range(self.bkgdegree)] coef = [self.params[p].value for p in pname] - c = Chebyshev(coef,domain=[tth[0],tth[-1]]) + c = Chebyshev(coef, domain=[tth[0], tth[-1]]) bkg = c(tth) bkg[mask] = np.nan elif 'snip1d' in self.bkgmethod.keys(): - ww = np.rint(self.bkgmethod["snip1d"][0] 
/
-                         self.tth_step).astype(np.int32)
+            ww = np.rint(self.bkgmethod["snip1d"][0] / self.tth_step).astype(
+                np.int32
+            )
             numiter = self.bkgmethod["snip1d"][1]
-            bkg = np.squeeze(snip1d_quad(np.atleast_2d(I),
-                                         w=ww, numiter=numiter))
+            bkg = np.squeeze(
+                snip1d_quad(np.atleast_2d(I), w=ww, numiter=numiter)
+            )
             bkg[mask] = np.nan
 
         return bkg
 
     @property
     def spectrum_sim(self):
         tth, I = self._spectrum_sim.data
-        mask = self.mask[:,1]
+        mask = self.mask[:, 1]
         # I[mask] = np.nan
         I += self.background
 
@@ -1137,9 +1235,9 @@ def spectrum_sim(self):
     @property
     def spectrum_expt(self):
         d = self.lineout.data
-        mask = self.mask[:,1]
+        mask = self.mask[:, 1]
         # d[mask,1] = np.nan
-        return Spectrum(x=d[:,0], y=d[:,1])
+        return Spectrum(x=d[:, 0], y=d[:, 1])
 
     @property
     def params(self):
@@ -1161,17 +1259,20 @@ def params(self, params):
                 self._params[p].vary = params[p].vary
         else:
             from scipy.special import roots_legendre
+
             xn, wn = roots_legendre(16)
             self.xn = xn[8:]
             self.wn = wn[8:]
             self._params = Parameters_lmfit()
             for p in params:
                 if (p in self.lebail_param_list) or (self.name in p):
-                    self._params.add(name=p,
-                                     value=params[p].value,
-                                     max=params[p].max,
-                                     min=params[p].min,
-                                     vary=params[p].vary)
+                    self._params.add(
+                        name=p,
+                        value=params[p].value,
+                        max=params[p].max,
+                        min=params[p].min,
+                        vary=params[p].vary,
+                    )
 
         # if hasattr(self, "tth") and \
         # hasattr(self, "dsp") and \
@@ -1186,8 +1287,8 @@ def lineout(self):
         return self._lineout
 
     @lineout.setter
-    def lineout(self,lo):
-        if isinstance(lo,np.ma.MaskedArray):
+    def lineout(self, lo):
+        if isinstance(lo, np.ma.MaskedArray):
             self._lineout = lo
         else:
             msg = f"only masked arrays input is allowed."
             raise ValueError(msg)
@@ -1202,14 +1303,12 @@ def mask(self):
 
     @property
     def tth_list(self):
-        return self.lineout[:,0].data
-
+        return self.lineout[:, 0].data
 
     @property
     def tth(self):
         return self._tth
 
-
     @tth.setter
     def tth(self, val):
         if isinstance(val, dict):
@@ -1217,13 +1316,13 @@ def tth(self, val):
             self._tth = val
             # if hasattr(self,"dsp"):
             #     self.computespectrum()
         else:
-            msg = (f"two theta vallues need "
-                   f"to be in a dictionary")
+            msg = f"two theta values need " f"to be in a dictionary"
             raise ValueError(msg)
 
     @property
     def hkls(self):
         return self._hkls
+
     @hkls.setter
     def hkls(self, val):
         if isinstance(val, dict):
@@ -1233,8 +1332,7 @@ def hkls(self, val):
             self._hkls = val
             # hasattr(self,"lineout"):
             #     self.computespectrum()
         else:
-            msg = (f"two theta vallues need "
-                   f"to be in a dictionary")
+            msg = f"two theta values need " f"to be in a dictionary"
             raise ValueError(msg)
 
     @property
     def dsp(self):
         return self._dsp
 
     @dsp.setter
@@ -1247,8 +1345,7 @@ def dsp(self, val):
             self._dsp = val
             # self.computespectrum()
         else:
-            msg = (f"two theta vallues need "
-                   f"to be in a dictionary")
+            msg = f"two theta values need " f"to be in a dictionary"
             raise ValueError(msg)
 
     @property
     def mask(self):
@@ -1257,8 +1354,7 @@ def mask(self):
 
     @property
     def tth_list(self):
-        return self.lineout[:,0].data
-
+        return self.lineout[:, 0].data
 
     @property
     def Icalc(self):
diff --git a/hexrd/powder/wppf/WPPF.py b/hexrd/powder/wppf/WPPF.py
index 1ec8797c1..9aaa153b0 100644
--- a/hexrd/powder/wppf/WPPF.py
+++ b/hexrd/powder/wppf/WPPF.py
@@ -22,11 +22,24 @@
 from hexrd.core.material import Material
 from hexrd.core.utils.multiprocess_generic import GenericMultiprocessing
 from hexrd.core.valunits import valWUnit
-from hexrd.powder.wppf.peakfunctions import calc_rwp, computespectrum_pvfcj, computespectrum_pvtch, computespectrum_pvpink, calc_Iobs_pvfcj, calc_Iobs_pvtch, calc_Iobs_pvpink
+from hexrd.powder.wppf.peakfunctions import (
+    calc_rwp,
+    computespectrum_pvfcj,
+    computespectrum_pvtch,
+
computespectrum_pvpink, + calc_Iobs_pvfcj, + calc_Iobs_pvtch, + calc_Iobs_pvpink, +) from hexrd.powder.wppf import wppfsupport from hexrd.powder.wppf.spectrum import Spectrum from hexrd.powder.wppf.parameters import Parameters -from hexrd.powder.wppf.phase import Phases_LeBail, Phases_Rietveld, Material_LeBail, Material_Rietveld +from hexrd.powder.wppf.phase import ( + Phases_LeBail, + Phases_Rietveld, + Material_LeBail, + Material_Rietveld, +) class LeBail: @@ -85,7 +98,7 @@ def __init__( phases=None, wavelength={ "kalpha1": [_nm(0.15406), 1.0], - "kalpha2": [_nm(0.154443), 1.0] + "kalpha2": [_nm(0.154443), 1.0], }, bkgmethod={"spline": None}, intensity_init=None, @@ -278,7 +291,7 @@ def bkgmethod(self, v): # In case the degree has changed, slice off any extra at the end, # and in case it is less, pad with zeros. if len(self.bkg_coef) > degree + 1: - self.bkg_coef = self.bkg_coef[:degree + 1] + self.bkg_coef = self.bkg_coef[: degree + 1] elif len(self.bkg_coef) < degree + 1: pad_width = (0, degree + 1 - len(self.bkg_coef)) self.bkg_coef = np.pad(self.bkg_coef, pad_width) @@ -351,8 +364,9 @@ def initialize_Icalc(self): for p in self.phases: self.Icalc[p] = {} for k in self.phases.wavelength.keys(): - self.Icalc[p][k] = \ - (10 ** n10) * np.ones(self.tth[p][k].shape) + self.Icalc[p][k] = (10**n10) * np.ones( + self.tth[p][k].shape + ) elif isinstance(self.intensity_init, dict): """ @@ -385,10 +399,12 @@ def initialize_Icalc(self): from the dictionary." ) - if (self.tth[p][k].shape[0] - <= self.intensity_init[p][k].shape[0]): + if ( + self.tth[p][k].shape[0] + <= self.intensity_init[p][k].shape[0] + ): self.Icalc[p][k] = self.intensity_init[p][k][ - 0:self.tth[p][k].shape[0] + 0 : self.tth[p][k].shape[0] ] else: raise RuntimeError( @@ -422,15 +438,29 @@ def computespectrum(self): Xs = np.zeros(Ic.shape) if self.phases[p].sf_alpha is not None: alpha = getattr(self, f"{p}_sf_alpha") - beta = getattr(self, f"{p}_twin_beta") - sf_shift = alpha*np.tan(np.radians(self.tth[p][k])) *\ - self.sf_hkl_factors[p][k] + beta = getattr(self, f"{p}_twin_beta") + sf_shift = ( + alpha + * np.tan(np.radians(self.tth[p][k])) + * self.sf_hkl_factors[p][k] + ) Xs = np.degrees( - 0.9*(1.5*alpha+beta)*( - self.sf_lfactor[p][k]*lam/self.phases[p].lparms[0])) + 0.9 + * (1.5 * alpha + beta) + * ( + self.sf_lfactor[p][k] + * lam + / self.phases[p].lparms[0] + ) + ) - tth = self.tth[p][k] + self.zero_error + \ - shft_c + trns_c + sf_shift + tth = ( + self.tth[p][k] + + self.zero_error + + shft_c + + trns_c + + sf_shift + ) dsp = self.dsp[p][k] hkls = self.hkls[p][k] @@ -537,15 +567,29 @@ def CalcIobs(self): Xs = np.zeros(Ic.shape) if self.phases[p].sf_alpha is not None: alpha = getattr(self, f"{p}_sf_alpha") - beta = getattr(self, f"{p}_twin_beta") - sf_shift = alpha*np.tan(np.radians(self.tth[p][k])) *\ - self.sf_hkl_factors[p][k] + beta = getattr(self, f"{p}_twin_beta") + sf_shift = ( + alpha + * np.tan(np.radians(self.tth[p][k])) + * self.sf_hkl_factors[p][k] + ) Xs = np.degrees( - 0.9*(1.5*alpha+beta)*( - self.sf_lfactor[p][k]*lam/self.phases[p].lparms[0])) + 0.9 + * (1.5 * alpha + beta) + * ( + self.sf_lfactor[p][k] + * lam + / self.phases[p].lparms[0] + ) + ) - tth = self.tth[p][k] + self.zero_error + \ - shft_c + trns_c + sf_shift + tth = ( + self.tth[p][k] + + self.zero_error + + shft_c + + trns_c + + sf_shift + ) dsp = self.dsp[p][k] hkls = self.hkls[p][k] @@ -673,8 +717,10 @@ def RefineCycle(self, print_to_screen=True): self.gofFlist = np.append(self.gofFlist, self.gofF) if print_to_screen: - msg = (f"Finished 
iteration. Rwp: " - f"{self.Rwp*100.0:.2f} % and chi^2: {self.gofF:.2f}") + msg = ( + f"Finished iteration. Rwp: " + f"{self.Rwp*100.0:.2f} % and chi^2: {self.gofF:.2f}" + ) print(msg) def Refine(self): @@ -926,8 +972,7 @@ def cheb_coef(self): @property def cheb_polynomial(self): return np.polynomial.Chebyshev( - self.cheb_coef, - domain=[self.tth_list[0], self.tth_list[-1]] + self.cheb_coef, domain=[self.tth_list[0], self.tth_list[-1]] ) @property @@ -1076,9 +1121,7 @@ def spectrum_expt(self, expt_spectrum): for s in expt_spec_list: self._spectrum_expt.append( Spectrum( - x=s[:, 0], - y=s[:, 1], - name="expt_spectrum" + x=s[:, 0], y=s[:, 1], name="expt_spectrum" ) ) @@ -1240,9 +1283,9 @@ def params(self): @property def init_bkg(self): degree = self.bkgmethod["chebyshev"] - x = np.empty([0, ]) - y = np.empty([0, ]) - wts = np.empty([0, ]) + x = np.empty([0]) + y = np.empty([0]) + wts = np.empty([0]) for i, s in enumerate(self._spectrum_expt): tth = self._tth_list[i] wt = self._weights[i] @@ -1323,7 +1366,7 @@ def params(self, param_info): self.phases, self.peakshape, self.bkgmethod, - init_val=self.cheb_init_coef + init_val=self.cheb_init_coef, ) self._params = params @@ -1384,15 +1427,16 @@ def phases(self, phase_info): elif isinstance(phase_info, Material): p[phase_info.name] = Material_LeBail( - fhdf=None, xtal=None, - dmin=None, material_obj=phase_info + fhdf=None, + xtal=None, + dmin=None, + material_obj=phase_info, ) elif isinstance(phase_info, list): for mat in phase_info: p[mat.name] = Material_LeBail( - fhdf=None, xtal=None, - dmin=None, material_obj=mat + fhdf=None, xtal=None, dmin=None, material_obj=mat ) p.num_phases += 1 @@ -1656,8 +1700,10 @@ def __init__( expt_spectrum=None, params=None, phases=None, - wavelength={"kalpha1": [_nm(0.15406), 1.0], - "kalpha2": [_nm(0.154443), 0.52]}, + wavelength={ + "kalpha1": [_nm(0.15406), 1.0], + "kalpha2": [_nm(0.154443), 0.52], + }, bkgmethod={"spline": None}, peakshape="pvfcj", shape_factor=1.0, @@ -1800,7 +1846,7 @@ def bkgmethod(self, v): # In case the degree has changed, slice off any extra at the end, # and in case it is less, pad with zeros. 
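         # (editor's illustration, not part of the patch) e.g. lowering the
         # Chebyshev degree from 5 to 3 keeps bkg_coef[:4] (four coefficients);
         # raising it from 3 to 5 applies np.pad(self.bkg_coef, (0, 2)),
         # appending two trailing zero coefficients.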
if len(self.bkg_coef) > degree + 1: - self.bkg_coef = self.bkg_coef[:degree + 1] + self.bkg_coef = self.bkg_coef[: degree + 1] elif len(self.bkg_coef) < degree + 1: pad_width = (0, degree + 1 - len(self.bkg_coef)) self.bkg_coef = np.pad(self.bkg_coef, pad_width) @@ -1874,8 +1920,9 @@ def calcsf(self): tth = self.tth[p][k] # allowed = self.phases[p][k].wavelength_allowed_hkls # limit = self.limit[p][k] - self.sf[p][k], self.sf_raw[p][k] = \ - self.phases[p][k].CalcXRSF(w, w_int) + self.sf[p][k], self.sf_raw[p][k] = self.phases[p][k].CalcXRSF( + w, w_int + ) self.extinction[p][k] = self.phases[p][k].calc_extinction( 10.0 * w, @@ -1928,15 +1975,29 @@ def computespectrum(self): Xs = np.zeros(self.tth[p][k].shape) if self.phases[p][k].sf_alpha is not None: alpha = getattr(self, f"{p}_sf_alpha") - beta = getattr(self, f"{p}_twin_beta") - sf_shift = alpha*np.tan(np.radians(self.tth[p][k])) *\ - self.sf_hkl_factors[p][k] - Xs = np.degrees(0.9*(1.5*alpha+beta)*( - self.sf_lfactor[p][k] * - lam/self.phases[p][k].lparms[0])) + beta = getattr(self, f"{p}_twin_beta") + sf_shift = ( + alpha + * np.tan(np.radians(self.tth[p][k])) + * self.sf_hkl_factors[p][k] + ) + Xs = np.degrees( + 0.9 + * (1.5 * alpha + beta) + * ( + self.sf_lfactor[p][k] + * lam + / self.phases[p][k].lparms[0] + ) + ) - tth = self.tth[p][k] + self.zero_error + \ - shft_c + trns_c + sf_shift + tth = ( + self.tth[p][k] + + self.zero_error + + shft_c + + trns_c + + sf_shift + ) pf = self.phases[p][k].pf / self.phases[p][k].vol ** 2 sf = self.sf[p][k] @@ -2101,8 +2162,10 @@ def Refine(self): self.Rwplist = np.append(self.Rwplist, self.Rwp) self.gofFlist = np.append(self.gofFlist, self.gofF) - msg = (f"Finished iteration. Rwp: " - f"{self.Rwp*100.0:.2f} % and chi^2: {self.gofF:.2f}") + msg = ( + f"Finished iteration. 
Rwp: " + f"{self.Rwp*100.0:.2f} % and chi^2: {self.gofF:.2f}" + ) print(msg) else: print("Nothing to refine...") @@ -2121,7 +2184,11 @@ def _set_params_vals_to_class(self, params, init=False, skip_phases=False): if not skip_phases: updated_lp = False updated_atominfo = False - pf = np.zeros([self.phases.num_phases, ]) + pf = np.zeros( + [ + self.phases.num_phases, + ] + ) pf_cur = self.phases.phase_fraction.copy() for ii, p in enumerate(self.phases): name = f"{p}_phase_fraction" @@ -2236,7 +2303,7 @@ def _set_params_vals_to_class(self, params, init=False, skip_phases=False): if updated_lp or updated_atominfo: self.calcsf() - self.phases.phase_fraction = pf/np.sum(pf) + self.phases.phase_fraction = pf / np.sum(pf) def _update_shkl(self, params): """ @@ -2359,7 +2426,7 @@ def params(self, param_info): self.phases, self.peakshape, self.bkgmethod, - init_val=self.cheb_init_coef + init_val=self.cheb_init_coef, ) self._params = params @@ -2436,9 +2503,9 @@ def spectrum_expt(self, expt_spectrum): self._spectrum_expt = [] for s in expt_spec_list: self._spectrum_expt.append( - Spectrum(x=s[:, 0], - y=s[:, 1], - name="expt_spectrum") + Spectrum( + x=s[:, 0], y=s[:, 1], name="expt_spectrum" + ) ) else: @@ -2657,8 +2724,10 @@ def phases(self, phase_info): "kev", "ENERGY", E, "keV" ) p[phase_info.name][k] = Material_Rietveld( - fhdf=None, xtal=None, - dmin=None, material_obj=phase_info + fhdf=None, + xtal=None, + dmin=None, + material_obj=phase_info, ) p[phase_info.name][k].pf = 1.0 p.num_phases = 1 @@ -2678,8 +2747,10 @@ def phases(self, phase_info): "kev", "ENERGY", E, "keV" ) p[mat.name][k] = Material_Rietveld( - fhdf=None, xtal=None, - dmin=None, material_obj=mat + fhdf=None, + xtal=None, + dmin=None, + material_obj=mat, ) p.num_phases += 1 @@ -2747,7 +2818,7 @@ def peakshape(self, val): self.phases, self.peakshape, self.bkgmethod, - init_val=self.cheb_init_coef + init_val=self.cheb_init_coef, ) for p in params: if p in self.params: @@ -2759,9 +2830,9 @@ def peakshape(self, val): @property def init_bkg(self): degree = self.bkgmethod["chebyshev"] - x = np.empty([0, ]) - y = np.empty([0, ]) - wts = np.empty([0, ]) + x = np.empty([0]) + y = np.empty([0]) + wts = np.empty([0]) for i, s in enumerate(self._spectrum_expt): tth = self._tth_list[i] wt = self._weights[i] @@ -2782,8 +2853,7 @@ def cheb_coef(self): @property def cheb_polynomial(self): return np.polynomial.Chebyshev( - self.cheb_coef, - domain=[self.tth_list[0], self.tth_list[-1]] + self.cheb_coef, domain=[self.tth_list[0], self.tth_list[-1]] ) @property @@ -2971,7 +3041,7 @@ def separate_regions(masked_spec_array): m0 = np.concatenate(([False], mask, [False])) idx = np.flatnonzero(m0[1:] != m0[:-1]) gidx = [(idx[i], idx[i + 1]) for i in range(0, len(idx), 2)] - return [array[idx[i]: idx[i + 1], :] for i in range(0, len(idx), 2)], gidx + return [array[idx[i] : idx[i + 1], :] for i in range(0, len(idx), 2)], gidx def join_regions(vector_list, global_index, global_shape): @@ -2988,7 +3058,7 @@ def join_regions(vector_list, global_index, global_shape): ) out_vector[:] = np.nan for s, ids in zip(vector_list, global_index): - out_vector[ids[0]: ids[1]] = s + out_vector[ids[0] : ids[1]] = s # out_array = np.ma.masked_array(out_array, mask = np.isnan(out_array)) return out_vector diff --git a/hexrd/powder/wppf/parameters.py b/hexrd/powder/wppf/parameters.py index d589a85c9..2144214b6 100644 --- a/hexrd/powder/wppf/parameters.py +++ b/hexrd/powder/wppf/parameters.py @@ -4,6 +4,7 @@ import warnings from os import path + class Parameters: """ 
==================================================================================
@@ -19,45 +20,28 @@ class Parameters:
 ===============================================================================
 """
-    def __init__(self,
-                 name=None,
-                 vary=False,
-                 value=0.0,
-                 lb=-np.Inf,
-                 ub=np.Inf):
+    def __init__(
+        self, name=None, vary=False, value=0.0, lb=-np.Inf, ub=np.Inf
+    ):
 
         self.param_dict = {}
 
-        if(name is not None):
-            self.add(name=name,
-                     vary=vary,
-                     value=value,
-                     lb=min,
-                     ub=max)
-
-    def add(self,
-            name,
-            vary=False,
-            value=0.0,
-            lb=-np.Inf,
-            ub=np.Inf):
+        if name is not None:
+            self.add(name=name, vary=vary, value=value, lb=lb, ub=ub)
+
+    def add(self, name, vary=False, value=0.0, lb=-np.Inf, ub=np.Inf):
         """
-        >> @AUTHOR: Saransh Singh, Lawrence Livermore National Lab, saransh1@llnl.gov
-        >> @DATE: 05/18/2020 SS 1.0 original
-        >> @DETAILS: add a single named parameter
+        >> @AUTHOR: Saransh Singh, Lawrence Livermore National Lab, saransh1@llnl.gov
+        >> @DATE: 05/18/2020 SS 1.0 original
+        >> @DETAILS: add a single named parameter
         """
         self[name] = Parameter(name=name, vary=vary, value=value, lb=lb, ub=ub)
 
-    def add_many(self,
-                 names,
-                 varies,
-                 values,
-                 lbs,
-                 ubs):
+    def add_many(self, names, varies, values, lbs, ubs):
         """
-        >> @AUTHOR: Saransh Singh, Lawrence Livermore National Lab, saransh1@llnl.gov
-        >> @DATE: 05/18/2020 SS 1.0 original
-        >> @DETAILS: load a list of named parameters
+        >> @AUTHOR: Saransh Singh, Lawrence Livermore National Lab, saransh1@llnl.gov
+        >> @DATE: 05/18/2020 SS 1.0 original
+        >> @DETAILS: load a list of named parameters
         """
         assert len(names) == len(varies), "lengths of tuples not consistent"
         assert len(names) == len(values), "lengths of tuples not consistent"
@@ -69,24 +53,29 @@ def add_many(self,
 
     def load(self, fname):
         """
-        >> @AUTHOR: Saransh Singh, Lawrence Livermore National Lab, saransh1@llnl.gov
-        >> @DATE: 05/18/2020 SS 1.0 original
-        >> @DETAILS: load parameters from yaml file
+        >> @AUTHOR: Saransh Singh, Lawrence Livermore National Lab, saransh1@llnl.gov
+        >> @DATE: 05/18/2020 SS 1.0 original
+        >> @DETAILS: load parameters from yaml file
         """
         with open(fname) as file:
             dic = yaml.load(file, Loader=yaml.FullLoader)
 
         for k in dic.keys():
             v = dic[k]
-            self.add(k, value=float(v[0]), lb=float(v[1]),
-                     ub=float(v[2]), vary=bool(v[3]))
+            self.add(
+                k,
+                value=float(v[0]),
+                lb=float(v[1]),
+                ub=float(v[2]),
+                vary=bool(v[3]),
+            )
 
     def dump(self, fname):
         """
-        >> @AUTHOR: Saransh Singh, Lawrence Livermore National Lab, saransh1@llnl.gov
-        >> @DATE: 05/18/2020 SS 1.0 original
-        >> @DETAILS: dump the class to a yaml looking file. name is the key and the list
-                     has [value, lb, ub, vary] in that order
+        >> @AUTHOR: Saransh Singh, Lawrence Livermore National Lab, saransh1@llnl.gov
+        >> @DATE: 05/18/2020 SS 1.0 original
+        >> @DETAILS: dump the class to a yaml-like file. name is the key and the list
+                     has [value, lb, ub, vary] in that order
         """
         dic = {}
         for k in self.param_dict.keys():
@@ -97,31 +86,32 @@ def dump(self, fname):
 
     def dump_hdf5(self, file):
         """
-        >> @AUTHOR: Saransh Singh, Lawrence Livermore National Lab, saransh1@llnl.gov
-        >> @DATE: 01/15/2021 SS 1.0 original
-        >> @DETAILS: dump the class to a hdf5 file. the file argument could either be a
-                     string or a h5.File instance. If it is a filename, then HDF5 file
-                     is created, a parameter group is created and data is written out
-                     with data names being the parameter name. 
Else data written to Parameter - group in existing file object + >> @AUTHOR: Saransh Singh, Lawrence Livermore National Lab, saransh1@llnl.gov + >> @DATE: 01/15/2021 SS 1.0 original + >> @DETAILS: dump the class to a hdf5 file. the file argument could either be a + string or a h5.File instance. If it is a filename, then HDF5 file + is created, a parameter group is created and data is written out + with data names being the parameter name. Else data written to Parameter + group in existing file object """ - if(isinstance(file, str)): + if isinstance(file, str): fexist = path.isfile(file) - if(fexist): + if fexist: fid = h5py.File(file, 'r+') else: fid = h5py.File(file, 'x') - elif(isinstance(file, h5py.File)): + elif isinstance(file, h5py.File): fid = file else: raise RuntimeError( 'Parameters: dump_hdf5 Pass in a \ - filename string or h5py.File object') + filename string or h5py.File object' + ) - if("/Parameters" in fid): - del(fid["Parameters"]) + if "/Parameters" in fid: + del fid["Parameters"] gid_top = fid.create_group("Parameters") for p in self: @@ -129,27 +119,27 @@ def dump_hdf5(self, file): gid = gid_top.create_group(p) # write the value, lower and upper bounds and vary status - did = gid.create_dataset("value", (1, ), dtype=np.float64) + did = gid.create_dataset("value", (1,), dtype=np.float64) did.write_direct(np.array(param.value, dtype=np.float64)) - did = gid.create_dataset("lb", (1, ), dtype=np.float64) + did = gid.create_dataset("lb", (1,), dtype=np.float64) did.write_direct(np.array(param.lb, dtype=np.float64)) - did = gid.create_dataset("ub", (1, ), dtype=np.float64) + did = gid.create_dataset("ub", (1,), dtype=np.float64) did.write_direct(np.array(param.ub, dtype=np.float64)) - did = gid.create_dataset("vary", (1, ), dtype=bool) + did = gid.create_dataset("vary", (1,), dtype=bool) did.write_direct(np.array(param.vary, dtype=bool)) def __getitem__(self, key): - if(key in self.param_dict.keys()): + if key in self.param_dict.keys(): return self.param_dict[key] else: raise ValueError(f'variable with name {key} not found') def __setitem__(self, key, parm_cls): - if(isinstance(parm_cls, Parameter)): + if isinstance(parm_cls, Parameter): self.param_dict[key] = parm_cls else: raise ValueError('input not a Parameter class') @@ -159,7 +149,7 @@ def __iter__(self): return self def __next__(self): - if(self.n < len(self.param_dict.keys())): + if self.n < len(self.param_dict.keys()): res = list(self.param_dict.keys())[self.n] self.n += 1 return res @@ -169,7 +159,7 @@ def __next__(self): def __str__(self): retstr = 'Parameters{\n' for k in self.param_dict.keys(): - retstr += self[k].__str__()+'\n' + retstr += self[k].__str__() + '\n' retstr += '}' return retstr @@ -189,12 +179,9 @@ class Parameter: ================================================================================= """ - def __init__(self, - name=None, - vary=False, - value=0.0, - lb=-np.Inf, - ub=np.Inf): + def __init__( + self, name=None, vary=False, value=0.0, lb=-np.Inf, ub=np.Inf + ): self.name = name self.vary = vary @@ -203,10 +190,11 @@ def __init__(self, self.ub = ub def __str__(self): + # fmt: off retstr = '< Parameter \''+self.name+'\'; value : ' + \ str(self.value)+'; bounds : ['+str(self.lb)+',' + \ str(self.ub)+' ]; vary :'+str(self.vary)+' >' - + # fmt: on return retstr @property @@ -215,7 +203,7 @@ def name(self): @name.setter def name(self, name): - if(isinstance(name, str)): + if isinstance(name, str): self._name = name @property @@ -252,5 +240,5 @@ def vary(self): @vary.setter def vary(self, vary): 
- if(isinstance(vary, (bool, np.bool_))): + if isinstance(vary, (bool, np.bool_)): self._vary = vary diff --git a/hexrd/powder/wppf/phase.py b/hexrd/powder/wppf/phase.py index ed1c93840..238a03dfd 100644 --- a/hexrd/powder/wppf/phase.py +++ b/hexrd/powder/wppf/phase.py @@ -6,11 +6,18 @@ from hexrd.core.material import Material from hexrd.core.material.unitcell import _rqpDict from hexrd.powder.wppf import wppfsupport -from hexrd.powder.wppf.xtal import _calc_dspacing, _get_tth, _calcxrsf, _calc_extinction_factor, _calc_absorption_factor +from hexrd.powder.wppf.xtal import ( + _calc_dspacing, + _get_tth, + _calcxrsf, + _calc_extinction_factor, + _calc_absorption_factor, +) import h5py import importlib.resources import hexrd.core.resources + class Material_LeBail: """ ======================================================================================== @@ -29,34 +36,35 @@ class Material_LeBail: ========================================================================================= """ - def __init__(self, - fhdf=None, - xtal=None, - dmin=None, - material_obj=None): + def __init__(self, fhdf=None, xtal=None, dmin=None, material_obj=None): - if(material_obj is None): + if material_obj is None: self.dmin = dmin.value self._readHDF(fhdf, xtal) self._calcrmt() self.sf_and_twin_probability() - _, self.SYM_PG_d, self.SYM_PG_d_laue, \ - self.centrosymmetric, self.symmorphic = \ - symmetry.GenerateSGSym(self.sgnum, self.sgsetting) + ( + _, + self.SYM_PG_d, + self.SYM_PG_d_laue, + self.centrosymmetric, + self.symmorphic, + ) = symmetry.GenerateSGSym(self.sgnum, self.sgsetting) self.latticeType = symmetry.latticeType(self.sgnum) - self.sg_hmsymbol = symbols.pstr_spacegroup[self.sgnum-1].strip() + self.sg_hmsymbol = symbols.pstr_spacegroup[self.sgnum - 1].strip() self.GenerateRecipPGSym() self.CalcMaxGIndex() self._calchkls() self.sg = SpaceGroup(self.sgnum) else: - if(isinstance(material_obj, Material)): + if isinstance(material_obj, Material): self._init_from_materials(material_obj) else: raise ValueError( "Invalid material_obj argument. \ - only Material class can be passed here.") + only Material class can be passed here." 
+ ) self._shkl = np.zeros((15,)) def _readHDF(self, fhdf, xtal): @@ -65,18 +73,20 @@ def _readHDF(self, fhdf, xtal): # if(fexist): fid = h5py.File(fhdf, 'r') name = xtal - xtal = "/"+xtal + xtal = "/" + xtal if xtal not in fid: - raise IOError('crystal doesn''t exist in material file.') + raise IOError("crystal doesn't exist in material file.") # else: # raise IOError('material file does not exist.') gid = fid.get(xtal) - self.sgnum = np.asscalar(np.array(gid.get('SpaceGroupNumber'), - dtype=np.int32)) - self.sgsetting = np.asscalar(np.array(gid.get('SpaceGroupSetting'), - dtype=np.int32)) + self.sgnum = np.asscalar( + np.array(gid.get('SpaceGroupNumber'), dtype=np.int32) + ) + self.sgsetting = np.asscalar( + np.array(gid.get('SpaceGroupSetting'), dtype=np.int32) + ) """ IMPORTANT NOTE: note that the latice parameters is nm by default """ @@ -104,9 +114,9 @@ def _init_from_materials(self, material_obj): self.sg = SpaceGroup(self.sgnum) self.sf_and_twin_probability() - if(material_obj.latticeParameters[0].unit == 'nm'): + if material_obj.latticeParameters[0].unit == 'nm': self.lparms = [x.value for x in material_obj.latticeParameters] - elif(material_obj.latticeParameters[0].unit == 'angstrom'): + elif material_obj.latticeParameters[0].unit == 'angstrom': lparms = [x.value for x in material_obj.latticeParameters] for i in range(3): lparms[i] /= 10.0 @@ -136,7 +146,6 @@ def _init_from_materials(self, material_obj): self.hkls = material_obj.planeData.getHKLs() def _calcrmt(self): - """ O7/01/2021 SS ADDED DIRECT AND RECIPROCAL STRUCTURE MATRIX AS FIELDS IN THE CLASS """ """ direct metric tensor """ - self.dmt = np.array([[a**2, a*b*cg, a*c*cb], - [a*b*cg, b**2, b*c*ca], - [a*c*cb, b*c*ca, c**2]]) + self.dmt = np.array( + [ + [a**2, a * b * cg, a * c * cb], + [a * b * cg, b**2, b * c * ca], + [a * c * cb, b * c * ca, c**2], + ] + ) self.vol = np.sqrt(np.linalg.det(self.dmt)) - if(self.vol < 1e-5): + if self.vol < 1e-5: warnings.warn('unitcell volume is suspiciously small') """ @@ -176,18 +189,28 @@ def _calcrmt(self): """ direct structure matrix """ - self.dsm = np.array([[a, b*cg, c*cb], - [0., b*sg, -c*(cb*cg - ca)/sg], - [0., 0., self.vol/(a*b*sg)]]) + self.dsm = np.array( + [ + [a, b * cg, c * cb], + [0.0, b * sg, -c * (cb * cg - ca) / sg], + [0.0, 0.0, self.vol / (a * b * sg)], + ] + ) """ reciprocal structure matrix """ - self.rsm = np.array([[1./a, 0., 0.], - [-1./(a*tg), 1./(b*sg), 0.], - [b*c*(cg*ca - cb)/(self.vol*sg), - a*c*(cb*cg - ca)/(self.vol*sg), - a*b*sg/self.vol]]) + self.rsm = np.array( + [ + [1.0 / a, 0.0, 0.0], + [-1.0 / (a * tg), 1.0 / (b * sg), 0.0], + [ + b * c * (cg * ca - cb) / (self.vol * sg), + a * c * (cb * cg - ca) / (self.vol * sg), + a * b * sg / self.vol, + ], + ] + ) def _calchkls(self): self.hkls = self.getHKLs(self.dmin) @@ -196,11 +219,11 @@ def _calchkls(self): def CalcLength(self, u, space): - if(space == 'd'): + if space == 'd': vlen = np.sqrt(np.dot(u, np.dot(self.dmt, u))) - elif(space == 'r'): + elif space == 'r': vlen = np.sqrt(np.dot(u, np.dot(self.rmt, u))) - elif(spec == 'c'): + elif space == 'c': vlen = np.linalg.norm(u) else: raise ValueError('incorrect space argument') return vlen def CalcDot(self, u, v, space): - if(space == 'd'): + if space == 'd': dot = np.dot(u, np.dot(self.dmt, v)) - elif(space == 'r'): + elif space == 'r': dot = np.dot(u, np.dot(self.rmt, v)) - elif(space == 'c'): + elif space == 'c': dot = np.dot(u, v) else: raise ValueError('space
is unidentified') @@ -221,12 +244,11 @@ def CalcDot(self, u, v, space): def getTTh(self, wavelength): tth = [] - self.dsp = _calc_dspacing(self.rmt.astype(np.float64), - self.hkls.astype(np.float64)) - tth, wavelength_allowed_hkls = \ - _get_tth(self.dsp, wavelength) - self.wavelength_allowed_hkls = \ - wavelength_allowed_hkls.astype(bool) + self.dsp = _calc_dspacing( + self.rmt.astype(np.float64), self.hkls.astype(np.float64) + ) + tth, wavelength_allowed_hkls = _get_tth(self.dsp, wavelength) + self.wavelength_allowed_hkls = wavelength_allowed_hkls.astype(bool) return tth def get_sf_hkl_factors(self): @@ -244,35 +266,35 @@ def get_sf_hkl_factors(self): """ if self.sgnum == 225: hkls = self.hkls.astype(np.float64) - H2 = np.sum(hkls**2,axis=1) + H2 = np.sum(hkls**2, axis=1) sf_affected = [] multiplicity = [] Lfact = [] Lfact_broadening = [] for g in hkls: gsym = self.CalcStar(g, 'r') - L0 = np.sum(gsym,axis=1) + L0 = np.sum(gsym, axis=1) sign = np.mod(L0, 3) sign[sign == 2] = -1 multiplicity.append(gsym.shape[0]) - Lfact.append(np.sum(L0*sign)) - Lfact_broadening.append(np.sum(np.abs(L0*sign))) + Lfact.append(np.sum(L0 * sign)) + Lfact_broadening.append(np.sum(np.abs(L0 * sign))) Lfact = np.array(Lfact) multiplicity = np.array(multiplicity) Lfact_broadening = np.array(Lfact_broadening) - Lfact_broadening = Lfact_broadening/(np.sqrt(H2)*multiplicity) - sf_f = (90.*np.sqrt(3)/np.pi**2)*Lfact/(H2*multiplicity) + Lfact_broadening = Lfact_broadening / (np.sqrt(H2) * multiplicity) + sf_f = (90.0 * np.sqrt(3) / np.pi**2) * Lfact / (H2 * multiplicity) return sf_f, Lfact_broadening else: return None, None def sf_and_twin_probability(self): self.sf_alpha = None - self.twin_beta = None + self.twin_beta = None if self.sgnum == 225: self.sf_alpha = 0.0 - self.twin_beta = 0.0 + self.twin_beta = 0.0 def GenerateRecipPGSym(self): @@ -298,19 +320,25 @@ def GenerateRecipPGSym(self): def CalcMaxGIndex(self): self.ih = 1 - while (1.0 / self.CalcLength( - np.array([self.ih, 0, 0], dtype=np.float64), 'r') - > self.dmin): + while ( + 1.0 + / self.CalcLength(np.array([self.ih, 0, 0], dtype=np.float64), 'r') + > self.dmin + ): self.ih = self.ih + 1 self.ik = 1 - while (1.0 / self.CalcLength( - np.array([0, self.ik, 0], dtype=np.float64), 'r') - > self.dmin): + while ( + 1.0 + / self.CalcLength(np.array([0, self.ik, 0], dtype=np.float64), 'r') + > self.dmin + ): self.ik = self.ik + 1 self.il = 1 - while (1.0 / self.CalcLength( - np.array([0, 0, self.il], dtype=np.float64), 'r') - > self.dmin): + while ( + 1.0 + / self.CalcLength(np.array([0, 0, self.il], dtype=np.float64), 'r') + > self.dmin + ): self.il = self.il + 1 def CalcStar(self, v, space, applyLaue=False): @@ -318,13 +346,13 @@ def CalcStar(self, v, space, applyLaue=False): this function calculates the symmetrically equivalent hkls (or uvws) for the reciprocal (or direct) point group symmetry. 
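getTTh above reduces to Bragg's law once _calc_dspacing has produced d-spacings from the reciprocal metric tensor. A standalone sketch of the same conversion (plain numpy; hexrd's _get_tth also flags reflections outside the wavelength limit, mirrored here with NaN):

    import numpy as np

    def tth_from_dspacing(dsp, wavelength):
        # Bragg's law: wavelength = 2 * d * sin(theta); inputs share one length unit
        s = wavelength / (2.0 * np.asarray(dsp, dtype=float))
        tth = np.full(s.shape, np.nan)
        allowed = s <= 1.0  # reflections with s > 1 cannot diffract at this wavelength
        tth[allowed] = 2.0 * np.degrees(np.arcsin(s[allowed]))
        return tth

    # Ni (111) and (200) d-spacings in nm against Cu K-alpha1 (0.15406 nm)
    print(tth_from_dspacing([0.2034, 0.1762], 0.15406))  # ~[44.5, 51.8] deg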
""" - if(space == 'd'): - if(applyLaue): + if space == 'd': + if applyLaue: sym = self.SYM_PG_d_laue else: sym = self.SYM_PG_d - elif(space == 'r'): - if(applyLaue): + elif space == 'r': + if applyLaue: sym = self.SYM_PG_r_laue else: sym = self.SYM_PG_r @@ -336,32 +364,32 @@ def CalcStar(self, v, space, applyLaue=False): # check if this is new isnew = True for vec in vsym: - if(np.sum(np.abs(vp - vec)) < 1E-4): + if np.sum(np.abs(vp - vec)) < 1e-4: isnew = False break - if(isnew): + if isnew: vsym = np.vstack((vsym, vp)) return vsym def removeinversion(self, ksym): """ - this function chooses a subset from a list + this function chooses a subset from a list of symmetrically equivalent reflections such that there are no g and -g present. """ klist = [] for i in range(ksym.shape[0]): - k = ksym[i,:] + k = ksym[i, :] kk = list(k) nkk = list(-k) if not klist: - if(np.sum(k) > np.sum(-k)): + if np.sum(k) > np.sum(-k): klist.append(kk) else: klist.append(nkk) else: - if ( (kk in klist) or (nkk in klist) ): + if (kk in klist) or (nkk in klist): pass else: klist.append(kk) @@ -378,9 +406,9 @@ def ChooseSymmetric(self, hkllist, InversionSymmetry=True): mask = np.ones(hkllist.shape[0], dtype=bool) laue = InversionSymmetry for i, g in enumerate(hkllist): - if(mask[i]): + if mask[i]: geqv = self.CalcStar(g, 'r', applyLaue=laue) - for r in geqv[1:, ]: + for r in geqv[1:,]: rid = np.where(np.all(r == hkllist, axis=1)) mask[rid] = False hkl = hkllist[mask, :].astype(np.int32) @@ -403,8 +431,14 @@ def SortHKL(self, hkllist): for g in hkllist: glen.append(np.round(self.CalcLength(g, 'r'), 8)) # glen = np.atleast_2d(np.array(glen,dtype=float)).T - dtype = [('glen', float), ('max', int), ('sum', int), - ('h', int), ('k', int), ('l', int)] + dtype = [ + ('glen', float), + ('max', int), + ('sum', int), + ('h', int), + ('k', int), + ('l', int), + ] a = [] for i, gl in enumerate(glen): g = hkllist[i, :] @@ -425,24 +459,29 @@ def getHKLs(self, dmin): are sampled for unique hkls. By convention we will ignore all l < 0 """ - hmin = -self.ih-1 + hmin = -self.ih - 1 hmax = self.ih - kmin = -self.ik-1 + kmin = -self.ik - 1 kmax = self.ik lmin = -1 lmax = self.il - hkllist = np.array([[ih, ik, il] for ih in np.arange(hmax, hmin, -1) - for ik in np.arange(kmax, kmin, -1) - for il in np.arange(lmax, lmin, -1)]) + hkllist = np.array( + [ + [ih, ik, il] + for ih in np.arange(hmax, hmin, -1) + for ik in np.arange(kmax, kmin, -1) + for il in np.arange(lmax, lmin, -1) + ] + ) hkl_allowed = Allowed_HKLs(self.sgnum, hkllist) hkl = [] dsp = [] hkl_dsp = [] for g in hkl_allowed: # ignore [0 0 0] as it is the direct beam - if(np.sum(np.abs(g)) != 0): - dspace = 1./self.CalcLength(g, 'r') - if(dspace >= dmin): + if np.sum(np.abs(g)) != 0: + dspace = 1.0 / self.CalcLength(g, 'r') + if dspace >= dmin: hkl_dsp.append(g) """ we now have a list of g vectors which are all within dmin range @@ -476,13 +515,13 @@ def shkl(self, val): """ set the shkl as array """ - if(len(val) != 15): - msg = (f"incorrect shape for shkl. " - f"shape should be (15, ).") + if len(val) != 15: + msg = f"incorrect shape for shkl. " f"shape should be (15, )." 
raise ValueError(msg) self._shkl = val + class Phases_LeBail: """ ======================================================================================== @@ -497,18 +536,23 @@ class Phases_LeBail: ========================================================================================= ========================================================================================= """ + def _kev(x): return valWUnit('beamenergy', 'energy', x, 'keV') def _nm(x): return valWUnit('lp', 'length', x, 'nm') - def __init__(self, material_file=None, - material_keys=None, - dmin=_nm(0.05), - wavelength={'alpha1': [_nm(0.15406), 1.0], - 'alpha2': [_nm(0.154443), 0.52]} - ): + def __init__( + self, + material_file=None, + material_keys=None, + dmin=_nm(0.05), + wavelength={ + 'alpha1': [_nm(0.15406), 1.0], + 'alpha2': [_nm(0.154443), 0.52], + }, + ): self.phase_dict = {} self.num_phases = 0 @@ -519,16 +563,18 @@ def __init__(self, material_file=None, """ wavelength_nm = {} for k, v in wavelength.items(): - wavelength_nm[k] = [valWUnit('lp', 'length', - v[0].getVal('nm'), 'nm'), v[1]] + wavelength_nm[k] = [ + valWUnit('lp', 'length', v[0].getVal('nm'), 'nm'), + v[1], + ] self.wavelength = wavelength_nm self.dmin = dmin - if(material_file is not None): - if(material_keys is not None): - if(type(material_keys) is not list): + if material_file is not None: + if material_keys is not None: + if type(material_keys) is not list: self.add(material_file, material_keys) else: self.add_many(material_file, material_keys) @@ -536,21 +582,23 @@ def __init__(self, material_file=None, def __str__(self): resstr = 'Phases in calculation:\n' for i, k in enumerate(self.phase_dict.keys()): - resstr += '\t'+str(i+1)+'. '+k+'\n' + resstr += '\t' + str(i + 1) + '. ' + k + '\n' return resstr def __getitem__(self, key): - if(key in self.phase_dict.keys()): + if key in self.phase_dict.keys(): return self.phase_dict[key] else: raise ValueError('phase with name not found') def __setitem__(self, key, mat_cls): - if(key in self.phase_dict.keys()): - warnings.warn('phase already in parameter \ - list. overwriting ...') - if(isinstance(mat_cls, Material_LeBail)): + if key in self.phase_dict.keys(): + warnings.warn( + 'phase already in parameter \ + list. overwriting ...' 
+ ) + if isinstance(mat_cls, Material_LeBail): self.phase_dict[key] = mat_cls else: raise ValueError('input not a material class') @@ -560,7 +608,7 @@ def __iter__(self): return self def __next__(self): - if(self.n < len(self.phase_dict.keys())): + if self.n < len(self.phase_dict.keys()): res = list(self.phase_dict.keys())[self.n] self.n += 1 return res @@ -573,28 +621,30 @@ def __len__(self): def add(self, material_file, material_key): self[material_key] = Material_LeBail( - fhdf=material_file, xtal=material_key, dmin=self.dmin) + fhdf=material_file, xtal=material_key, dmin=self.dmin + ) def add_many(self, material_file, material_keys): for k in material_keys: self[k] = Material_LeBail( - fhdf=material_file, xtal=k, dmin=self.dmin) + fhdf=material_file, xtal=k, dmin=self.dmin + ) self.num_phases += 1 for k in self: - self[k].pf = 1.0/len(self) + self[k].pf = 1.0 / len(self) self.material_file = material_file self.material_keys = material_keys def load(self, fname): """ - >> @AUTHOR: Saransh Singh, Lawrence Livermore National Lab, saransh1@llnl.gov - >> @DATE: 06/08/2020 SS 1.0 original - >> @DETAILS: load parameters from yaml file + >> @AUTHOR: Saransh Singh, Lawrence Livermore National Lab, saransh1@llnl.gov + >> @DATE: 06/08/2020 SS 1.0 original + >> @DETAILS: load parameters from yaml file """ with open(fname) as file: dic = yaml.load(file, Loader=yaml.FullLoader) @@ -605,9 +655,9 @@ def load(self, fname): def dump(self, fname): """ - >> @AUTHOR: Saransh Singh, Lawrence Livermore National Lab, saransh1@llnl.gov - >> @DATE: 06/08/2020 SS 1.0 original - >> @DETAILS: dump parameters to yaml file + >> @AUTHOR: Saransh Singh, Lawrence Livermore National Lab, saransh1@llnl.gov + >> @DATE: 06/08/2020 SS 1.0 original + >> @DETAILS: dump parameters to yaml file """ dic = {} k = self.material_file @@ -623,23 +673,24 @@ def dump_hdf5(self, file): >> @ DETAILS dumps the information from each material in the phase class to a hdf5 file specified by filename or h5py.File object """ - if(isinstance(file, str)): + if isinstance(file, str): fexist = path.isfile(file) - if(fexist): + if fexist: fid = h5py.File(file, 'r+') else: fid = h5py.File(file, 'x') - elif(isinstance(file, h5py.File)): + elif isinstance(file, h5py.File): fid = file else: raise RuntimeError( 'Parameters: dump_hdf5 Pass in a filename \ - string or h5py.File object') + string or h5py.File object' + ) - if("/Phases" in fid): - del(fid["Phases"]) + if "/Phases" in fid: + del fid["Phases"] gid_top = fid.create_group("Phases") for p in self: @@ -653,21 +704,22 @@ def dump_hdf5(self, file): gid = gid_top.create_group(p) - did = gid.create_dataset("SpaceGroupNumber", (1, ), dtype=np.int32) + did = gid.create_dataset("SpaceGroupNumber", (1,), dtype=np.int32) did.write_direct(np.array(sgnum, dtype=np.int32)) - did = gid.create_dataset( - "SpaceGroupSetting", (1, ), dtype=np.int32) + did = gid.create_dataset("SpaceGroupSetting", (1,), dtype=np.int32) did.write_direct(np.array(sgsetting, dtype=np.int32)) did = gid.create_dataset( - "LatticeParameters", (6, ), dtype=np.float64) + "LatticeParameters", (6,), dtype=np.float64 + ) did.write_direct(np.array(lparms, dtype=np.float64)) - did = gid.create_dataset("dmin", (1, ), dtype=np.float64) + did = gid.create_dataset("dmin", (1,), dtype=np.float64) did.attrs["units"] = "nm" did.write_direct(np.array(dmin, dtype=np.float64)) + class Material_Rietveld: """ =========================================================================================== @@ -685,16 +737,13 @@ class Material_Rietveld: 
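For reference, the group layout written by Phases_LeBail.dump_hdf5 above can be read back with plain h5py; a minimal sketch (the file name is hypothetical):

    import h5py

    with h5py.File('phases.h5', 'r') as fid:
        for name, gid in fid['/Phases'].items():
            sgnum = int(gid['SpaceGroupNumber'][0])
            lparms = gid['LatticeParameters'][:]   # shape (6,)
            dmin = float(gid['dmin'][0])           # gid['dmin'].attrs['units'] == 'nm'
            print(name, sgnum, lparms, dmin)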
========================================================================================== """ - def __init__(self, - fhdf=None, - xtal=None, - dmin=None, - kev=None, - material_obj=None): + def __init__( + self, fhdf=None, xtal=None, dmin=None, kev=None, material_obj=None + ): self._shkl = np.zeros((15,)) self.abs_fact = 1e4 - if(material_obj is None): + if material_obj is None: """ dmin in nm """ @@ -707,14 +756,18 @@ def __init__(self, self._readHDF(fhdf, xtal) self._calcrmt() self.sf_and_twin_probability() - if(self.aniU): + if self.aniU: self.calcBetaij() - self.SYM_SG, self.SYM_PG_d, self.SYM_PG_d_laue, \ - self.centrosymmetric, self.symmorphic = \ - symmetry.GenerateSGSym(self.sgnum, self.sgsetting) + ( + self.SYM_SG, + self.SYM_PG_d, + self.SYM_PG_d_laue, + self.centrosymmetric, + self.symmorphic, + ) = symmetry.GenerateSGSym(self.sgnum, self.sgsetting) self.latticeType = symmetry.latticeType(self.sgnum) - self.sg_hmsymbol = symbols.pstr_spacegroup[self.sgnum-1].strip() + self.sg_hmsymbol = symbols.pstr_spacegroup[self.sgnum - 1].strip() self.GenerateRecipPGSym() self.CalcMaxGIndex() self._calchkls() @@ -724,22 +777,21 @@ def __init__(self, self.sg = SpaceGroup(self.sgnum) else: - if(isinstance(material_obj, Material)): + if isinstance(material_obj, Material): self._init_from_materials(material_obj) else: raise ValueError( "Invalid material_obj argument. \ - only Material class can be passed here.") + only Material class can be passed here." + ) def _init_from_materials(self, material_obj): - """ - - """ + """initialize all fields from a hexrd Material object""" # name self.name = material_obj.name # inverse of absorption length - self.abs_fact = 1e-4 * (1./material_obj.absorption_length) + self.abs_fact = 1e-4 * (1.0 / material_obj.absorption_length) # min d-spacing for sampling hkl self.dmin = material_obj.dmin @@ -763,12 +815,16 @@ def _init_from_materials(self, material_obj): self.sg_hmsymbol = material_obj.unitcell.sg_hmsymbol # lattice parameters - self.lparms = np.array([material_obj.unitcell.a, - material_obj.unitcell.b, - material_obj.unitcell.c, - material_obj.unitcell.alpha, - material_obj.unitcell.beta, - material_obj.unitcell.gamma]) + self.lparms = np.array( + [ + material_obj.unitcell.a, + material_obj.unitcell.b, + material_obj.unitcell.c, + material_obj.unitcell.alpha, + material_obj.unitcell.beta, + material_obj.unitcell.gamma, + ] + ) # asymmetric atomic positions self.atom_pos = material_obj.unitcell.atom_pos @@ -776,7 +832,7 @@ def _init_from_materials(self, material_obj): # Debye-Waller factors including anisotropic ones self.U = material_obj.unitcell.U self.aniU = False - if(self.U.ndim > 1): + if self.U.ndim > 1: self.aniU = True self.betaij = material_obj.unitcell.betaij @@ -828,18 +884,20 @@ def _readHDF(self, fhdf, xtal): # if(fexist): fid = h5py.File(fhdf, 'r') name = xtal - xtal = "/"+xtal + xtal = "/" + xtal if xtal not in fid: - raise IOError('crystal doesn''t exist in material file.') + raise IOError("crystal doesn't exist in material file.") # else: # raise IOError('material file does not exist.') gid = fid.get(xtal) - self.sgnum = np.asscalar(np.array(gid.get('SpaceGroupNumber'), - dtype=np.int32)) - self.sgsetting = np.asscalar(np.array(gid.get('SpaceGroupSetting'), - dtype=np.int32)) + self.sgnum = np.asscalar( + np.array(gid.get('SpaceGroupNumber'), dtype=np.int32) + ) + self.sgsetting = np.asscalar( + np.array(gid.get('SpaceGroupSetting'), dtype=np.int32) + ) """ IMPORTANT NOTE: note that the latice parameters in EMsoft is nm by default """ @@ -851,7 +909,8 @@ def _readHDF(self, fhdf, xtal): # 
the last field in this is already self.atom_pos = np.transpose( - np.array(gid.get('AtomData'), dtype=np.float64)) + np.array(gid.get('AtomData'), dtype=np.float64) + ) # the U factors are related to B by the relation B = 8pi^2 U self.U = np.transpose(np.array(gid.get('U'), dtype=np.float64)) @@ -868,26 +927,30 @@ def calcBetaij(self): self.betaij = np.zeros([3, 3, self.atom_ntype]) for i in range(self.U.shape[0]): U = self.U[i, :] - self.betaij[:, :, i] = np.array([[U[0], U[3], U[4]], - [U[3], U[1], U[5]], - [U[4], U[5], U[2]]]) + self.betaij[:, :, i] = np.array( + [[U[0], U[3], U[4]], [U[3], U[1], U[5]], [U[4], U[5], U[2]]] + ) - self.betaij[:, :, i] *= 2. * np.pi**2 * self.aij + self.betaij[:, :, i] *= 2.0 * np.pi**2 * self.aij def CalcWavelength(self): # wavelength in nm - self.wavelength = constants.cPlanck * \ - constants.cLight / \ - constants.cCharge / \ - self.voltage + self.wavelength = ( + constants.cPlanck + * constants.cLight + / constants.cCharge + / self.voltage + ) self.wavelength *= 1e9 # self.CalcAnomalous() def CalcKeV(self): - self.kev = constants.cPlanck * \ - constants.cLight / \ - constants.cCharge / \ - self.wavelength + self.kev = ( + constants.cPlanck + * constants.cLight + / constants.cCharge + / self.wavelength + ) self.kev *= 1e-3 @@ -915,12 +978,16 @@ def _calcrmt(self): """ direct metric tensor """ - self.dmt = np.array([[a**2, a*b*cg, a*c*cb], - [a*b*cg, b**2, b*c*ca], - [a*c*cb, b*c*ca, c**2]]) + self.dmt = np.array( + [ + [a**2, a * b * cg, a * c * cb], + [a * b * cg, b**2, b * c * ca], + [a * c * cb, b * c * ca, c**2], + ] + ) self.vol = np.sqrt(np.linalg.det(self.dmt)) - if(self.vol < 1e-5): + if self.vol < 1e-5: warnings.warn('unitcell volume is suspiciously small') """ @@ -931,25 +998,39 @@ def _calcrmt(self): """ direct structure matrix """ - self.dsm = np.array([[a, b*cg, c*cb], - [0., b*sg, -c*(cb*cg - ca)/sg], - [0., 0., self.vol/(a*b*sg)]]) + self.dsm = np.array( + [ + [a, b * cg, c * cb], + [0.0, b * sg, -c * (cb * cg - ca) / sg], + [0.0, 0.0, self.vol / (a * b * sg)], + ] + ) """ reciprocal structure matrix """ - self.rsm = np.array([[1./a, 0., 0.], - [-1./(a*tg), 1./(b*sg), 0.], - [b*c*(cg*ca - cb)/(self.vol*sg), - a*c*(cb*cg - ca)/(self.vol*sg), - a*b*sg/self.vol]]) + self.rsm = np.array( + [ + [1.0 / a, 0.0, 0.0], + [-1.0 / (a * tg), 1.0 / (b * sg), 0.0], + [ + b * c * (cg * ca - cb) / (self.vol * sg), + a * c * (cb * cg - ca) / (self.vol * sg), + a * b * sg / self.vol, + ], + ] + ) ast = self.CalcLength([1, 0, 0], 'r') bst = self.CalcLength([0, 1, 0], 'r') cst = self.CalcLength([0, 0, 1], 'r') - self.aij = np.array([[ast**2, ast*bst, ast*cst], - [bst*ast, bst**2, bst*cst], - [cst*ast, cst*bst, cst**2]]) + self.aij = np.array( + [ + [ast**2, ast * bst, ast * cst], + [bst * ast, bst**2, bst * cst], + [cst * ast, cst * bst, cst**2], + ] + ) def get_sf_hkl_factors(self): """ @@ -966,35 +1047,35 @@ def get_sf_hkl_factors(self): """ if self.sgnum == 225: hkls = self.hkls.astype(np.float64) - H2 = np.sum(hkls**2,axis=1) + H2 = np.sum(hkls**2, axis=1) sf_affected = [] multiplicity = [] Lfact = [] Lfact_broadening = [] for g in hkls: gsym = self.CalcStar(g, 'r') - L0 = np.sum(gsym,axis=1) + L0 = np.sum(gsym, axis=1) sign = np.mod(L0, 3) sign[sign == 2] = -1 multiplicity.append(gsym.shape[0]) - Lfact.append(np.sum(L0*sign)) + Lfact.append(np.sum(L0 * sign)) Lfact_broadening.append(np.sum(np.abs(L0))) Lfact = np.array(Lfact) multiplicity = np.array(multiplicity) Lfact_broadening = np.array(Lfact_broadening) - Lfact_broadening = 
(H2*multiplicity)/Lfact_broadening - sf_f = (90.*np.sqrt(3)/np.pi**2)*Lfact/(H2*multiplicity) + Lfact_broadening = (H2 * multiplicity) / Lfact_broadening + sf_f = (90.0 * np.sqrt(3) / np.pi**2) * Lfact / (H2 * multiplicity) return sf_f, Lfact_broadening else: return None, None def sf_and_twin_probability(self): self.sf_alpha = None - self.twin_beta = None + self.twin_beta = None if self.sgnum == 225: self.sf_alpha = 0.0 - self.twin_beta = 0.0 + self.twin_beta = 0.0 def _calchkls(self): self.hkls, self.multiplicity = self.getHKLs(self.dmin) @@ -1003,11 +1084,11 @@ def _calchkls(self): def CalcLength(self, u, space): - if(space == 'd'): + if space == 'd': vlen = np.sqrt(np.dot(u, np.dot(self.dmt, u))) - elif(space == 'r'): + elif space == 'r': vlen = np.sqrt(np.dot(u, np.dot(self.rmt, u))) - elif(spec == 'c'): + elif space == 'c': vlen = np.linalg.norm(u) else: raise ValueError('incorrect space argument') @@ -1017,40 +1098,44 @@ def CalcLength(self, u, space): def getTTh(self, wavelength): tth = [] - self.dsp = _calc_dspacing(self.rmt.astype(np.float64), - self.hkls.astype(np.float64)) - tth, wavelength_allowed_hkls = \ - _get_tth(self.dsp, wavelength) + self.dsp = _calc_dspacing( + self.rmt.astype(np.float64), self.hkls.astype(np.float64) + ) + tth, wavelength_allowed_hkls = _get_tth(self.dsp, wavelength) self.wavelength_allowed_hkls = wavelength_allowed_hkls.astype(bool) return tth ''' transform between any crystal space to any other space. choices are 'd' (direct), 'r' (reciprocal) and 'c' (cartesian)''' + def TransSpace(self, v_in, inspace, outspace): - if(inspace == 'd'): - if(outspace == 'r'): + if inspace == 'd': + if outspace == 'r': v_out = np.dot(v_in, self.dmt) - elif(outspace == 'c'): + elif outspace == 'c': v_out = np.dot(self.dsm, v_in) else: raise ValueError( - 'inspace in ''d'' but outspace can''t be identified') + "inspace in 'd' but outspace can't be identified" + ) - elif(inspace == 'r'): + elif inspace == 'r': - if(outspace == 'd'): + if outspace == 'd': v_out = np.dot(v_in, self.rmt) - elif(outspace == 'c'): + elif outspace == 'c': v_out = np.dot(self.rsm, v_in) else: raise ValueError( - 'inspace in ''r'' but outspace can''t be identified') + "inspace in 'r' but outspace can't be identified" + ) - elif(inspace == 'c'): + elif inspace == 'c': - if(outspace == 'r'): + if outspace == 'r': v_out = np.dot(v_in, self.rsm) - elif(outspace == 'd'): + elif outspace == 'd': v_out = np.dot(v_in, self.dsm) else: raise ValueError( - 'inspace in ''c'' but outspace can''t be identified') + "inspace in 'c' but outspace can't be identified" + ) else: raise ValueError('incorrect inspace argument') return v_out @@ -1079,19 +1164,25 @@ def GenerateRecipPGSym(self): def CalcMaxGIndex(self): self.ih = 1 - while (1.0 / self.CalcLength( - np.array([self.ih, 0, 0], dtype=np.float64), 'r') - > self.dmin): + while ( + 1.0 + / self.CalcLength(np.array([self.ih, 0, 0], dtype=np.float64), 'r') + > self.dmin + ): self.ih = self.ih + 1 self.ik = 1 - while (1.0 / self.CalcLength( - np.array([0, self.ik, 0], dtype=np.float64), 'r') > - self.dmin): + while ( + 1.0 + / self.CalcLength(np.array([0, self.ik, 0], dtype=np.float64), 'r') + > self.dmin + ): self.ik = self.ik + 1 self.il = 1 - while (1.0 / self.CalcLength( - np.array([0, 0, self.il], dtype=np.float64), 'r') > - self.dmin): + while ( + 1.0 + / self.CalcLength(np.array([0, 0, self.il], dtype=np.float64), 'r') + > self.dmin + ): self.il = self.il + 1 def CalcStar(self, v, space, applyLaue=False): @@ -1099,13 +1190,13 @@ def
CalcStar(self, v, space, applyLaue=False): this function calculates the symmetrically equivalent hkls (or uvws) for the reciprocal (or direct) point group symmetry. """ - if(space == 'd'): - if(applyLaue): + if space == 'd': + if applyLaue: sym = self.SYM_PG_d_laue else: sym = self.SYM_PG_d - elif(space == 'r'): - if(applyLaue): + elif space == 'r': + if applyLaue: sym = self.SYM_PG_r_laue else: sym = self.SYM_PG_r @@ -1117,10 +1208,10 @@ def CalcStar(self, v, space, applyLaue=False): # check if this is new isnew = True for vec in vsym: - if(np.sum(np.abs(vp - vec)) < 1E-4): + if np.sum(np.abs(vp - vec)) < 1e-4: isnew = False break - if(isnew): + if isnew: vsym = np.vstack((vsym, vp)) return vsym @@ -1134,9 +1225,9 @@ def ChooseSymmetric(self, hkllist, InversionSymmetry=True): mask = np.ones(hkllist.shape[0], dtype=bool) laue = InversionSymmetry for i, g in enumerate(hkllist): - if(mask[i]): + if mask[i]: geqv = self.CalcStar(g, 'r', applyLaue=laue) - for r in geqv[1:, ]: + for r in geqv[1:,]: rid = np.where(np.all(r == hkllist, axis=1)) mask[rid] = False hkl = hkllist[mask, :].astype(np.int32) @@ -1159,8 +1250,14 @@ def SortHKL(self, hkllist): for g in hkllist: glen.append(np.round(self.CalcLength(g, 'r'), 8)) # glen = np.atleast_2d(np.array(glen,dtype=float)).T - dtype = [('glen', float), ('max', int), ('sum', int), - ('h', int), ('k', int), ('l', int)] + dtype = [ + ('glen', float), + ('max', int), + ('sum', int), + ('h', int), + ('k', int), + ('l', int), + ] a = [] for i, gl in enumerate(glen): g = hkllist[i, :] @@ -1181,24 +1278,29 @@ def getHKLs(self, dmin): are sampled for unique hkls. By convention we will ignore all l < 0 """ - hmin = -self.ih-1 + hmin = -self.ih - 1 hmax = self.ih - kmin = -self.ik-1 + kmin = -self.ik - 1 kmax = self.ik lmin = -1 lmax = self.il - hkllist = np.array([[ih, ik, il] for ih in np.arange(hmax, hmin, -1) - for ik in np.arange(kmax, kmin, -1) - for il in np.arange(lmax, lmin, -1)]) + hkllist = np.array( + [ + [ih, ik, il] + for ih in np.arange(hmax, hmin, -1) + for ik in np.arange(kmax, kmin, -1) + for il in np.arange(lmax, lmin, -1) + ] + ) hkl_allowed = Allowed_HKLs(self.sgnum, hkllist) hkl = [] dsp = [] hkl_dsp = [] for g in hkl_allowed: # ignore [0 0 0] as it is the direct beam - if(np.sum(np.abs(g)) != 0): - dspace = 1./self.CalcLength(g, 'r') - if(dspace >= dmin): + if np.sum(np.abs(g)) != 0: + dspace = 1.0 / self.CalcLength(g, 'r') + if dspace >= dmin: hkl_dsp.append(g) """ we now have a list of g vectors which are all within dmin range @@ -1239,7 +1341,7 @@ def CalcPositions(self): n = 1 r = self.atom_pos[i, 0:3] - r = np.hstack((r, 1.)) + r = np.hstack((r, 1.0)) asym_pos.append(np.broadcast_to(r[0:3], [1, 3])) @@ -1251,18 +1353,18 @@ def CalcPositions(self): # coordinates between 0-1 rr = rnew[0:3] rr = np.modf(rr)[0] - rr[rr < 0.] += 1. - rr[np.abs(rr) < 1.0E-6] = 0. 
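The wrapping logic above keeps the symmetry-generated fractional coordinates inside [0, 1); as a standalone sketch of just that step (plain numpy, mirroring CalcPositions):

    import numpy as np

    def wrap_fractional(r, tol=1e-6):
        # keep only the fractional part of each coordinate
        rr = np.modf(np.asarray(r, dtype=float))[0]
        rr[rr < 0.0] += 1.0            # shift negative values into [0, 1)
        rr[np.abs(rr) < tol] = 0.0     # clamp numerical dust to exactly zero
        return rr

    print(wrap_fractional([1.25, -0.25, 1e-9]))  # -> [0.25 0.75 0.  ]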
+ rr[rr < 0.0] += 1.0 + rr[np.abs(rr) < 1.0e-6] = 0.0 # check if this is new isnew = True for j in range(n): - if(np.sum(np.abs(rr - asym_pos[i][j, :])) < 1E-4): + if np.sum(np.abs(rr - asym_pos[i][j, :])) < 1e-4: isnew = False break # if its new add this to the list - if(isnew): + if isnew: asym_pos[i] = np.vstack((asym_pos[i], rr)) n += 1 @@ -1274,111 +1376,110 @@ def CalcPositions(self): def InitializeInterpTable(self): f_anomalous_data = [] - data = importlib.resources.open_binary(hexrd.core.resources, 'Anomalous.h5') + data = importlib.resources.open_binary( + hexrd.core.resources, 'Anomalous.h5' + ) with h5py.File(data, 'r') as fid: for i in range(0, self.atom_ntype): Z = self.atom_type[i] elem = constants.ptableinverse[Z] - gid = fid.get('/'+elem) + gid = fid.get('/' + elem) data = np.array(gid.get('data')) - data = data[:,[7,1,2]] + data = data[:, [7, 1, 2]] f_anomalous_data.append(data) n = max([x.shape[0] for x in f_anomalous_data]) - self.f_anomalous_data = np.zeros([self.atom_ntype,n,3]) - self.f_anomalous_data_sizes = np.zeros([self.atom_ntype,], - dtype=np.int32) + self.f_anomalous_data = np.zeros([self.atom_ntype, n, 3]) + self.f_anomalous_data_sizes = np.zeros( + [ + self.atom_ntype, + ], + dtype=np.int32, + ) for i in range(self.atom_ntype): nd = f_anomalous_data[i].shape[0] self.f_anomalous_data_sizes[i] = nd - self.f_anomalous_data[i,:nd,:] = f_anomalous_data[i] + self.f_anomalous_data[i, :nd, :] = f_anomalous_data[i] - def CalcXRSF(self, - wavelength, - w_int): + def CalcXRSF(self, wavelength, w_int): """ the 1E-2 is to convert to A^-2 since the fitting is done in those units """ - fNT = np.zeros([self.atom_ntype,]) - frel = np.zeros([self.atom_ntype,]) - scatfac = np.zeros([self.atom_ntype,11]) + fNT = np.zeros([self.atom_ntype]) + frel = np.zeros([self.atom_ntype]) + scatfac = np.zeros([self.atom_ntype, 11]) f_anomalous_data = self.f_anomalous_data aniU = self.aniU - occ = self.atom_pos[:,3] + occ = self.atom_pos[:, 3] if aniU: betaij = self.betaij else: betaij = self.U - self.numat = np.zeros(self.atom_ntype,dtype=np.int32) + self.numat = np.zeros(self.atom_ntype, dtype=np.int32) for i in range(0, self.atom_ntype): self.numat[i] = self.asym_pos[i].shape[0] Z = self.atom_type[i] elem = constants.ptableinverse[Z] - scatfac[i,:] = constants.scatfac[elem] + scatfac[i, :] = constants.scatfac[elem] frel[i] = constants.frel[elem] fNT[i] = constants.fNT[elem] - self.asym_pos_arr = np.zeros([self.numat.max(),self.atom_ntype, 3]) + self.asym_pos_arr = np.zeros([self.numat.max(), self.atom_ntype, 3]) for i in range(0, self.atom_ntype): nn = self.numat[i] - self.asym_pos_arr[:nn,i,:] = self.asym_pos[i] + self.asym_pos_arr[:nn, i, :] = self.asym_pos[i] nref = self.hkls.shape[0] - sf, sf_raw = _calcxrsf(self.hkls.astype(np.float64), - nref, - self.multiplicity, - w_int, - wavelength, - self.rmt.astype(np.float64), - self.atom_type, - self.atom_ntype, - betaij, - occ, - self.asym_pos_arr, - self.numat, - scatfac, - fNT, - frel, - f_anomalous_data, - self.f_anomalous_data_sizes) + sf, sf_raw = _calcxrsf( + self.hkls.astype(np.float64), + nref, + self.multiplicity, + w_int, + wavelength, + self.rmt.astype(np.float64), + self.atom_type, + self.atom_ntype, + betaij, + occ, + self.asym_pos_arr, + self.numat, + scatfac, + fNT, + frel, + f_anomalous_data, + self.f_anomalous_data_sizes, + ) return sf, sf_raw - def calc_extinction(self, - wavelength, - tth, - f_sqr, - shape_factor_K, - particle_size_D): + def calc_extinction( + self, wavelength, tth, f_sqr, shape_factor_K, particle_size_D + 
): hkls = self.hkls v_unitcell = self.vol - extinction = _calc_extinction_factor(hkls, - tth, - v_unitcell*1e3, - wavelength, - f_sqr, - shape_factor_K, - particle_size_D) + extinction = _calc_extinction_factor( + hkls, + tth, + v_unitcell * 1e3, + wavelength, + f_sqr, + shape_factor_K, + particle_size_D, + ) return extinction - def calc_absorption(self, - tth, - phi, - wavelength): + def calc_absorption(self, tth, phi, wavelength): abs_fact = self.abs_fact - absorption = _calc_absorption_factor(abs_fact, - tth, - phi, - wavelength) + absorption = _calc_absorption_factor(abs_fact, tth, phi, wavelength) return absorption @@ -1394,13 +1495,13 @@ def shkl(self, val): """ set the shkl as array """ - if(len(val) != 15): - msg = (f"incorrect shape for shkl. " - f"shape should be (15, ).") + if len(val) != 15: + msg = f"incorrect shape for shkl. " f"shape should be (15, )." raise ValueError(msg) self._shkl = val + class Phases_Rietveld: """ ============================================================================================== @@ -1415,18 +1516,23 @@ class Phases_Rietveld: ============================================================================================== ============================================================================================= """ + def _kev(x): return valWUnit('beamenergy', 'energy', x, 'keV') def _nm(x): return valWUnit('lp', 'length', x, 'nm') - def __init__(self, material_file=None, - material_keys=None, - dmin=_nm(0.05), - wavelength={'alpha1': [_nm(0.15406), 1.], 'alpha2': [ - _nm(0.154443), 0.52]} - ): + def __init__( + self, + material_file=None, + material_keys=None, + dmin=_nm(0.05), + wavelength={ + 'alpha1': [_nm(0.15406), 1.0], + 'alpha2': [_nm(0.154443), 0.52], + }, + ): self.phase_dict = {} self.num_phases = 0 @@ -1437,18 +1543,20 @@ def __init__(self, material_file=None, """ wavelength_nm = {} for k, v in wavelength.items(): - if(v[0].unit == 'angstrom'): + if v[0].unit == 'angstrom': wavelength_nm[k] = [ - valWUnit('lp', 'length', v[0].getVal("nm"), 'nm'), v[1]] + valWUnit('lp', 'length', v[0].getVal("nm"), 'nm'), + v[1], + ] else: wavelength_nm[k] = v self.wavelength = wavelength_nm self.dmin = dmin - if(material_file is not None): - if(material_keys is not None): - if(type(material_keys) is not list): + if material_file is not None: + if material_keys is not None: + if type(material_keys) is not list: self.add(material_file, material_keys) else: self.add_many(material_file, material_keys) @@ -1456,18 +1564,18 @@ def __init__(self, material_file=None, def __str__(self): resstr = 'Phases in calculation:\n' for i, k in enumerate(self.phase_dict.keys()): - resstr += '\t'+str(i+1)+'. '+k+'\n' + resstr += '\t' + str(i + 1) + '. ' + k + '\n' return resstr def __getitem__(self, key): - if(key in self.phase_dict.keys()): + if key in self.phase_dict.keys(): return self.phase_dict[key] else: raise ValueError('phase with name not found') def __setitem__(self, key, mat_cls): - if(key in self.phase_dict.keys()): + if key in self.phase_dict.keys(): warnings.warn('phase already in parameter list. 
overwriting ...') # if(isinstance(mat_cls, Material_Rietveld)): self.phase_dict[key] = mat_cls @@ -1479,7 +1587,7 @@ def __iter__(self): return self def __next__(self): - if(self.n < len(self.phase_dict.keys())): + if self.n < len(self.phase_dict.keys()): res = list(self.phase_dict.keys())[self.n] self.n += 1 return res @@ -1498,11 +1606,12 @@ def add(self, material_file, material_key): E *= 1e-3 kev = valWUnit('beamenergy', 'energy', E, 'keV') self[material_key][l] = Material_Rietveld( - material_file, material_key, dmin=self.dmin, kev=kev) + material_file, material_key, dmin=self.dmin, kev=kev + ) for k in self: for l in self.wavelength: - self[k][l].pf = 1.0/self.num_phases + self[k][l].pf = 1.0 / self.num_phases def add_many(self, material_file, material_keys): @@ -1511,25 +1620,30 @@ def add_many(self, material_file, material_keys): self.num_phases += 1 for l in self.wavelength: lam = self.wavelength[l][0].getVal('nm') * 1e-9 - E = constants.cPlanck * constants.cLight / \ - constants.cCharge / lam + E = ( + constants.cPlanck + * constants.cLight + / constants.cCharge + / lam + ) E *= 1e-3 kev = valWUnit('beamenergy', 'energy', E, 'keV') self[k][l] = Material_Rietveld( - material_file, k, dmin=self.dmin, kev=kev) + material_file, k, dmin=self.dmin, kev=kev + ) for k in self: for l in self.wavelength: - self[k][l].pf = 1.0/self.num_phases + self[k][l].pf = 1.0 / self.num_phases self.material_file = material_file self.material_keys = material_keys def load(self, fname): """ - >> @AUTHOR: Saransh Singh, Lawrence Livermore National Lab, saransh1@llnl.gov - >> @DATE: 06/08/2020 SS 1.0 original - >> @DETAILS: load parameters from yaml file + >> @AUTHOR: Saransh Singh, Lawrence Livermore National Lab, saransh1@llnl.gov + >> @DATE: 06/08/2020 SS 1.0 original + >> @DETAILS: load parameters from yaml file """ with open(fname) as file: dic = yaml.load(file, Loader=yaml.FullLoader) @@ -1540,9 +1654,9 @@ def load(self, fname): def dump(self, fname): """ - >> @AUTHOR: Saransh Singh, Lawrence Livermore National Lab, saransh1@llnl.gov - >> @DATE: 06/08/2020 SS 1.0 original - >> @DETAILS: dump parameters to yaml file + >> @AUTHOR: Saransh Singh, Lawrence Livermore National Lab, saransh1@llnl.gov + >> @DATE: 06/08/2020 SS 1.0 original + >> @DETAILS: dump parameters to yaml file """ dic = {} k = self.material_file @@ -1558,13 +1672,15 @@ def phase_fraction(self): l = list(self.wavelength.keys())[0] pf.append(self[k][l].pf) pf = np.array(pf) - return pf/np.sum(pf) + return pf / np.sum(pf) @phase_fraction.setter def phase_fraction(self, val): - msg = (f"phase_fraction setter: " - f"number of phases does not match" - f"size of input") + msg = ( + f"phase_fraction setter: " + f"number of phases does not match" + f"size of input" + ) if isinstance(val, list): if len(val) != len(self): @@ -1573,6 +1689,6 @@ def phase_fraction(self, val): if val.shape[0] != len(self): raise ValueError(msg) - for ii,k in enumerate(self): + for ii, k in enumerate(self): for l in self.wavelength: self[k][l].pf = val[ii] diff --git a/hexrd/powder/wppf/spectrum.py b/hexrd/powder/wppf/spectrum.py index c87960e01..84e702875 100644 --- a/hexrd/powder/wppf/spectrum.py +++ b/hexrd/powder/wppf/spectrum.py @@ -2,6 +2,7 @@ import h5py from os import path + class Spectrum: """ ================================================================================== @@ -18,11 +19,11 @@ class Spectrum: def __init__(self, x=None, y=None, name=''): if x is None: - self._x = np.linspace(10., 100., 500) + self._x = np.linspace(10.0, 100.0, 500) else: 
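The Spectrum class introduced in this hunk pairs a two-theta grid with intensities; a minimal usage sketch (module path taken from the diff header above; data and rebin refer to methods shown further down in this file's hunks):

    from hexrd.powder.wppf.spectrum import Spectrum

    s = Spectrum(name='demo')   # default: 500 points on [10, 100] deg two-theta
    tth, intensity = s.data     # scaling, offset and background are applied here
    coarse = s.rebin(0.5)       # new Spectrum on a 0.5 deg grid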
self._x = x if y is None: - self._y = np.log(self._x ** 2) - (self._x * 0.2) ** 2 + self._y = np.log(self._x**2) - (self._x * 0.2) ** 2 else: self._y = y self.name = name @@ -69,51 +70,51 @@ def rebin(self, bin_size): new_x = np.arange(x_min, x_max + 0.1 * bin_size, bin_size) bins = np.hstack((x_min - bin_size * 0.5, new_x + bin_size * 0.5)) - new_y = (np.histogram(x, bins, weights=y) - [0] / np.histogram(x, bins)[0]) + new_y = np.histogram(x, bins, weights=y)[0] / np.histogram(x, bins)[0] return Spectrum(new_x, new_y) def dump_hdf5(self, file, name): """ - >> @AUTHOR: Saransh Singh, Lawrence Livermore National Lab, saransh1@llnl.gov - >> @DATE: 01/15/2021 SS 1.0 original - >> @DETAILS: dump the class to a hdf5 file. the file argument could either be a - string or a h5.File instance. If it is a filename, then HDF5 file - is created, a Spectrum group is created and data is written out. - Else data written to Spectrum group in existing file object - >> @PARAMS file file name string or h5py.File object - name name ID of the spectrum e.g. experimental or simulated or background + >> @AUTHOR: Saransh Singh, Lawrence Livermore National Lab, saransh1@llnl.gov + >> @DATE: 01/15/2021 SS 1.0 original + >> @DETAILS: dump the class to a hdf5 file. the file argument could either be a + string or a h5.File instance. If it is a filename, then HDF5 file + is created, a Spectrum group is created and data is written out. + Else data written to Spectrum group in existing file object + >> @PARAMS file file name string or h5py.File object + name name ID of the spectrum e.g. experimental or simulated or background """ - if(isinstance(file, str)): + if isinstance(file, str): fexist = path.isfile(file) - if(fexist): + if fexist: fid = h5py.File(file, 'r+') else: fid = h5py.File(file, 'x') - elif(isinstance(file, h5py.File)): + elif isinstance(file, h5py.File): fid = file else: raise RuntimeError( 'Parameters: dump_hdf5 Pass in a filename \ - string or h5py.File object') + string or h5py.File object' + ) - name_spectrum = 'Spectrum/'+name - if(name_spectrum in fid): - del(fid[name_spectrum]) + name_spectrum = 'Spectrum/' + name + if name_spectrum in fid: + del fid[name_spectrum] gid = fid.create_group(name_spectrum) tth, I = self.data # make sure these arrays are not zero sized - if(tth.shape[0] > 0): + if tth.shape[0] > 0: did = gid.create_dataset("tth", tth.shape, dtype=np.float64) did.write_direct(tth.astype(np.float64)) - if(I.shape[0] > 0): + if I.shape[0] > 0: did = gid.create_dataset("intensity", I.shape, dtype=np.float64) did.write_direct(I.astype(np.float64)) @@ -128,22 +129,22 @@ def data(self): f_bkg = interp1d(x_bkg, y_bkg, kind='linear') # find overlapping x and y values: - ind = np.where((self._x <= np.max(x_bkg)) & - (self._x >= np.min(x_bkg))) + ind = np.where( + (self._x <= np.max(x_bkg)) & (self._x >= np.min(x_bkg)) + ) x = self._x[ind] y = self._y[ind] if len(x) == 0: - """ if there is no overlapping between background - and Spectrum, raise an error """ + """if there is no overlapping between background + and Spectrum, raise an error""" raise BkgNotInRangeError(self.name) y = y * self._scaling + self.offset - f_bkg(x) else: - """ if Spectrum and bkg have the same - x basis we just delete y-y_bkg""" - x, y = self._x, self._y * \ - self._scaling + self.offset - y_bkg + """if Spectrum and bkg have the same + x basis we just delete y-y_bkg""" + x, y = self._x, self._y * self._scaling + self.offset - y_bkg else: x, y = self.original_data @@ -171,8 +172,7 @@ def data_array(self): @property def 
original_data(self): - return self._x, self._y * self._scaling +\ - self.offset + return self._x, self._y * self._scaling + self.offset @property def x(self): @@ -203,13 +203,15 @@ def scaling(self, value): def limit(self, x_min, x_max): x, y = self.data - return Spectrum(x[np.where((x_min < x) & (x < x_max))], - y[np.where((x_min < x) & (x < x_max))]) + return Spectrum( + x[np.where((x_min < x) & (x < x_max))], + y[np.where((x_min < x) & (x < x_max))], + ) def extend_to(self, x_value, y_value): """ - Extends the current Spectrum to a specific x_value by filling it - with the y_value. Does not modify inplace but returns a new filled + Extends the current Spectrum to a specific x_value by filling it + with the y_value. Does not modify inplace but returns a new filled Spectrum :param x_value: Point to which extend the Spectrum should be smaller than the lowest x-value in the Spectrum or vice versa @@ -220,15 +222,16 @@ def extend_to(self, x_value, y_value): x_min = np.min(self.x) x_max = np.max(self.x) if x_value < x_min: - x_fill = np.arange(x_min - x_step, x_value - - x_step*0.5, -x_step)[::-1] + x_fill = np.arange( + x_min - x_step, x_value - x_step * 0.5, -x_step + )[::-1] y_fill = np.zeros(x_fill.shape) y_fill.fill(y_value) new_x = np.concatenate((x_fill, self.x)) new_y = np.concatenate((y_fill, self.y)) elif x_value > x_max: - x_fill = np.arange(x_max + x_step, x_value+x_step*0.5, x_step) + x_fill = np.arange(x_max + x_step, x_value + x_step * 0.5, x_step) y_fill = np.zeros(x_fill.shape) y_fill.fill(y_value) @@ -241,6 +244,7 @@ def extend_to(self, x_value, y_value): def plot(self, show=False, *args, **kwargs): import matplotlib.pyplot as plt + plt.plot(self.x, self.y, *args, **kwargs) if show: plt.show() @@ -265,8 +269,9 @@ def __sub__(self, other): other_fcn = interp1d(other_x, other_x, kind='linear') # find overlapping x and y values: - ind = np.where((orig_x <= np.max(other_x)) & - (orig_x >= np.min(other_x))) + ind = np.where( + (orig_x <= np.max(other_x)) & (orig_x >= np.min(other_x)) + ) x = orig_x[ind] y = orig_y[ind] @@ -287,8 +292,9 @@ def __add__(self, other): other_fcn = interp1d(other_x, other_x, kind='linear') # find overlapping x and y values: - ind = np.where((orig_x <= np.max(other_x)) & - (orig_x >= np.min(other_x))) + ind = np.where( + (orig_x <= np.max(other_x)) & (orig_x >= np.min(other_x)) + ) x = orig_x[ind] y = orig_y[ind] diff --git a/hexrd/powder/wppf/texture.py b/hexrd/powder/wppf/texture.py index 88d5f8061..c7f25e484 100644 --- a/hexrd/powder/wppf/texture.py +++ b/hexrd/powder/wppf/texture.py @@ -16,6 +16,7 @@ # HEXRD imports import hexrd.core.resources + # FIXME: unused imports @saransh13? # from hexrd.core.transforms.xfcapi import angles_to_gvec # from hexrd.powder.wppf import phase @@ -42,13 +43,14 @@ # FIXME: these are available in hexrd.core.constants @saransh13 I3 = np.eye(3) -Xl = np.ascontiguousarray(I3[:, 0].reshape(3, 1)) # X in the lab frame -Yl = np.ascontiguousarray(I3[:, 1].reshape(3, 1)) # Z in the lab frame -Zl = np.ascontiguousarray(I3[:, 2].reshape(3, 1)) # Z in the lab frame +Xl = np.ascontiguousarray(I3[:, 0].reshape(3, 1)) # X in the lab frame +Yl = np.ascontiguousarray(I3[:, 1].reshape(3, 1)) # Z in the lab frame +Zl = np.ascontiguousarray(I3[:, 2].reshape(3, 1)) # Z in the lab frame bVec_ref = -Zl eta_ref = Xl + class mesh_s2: """ this class deals with the basic functions of the s2 mesh. the @@ -58,10 +60,12 @@ class is initialized just based on the symmetry. 
the main functions is the main class used for computing the general axis distribution function. """ - def __init__(self, - symmetry): - data = importlib.resources.open_binary(hexrd.core.resources, "surface_harmonics.h5") + def __init__(self, symmetry): + + data = importlib.resources.open_binary( + hexrd.core.resources, "surface_harmonics.h5" + ) with h5py.File(data, 'r') as fid: gname = f"{symmetry}" @@ -80,15 +84,16 @@ def __init__(self, pts = np.array(fid[dname]) n = np.linalg.norm(pts, axis=1) - self.points = pts/np.tile(n, [3,1]).T + self.points = pts / np.tile(n, [3, 1]).T - points_st = self.points[:,:2]/np.tile( - (1.+np.abs(self.points[:,2])),[2,1]).T + points_st = ( + self.points[:, :2] + / np.tile((1.0 + np.abs(self.points[:, 2])), [2, 1]).T + ) self.mesh = Delaunay(points_st, qhull_options="QJ") - def _get_simplices(self, - points): + def _get_simplices(self, points): """ this function is used to get the index of the simplex in which the point lies. this is the first step to @@ -97,10 +102,11 @@ def _get_simplices(self, """ n = np.linalg.norm(points, axis=1) - points = points/np.tile(n, [3,1]).T + points = points / np.tile(n, [3, 1]).T - points_st = points[:,:2]/np.tile( - (1.+np.abs(points[:,2])),[2,1]).T + points_st = ( + points[:, :2] / np.tile((1.0 + np.abs(points[:, 2])), [2, 1]).T + ) simplices = self.mesh.find_simplex(points_st) @@ -110,19 +116,19 @@ def _get_simplices(self, by a number close to one to bring them closer to the origin. the program should ideally never get here """ - mask = (simplices == -1) - simplices[mask] = self.mesh.find_simplex(points_st[mask,:]*0.995) + mask = simplices == -1 + simplices[mask] = self.mesh.find_simplex(points_st[mask, :] * 0.995) if -1 in simplices: - msg = (f"some points seem to not be in the " - f"mesh. please check input") + msg = ( + f"some points seem to not be in the " + f"mesh. please check input" + ) raise RuntimeError(msg) return simplices - def _get_barycentric_coordinates(self, - points): - + def _get_barycentric_coordinates(self, points): """ get the barycentric coordinates of points this is used for linear interpolation of @@ -134,10 +140,9 @@ def _get_barycentric_coordinates(self, to unit length then take the stereographic projection """ n = np.linalg.norm(points, axis=1) - points = points/np.tile(n, [3,1]).T + points = points / np.tile(n, [3, 1]).T - points_st = points[:,:2]/np.tile( - (1.+points[:,2]),[2,1]).T + points_st = points[:, :2] / np.tile((1.0 + points[:, 2]), [2, 1]).T """ next get the simplices. a value of -1 is returned @@ -153,25 +158,31 @@ def _get_barycentric_coordinates(self, to compute T^{-1}(r-r3) see wikipedia for more details """ - bary_center = [self.mesh.transform[simplices[i],:2].dot( - (np.transpose(points_st[i,:] - - self.mesh.transform[simplices[i],2]))) - for i in np.arange(points.shape[0])] + bary_center = [ + self.mesh.transform[simplices[i], :2].dot( + ( + np.transpose( + points_st[i, :] - self.mesh.transform[simplices[i], 2] + ) + ) + ) + for i in np.arange(points.shape[0]) + ] bary_center = np.array(bary_center) """ the fourth coordinate is 1 - sum of the other three """ - bary_center = np.hstack((bary_center, - 1. - np.atleast_2d(bary_center.sum(axis=1)).T)) + bary_center = np.hstack( + (bary_center, 1.0 - np.atleast_2d(bary_center.sum(axis=1)).T) + ) - bary_center[np.abs(bary_center)<1e-9] = 0. 
+ bary_center[np.abs(bary_center) < 1e-9] = 0.0 return np.array(bary_center), simplices - def _get_equivalent_node(self, - node_id): + def _get_equivalent_node(self, node_id): """ given the index of the node, find out the equivalent node. if the node is already one @@ -181,7 +192,7 @@ def _get_equivalent_node(self, the scipy Delaunay function """ node_id = node_id - mask = node_id+1 > self.nindp + mask = node_id + 1 > self.nindp eqv_node_id = np.zeros(node_id.shape).astype(np.int32) """ @@ -194,16 +205,15 @@ def _get_equivalent_node(self, using the eqv array """ if not np.all(mask == False): - eqv_id = np.array([np.where(self.eqv[:,0] == i+1) - for i in node_id[mask]]).astype(np.int32) + eqv_id = np.array( + [np.where(self.eqv[:, 0] == i + 1) for i in node_id[mask]] + ).astype(np.int32) eqv_id = np.squeeze(eqv_id) - eqv_node_id[mask] = self.eqv[eqv_id,1]-1 + eqv_node_id[mask] = self.eqv[eqv_id, 1] - 1 return eqv_node_id - - def _get_harmonic_values(self, - points_inp): + def _get_harmonic_values(self, points_inp): """ this is the main function which compute the value of the harmonic function at a given set @@ -228,20 +238,26 @@ def _get_harmonic_values(self, bary_center, simplex_id = self._get_barycentric_coordinates(points) node_id = self.mesh.simplices[simplex_id] - eqv_node_id = np.array([self._get_equivalent_node(nid) - for nid in node_id]).astype(np.int32) + eqv_node_id = np.array( + [self._get_equivalent_node(nid) for nid in node_id] + ).astype(np.int32) - fval = np.array([self.harmonics[nid,:] for nid in eqv_node_id]) + fval = np.array([self.harmonics[nid, :] for nid in eqv_node_id]) nharm = self.harmonics.shape[1] - fval_points = np.array([np.sum(np.tile(bary_center[i,:],[nharm,1]).T* - fval[i,:,:],axis=0) for i in range(points.shape[0])]) + fval_points = np.array( + [ + np.sum( + np.tile(bary_center[i, :], [nharm, 1]).T * fval[i, :, :], + axis=0, + ) + for i in range(points.shape[0]) + ] + ) return np.atleast_2d(fval_points) - def num_invariant_harmonic(self, - max_degree): - + def num_invariant_harmonic(self, max_degree): """ >> @AUTHOR: Saransh Singh, Lawrence Livermore National Lab, saransh1@llnl.gov >> @DATE: 06/29/2021 SS 1.0 original @@ -261,8 +277,8 @@ def num_invariant_harmonic(self, """ if self.symmetry == "cylindrical": v = [] - for i in range(0,max_degree+1,2): - v.append([i,1]) + for i in range(0, max_degree + 1, 2): + v.append([i, 1]) v = np.array(v).astype(np.int32) return v else: @@ -283,8 +299,8 @@ def num_invariant_harmonic(self, else: nn = [x[0] for x in num] mmax = max(nn) - coef = np.zeros([mmax+1,]) - coef[0] = 1. + coef = np.zeros([mmax + 1]) + coef[0] = 1.0 for m in num: coef[m[0]] = m[1] polyn.append(Polynomial(coef)) @@ -293,28 +309,26 @@ def num_invariant_harmonic(self, multiply all of the polynomials in the denominator with the ones in the numerator """ - poly = Polynomial([1.]) + poly = Polynomial([1.0]) for pd in polyd: if not pd: break else: - poly = poly*pd + poly = poly * pd for pn in polyn: if not pn: break else: - poly = poly*pn + poly = poly * pn - poly = poly.truncate(max_degree+1) + poly = poly.truncate(max_degree + 1) idx = np.nonzero(poly.coef) v = poly.coef[idx] - return np.vstack((idx,v)).T.astype(np.int32) + return np.vstack((idx, v)).T.astype(np.int32) - def denominator(self, - m, - max_degree): + def denominator(self, m, max_degree): """ this function computes the Maclaurin expansion of the function 1/(1-t^m) @@ -323,10 +337,10 @@ def denominator(self, 1/(1-x^m) = 1 + x^m + x^2m + x^3m ... 
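The method body follows below; as a standalone check of the expansion just described, using the same numpy.polynomial.Polynomial machinery as num_invariant_harmonic above:

    import numpy as np
    from numpy.polynomial import Polynomial

    def maclaurin_denominator(m, max_degree):
        # Maclaurin series of 1/(1 - t^m): coefficient 1 at every multiple of m
        coeff = np.zeros(max_degree + 1)
        coeff[::m] = 1.0
        return Polynomial(coeff)

    # only degrees 0, 4, 8 survive for m = 4, max_degree = 10
    print(maclaurin_denominator(4, 10).coef)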
""" - coeff = np.zeros([max_degree+1, ]) - ideg = 1+int(max_degree/m) + coeff = np.zeros([max_degree + 1]) + ideg = 1 + int(max_degree / m) for i in np.arange(ideg): - idx = i*m + idx = i * m coeff[idx] = 1.0 return Polynomial(coeff) @@ -337,13 +351,13 @@ class harmonic_model: this class brings all the elements together to compute the texture model given the sample and crystal symmetry. """ - def __init__(self, - pole_figures, - sample_symmetry, - max_degree): + + def __init__(self, pole_figures, sample_symmetry, max_degree): self.pole_figures = pole_figures - self.crystal_symmetry = pole_figures.material.sg.laueGroup_international + self.crystal_symmetry = ( + pole_figures.material.sg.laueGroup_international + ) self.sample_symmetry = sample_symmetry self.max_degree = max_degree self.mesh_crystal = mesh_s2(self.crystal_symmetry) @@ -353,7 +367,11 @@ def __init__(self, self.itercounter = 0 ncoeff = self._num_coefficients() - self.coeff = np.zeros([ncoeff,]) + self.coeff = np.zeros( + [ + ncoeff, + ] + ) def init_harmonic_values(self): """ @@ -369,13 +387,13 @@ def init_harmonic_values(self): pole_figures = self.pole_figures for ii in np.arange(pole_figures.num_pfs): - key = str(pole_figures.hkls[ii,:])[1:-1].replace(" ", "") - hkl = np.atleast_2d(pole_figures.hkls_c[ii,:]) + key = str(pole_figures.hkls[ii, :])[1:-1].replace(" ", "") + hkl = np.atleast_2d(pole_figures.hkls_c[ii, :]) sample_dir = pole_figures.gvecs[key] - self.V_c_allowed[key], self.V_s_allowed[key] = \ - self._compute_harmonic_values_grid(hkl, - sample_dir) + self.V_c_allowed[key], self.V_s_allowed[key] = ( + self._compute_harmonic_values_grid(hkl, sample_dir) + ) self.allowed_degrees = self._allowed_degrees() def init_equiangular_grid(self): @@ -386,9 +404,9 @@ def init_equiangular_grid(self): calc_pole_figures function instead of here. 
""" angs = [] - for tth in np.arange(0,91,5): + for tth in np.arange(0, 91, 5): for eta in np.arange(0, 360, 5): - angs.append([np.radians(tth), np.radians(eta), 0.]) + angs.append([np.radians(tth), np.radians(eta), 0.0]) if tth == 0: break angs = np.array(angs) @@ -406,7 +424,7 @@ def init_coeff_params(self): self.coeff_loc = {} self.coeff_loc_inv = {} ctr = 0 - for ii,(k,v) in enumerate(self.allowed_degrees.items()): + for ii, (k, v) in enumerate(self.allowed_degrees.items()): if k > 0: for icry in np.arange(v[0]): for isamp in np.arange(v[1]): @@ -415,15 +433,20 @@ def init_coeff_params(self): self.coeff_loc_inv[ctr] = vname idx = self.coeff_loc[vname] ctr += 1 - params.add(vname,value=self.coeff[idx], - vary=True,min=-np.inf,max=np.inf) + params.add( + vname, + value=self.coeff[idx], + vary=True, + min=-np.inf, + max=np.inf, + ) if hasattr(self, 'phon'): val = self.phon else: val = 0.0 - params.add("phon",value=val,vary=True,min=0.0) + params.add("phon", value=val, vary=True, min=0.0) return params @@ -432,8 +455,8 @@ def set_coeff_from_param(self, params): this function takes the the values in the parameters and sets the values of the coefficients """ - self.coeff = np.zeros([len(self.coeff_loc),]) - for ii,k in enumerate(params): + self.coeff = np.zeros([len(self.coeff_loc)]) + for ii, k in enumerate(params): if k.lower() != "phon": loc = self.coeff_loc[k] self.coeff[loc] = params[k].value @@ -448,45 +471,41 @@ def residual_function(self, params): self.set_coeff_from_param(params) pf_recalc = self.recalculate_pole_figures() - for ii,(k,v) in enumerate(self.pole_figures.pfdata.items()): - inp_intensity = np.squeeze(v[:,3]) + for ii, (k, v) in enumerate(self.pole_figures.pfdata.items()): + inp_intensity = np.squeeze(v[:, 3]) calc_intensity = np.squeeze(pf_recalc[k]) - diff = (inp_intensity-calc_intensity) + diff = inp_intensity - calc_intensity if ii == 0: residual = diff vals = inp_intensity - weights = 1./np.sqrt(inp_intensity) + weights = 1.0 / np.sqrt(inp_intensity) weights[np.isnan(weights)] = 0.0 else: - residual = np.hstack((residual,diff)) - vals = np.hstack((vals,inp_intensity)) - ww = 1./np.sqrt(inp_intensity) + residual = np.hstack((residual, diff)) + vals = np.hstack((vals, inp_intensity)) + ww = 1.0 / np.sqrt(inp_intensity) ww[np.isnan(ww)] = 0.0 - weights = np.hstack((weights,ww)) + weights = np.hstack((weights, ww)) - err = (weights*residual)**2 + err = (weights * residual) ** 2 wss = np.sum(err) - den = np.sum((weights*vals)**2) + den = np.sum((weights * vals) ** 2) - Rwp = np.sqrt(wss/den) - if np.mod(self.itercounter,100) == 0: + Rwp = np.sqrt(wss / den) + if np.mod(self.itercounter, 100) == 0: msg = f"iteration# {self.itercounter}, Rwp error = {Rwp*100} %" print(msg) self.itercounter += 1 return err - def _compute_harmonic_values_grid(self, - hkl, - sample_dir): + def _compute_harmonic_values_grid(self, hkl, sample_dir): """ compute the dictionary of invariant harmonic values for a given set of sample directions and hkls """ - ninv_c = self.mesh_crystal.num_invariant_harmonic( - self.max_degree) + ninv_c = self.mesh_crystal.num_invariant_harmonic(self.max_degree) - ninv_s = self.mesh_sample.num_invariant_harmonic( - self.max_degree) + ninv_s = self.mesh_sample.num_invariant_harmonic(self.max_degree) V_c = self.mesh_crystal._get_harmonic_values(hkl) V_s = self.mesh_sample._get_harmonic_values(sample_dir) @@ -498,100 +517,97 @@ def _compute_harmonic_values_grid(self, """ V_c_allowed = {} V_s_allowed = {} - for i in np.arange(0,self.max_degree+1,2): - if i in 
ninv_c[:,0] and i in ninv_s[:,0]: + for i in np.arange(0, self.max_degree + 1, 2): + if i in ninv_c[:, 0] and i in ninv_s[:, 0]: istc, ienc = self._index_of_harmonics(i, "crystal") - V_c_allowed[i] = V_c[:,istc:ienc] + V_c_allowed[i] = V_c[:, istc:ienc] ists, iens = self._index_of_harmonics(i, "sample") - V_s_allowed[i] = V_s[:,ists:iens] + V_s_allowed[i] = V_s[:, ists:iens] return V_c_allowed, V_s_allowed - def compute_texture_factor(self, - coeff): + def compute_texture_factor(self, coeff): """ first check if the size of the coef vector is consistent with the max degree argumeents for crystal and sample. """ - ncoeff = coeff.shape[0]+1 + ncoeff = coeff.shape[0] + 1 ncoeff_inv = self._num_coefficients() if ncoeff < ncoeff_inv: - msg = (f"inconsistent number of entries in " - f"coefficients based on the degree of " - f"crystal and sample harmonic degrees. " - f"needed {ncoeff_inv}, found {ncoeff}") + msg = ( + f"inconsistent number of entries in " + f"coefficients based on the degree of " + f"crystal and sample harmonic degrees. " + f"needed {ncoeff_inv}, found {ncoeff}" + ) raise ValueError(msg) elif ncoeff > ncoeff_inv: - msg = (f"more coefficients passed than required " - f"based on the degree of crystal and " - f"sample harmonic degrees. " - f"needed {ncoeff_inv}, found {ncoeff}. " - f"ignoring extra terms.") + msg = ( + f"more coefficients passed than required " + f"based on the degree of crystal and " + f"sample harmonic degrees. " + f"needed {ncoeff_inv}, found {ncoeff}. " + f"ignoring extra terms." + ) warn(msg) coeff = coeff[:ncoeff_inv] tex_fact = {} for g in self.pole_figures.hkls: - key = str(g)[1:-1].replace(" ","") + key = str(g)[1:-1].replace(" ", "") nsamp = self.pole_figures.gvecs[key].shape[0] - tex_fact[key] = np.zeros([nsamp,]) - - tex_fact[key] = self._compute_sum(nsamp, - coeff, - self.allowed_degrees, - self.V_c_allowed[key], - self.V_s_allowed[key]) + tex_fact[key] = np.zeros([nsamp]) + + tex_fact[key] = self._compute_sum( + nsamp, + coeff, + self.allowed_degrees, + self.V_c_allowed[key], + self.V_s_allowed[key], + ) return tex_fact - def _index_of_harmonics(self, - deg, - c_or_s): + def _index_of_harmonics(self, deg, c_or_s): """ calculate the start and end index of harmonics of a given degree and crystal or sample symmetry returns the start and end index """ - ninv_c = self.mesh_crystal.num_invariant_harmonic( - self.max_degree) + ninv_c = self.mesh_crystal.num_invariant_harmonic(self.max_degree) - ninv_s = self.mesh_sample.num_invariant_harmonic( - self.max_degree) + ninv_s = self.mesh_sample.num_invariant_harmonic(self.max_degree) - ninv_c_csum = np.r_[0,np.cumsum(ninv_c[:,1])] - ninv_s_csum = np.r_[0,np.cumsum(ninv_s[:,1])] + ninv_c_csum = np.r_[0, np.cumsum(ninv_c[:, 1])] + ninv_s_csum = np.r_[0, np.cumsum(ninv_s[:, 1])] if c_or_s.lower() == "crystal": - idx = np.where(ninv_c[:,0] == deg)[0] - return int(ninv_c_csum[idx]), int(ninv_c_csum[idx+1]) + idx = np.where(ninv_c[:, 0] == deg)[0] + return int(ninv_c_csum[idx]), int(ninv_c_csum[idx + 1]) elif c_or_s.lower() == "sample": - idx = np.where(ninv_s[:,0] == deg)[0] - return int(ninv_s_csum[idx]), int(ninv_s_csum[idx+1]) + idx = np.where(ninv_s[:, 0] == deg)[0] + return int(ninv_s_csum[idx]), int(ninv_s_csum[idx + 1]) else: msg = f"unknown input to c_or_s" raise ValueError(msg) - def _compute_sum(self, - nsamp, - coeff, - allowed_degrees, - V_c_allowed, - V_s_allowed): + def _compute_sum( + self, nsamp, coeff, allowed_degrees, V_c_allowed, V_s_allowed + ): """ compute the degree by degree sum in the generalized 
axis distribution function """ tmp = copy.deepcopy(allowed_degrees) del tmp[0] - nn = np.cumsum(np.array([tmp[k][0]*tmp[k][1] - for k in tmp])) - ncoeff_csum = np.r_[0,nn] + nn = np.cumsum(np.array([tmp[k][0] * tmp[k][1] for k in tmp])) + ncoeff_csum = np.r_[0, nn] - val = np.ones([nsamp,])+self.phon - for i,(k,v) in enumerate(tmp.items()): + val = np.ones([nsamp]) + self.phon + for i, (k, v) in enumerate(tmp.items()): deg = k kc = V_c_allowed[deg] @@ -601,11 +617,13 @@ def _compute_sum(self, nu = ks.shape[0] ist = ncoeff_csum[i] - ien = ncoeff_csum[i+1] + ien = ncoeff_csum[i + 1] - C = np.reshape(coeff[ist:ien],[mu, nu]) + C = np.reshape(coeff[ist:ien], [mu, nu]) - val = val + np.squeeze(np.dot(kc,np.dot(C,ks))*4*np.pi/(2*k+1)) + val = val + np.squeeze( + np.dot(kc, np.dot(C, ks)) * 4 * np.pi / (2 * k + 1) + ) return val @@ -615,19 +633,17 @@ def _num_coefficients(self): independent coefficients required for the given maximum degree of harmonics """ - ninv_c = self.mesh_crystal.num_invariant_harmonic( - self.max_degree) + ninv_c = self.mesh_crystal.num_invariant_harmonic(self.max_degree) - ninv_s = self.mesh_sample.num_invariant_harmonic( - self.max_degree) + ninv_s = self.mesh_sample.num_invariant_harmonic(self.max_degree) ncoef_inv = 0 - for i in np.arange(0,self.max_degree+1,2): - if i in ninv_c[:,0] and i in ninv_s[:,0]: - idc = int(np.where(ninv_c[:,0] == i)[0]) - ids = int(np.where(ninv_s[:,0] == i)[0]) + for i in np.arange(0, self.max_degree + 1, 2): + if i in ninv_c[:, 0] and i in ninv_s[:, 0]: + idc = int(np.where(ninv_c[:, 0] == i)[0]) + ids = int(np.where(ninv_s[:, 0] == i)[0]) - ncoef_inv += ninv_c[idc,1]*ninv_s[ids,1] + ncoef_inv += ninv_c[idc, 1] * ninv_s[ids, 1] return ncoef_inv def _allowed_degrees(self): @@ -636,11 +652,9 @@ def _allowed_degrees(self): and the corresponding number of harmonics for crystal and sample symmetry """ - ninv_c = self.mesh_crystal.num_invariant_harmonic( - self.max_degree) + ninv_c = self.mesh_crystal.num_invariant_harmonic(self.max_degree) - ninv_s = self.mesh_sample.num_invariant_harmonic( - self.max_degree) + ninv_s = self.mesh_sample.num_invariant_harmonic(self.max_degree) """ some degrees for which the crystal symmetry has @@ -649,21 +663,27 @@ def _allowed_degrees(self): """ allowed_degrees = {} - for i in np.arange(0,self.max_degree+1,2): - if i in ninv_c[:,0] and i in ninv_s[:,0]: - idc = int(np.where(ninv_c[:,0] == i)[0]) - ids = int(np.where(ninv_s[:,0] == i)[0]) + for i in np.arange(0, self.max_degree + 1, 2): + if i in ninv_c[:, 0] and i in ninv_s[:, 0]: + idc = int(np.where(ninv_c[:, 0] == i)[0]) + ids = int(np.where(ninv_s[:, 0] == i)[0]) - allowed_degrees[i] = [ninv_c[idc,1], ninv_s[ids,1]] + allowed_degrees[i] = [ninv_c[idc, 1], ninv_s[ids, 1]] return allowed_degrees def refine(self): params = self.init_coeff_params() - fdict = {'ftol': 1e-6, 'xtol': 1e-6, 'gtol': 1e-6, - 'verbose': 0, 'max_nfev': 20000, 'method':'trf', - 'jac':'3-point'} + fdict = { + 'ftol': 1e-6, + 'xtol': 1e-6, + 'gtol': 1e-6, + 'verbose': 0, + 'max_nfev': 20000, + 'method': 'trf', + 'jac': '3-point', + } fitter = Minimizer(self.residual_function, params) @@ -680,11 +700,7 @@ def recalculate_pole_figures(self): """ return self.compute_texture_factor(self.coeff) - - - def calc_pole_figures(self, - hkls, - grid="equiangular"): + def calc_pole_figures(self, hkls, grid="equiangular"): """ given a set of hkl, coefficients and maximum degree of harmonic function to use for both crystal and sample @@ -724,45 +740,45 @@ def calc_pole_figures(self, eHat_l = 
self.pole_figures.eHat_l chi = self.pole_figures.chi - t = angs[:,0] - r = angs[:,1] + t = angs[:, 0] + r = angs[:, 1] st = np.sin(t) ct = np.cos(t) sr = np.sin(r) cr = np.cos(r) pfdata = {} for g in hkls: - key = str(g)[1:-1].replace(" ","") - xyz = np.vstack((st*cr,st*sr,ct)).T - v = angs[:,2] - pfdata[key] = np.vstack((xyz.T,v)).T - - args = (mat, hkls, pfdata) - kwargs = {"bHat_l":bHat_l, - "eHat_l":eHat_l, - "chi":chi} + key = str(g)[1:-1].replace(" ", "") + xyz = np.vstack((st * cr, st * sr, ct)).T + v = angs[:, 2] + pfdata[key] = np.vstack((xyz.T, v)).T + + args = (mat, hkls, pfdata) + kwargs = {"bHat_l": bHat_l, "eHat_l": eHat_l, "chi": chi} self.pf_equiangular = pole_figures(*args, **kwargs) - model = harmonic_model(self.pf_equiangular, - self.sample_symmetry, - self.max_degree) + model = harmonic_model( + self.pf_equiangular, self.sample_symmetry, self.max_degree + ) model.coeff = self.coeff model.phon = self.phon pf = model.recalculate_pole_figures() pfdata = {} - for k,v in self.pf_equiangular.pfdata.items(): - pfdata[k] = np.hstack(( - np.degrees(model.pole_figures.angs[k]), - np.atleast_2d(pf[k]).T )) + for k, v in self.pf_equiangular.pfdata.items(): + pfdata[k] = np.hstack( + ( + np.degrees(model.pole_figures.angs[k]), + np.atleast_2d(pf[k]).T, + ) + ) return pfdata - def calc_inverse_pole_figures(self, - sample_dir="ND", - grid="equiangular", - resolution = 5.0): + def calc_inverse_pole_figures( + self, sample_dir="ND", grid="equiangular", resolution=5.0 + ): """ given a sample direction such as TD, RD and ND, calculate the distribution of crystallographic @@ -780,24 +796,21 @@ def calc_inverse_pole_figures(self, we will sample the entire northern hemisphere. the resulting IPDF will have symmetry of crystal """ - ipf = inverse_pole_figures(sample_dir, - sampling=grid, - resolution=resolution) + ipf = inverse_pole_figures( + sample_dir, sampling=grid, resolution=resolution + ) vc, vs = self._compute_ipdf_mesh_vals(ipf) - ipdf = self._compute_ipdf(ipf,vc,vs) + ipdf = self._compute_ipdf(ipf, vc, vs) angs = ipf.angs - return np.hstack((angs,np.atleast_2d(ipdf).T)) + return np.hstack((angs, np.atleast_2d(ipdf).T)) - def _compute_ipdf_mesh_vals(self, - ipf): + def _compute_ipdf_mesh_vals(self, ipf): """ compute the inverse pole density function. 
""" - ninv_c = self.mesh_crystal.num_invariant_harmonic( - self.max_degree) + ninv_c = self.mesh_crystal.num_invariant_harmonic(self.max_degree) - ninv_s = self.mesh_sample.num_invariant_harmonic( - self.max_degree) + ninv_s = self.mesh_sample.num_invariant_harmonic(self.max_degree) V_c = self.mesh_crystal._get_harmonic_values(ipf.crystal_dir) V_s = self.mesh_sample._get_harmonic_values(ipf.sample_dir) @@ -809,22 +822,18 @@ def _compute_ipdf_mesh_vals(self, """ V_c_allowed = {} V_s_allowed = {} - for i in np.arange(0,self.max_degree+1,2): - if i in ninv_c[:,0] and i in ninv_s[:,0]: + for i in np.arange(0, self.max_degree + 1, 2): + if i in ninv_c[:, 0] and i in ninv_s[:, 0]: istc, ienc = self._index_of_harmonics(i, "crystal") - V_c_allowed[i] = V_c[:,istc:ienc] + V_c_allowed[i] = V_c[:, istc:ienc] ists, iens = self._index_of_harmonics(i, "sample") - V_s_allowed[i] = V_s[:,ists:iens] - - return V_c_allowed,V_s_allowed + V_s_allowed[i] = V_s[:, ists:iens] + return V_c_allowed, V_s_allowed - def _compute_ipdf(self, - ipf, - vc, - vs): + def _compute_ipdf(self, ipf, vc, vs): """ compute the generalized axis distribution function sum for the coefficients @@ -832,15 +841,14 @@ def _compute_ipdf(self, allowed_degrees = self.allowed_degrees tmp = copy.deepcopy(allowed_degrees) del tmp[0] - nn = np.cumsum(np.array([tmp[k][0]*tmp[k][1] - for k in tmp])) - ncoeff_csum = np.r_[0,nn] + nn = np.cumsum(np.array([tmp[k][0] * tmp[k][1] for k in tmp])) + ncoeff_csum = np.r_[0, nn] nsamp = ipf.sample_dir.shape[0] ncryst = ipf.crystal_dir.shape[0] coeff = self.coeff - val = np.ones([ncryst,])+self.phon - for i,(k,v) in enumerate(tmp.items()): + val = np.ones([ncryst]) + self.phon + for i, (k, v) in enumerate(tmp.items()): deg = k kc = vc[deg] @@ -850,11 +858,13 @@ def _compute_ipdf(self, nu = ks.shape[0] ist = ncoeff_csum[i] - ien = ncoeff_csum[i+1] + ien = ncoeff_csum[i + 1] - C = np.reshape(coeff[ist:ien],[mu, nu]) + C = np.reshape(coeff[ist:ien], [mu, nu]) - val = val + np.squeeze(np.dot(kc,np.dot(C,ks))*4*np.pi/(2*k+1)) + val = val + np.squeeze( + np.dot(kc, np.dot(C, ks)) * 4 * np.pi / (2 * k + 1) + ) return val @@ -863,11 +873,10 @@ def write_pole_figures(self, pfdata): take output of the calc_pole_figures routine and write it out as text files """ - for k,v in pfdata.items(): + for k, v in pfdata.items(): fname = f"pf_{k}.txt" np.savetxt(fname, v, fmt="%10.4f", delimiter="\t") - @property def phon(self): return self._phon @@ -876,19 +885,17 @@ def phon(self): def phon(self, val): self._phon = val - @property def J(self): tmp = copy.deepcopy(self.allowed_degrees) del tmp[0] - nn = np.cumsum(np.array([tmp[k][0]*tmp[k][1] - for k in tmp])) - ncoeff_csum = np.r_[0,nn] + nn = np.cumsum(np.array([tmp[k][0] * tmp[k][1] for k in tmp])) + ncoeff_csum = np.r_[0, nn] J = 1.0 - for ii,k in enumerate(tmp): + for ii, k in enumerate(tmp): ist = ncoeff_csum[ii] - ien = ncoeff_csum[ii+1] - J += np.sum(self.coeff[ist:ien]**2)/(2*k+1) + ien = ncoeff_csum[ii + 1] + J += np.sum(self.coeff[ist:ien] ** 2) / (2 * k + 1) return J @@ -905,13 +912,10 @@ class pole_figures: @DATE 10/06/2021 SS changes input from angles to unit vectors and added routines to compute the angles """ - def __init__(self, - material, - hkls, - pfdata, - bHat_l=bVec_ref, - eHat_l=eta_ref, - chi=0.): + + def __init__( + self, material, hkls, pfdata, bHat_l=bVec_ref, eHat_l=eta_ref, chi=0.0 + ): """ material Either a Material object of Material_Rietveld object hkl reciprocal lattice vectors for which pole figures are @@ -936,9 +940,11 @@ def 
__init__(self, self.chi = chi if hkls.shape[0] != len(pfdata): - msg = (f"pole figure initialization.\n" + msg = ( + f"pole figure initialization.\n" f"# reciprocal reflections = {hkls.shape[0]}.\n" - f"# of entries in pfdata = {len(pfdata)}.") + f"# of entries in pfdata = {len(pfdata)}." + ) raise RuntimeError(msg) self.pfdata = pfdata @@ -952,8 +958,8 @@ def convert_hkls_to_cartesian(self): for ii, g in enumerate(self.hkls): v = self.material.TransSpace(g, "r", "c") - v = v/np.linalg.norm(v) - self.hkls_c[ii,:] = v + v = v / np.linalg.norm(v) + self.hkls_c[ii, :] = v def write_data(self, prefix): """ @@ -965,12 +971,12 @@ def write_data(self, prefix): fname = f"{prefix}_{k}.txt" angs = np.degrees(self.angs[k]) intensities = np.atleast_2d(self.intensities[k]).T - data = np.hstack((angs,intensities)) + data = np.hstack((angs, intensities)) np.savetxt(fname, data, delimiter="\t") @property def num_pfs(self): - """ number of pole figures (read only) """ + """number of pole figures (read only)""" return len(self.pfdata) """ @@ -978,6 +984,7 @@ def num_pfs(self): in the form of a dictionary with keys as the hkl values and the value as the (tth, eta, omega) array. """ + @property def pfdata(self): return self._pfdata @@ -988,16 +995,16 @@ def pfdata(self, val): self._intensities = {} self._gvecs = {} self._angs = {} - for k,v in val.items(): - norm = np.linalg.norm(v[:,0:3],axis=1) - v[:,0:3] = v[:,0:3]/np.tile(norm, [3,1]).T - t = np.arccos(v[:,2]) - rho = np.arctan2(v[:,1],v[:,0]) + for k, v in val.items(): + norm = np.linalg.norm(v[:, 0:3], axis=1) + v[:, 0:3] = v[:, 0:3] / np.tile(norm, [3, 1]).T + t = np.arccos(v[:, 2]) + rho = np.arctan2(v[:, 1], v[:, 0]) - self._gvecs[k] = v[:,0:3] + self._gvecs[k] = v[:, 0:3] self._pfdata[k] = v - self._intensities[k] = v[:,3] - self._angs[k] = np.vstack((t,rho)).T + self._intensities[k] = v[:, 3] + self._angs[k] = np.vstack((t, rho)).T @property def gvecs(self): @@ -1011,15 +1018,14 @@ def angs(self): def intensities(self): return self._intensities + class inverse_pole_figures: """ this class deals with everything related to inverse pole figures. """ - def __init__(self, - sample_dir, - sampling="equiangular", - resolution=5.0): + + def __init__(self, sample_dir, sampling="equiangular", resolution=5.0): """ this is the initialization of the class. the inputs are 1. laue_sym for laue symmetry @@ -1035,29 +1041,27 @@ def __init__(self, self.resolution = resolution self.sampling = sampling - def initialize_crystal_dir(self, - samplingtype, - resolution=5.0): + def initialize_crystal_dir(self, samplingtype, resolution=5.0): """ this function prepares the unit vectors of the stereogram """ if samplingtype.lower() == "equiangular": angs = [] - for tth in np.arange(0,91,resolution): + for tth in np.arange(0, 91, resolution): for eta in np.arange(0, 360, resolution): angs.append([np.radians(tth), np.radians(eta)]) if tth == 0: break angs = np.array(angs) - self.crystal_dir = np.zeros([angs.shape[0],3]) - for i,a in enumerate(angs): + self.crystal_dir = np.zeros([angs.shape[0], 3]) + for i, a in enumerate(angs): t, r = a st = np.sin(t) ct = np.cos(t) sr = np.sin(r) cr = np.cos(r) - self.crystal_dir[i,:] = np.array([st*cr,st*sr,ct]) + self.crystal_dir[i, :] = np.array([st * cr, st * sr, ct]) if samplingtype.lower() == "fem": msg = "sampling type FEM not implemented yet." 
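# A standalone sketch of the equiangular stereogram sampling that
# initialize_crystal_dir builds above; the helper name and defaults here
# are illustrative only and not part of the hexrd patch. Assumes numpy.
import numpy as np

def equiangular_unit_vectors(resolution=5.0):
    # walk the northern hemisphere: polar angle 0..90 deg, azimuth
    # 0..360 deg; the pole (tth == 0) is appended once, matching the
    # `break` in initialize_crystal_dir
    angs = []
    for tth in np.arange(0, 91, resolution):
        for eta in np.arange(0, 360, resolution):
            angs.append([np.radians(tth), np.radians(eta)])
            if tth == 0:
                break
    angs = np.array(angs)
    t, r = angs[:, 0], angs[:, 1]
    # spherical -> Cartesian on the unit sphere, one row per direction
    return np.column_stack(
        (np.sin(t) * np.cos(r), np.sin(t) * np.sin(r), np.cos(t))
    )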
@@ -1065,7 +1069,7 @@ def initialize_crystal_dir,

     @property
     def sample_dir(self):
-        """ sample direction for IPF """
+        """sample direction for IPF"""
         return self._sample_dir

     @sample_dir.setter
@@ -1074,13 +1078,13 @@ def sample_dir(self, val):
         # sample_dir size = nx3
         if isinstance(val, str):
             if val.upper() == "RD":
-                self._sample_dir = np.atleast_2d([1.,0.,0.])
+                self._sample_dir = np.atleast_2d([1.0, 0.0, 0.0])
                 self._sample_dir_name = "RD"
             elif val.upper() == "TD":
-                self._sample_dir = np.atleast_2d([0.,1.,0.])
+                self._sample_dir = np.atleast_2d([0.0, 1.0, 0.0])
                 self._sample_dir_name = "TD"
             elif val.upper() == "ND":
-                self._sample_dir = np.atleast_2d([0.,0.,1.])
+                self._sample_dir = np.atleast_2d([0.0, 0.0, 1.0])
                 self._sample_dir_name = "ND"
             else:
                 msg = f"unknown direction."
@@ -1088,8 +1092,10 @@ def sample_dir(self, val):
         elif isinstance(val, np.array):
             v = np.atleast_2d(val)
             if v.shape[1] != 3:
-                msg = (f"incorrect shape for sample_dir input.\n"
-                       f"expected nx3, got {val.shape[0]}x{val.shape[1]}")
+                msg = (
+                    f"incorrect shape for sample_dir input.\n"
+                    f"expected nx3, got {val.shape[0]}x{val.shape[1]}"
+                )
                 raise ValueError(msg)
             self._sample_dir = v
             self._sample_dir_name = "array"
@@ -1101,8 +1107,10 @@ def resolution(self):
     @resolution.setter
     def resolution(self, val):
         if val < 1.0:
-            msg = (f"the resolution appears to be very fine.\n"
-                   f"Are you sure the value is in degrees?")
+            msg = (
+                f"the resolution appears to be very fine.\n"
+                f"Are you sure the value is in degrees?"
+            )
             warn(msg)
         self._resolution = val

@@ -1113,41 +1121,25 @@ def sampling(self):
     @sampling.setter
     def sampling(self, val):
         if val.lower() == "equiangular":
-            self.initialize_crystal_dir("equiangular",
-                                        resolution=self.resolution)
+            self.initialize_crystal_dir(
+                "equiangular", resolution=self.resolution
+            )
         elif val.lower() == "fem":
             self.initialize_crystal_dir("FEM")

     @property
     def angs(self):
-        polar = np.arccos(self.crystal_dir[:,2])
-        az = np.arctan2(self.crystal_dir[:,1],self.crystal_dir[:,0])
-        return np.degrees(np.vstack((polar,az)).T)
-
-Polya = {
-    "m35":
-    {"numerator":[],
-     "denominator":[6, 10]},
-
-    "532":
-    {"numerator":[[15, 1.]],
-     "denominator":[6, 10]},
-
-    "m3m":
-    {"numerator":[],
-     "denominator":[4, 6]},
-
-    "432":
-    {"numerator":[[9, 1.]],
-     "denominator":[4, 6]},
+        polar = np.arccos(self.crystal_dir[:, 2])
+        az = np.arctan2(self.crystal_dir[:, 1], self.crystal_dir[:, 0])
+        return np.degrees(np.vstack((polar, az)).T)

-    "1":
-    {"numerator":[[1, 1.]],
-     "denominator":[1, 1]},
-    "-1":
-    {"numerator":[[2, 3.]],
-     "denominator":[2, 2]},
-
-    }
+Polya = {
+    "m35": {"numerator": [], "denominator": [6, 10]},
+    "532": {"numerator": [[15, 1.0]], "denominator": [6, 10]},
+    "m3m": {"numerator": [], "denominator": [4, 6]},
+    "432": {"numerator": [[9, 1.0]], "denominator": [4, 6]},
+    "1": {"numerator": [[1, 1.0]], "denominator": [1, 1]},
+    "-1": {"numerator": [[2, 3.0]], "denominator": [2, 2]},
+}
diff --git a/hexrd/powder/wppf/wppfsupport.py b/hexrd/powder/wppf/wppfsupport.py
index 6320c3208..a549a3133 100644
--- a/hexrd/powder/wppf/wppfsupport.py
+++ b/hexrd/powder/wppf/wppfsupport.py
@@ -43,6 +43,7 @@
 from hexrd.core import constants
 import warnings

+
 def _generate_default_parameters_pseudovoight(params):
     """
     generate some default values of peak profile
@@ -51,30 +52,23 @@
     following:
     3 -> cagliotti (instrumental broadening)
     """
-    p = {"zero_error":[0., -1., 1., False],
-         "trns":[0.0, -1.0, 1.0, False],
-         "shft":[0.0,-1.0,1.0,False],
-         "U": [81.5, 0., np.inf, False],
-         "V": [1.0337, 0., np.inf, False],
-         "W": [5.18275, 0., np.inf, False]
-         }
+    p = {
+        "zero_error": [0.0, -1.0, 1.0, False],
+        "trns": [0.0, -1.0, 1.0, False],
+        "shft": [0.0, -1.0, 1.0, False],
+        "U": [81.5, 0.0, np.inf, False],
+        "V": [1.0337, 0.0, np.inf, False],
+        "W": [5.18275, 0.0, np.inf, False],
+    }

     for k, v in p.items():
         if isinstance(params, Parameters):
-            params.add(name=k,
-                       value=v[0],
-                       lb=v[1],
-                       ub=v[2],
-                       vary=v[3])
+            params.add(name=k, value=v[0], lb=v[1], ub=v[2], vary=v[3])
         elif isinstance(params, Parameters_lmfit):
-            params.add(name=k,
-                       value=v[0],
-                       min=v[1],
-                       max=v[2],
-                       vary=v[3])
-
-def _add_phase_dependent_parameters_pseudovoight(params,
-                                                 mat):
+            params.add(name=k, value=v[0], min=v[1], max=v[2], vary=v[3])
+
+
+def _add_phase_dependent_parameters_pseudovoight(params, mat):
     """
     add the particle size broadening term
     P : Gaussian scherrer broadening
@@ -82,111 +76,79 @@
     Y : Lorentzian microstrain broadening
     """
     name = mat.name
-    p = {"P": [0., 0., np.inf, False],
-         "X": [0.5665, 0., np.inf, False],
-         "Y": [1.90994, 0., np.inf, False]
-         }
+    p = {
+        "P": [0.0, 0.0, np.inf, False],
+        "X": [0.5665, 0.0, np.inf, False],
+        "Y": [1.90994, 0.0, np.inf, False],
+    }

     for k, v in p.items():
         pname = f"{name}_{k}"
         if isinstance(params, Parameters):
-            params.add(name=pname,
-                       value=v[0],
-                       lb=v[1],
-                       ub=v[2],
-                       vary=v[3])
+            params.add(name=pname, value=v[0], lb=v[1], ub=v[2], vary=v[3])
         elif isinstance(params, Parameters_lmfit):
-            params.add(name=pname,
-                       value=v[0],
-                       min=v[1],
-                       max=v[2],
-                       vary=v[3])
+            params.add(name=pname, value=v[0], min=v[1], max=v[2], vary=v[3])
+

 def _add_pvfcj_parameters(params):
-    p = {"HL":[1e-3,1e-7,1e-1,False],
-         "SL":[1e-3,1e-7,1e-1,False]
-         }
+    p = {"HL": [1e-3, 1e-7, 1e-1, False], "SL": [1e-3, 1e-7, 1e-1, False]}
     for k, v in p.items():
         if isinstance(params, Parameters):
-            params.add(name=k,
-                       value=v[0],
-                       lb=v[1],
-                       ub=v[2],
-                       vary=v[3])
+            params.add(name=k, value=v[0], lb=v[1], ub=v[2], vary=v[3])
         elif isinstance(params, Parameters_lmfit):
-            params.add(name=k,
-                       value=v[0],
-                       min=v[1],
-                       max=v[2],
-                       vary=v[3])
+            params.add(name=k, value=v[0], min=v[1], max=v[2], vary=v[3])
+

 def _add_pvpink_parameters(params):
-    p = {"alpha0":[14.4, -100., 100., False],
-         "alpha1":[0., -100., 100., False],
-         "beta0":[3.016, -100., 100., False],
-         "beta1":[-2.0, -100., 100., False]
-         }
+    p = {
+        "alpha0": [14.4, -100.0, 100.0, False],
+        "alpha1": [0.0, -100.0, 100.0, False],
+        "beta0": [3.016, -100.0, 100.0, False],
+        "beta1": [-2.0, -100.0, 100.0, False],
+    }
     for k, v in p.items():
         if isinstance(params, Parameters):
-            params.add(name=k,
-                       value=v[0],
-                       lb=v[1],
-                       ub=v[2],
-                       vary=v[3])
+            params.add(name=k, value=v[0], lb=v[1], ub=v[2], vary=v[3])
         elif isinstance(params, Parameters_lmfit):
-            params.add(name=k,
-                       value=v[0],
-                       min=v[1],
-                       max=v[2],
-                       vary=v[3])
-
-def _add_chebyshev_background(params,
-                              degree,
-                              init_val):
+            params.add(name=k, value=v[0], min=v[1], max=v[2], vary=v[3])
+
+
+def _add_chebyshev_background(params, degree, init_val):
     """
     add coefficients for chebyshev background
     polynomial. The initial values will be the
-    same as determined by WPPF.chebyshevfit
+    same as determined by WPPF.chebyshevfit
     routine
     """
-    for d in range(degree+1):
+    for d in range(degree + 1):
         n = f"bkg_{d}"
         if isinstance(params, Parameters):
-            params.add(name=n,
-                       value=init_val[d],
-                       lb=-np.inf,
-                       ub=np.inf,
-                       vary=False)
+            params.add(
+                name=n, value=init_val[d], lb=-np.inf, ub=np.inf, vary=False
+            )
         elif isinstance(params, Parameters_lmfit):
-            params.add(name=n,
-                       value=init_val[d],
-                       min=-np.inf,
-                       max=np.inf,
-                       vary=False)
-
-def _add_stacking_fault_parameters(params,
-                                   mat):
+            params.add(
+                name=n, value=init_val[d], min=-np.inf, max=np.inf, vary=False
+            )
+
+
+def _add_stacking_fault_parameters(params, mat):
     """
     add stacking fault parameters for cubic systems
     only
     """
     phase_name = mat.name
     if mat.sgnum == 225:
-        sf_alpha_name = f"{phase_name}_sf_alpha"
+        sf_alpha_name = f"{phase_name}_sf_alpha"
         twin_beta_name = f"{phase_name}_twin_beta"
         if isinstance(params, Parameters):
-            params.add(sf_alpha_name, value=0., lb=0.,
-                       ub=1., vary=False)
-            params.add(twin_beta_name, value=0., lb=0.,
-                       ub=1., vary=False)
+            params.add(sf_alpha_name, value=0.0, lb=0.0, ub=1.0, vary=False)
+            params.add(twin_beta_name, value=0.0, lb=0.0, ub=1.0, vary=False)
         elif isinstance(params, Parameters_lmfit):
-            params.add(sf_alpha_name, value=0., min=0.,
-                       max=1., vary=False)
-            params.add(twin_beta_name, value=0., min=0.,
-                       max=1., vary=False)
-
-def _add_Shkl_terms(params,
-                    mat,
-                    return_dict=None):
+            params.add(sf_alpha_name, value=0.0, min=0.0, max=1.0, vary=False)
+            params.add(twin_beta_name, value=0.0, min=0.0, max=1.0, vary=False)
+
+
+def _add_Shkl_terms(params, mat, return_dict=None):
     """
     add the SHKL terms in the anisotropic peak
     broadening contribution. this depends on the
@@ -197,11 +159,7 @@
     the hexagonal setting
     """
     mname = mat.name
-    valid_shkl,\
-    eq_const,\
-    rqd_index, \
-    trig_ptype = \
-    _required_shkl_names(mat)
+    valid_shkl, eq_const, rqd_index, trig_ptype = _required_shkl_names(mat)

     if return_dict is None:

@@ -210,37 +168,21 @@
         ne = f"{mname}_eta_fwhm"

         if isinstance(params, Parameters):
-            params.add(name=n,
-                       value=0.0,
-                       lb=0.0,
-                       ub=np.inf,
-                       vary=False)
+            params.add(name=n, value=0.0, lb=0.0, ub=np.inf, vary=False)
         elif isinstance(params, Parameters_lmfit):
-            params.add(name=n,
-                       value=0.0,
-                       min=0.0,
-                       max=np.inf,
-                       vary=False)
+            params.add(name=n, value=0.0, min=0.0, max=np.inf, vary=False)

         if isinstance(params, Parameters):
-            params.add(name=ne,
-                       value=0.5,
-                       lb=0.0,
-                       ub=1.0,
-                       vary=False)
+            params.add(name=ne, value=0.5, lb=0.0, ub=1.0, vary=False)
         elif isinstance(params, Parameters_lmfit):
-            params.add(name=ne,
-                       value=0.5,
-                       min=0.0,
-                       max=1.0,
-                       vary=False)
+            params.add(name=ne, value=0.5, min=0.0, max=1.0, vary=False)
     else:
         res = {}
         for s in valid_shkl:
             res[s] = 0.0
         return res, trig_ptype

-def _add_lp_to_params(params,
-                      mat):
+
+def _add_lp_to_params(params, mat):
     """
     03/12/2021 SS 1.0 original
     given a material, add the required
@@ -252,25 +194,24 @@
     name = [_lpname[i] for i in rid]
     phase_name = mat.name
     for n, l in zip(name, lp):
-        nn = phase_name+'_'+n
+        nn = phase_name + '_' + n
         """
         is n is a,b,c, it is one of the length units
         else it is an angle
         """
-        if(n in ['a', 'b', 'c']):
+        if n in ['a', 'b', 'c']:
             if isinstance(params, Parameters):
-                params.add(nn, value=l, lb=l-0.025,
-                           ub=l+0.025, vary=False)
+                params.add(nn, value=l, lb=l - 0.025, ub=l + 0.025, vary=False)
             elif isinstance(params, Parameters_lmfit):
-                params.add(nn, value=l, min=l-0.025,
-                           max=l+0.025, vary=False)
+                params.add(
+                    nn, value=l, min=l - 0.025, max=l + 0.025, vary=False
+                )
         else:
             if isinstance(params, Parameters):
-                params.add(nn, value=l, lb=l-1.,
-                           ub=l+1., vary=False)
+                params.add(nn, value=l, lb=l - 1.0, ub=l + 1.0, vary=False)
             elif isinstance(params, Parameters_lmfit):
-                params.add(nn, value=l, min=l-1.,
-                           max=l+1., vary=False)
+                params.add(nn, value=l, min=l - 1.0, max=l + 1.0, vary=False)
+

 def _add_atominfo_to_params(params, mat):
     """
@@ -293,80 +234,47 @@
         nn = f"{phase_name}_{elem}{atom_label[i]}_x"
         if isinstance(params, Parameters):
-            params.add(
-                nn, value=atom_pos[i, 0],
-                lb=0.0, ub=1.0,
-                vary=False)
+            params.add(nn, value=atom_pos[i, 0], lb=0.0, ub=1.0, vary=False)
         elif isinstance(params, Parameters_lmfit):
-            params.add(
-                nn, value=atom_pos[i, 0],
-                min=0.0, max=1.0,
-                vary=False)
+            params.add(nn, value=atom_pos[i, 0], min=0.0, max=1.0, vary=False)

         nn = f"{phase_name}_{elem}{atom_label[i]}_y"
         if isinstance(params, Parameters):
-            params.add(
-                nn, value=atom_pos[i, 1],
-                lb=0.0, ub=1.0,
-                vary=False)
+            params.add(nn, value=atom_pos[i, 1], lb=0.0, ub=1.0, vary=False)
         elif isinstance(params, Parameters_lmfit):
-            params.add(
-                nn, value=atom_pos[i, 1],
-                min=0.0, max=1.0,
-                vary=False)
+            params.add(nn, value=atom_pos[i, 1], min=0.0, max=1.0, vary=False)

         nn = f"{phase_name}_{elem}{atom_label[i]}_z"
         if isinstance(params, Parameters):
-            params.add(
-                nn, value=atom_pos[i, 2],
-                lb=0.0, ub=1.0,
-                vary=False)
+            params.add(nn, value=atom_pos[i, 2], lb=0.0, ub=1.0, vary=False)
         elif isinstance(params, Parameters_lmfit):
-            params.add(
-                nn, value=atom_pos[i, 2],
-                min=0.0, max=1.0,
-                vary=False)
+            params.add(nn, value=atom_pos[i, 2], min=0.0, max=1.0, vary=False)

         nn = f"{phase_name}_{elem}{atom_label[i]}_occ"
         if isinstance(params, Parameters):
-            params.add(nn, value=occ[i],
-                       lb=0.0, ub=1.0,
-                       vary=False)
+            params.add(nn, value=occ[i], lb=0.0, ub=1.0, vary=False)
         elif isinstance(params, Parameters_lmfit):
-            params.add(nn, value=occ[i],
-                       min=0.0, max=1.0,
-                       vary=False)
-        if(mat.aniU):
+            params.add(nn, value=occ[i], min=0.0, max=1.0, vary=False)
+        if mat.aniU:
             U = mat.U
             for j in range(6):
-                nn = (f"{phase_name}_{elem}{atom_label[i]}"
-                      f"_{_nameU[j]}")
+                nn = f"{phase_name}_{elem}{atom_label[i]}" f"_{_nameU[j]}"
                 if isinstance(params, Parameters):
                     params.add(
-                        nn, value=U[i, j],
-                        lb=-1e-3,
-                        ub=np.inf,
-                        vary=False)
+                        nn, value=U[i, j], lb=-1e-3, ub=np.inf, vary=False
+                    )
                 elif isinstance(params, Parameters_lmfit):
                     params.add(
-                        nn, value=U[i, j],
-                        min=-1e-3,
-                        max=np.inf,
-                        vary=False)
+                        nn, value=U[i, j], min=-1e-3, max=np.inf, vary=False
+                    )
         else:
             nn = f"{phase_name}_{elem}{atom_label[i]}_dw"
             if isinstance(params, Parameters):
-                params.add(
-                    nn, value=mat.U[i],
-                    lb=0.0, ub=np.inf,
-                    vary=False)
+                params.add(nn, value=mat.U[i], lb=0.0, ub=np.inf, vary=False)
             elif isinstance(params, Parameters_lmfit):
-                params.add(
-                    nn, value=mat.U[i],
-                    min=0.0, max=np.inf,
-                    vary=False)
-def _generate_default_parameters_LeBail(mat,
-                                        peakshape,
-                                        bkgmethod,
-                                        init_val=None,
-                                        ptype="wppf"):
+                params.add(nn, value=mat.U[i], min=0.0, max=np.inf, vary=False)
+
+
+def _generate_default_parameters_LeBail(
+    mat, peakshape, bkgmethod, init_val=None, ptype="wppf"
+):
     """
     @author:  Saransh Singh, Lawrence Livermore National Lab
     @date:    03/12/2021 SS 1.0 original
@@ -386,25 +294,24 @@
     elif peakshape == 2:
         _add_pvpink_parameters(params)
     else:
-        msg = (f"_generate_default_parameters_LeBail: "
-               f"unknown peak shape.")
+        msg = f"_generate_default_parameters_LeBail: " f"unknown peak shape."
         raise ValueError(msg)

     if "chebyshev" in bkgmethod:
         deg = bkgmethod["chebyshev"]
         if not (init_val is None):
-            if len(init_val) < deg+1:
-                msg = (f"size of init_val and degree "
-                       f"of polynomial are not consistent. "
-                       f"setting initial guess to zero.")
+            if len(init_val) < deg + 1:
+                msg = (
+                    f"size of init_val and degree "
+                    f"of polynomial are not consistent. "
+                    f"setting initial guess to zero."
+                )
                 warnings.warn(msg)
-                init_val = np.zeros([deg+1,])
+                init_val = np.zeros([deg + 1])
         else:
-            init_val = np.zeros([deg+1,])
+            init_val = np.zeros([deg + 1])

-        _add_chebyshev_background(params,
-                                  deg,
-                                  init_val)
+        _add_chebyshev_background(params, deg, init_val)

     if isinstance(mat, Phases_LeBail):
         """
@@ -449,7 +356,7 @@
         _add_Shkl_terms(params, m)
         _add_lp_to_params(params, m)
         _add_stacking_fault_parameters(params, m)
-
+
     elif isinstance(mat, dict):
         """
         dictionary of materials class
@@ -460,144 +367,114 @@
             _add_lp_to_params(params, m)
             _add_stacking_fault_parameters(params, m)
     else:
-        msg = (f"_generate_default_parameters: "
-               f"incorrect argument. only list, dict or "
-               f"Material is accpeted.")
+        msg = (
+            f"_generate_default_parameters: "
+            f"incorrect argument. only list, dict or "
+            f"Material is accepted."
+        )
         raise ValueError(msg)

     return params

+
 def _add_phase_fractions(mat, params):
     """
-    @author:  Saransh Singh, Lawrence Livermore National Lab
-    @date:    04/01/2021 SS 1.0 original
-    @details: ass phase fraction to params class
-    given a list/dict/single instance of material class
+    @author:  Saransh Singh, Lawrence Livermore National Lab
+    @date:    04/01/2021 SS 1.0 original
+    @details: add phase fraction to params class
+    given a list/dict/single instance of material class
     """
     if isinstance(mat, Phases_Rietveld):
         """
         phase file
         """
         pf = mat.phase_fraction
-        for ii,p in enumerate(mat):
-            name=f"{p}_phase_fraction"
+        for ii, p in enumerate(mat):
+            name = f"{p}_phase_fraction"
             if isinstance(params, Parameters):
-                params.add(
-                    name=name, value=pf[ii],
-                    lb=0.0, ub=1.0,
-                    vary=False)
+                params.add(name=name, value=pf[ii], lb=0.0, ub=1.0, vary=False)
             elif isinstance(params, Parameters_lmfit):
                 params.add(
-                    name=name, value=pf[ii],
-                    min=0.0, max=1.0,
-                    vary=False)
+                    name=name, value=pf[ii], min=0.0, max=1.0, vary=False
+                )
     elif isinstance(mat, Material):
         """
         just an instance of Materials class
        this part initializes the lattice parameters in the
         """
         p = mat.name
-        name=f"{p}_phase_fraction"
+        name = f"{p}_phase_fraction"
         if isinstance(params, Parameters):
-            params.add(
-                name=name, value=1.0,
-                lb=0.0, ub=1.0,
-                vary=False)
+            params.add(name=name, value=1.0, lb=0.0, ub=1.0, vary=False)
         elif isinstance(params, Parameters_lmfit):
-            params.add(
-                name=name, value=1.0,
-                min=0.0, max=1.0,
-                vary=False)
+            params.add(name=name, value=1.0, min=0.0, max=1.0, vary=False)
     elif isinstance(mat, list):
         """
         a list of materials class
         """
-        pf = [1./len(mat)]*len(mat)
-        for ii,m in enumerate(mat):
+        pf = [1.0 / len(mat)] * len(mat)
+        for ii, m in enumerate(mat):
             p = m.name
-            name=f"{p}_phase_fraction"
+            name = f"{p}_phase_fraction"
             if isinstance(params, Parameters):
-                params.add(
-                    name=name, value=pf[ii],
-                    lb=0.0, ub=1.0,
-                    vary=False)
+                params.add(name=name, value=pf[ii], lb=0.0, ub=1.0, vary=False)
             elif isinstance(params, Parameters_lmfit):
                 params.add(
-                    name=name, value=pf[ii],
-                    min=0.0, max=1.0,
-                    vary=False)
+                    name=name, value=pf[ii], min=0.0, max=1.0, vary=False
+                )
     elif isinstance(mat, dict):
         """
         dictionary of materials class
         """
-        pf = [1./len(mat)]*len(mat)
-        for ii, (k,m) in enumerate(mat.items()):
+        pf = [1.0 / len(mat)] * len(mat)
+        for ii, (k, m) in enumerate(mat.items()):
             p = m.name
-            name=f"{p}_phase_fraction"
+            name = f"{p}_phase_fraction"
             if isinstance(params, Parameters):
-                params.add(
-                    name=name, value=pf[ii],
-                    lb=0.0, ub=1.0,
-                    vary=False)
+                params.add(name=name, value=pf[ii], lb=0.0, ub=1.0, vary=False)
             elif isinstance(params, Parameters_lmfit):
                 params.add(
-                    name=name, value=pf[ii],
-                    min=0.0, max=1.0,
-                    vary=False)
+                    name=name, value=pf[ii], min=0.0, max=1.0, vary=False
+                )
     else:
-        msg = (f"_generate_default_parameters: "
-               f"incorrect argument. only list, dict or "
-               f"Material is accpeted.")
+        msg = (
+            f"_generate_default_parameters: "
+            f"incorrect argument. only list, dict or "
+            f"Material is accepted."
+        )
         raise ValueError(msg)

+
 def _add_extinction_parameters(mat, params):
     return params

+
 def _add_absorption_parameters(mat, params):
     return params

-def _generate_default_parameters_Rietveld(mat,
-                                          peakshape,
-                                          bkgmethod,
-                                          init_val=None,
-                                          ptype="wppf"):
+
+def _generate_default_parameters_Rietveld(
+    mat, peakshape, bkgmethod, init_val=None, ptype="wppf"
+):
     """
     @author:  Saransh Singh, Lawrence Livermore National Lab
     @date:    03/12/2021 SS 1.0 original
     @details: generate a default parameter class given a list/dict/
    single instance of material class
     """
-    params = _generate_default_parameters_LeBail(mat,
-                                                 peakshape,
-                                                 bkgmethod,
-                                                 init_val,
-                                                 ptype=ptype)
+    params = _generate_default_parameters_LeBail(
+        mat, peakshape, bkgmethod, init_val, ptype=ptype
+    )

     if ptype == "wppf":
-        params.add(name="scale",
-                   value=1.0,
-                   lb=0.0,
-                   ub=np.inf,
-                   vary=False)
-
-        params.add(name="Ph",
-                   value=1.0,
-                   lb=0.0,
-                   ub=1.0,
-                   vary=False)
+        params.add(name="scale", value=1.0, lb=0.0, ub=np.inf, vary=False)
+
+        params.add(name="Ph", value=1.0, lb=0.0, ub=1.0, vary=False)
     elif ptype == "lmfit":
-        params.add(name="scale",
-                   value=1.0,
-                   min=0.0,
-                   max=np.inf,
-                   vary=False)
-
-        params.add(name="Ph",
-                   value=1.0,
-                   min=0.0,
-                   max=1.0,
-                   vary=False)
+        params.add(name="scale", value=1.0, min=0.0, max=np.inf, vary=False)
+
+        params.add(name="Ph", value=1.0, min=0.0, max=1.0, vary=False)

     _add_phase_fractions(mat, params)
     _add_extinction_parameters(mat, params)
@@ -635,29 +512,35 @@
             _add_atominfo_to_params(params, m)

     else:
-        msg = (f"_generate_default_parameters: "
-               f"incorrect argument. only list, dict or "
-               f"Material is accpeted.")
+        msg = (
+            f"_generate_default_parameters: "
+            f"incorrect argument. only list, dict or "
+            f"Material is accepted."
+        )
         raise ValueError(msg)

     return params

+
+# fmt: off
 _shkl_name = ["s400", "s040", "s004", "s220", "s202", "s022",
               "s310", "s103", "s031", "s130", "s301", "s013",
               "s211", "s121", "s112"]

 _lpname = ['a', 'b', 'c', 'alpha', 'beta', 'gamma']
 _nameU = ['U11', 'U22', 'U33', 'U12', 'U13', 'U23']
-
+# fmt: on

 """
 function to take care of equality constraints
 """
+
+
 def _fill_shkl(x, eq_const):
     """
     fill all values of shkl when only reduced set
     is passed
     """
-    x_ret = np.zeros([15,])
-    for ii,n in enumerate(_shkl_name):
+    x_ret = np.zeros([15])
+    for ii, n in enumerate(_shkl_name):
         if n in x:
             x_ret[ii] = x[n]
         else:
@@ -666,15 +549,16 @@
         pass
     else:
         for c in eq_const:
-            x_ret[c[1]] = c[2]*x_ret[c[0]]
+            x_ret[c[1]] = c[2] * x_ret[c[0]]
     return x_ret

+
 def _required_shkl_names(mat):
     latticetype = mat.latticeType
     sgnum = mat.sgnum
     mname = mat.name
-    hmsym = pstr_spacegroup[sgnum-1].strip()
+    hmsym = pstr_spacegroup[sgnum - 1].strip()

     trig_ptype = False
     if latticetype == "trigonal" and hmsym[0] == "P":
@@ -691,6 +575,7 @@

     return valid_shkl, eq_constraints, rqd_index, trig_ptype

+
 def _add_texture_coefficients(crystal_sym, sample_sym, name, degree):
     """
     add the texture coefficients for a particular phase
@@ -708,6 +593,7 @@
     """
     pass

+
 def _add_texture_parameters(mat, degree):
     """
     @SS 06/22/2021 1.0 original
@@ -749,11 +635,14 @@
             _add_atominfo_to_params(params, m)

     else:
-        msg = (f"_generate_default_parameters: "
-               f"incorrect argument. only list, dict or "
-               f"Material is accpeted.")
+        msg = (
+            f"_generate_default_parameters: "
+            f"incorrect argument. only list, dict or "
+            f"Material is accepted."
+        )
         raise ValueError(msg)

+
 """
 this dictionary structure holds information for the shkl
 coefficeints needed for anisotropic broadening of peaks
@@ -762,30 +651,49 @@
 a weight factor (sometimes theres a factor of 2 or 3.)
""" _rqd_shkl = { -"cubic": [(0, 3), - ((0,1,1.),(0,2,1.),(3,4,1.),(3,5,1.))], -"hexagonal": [(0, 2, 4), -((0,1,1.),(0,6,2.),(0,9,2.),(0,3,3.), -(4,5,1.),(4,14,1.))], -"trigonal": [(0, 2, 4, 10), -((0,1,1.),(0,6,2.),(0,9,2.),(0,3,3.), -(4,5,1.),(4,14,1.), -(10,8,-1.),(10,12,1.5),(10,13,-1.5))], -"tetragonal": [(0, 2, 3, 4),((0,1,1.),(4,5,1.))], -"orthorhombic": [tuple(range(6)),()], -"monoclinic": [tuple(range(6))+(7, 10, 13),()], -"triclinic": [tuple(range(15)),()] + "cubic": [(0, 3), ((0, 1, 1.0), (0, 2, 1.0), (3, 4, 1.0), (3, 5, 1.0))], + "hexagonal": [ + (0, 2, 4), + ( + (0, 1, 1.0), + (0, 6, 2.0), + (0, 9, 2.0), + (0, 3, 3.0), + (4, 5, 1.0), + (4, 14, 1.0), + ), + ], + "trigonal": [ + (0, 2, 4, 10), + ( + (0, 1, 1.0), + (0, 6, 2.0), + (0, 9, 2.0), + (0, 3, 3.0), + (4, 5, 1.0), + (4, 14, 1.0), + (10, 8, -1.0), + (10, 12, 1.5), + (10, 13, -1.5), + ), + ], + "tetragonal": [(0, 2, 3, 4), ((0, 1, 1.0), (4, 5, 1.0))], + "orthorhombic": [tuple(range(6)), ()], + "monoclinic": [tuple(range(6)) + (7, 10, 13), ()], + "triclinic": [tuple(range(15)), ()], } + def _getnumber(arr): res = np.ones(arr.shape) for i in range(arr.shape[0]): - res[i] = np.sum(arr[0:i+1] == arr[i]) + res[i] = np.sum(arr[0 : i + 1] == arr[i]) res = res.astype(np.int32) return res + def _add_detector_geometry(params, instr): """ this function adds the geometry of the @@ -793,34 +701,49 @@ def _add_detector_geometry(params, instr): such that those can be refined as well """ if isinstance(instr, hexrd.core.instrument.HEDMInstrument): - for key,det in instr.detectors.items(): + for key, det in instr.detectors.items(): tvec = det.tvec tilt = det.tilt pnametvec = [f"{key}_tvec{i}" for i in range(3)] pnametilt = [f"{key}_tilt{i}" for i in range(3)] if isinstance(params, Parameters): - [params.add(name=pnametvec[i],value=tvec[i]) for i in range(3)] - [params.add(name=pnametilt[i],value=tilt[i]) for i in range(3)] + [ + params.add(name=pnametvec[i], value=tvec[i]) + for i in range(3) + ] + [ + params.add(name=pnametilt[i], value=tilt[i]) + for i in range(3) + ] elif isinstance(params, Parameters_lmfit): - [params.add(name=pnametvec[i],value=tvec[i]) for i in range(3)] - [params.add(name=pnametilt[i],value=tilt[i]) for i in range(3)] + [ + params.add(name=pnametvec[i], value=tvec[i]) + for i in range(3) + ] + [ + params.add(name=pnametilt[i], value=tilt[i]) + for i in range(3) + ] else: msg = "input is not an HEDMInstrument class" raise ValueError(msg) -def _add_background(params,lineouts,bkgdegree): + +def _add_background(params, lineouts, bkgdegree): for k in lineouts: pname = [f"{k}_bkg_C{ii}" for ii in range(bkgdegree)] shape = len(pname) if isinstance(params, Parameters): - [params.add(name=pname[i],value=0.0) for i in range(shape)] + [params.add(name=pname[i], value=0.0) for i in range(shape)] elif isinstance(params, Parameters_lmfit): - [params.add(name=pname[i],value=0.0) for i in range(shape)] + [params.add(name=pname[i], value=0.0) for i in range(shape)] + def striphkl(g): - return str(g)[1:-1].replace(" ","") + return str(g)[1:-1].replace(" ", "") + -def _add_intensity_parameters(params,hkls,Icalc,prefix): +def _add_intensity_parameters(params, hkls, Icalc, prefix): """ this routine adds the Icalc values as refinable parameters in the params parameter class @@ -829,19 +752,24 @@ def _add_intensity_parameters(params,hkls,Icalc,prefix): for k in Icalc[p]: shape = Icalc[p][k].shape[0] - pname = [f"{prefix}_{p}_{k}_I{striphkl(g)}" - for i,g in zip(range(shape),hkls[p][k])] + pname = [ + f"{prefix}_{p}_{k}_I{striphkl(g)}" + for 
i, g in zip(range(shape), hkls[p][k]) + ] if isinstance(params, Parameters): - [params.add(name=pname[i], - value=Icalc[p][k][i], - lb=0.0) for i in range(shape)] + [ + params.add(name=pname[i], value=Icalc[p][k][i], lb=0.0) + for i in range(shape) + ] elif isinstance(params, Parameters_lmfit): - [params.add(name=pname[i], - value=Icalc[p][k][i], - min=0.0) for i in range(shape)] + [ + params.add(name=pname[i], value=Icalc[p][k][i], min=0.0) + for i in range(shape) + ] + + background_methods = { 'spline': None, - 'chebyshev': [ { 'label': 'Chebyshev Polynomial Degree', @@ -849,26 +777,25 @@ def _add_intensity_parameters(params,hkls,Icalc,prefix): 'min': 0, 'max': 99, 'value': 3, - 'tooltip': 'The polynomial degree used ' - 'for polynomial fit.', + 'tooltip': 'The polynomial degree used ' 'for polynomial fit.', } ], 'snip1d': [ { 'label': 'Snip Width', 'type': float, - 'min': 0., + 'min': 0.0, 'value': 1.0, 'tooltip': 'Maximum width of peak to retain for ' - 'background estimation (in degrees).' + 'background estimation (in degrees).', }, { 'label': 'Snip Num Iterations', 'type': int, 'min': 1, 'max': 99, - 'value':2, - 'tooltip': 'number of snip iterations.' - } + 'value': 2, + 'tooltip': 'number of snip iterations.', + }, ], } diff --git a/hexrd/powder/wppf/xtal.py b/hexrd/powder/wppf/xtal.py index 65b35b642..3a7f140b6 100644 --- a/hexrd/powder/wppf/xtal.py +++ b/hexrd/powder/wppf/xtal.py @@ -69,25 +69,25 @@ def _calcxrayformfactor( f_anomalous_data, f_anomalous_data_sizes, ): - """we are using the following form factors for - x-aray scattering: - 1. coherent x-ray scattering, f0 tabulated in - Acta Cryst. (1995). A51,416-431 - 2. Anomalous x-ray scattering (complex (f'+if")) - tabulated in J. Phys. Chem. Ref. Data, 24, 71 (1995) - and J. Phys. Chem. Ref. Data, 29, 597 (2000). - 3. Thompson nuclear scattering, fNT tabulated in - Phys. Lett. B, 69, 281 (1977). - - the anomalous scattering is a complex number (f' + if"), - where the two terms are given by: - f' = f1 + frel - Z - f" = f2 - - f1 and f2 have been tabulated as a function of energy in - Anomalous.h5 in hexrd folder - - overall f = (f0 + f' + if" +fNT) + """we are using the following form factors for + x-aray scattering: + 1. coherent x-ray scattering, f0 tabulated in + Acta Cryst. (1995). A51,416-431 + 2. Anomalous x-ray scattering (complex (f'+if")) + tabulated in J. Phys. Chem. Ref. Data, 24, 71 (1995) + and J. Phys. Chem. Ref. Data, 29, 597 (2000). + 3. Thompson nuclear scattering, fNT tabulated in + Phys. Lett. B, 69, 281 (1977). 
+ + the anomalous scattering is a complex number (f' + if"), + where the two terms are given by: + f' = f1 + frel - Z + f" = f2 + + f1 and f2 have been tabulated as a function of energy in + Anomalous.h5 in hexrd folder + + overall f = (f0 + f' + if" +fNT) """ f_anomalous = _calcanomalousformfactor( diff --git a/scripts/install/install_build_dependencies.py b/scripts/install/install_build_dependencies.py index abc5299f0..3da27d7f3 100755 --- a/scripts/install/install_build_dependencies.py +++ b/scripts/install/install_build_dependencies.py @@ -49,8 +49,10 @@ def download_xtensor(path): shutil.rmtree(target_path) os.makedirs(path, exist_ok=True) - shutil.move(str(Path(temp_dir) / out_dir_name / 'include/xtensor'), - str(Path(path) / 'xtensor/xtensor')) + shutil.move( + str(Path(temp_dir) / out_dir_name / 'include/xtensor'), + str(Path(path) / 'xtensor/xtensor'), + ) return str(target_path) @@ -70,7 +72,8 @@ def download_xtensor_python(path): os.makedirs(path, exist_ok=True) shutil.move( str(Path(temp_dir) / out_dir_name / 'include/xtensor-python'), - str(Path(path) / 'xtensor-python/xtensor-python')) + str(Path(path) / 'xtensor-python/xtensor-python'), + ) return str(target_path) @@ -88,8 +91,10 @@ def download_xtl(path): shutil.rmtree(target_path) os.makedirs(path, exist_ok=True) - shutil.move(str(Path(temp_dir) / out_dir_name / 'include/xtl'), - str(Path(path) / 'xtl/xtl')) + shutil.move( + str(Path(temp_dir) / out_dir_name / 'include/xtl'), + str(Path(path) / 'xtl/xtl'), + ) return str(target_path) @@ -107,8 +112,10 @@ def download_xsimd(path): shutil.rmtree(target_path) os.makedirs(path, exist_ok=True) - shutil.move(str(Path(temp_dir) / out_dir_name / 'include/xsimd'), - str(Path(path) / 'xsimd/xsimd')) + shutil.move( + str(Path(temp_dir) / out_dir_name / 'include/xsimd'), + str(Path(path) / 'xsimd/xsimd'), + ) return str(target_path) @@ -142,8 +149,10 @@ def download_pybind11(path): shutil.rmtree(target_path) os.makedirs(path, exist_ok=True) - shutil.move(str(Path(temp_dir) / out_dir_name / 'include/pybind11'), - str(Path(path) / 'pybind11/pybind11')) + shutil.move( + str(Path(temp_dir) / out_dir_name / 'include/pybind11'), + str(Path(path) / 'pybind11/pybind11'), + ) return str(target_path) diff --git a/setup.py b/setup.py index 96722c297..84d3c246c 100644 --- a/setup.py +++ b/setup.py @@ -138,7 +138,7 @@ def get_cpp_extensions(): ] transforms_ext = Extension( - name='hexrd.extensions.transforms', + name='hexrd.core.extensions.transforms', sources=[str(cpp_transform_pkgdir / 'src/transforms.cpp')], extra_compile_args=extra_compile_args, include_dirs=include_dirs, diff --git a/tests/calibration/test_2xrs_calibration.py b/tests/calibration/test_2xrs_calibration.py index 4c5f66cd0..4e93b5581 100644 --- a/tests/calibration/test_2xrs_calibration.py +++ b/tests/calibration/test_2xrs_calibration.py @@ -7,7 +7,10 @@ from hexrd.core.material.material import load_materials_hdf5 from hexrd.hedm.instrument.hedm_instrument import HEDMInstrument -from hexrd.core.fitting.calibration import InstrumentCalibrator, PowderCalibrator +from hexrd.core.fitting.calibration import ( + InstrumentCalibrator, + PowderCalibrator, +) @pytest.fixture @@ -25,7 +28,8 @@ def test_2xrs_calibration(tardis_2xrs_examples_dir, test_data_dir): # Load the picks with open( - tardis_2xrs_examples_dir / 'tardis_2xrs_example.yml', 'r', + tardis_2xrs_examples_dir / 'tardis_2xrs_example.yml', + 'r', encoding='utf-8', ) as rf: conf = yaml.safe_load(rf) diff --git a/tests/calibration/test_calibration.py 
b/tests/calibration/test_calibration.py index b204fb639..cfaf31549 100644 --- a/tests/calibration/test_calibration.py +++ b/tests/calibration/test_calibration.py @@ -9,7 +9,11 @@ from hexrd.core.material.material import load_materials_hdf5 from hexrd.hedm.instrument.hedm_instrument import HEDMInstrument -from hexrd.core.fitting.calibration import InstrumentCalibrator, LaueCalibrator, PowderCalibrator +from hexrd.core.fitting.calibration import ( + InstrumentCalibrator, + LaueCalibrator, + PowderCalibrator, +) @pytest.fixture @@ -109,8 +113,12 @@ def test_calibration(calibration_dir, test_data_dir): ] tvecs = { - 'old': [np.array([x0[k] for k in vec_names]) for vec_names in tvec_names], - 'new': [np.array([x1[k] for k in vec_names]) for vec_names in tvec_names], + 'old': [ + np.array([x0[k] for k in vec_names]) for vec_names in tvec_names + ], + 'new': [ + np.array([x1[k] for k in vec_names]) for vec_names in tvec_names + ], } grain_param_names = [f'LiF_grain_param_{n}' for n in range(12)] @@ -133,9 +141,7 @@ def test_calibration(calibration_dir, test_data_dir): ) -def assert_errors_are_better( - tvecs, grain_params, diamond_a_vals, expected -): +def assert_errors_are_better(tvecs, grain_params, diamond_a_vals, expected): """ Make sure error has decreased during fitting """ diff --git a/tests/calibration/test_group_relative_constraints.py b/tests/calibration/test_group_relative_constraints.py index d6ed60a51..a238ca228 100644 --- a/tests/calibration/test_group_relative_constraints.py +++ b/tests/calibration/test_group_relative_constraints.py @@ -49,9 +49,7 @@ def ceria_example_data(ceria_examples_path: Path) -> dict[str, np.ndarray]: @pytest.fixture def dexelas_composite_instrument(ceria_examples_path: Path) -> HEDMInstrument: - instr_path = ( - ceria_examples_path / 'dexelas.yml' - ) + instr_path = ceria_examples_path / 'dexelas.yml' with open(instr_path, 'r') as rf: config = yaml.safe_load(rf) diff --git a/tests/calibration/test_hedm_calibration.py b/tests/calibration/test_hedm_calibration.py index 1d14f13f4..b14bc0852 100644 --- a/tests/calibration/test_hedm_calibration.py +++ b/tests/calibration/test_hedm_calibration.py @@ -41,10 +41,12 @@ def pull_spots_picks(calibration_dir): path = calibration_dir picks = [] for i in range(3): - picks.append({ - 'pick_xys': np.load(path / f'grain{i + 1}_picks.npz'), - 'hkls': np.load(path / f'grain{i + 1}_pick_hkls.npz'), - }) + picks.append( + { + 'pick_xys': np.load(path / f'grain{i + 1}_picks.npz'), + 'hkls': np.load(path / f'grain{i + 1}_pick_hkls.npz'), + } + ) return picks @@ -53,8 +55,9 @@ def grain_params(calibration_dir): return np.load(calibration_dir / 'grain_params.npy') -def test_calibration(dexelas_instrument, ruby_material, pull_spots_picks, - grain_params): +def test_calibration( + dexelas_instrument, ruby_material, pull_spots_picks, grain_params +): instr = dexelas_instrument diff --git a/tests/calibration/test_instrument_relative_constraints.py b/tests/calibration/test_instrument_relative_constraints.py index c3a7beb58..c4affad38 100644 --- a/tests/calibration/test_instrument_relative_constraints.py +++ b/tests/calibration/test_instrument_relative_constraints.py @@ -6,8 +6,13 @@ import pytest from hexrd.core import imageseries -from hexrd.core.fitting.calibration import InstrumentCalibrator, PowderCalibrator -from hexrd.core.fitting.calibration.relative_constraints import RelativeConstraintsType +from hexrd.core.fitting.calibration import ( + InstrumentCalibrator, + PowderCalibrator, +) +from 
hexrd.core.fitting.calibration.relative_constraints import ( + RelativeConstraintsType, +) from hexrd.core.imageseries.process import ProcessedImageSeries from hexrd.core.instrument import HEDMInstrument from hexrd.core.material import load_materials_hdf5, Material diff --git a/tests/calibration/test_powder_auto_pick.py b/tests/calibration/test_powder_auto_pick.py index 64058c4a6..b2ff063f9 100644 --- a/tests/calibration/test_powder_auto_pick.py +++ b/tests/calibration/test_powder_auto_pick.py @@ -24,9 +24,7 @@ def ceria_examples_path(eiger_examples_path: Path) -> Path: @pytest.fixture def eiger_instrument(ceria_examples_path: Path) -> HEDMInstrument: - instr_path = ( - ceria_examples_path / 'eiger_ceria_calibrated_composite.hexrd' - ) + instr_path = ceria_examples_path / 'eiger_ceria_calibrated_composite.hexrd' with h5py.File(instr_path, 'r') as rf: return HEDMInstrument(rf) @@ -92,7 +90,7 @@ def hkl_idx(hkl: tuple | list) -> int | None: ) calibrator.autopick_points( - fit_tth_tol=1., + fit_tth_tol=1.0, int_cutoff=1e-4, ) diff --git a/tests/config/common.py b/tests/config/common.py index cd1644fa4..1431c3fdd 100644 --- a/tests/config/common.py +++ b/tests/config/common.py @@ -15,7 +15,7 @@ 'file_stem': 'test_%%05d.dat', 'tempdir': tempfile.gettempdir(), 'pathsep': os.path.sep, - } +} class TestConfig(unittest.TestCase): @@ -37,10 +37,8 @@ def tearDownClass(cls): def setUp(self): self.cfgs = config.open(self.file_name) - def tearDown(self): - del(self.cfgs) - + del self.cfgs @classmethod def get_reference_data(cls): diff --git a/tests/config/test_find_orientations.py b/tests/config/test_find_orientations.py index adf0cfaa1..a6c1b4aab 100644 --- a/tests/config/test_find_orientations.py +++ b/tests/config/test_find_orientations.py @@ -6,8 +6,8 @@ from .common import TestConfig, test_data -reference_data = \ -""" +reference_data = ( + """ analysis_name: analysis working_dir: %(tempdir)s material: @@ -63,189 +63,136 @@ find_orientations: orientation_maps: file: null -""" % test_data - +""" + % test_data +) class TestFindOrientationsConfig(TestConfig): - @classmethod def get_reference_data(cls): return reference_data - def test_gvecs(self): self.assertFalse( self.cfgs[0].find_orientations.extract_measured_g_vectors - ) + ) self.assertTrue( self.cfgs[1].find_orientations.extract_measured_g_vectors - ) + ) self.assertTrue( self.cfgs[2].find_orientations.extract_measured_g_vectors - ) - + ) def test_threshold(self): - self.assertEqual( - self.cfgs[0].find_orientations.threshold, - 1 - ) - self.assertEqual( - self.cfgs[1].find_orientations.threshold, - 5 - ) - self.assertEqual( - self.cfgs[2].find_orientations.threshold, - 5 - ) - + self.assertEqual(self.cfgs[0].find_orientations.threshold, 1) + self.assertEqual(self.cfgs[1].find_orientations.threshold, 5) + self.assertEqual(self.cfgs[2].find_orientations.threshold, 5) def test_use_quaternion_grid(self): self.assertEqual( - self.cfgs[0].find_orientations.use_quaternion_grid, - None - ) + self.cfgs[0].find_orientations.use_quaternion_grid, None + ) self.assertRaises( IOError, - getattr, self.cfgs[1].find_orientations, 'use_quaternion_grid' - ) + getattr, + self.cfgs[1].find_orientations, + 'use_quaternion_grid', + ) self.assertEqual( self.cfgs[2].find_orientations.use_quaternion_grid, - test_data['existing_file'] - ) - + test_data['existing_file'], + ) class TestClusteringConfig(TestConfig): - @classmethod def get_reference_data(cls): return reference_data - def test_algorithm(self): self.assertEqual( - 
self.cfgs[0].find_orientations.clustering.algorithm, - 'dbscan' - ) + self.cfgs[0].find_orientations.clustering.algorithm, 'dbscan' + ) self.assertEqual( - self.cfgs[1].find_orientations.clustering.algorithm, - 'sph-dbscan' - ) + self.cfgs[1].find_orientations.clustering.algorithm, 'sph-dbscan' + ) self.assertEqual( - self.cfgs[2].find_orientations.clustering.algorithm, - 'fclusterdata' - ) + self.cfgs[2].find_orientations.clustering.algorithm, 'fclusterdata' + ) self.assertRaises( RuntimeError, - getattr, self.cfgs[3].find_orientations.clustering, 'algorithm', - ) - + getattr, + self.cfgs[3].find_orientations.clustering, + 'algorithm', + ) def test_completeness(self): self.assertRaises( RuntimeError, - getattr, self.cfgs[0].find_orientations.clustering, 'completeness', - ) + getattr, + self.cfgs[0].find_orientations.clustering, + 'completeness', + ) self.assertEqual( - self.cfgs[1].find_orientations.clustering.completeness, - 0.35 - ) - + self.cfgs[1].find_orientations.clustering.completeness, 0.35 + ) def test_radius(self): self.assertRaises( RuntimeError, - getattr, self.cfgs[0].find_orientations.clustering, 'radius', - ) - self.assertEqual( - self.cfgs[1].find_orientations.clustering.radius, - 1.0 - ) - - + getattr, + self.cfgs[0].find_orientations.clustering, + 'radius', + ) + self.assertEqual(self.cfgs[1].find_orientations.clustering.radius, 1.0) class TestOmegaConfig(TestConfig): - @classmethod def get_reference_data(cls): return reference_data - def test_period(self): self.assertEqual( - self.cfgs[0].find_orientations.omega.period, - [-180, 180] - ) - self.assertEqual( - self.cfgs[1].find_orientations.omega.period, - [0, 360] - ) + self.cfgs[0].find_orientations.omega.period, [-180, 180] + ) + self.assertEqual(self.cfgs[1].find_orientations.omega.period, [0, 360]) ## Do we allow ranges going backwards? 
- #self.assertEqual( + # self.assertEqual( # self.cfgs[2].find_orientations.omega.period, # [0, -360] # ) self.assertRaises( RuntimeError, - getattr, self.cfgs[3].find_orientations.omega, 'period' - ) - + getattr, + self.cfgs[3].find_orientations.omega, + 'period', + ) def test_tolerance(self): - self.assertEqual( - self.cfgs[0].find_orientations.omega.tolerance, - 0.5 - ) - self.assertEqual( - self.cfgs[1].find_orientations.omega.tolerance, - 1.0 - ) - self.assertEqual( - self.cfgs[2].find_orientations.omega.tolerance, - 3.0 - ) - + self.assertEqual(self.cfgs[0].find_orientations.omega.tolerance, 0.5) + self.assertEqual(self.cfgs[1].find_orientations.omega.tolerance, 1.0) + self.assertEqual(self.cfgs[2].find_orientations.omega.tolerance, 3.0) class TestEtaConfig(TestConfig): - @classmethod def get_reference_data(cls): return reference_data - def test_tolerance(self): - self.assertEqual( - self.cfgs[0].find_orientations.eta.tolerance, - 0.5 - ) - self.assertEqual( - self.cfgs[1].find_orientations.eta.tolerance, - 2.0 - ) - + self.assertEqual(self.cfgs[0].find_orientations.eta.tolerance, 0.5) + self.assertEqual(self.cfgs[1].find_orientations.eta.tolerance, 2.0) def test_mask(self): - self.assertEqual( - self.cfgs[0].find_orientations.eta.mask, - 5 - ) - self.assertEqual( - self.cfgs[1].find_orientations.eta.mask, - 10 - ) - self.assertEqual( - self.cfgs[2].find_orientations.eta.mask, - 10 - ) - + self.assertEqual(self.cfgs[0].find_orientations.eta.mask, 5) + self.assertEqual(self.cfgs[1].find_orientations.eta.mask, 10) + self.assertEqual(self.cfgs[2].find_orientations.eta.mask, 10) def test_range(self): @@ -258,100 +205,83 @@ def test_range(self): class TestSeedSearchConfig(TestConfig): - @classmethod def get_reference_data(cls): return reference_data - def test_hkl_seeds(self): self.assertRaises( RuntimeError, - getattr, self.cfgs[0].find_orientations.seed_search, 'hkl_seeds' - ) + getattr, + self.cfgs[0].find_orientations.seed_search, + 'hkl_seeds', + ) self.assertEqual( - self.cfgs[2].find_orientations.seed_search.hkl_seeds, - [1] - ) + self.cfgs[2].find_orientations.seed_search.hkl_seeds, [1] + ) self.assertEqual( - self.cfgs[3].find_orientations.seed_search.hkl_seeds, - [1, 2] - ) - + self.cfgs[3].find_orientations.seed_search.hkl_seeds, [1, 2] + ) def test_fiber_step(self): self.assertEqual( - self.cfgs[0].find_orientations.seed_search.fiber_step, - 0.5 - ) + self.cfgs[0].find_orientations.seed_search.fiber_step, 0.5 + ) self.assertEqual( - self.cfgs[1].find_orientations.seed_search.fiber_step, - 1.0 - ) + self.cfgs[1].find_orientations.seed_search.fiber_step, 1.0 + ) self.assertEqual( - self.cfgs[2].find_orientations.seed_search.fiber_step, - 2.0 - ) - + self.cfgs[2].find_orientations.seed_search.fiber_step, 2.0 + ) class TestOrientationMapsConfig(TestConfig): - @classmethod def get_reference_data(cls): return reference_data - def test_active_hkls(self): self.assertEqual( - self.cfgs[0].find_orientations.orientation_maps.active_hkls, - None - ) + self.cfgs[0].find_orientations.orientation_maps.active_hkls, None + ) self.assertEqual( - self.cfgs[1].find_orientations.orientation_maps.active_hkls, - [1] - ) + self.cfgs[1].find_orientations.orientation_maps.active_hkls, [1] + ) self.assertEqual( - self.cfgs[2].find_orientations.orientation_maps.active_hkls, - [1, 2] - ) - + self.cfgs[2].find_orientations.orientation_maps.active_hkls, [1, 2] + ) def test_bin_frames(self): self.assertEqual( - self.cfgs[0].find_orientations.orientation_maps.bin_frames, - 1 - ) + 
self.cfgs[0].find_orientations.orientation_maps.bin_frames, 1 + ) self.assertEqual( - self.cfgs[1].find_orientations.orientation_maps.bin_frames, - 2 - ) - + self.cfgs[1].find_orientations.orientation_maps.bin_frames, 2 + ) def test_file(self): self.assertEqual( self.cfgs[0].find_orientations.orientation_maps.file, - Path(test_data['tempdir']) / "analysis_actmat_eta-ome_maps.npz" + Path(test_data['tempdir']) / "analysis_actmat_eta-ome_maps.npz", ) self.assertEqual( self.cfgs[1].find_orientations.orientation_maps.file, - Path(test_data['tempdir']) / test_data['nonexistent_file'] - ) + Path(test_data['tempdir']) / test_data['nonexistent_file'], + ) self.assertEqual( str(self.cfgs[2].find_orientations.orientation_maps.file), - test_data['existing_file'] - ) + test_data['existing_file'], + ) def test_threshold(self): self.assertRaises( RuntimeError, getattr, self.cfgs[0].find_orientations.orientation_maps, - 'threshold' - ) + 'threshold', + ) self.assertEqual( - self.cfgs[1].find_orientations.orientation_maps.threshold, - 100 - ) + self.cfgs[1].find_orientations.orientation_maps.threshold, 100 + ) diff --git a/tests/config/test_fit_grains.py b/tests/config/test_fit_grains.py index 4d95a3cbf..ae3394c0b 100644 --- a/tests/config/test_fit_grains.py +++ b/tests/config/test_fit_grains.py @@ -3,8 +3,8 @@ from .common import TestConfig, test_data -reference_data = \ -""" +reference_data = ( + """ analysis_name: analysis --- fit_grains: @@ -48,12 +48,13 @@ tthmax: 1.2 sfacmax: 1.3 pintmax: 1.4 -""" % test_data +""" + % test_data +) class TestFitGrainsConfig(TestConfig): - @classmethod def get_reference_data(cls): return reference_data @@ -65,14 +66,10 @@ def test_do_fit(self): def test_estimate(self): self.assertEqual(self.cfgs[0].fit_grains.estimate, None) # nonexistent file needs to return None + self.assertEqual(self.cfgs[1].fit_grains.estimate, None) self.assertEqual( - self.cfgs[1].fit_grains.estimate, - None - ) - self.assertEqual( - self.cfgs[2].fit_grains.estimate, - test_data['existing_file'] - ) + self.cfgs[2].fit_grains.estimate, test_data['existing_file'] + ) def test_npdiv(self): self.assertEqual(self.cfgs[0].fit_grains.npdiv, 2) @@ -80,9 +77,8 @@ def test_npdiv(self): def test_threshold(self): self.assertRaises( - RuntimeError, - getattr, self.cfgs[0].fit_grains, 'threshold' - ) + RuntimeError, getattr, self.cfgs[0].fit_grains, 'threshold' + ) self.assertEqual(self.cfgs[1].fit_grains.threshold, 1850) def test_tth_max(self): @@ -90,9 +86,8 @@ def test_tth_max(self): self.assertFalse(self.cfgs[1].fit_grains.tth_max) self.assertEqual(self.cfgs[2].fit_grains.tth_max, 15) self.assertRaises( - RuntimeError, - getattr, self.cfgs[3].fit_grains, 'tth_max' - ) + RuntimeError, getattr, self.cfgs[3].fit_grains, 'tth_max' + ) class TestToleranceConfig(TestConfig): @@ -103,45 +98,24 @@ def get_reference_data(cls): def test_eta(self): self.assertRaises( - RuntimeError, - getattr, self.cfgs[0].fit_grains.tolerance, 'eta' - ) - self.assertEqual( - self.cfgs[1].fit_grains.tolerance.eta, - [1, 1] - ) - self.assertEqual( - self.cfgs[2].fit_grains.tolerance.eta, - [1, 2] - ) + RuntimeError, getattr, self.cfgs[0].fit_grains.tolerance, 'eta' + ) + self.assertEqual(self.cfgs[1].fit_grains.tolerance.eta, [1, 1]) + self.assertEqual(self.cfgs[2].fit_grains.tolerance.eta, [1, 2]) def test_omega(self): self.assertRaises( - RuntimeError, - getattr, self.cfgs[0].fit_grains.tolerance, 'omega' - ) - self.assertEqual( - self.cfgs[1].fit_grains.tolerance.omega, - [2, 2] - ) - self.assertEqual( - 
self.cfgs[2].fit_grains.tolerance.omega,
-            [3, 4]
-        )
+            RuntimeError, getattr, self.cfgs[0].fit_grains.tolerance, 'omega'
+        )
+        self.assertEqual(self.cfgs[1].fit_grains.tolerance.omega, [2, 2])
+        self.assertEqual(self.cfgs[2].fit_grains.tolerance.omega, [3, 4])
 
     def test_tth(self):
         self.assertRaises(
-            RuntimeError,
-            getattr, self.cfgs[0].fit_grains.tolerance, 'tth'
-        )
-        self.assertEqual(
-            self.cfgs[1].fit_grains.tolerance.tth,
-            [3, 3]
-        )
-        self.assertEqual(
-            self.cfgs[2].fit_grains.tolerance.tth,
-            [5, 6]
-        )
+            RuntimeError, getattr, self.cfgs[0].fit_grains.tolerance, 'tth'
+        )
+        self.assertEqual(self.cfgs[1].fit_grains.tolerance.tth, [3, 3])
+        self.assertEqual(self.cfgs[2].fit_grains.tolerance.tth, [5, 6])
 
 
 class TestExclusions(TestConfig):
diff --git a/tests/config/test_image_series.py b/tests/config/test_image_series.py
index ece0e333b..5a9d7c683 100644
--- a/tests/config/test_image_series.py
+++ b/tests/config/test_image_series.py
@@ -3,8 +3,8 @@
 
 from .common import TestConfig, test_data
 
-reference_data = \
-"""
+reference_data = (
+    """
 image_series:
   format: array
   data:
@@ -12,23 +12,20 @@
       args: a1
     - filename: f2
       args: a2
-""" % test_data
+"""
+    % test_data
+)
 
 
 class TestImageSeries(TestConfig):
-
     @classmethod
     def get_reference_data(cls):
         return reference_data
 
-
     def test_format(self):
-        self.assertEqual(
-            'array',
-            self.cfgs[0].get('image_series:format')
-        )
+        self.assertEqual('array', self.cfgs[0].get('image_series:format'))
 
     def test_data(self):
diff --git a/tests/config/test_instrument.py b/tests/config/test_instrument.py
index ace5661d0..3afcf906c 100644
--- a/tests/config/test_instrument.py
+++ b/tests/config/test_instrument.py
@@ -2,20 +2,25 @@
 
 import hexrd.core.instrument
 from .common import TestConfig, test_data
+
 try:
-    from hexrd.hedm.config.instrument import Instrument, Beam, OscillationStage, Detector
+    from hexrd.hedm.config.instrument import (
+        Instrument,
+        Beam,
+        OscillationStage,
+        Detector,
+    )
 except:
     pass
 
 import pytest
 
 pytest.skip(
-    "This module needs updating--skipping for now",
-    allow_module_level=True
+    "This module needs updating--skipping for now", allow_module_level=True
 )
 
-reference_data = \
-"""
+reference_data = (
+    """
 beam: {}
 ---
 beam:
@@ -55,7 +60,9 @@
   tilt_angles: [0.00044459111576242654, 0.003958638944891969, -0.47488346109306645]
 ---
 instrument: instrument.yaml
-""" % test_data
+"""
+    % test_data
+)
 
 
 class TestInstrument(TestConfig):
@@ -67,30 +74,44 @@ def get_reference_data(cls):
 
     def test_beam(self):
         icfg = Instrument(self.cfgs[1])
         b = icfg.beam
-        self.assertTrue(isinstance(b, hexrd.core.instrument.beam.Beam), "Failed to produce a Beam instance")
+        self.assertTrue(
+            isinstance(b, hexrd.core.instrument.beam.Beam),
+            "Failed to produce a Beam instance",
+        )
 
     def test_oscillation_stage(self):
         icfg = Instrument(self.cfgs[2])
         ostage = icfg.oscillation_stage
-        self.assertTrue(isinstance(ostage, hexrd.core.instrument.oscillation_stage.OscillationStage),
-                        "Failed to produce an OscillationStage instance")
+        self.assertTrue(
+            isinstance(
+                ostage,
+                hexrd.core.instrument.oscillation_stage.OscillationStage,
+            ),
+            "Failed to produce an OscillationStage instance",
+        )
 
     def test_detector(self):
         cfg = self.cfgs[3]
         icfg = Detector(cfg, 'GE1')
         det = icfg.detector(Beam(cfg).beam)
-        self.assertTrue(isinstance(det, hexrd.core.instrument.PlanarDetector),
-                        "Failed to produce an Detector instance")
+        self.assertTrue(
+            isinstance(det, hexrd.core.instrument.PlanarDetector),
+            "Failed to produce a Detector instance",
+        )
 
     def test_detector_dict(self):
         icfg = 
Instrument(self.cfgs[3]) dd = icfg.detector_dict - self.assertTrue(isinstance(dd, dict), - "Failed to produce an Detector Dictionary instance") + self.assertTrue( + isinstance(dd, dict), + "Failed to produce an Detector Dictionary instance", + ) for k in dd: d = dd[k] - self.assertTrue(isinstance(d, hexrd.core.instrument.PlanarDetector), - "Detector dictionary values are not detector instances") + self.assertTrue( + isinstance(d, hexrd.core.instrument.PlanarDetector), + "Detector dictionary values are not detector instances", + ) class TestBeam(TestConfig): @@ -102,7 +123,9 @@ def get_reference_data(cls): def test_beam_energy_dflt(self): bcfg = Beam(self.cfgs[0]) energy = bcfg._energy - self.assertEqual(energy, Beam.beam_energy_DFLT, "Incorrect default beam energy") + self.assertEqual( + energy, Beam.beam_energy_DFLT, "Incorrect default beam energy" + ) def test_beam_energy(self): bcfg = Beam(self.cfgs[1]) @@ -135,25 +158,37 @@ def get_reference_data(cls): def test_chi_dflt(self): oscfg = OscillationStage(self.cfgs[0]) - self.assertEqual(oscfg._chi, OscillationStage.chi_DFLT, "Incorrect default chi for oscillation stage") + self.assertEqual( + oscfg._chi, + OscillationStage.chi_DFLT, + "Incorrect default chi for oscillation stage", + ) def test_chi(self): oscfg = OscillationStage(self.cfgs[2]) - self.assertEqual(oscfg._chi, 0.05, "Incorrect default chi for oscillation stage") + self.assertEqual( + oscfg._chi, 0.05, "Incorrect default chi for oscillation stage" + ) def test_tvec_dflt(self): oscfg = OscillationStage(self.cfgs[0]) tvec_dflt = OscillationStage.tvec_DFLT tvec = oscfg._tvec - self.assertEqual(tvec[0], tvec_dflt[0], "Incorrect default translation vector") - self.assertEqual(tvec[1], tvec_dflt[1], "Incorrect default translation vector") - self.assertEqual(tvec[2], tvec_dflt[2], "Incorrect default translation vector") + self.assertEqual( + tvec[0], tvec_dflt[0], "Incorrect default translation vector" + ) + self.assertEqual( + tvec[1], tvec_dflt[1], "Incorrect default translation vector" + ) + self.assertEqual( + tvec[2], tvec_dflt[2], "Incorrect default translation vector" + ) def test_tvec(self): oscfg = OscillationStage(self.cfgs[2]) tvec = oscfg._tvec - self.assertEqual(tvec[0], 1., "Incorrect translation vector") - self.assertEqual(tvec[1], 2., "Incorrect translation vector") - self.assertEqual(tvec[2], 3., "Incorrect translation vector") + self.assertEqual(tvec[0], 1.0, "Incorrect translation vector") + self.assertEqual(tvec[1], 2.0, "Incorrect translation vector") + self.assertEqual(tvec[2], 3.0, "Incorrect translation vector") diff --git a/tests/config/test_material.py b/tests/config/test_material.py index dfdfbc36c..7d0bef5eb 100644 --- a/tests/config/test_material.py +++ b/tests/config/test_material.py @@ -3,8 +3,7 @@ from hexrd.hedm.config.utils import get_exclusion_parameters -reference_data = \ -""" +reference_data = """ material: definitions: %(existing_file)s # active: # not set to test error @@ -48,28 +47,22 @@ class TestMaterialConfig(TestConfig): - @classmethod def get_reference_data(cls): return reference_data % test_data - def test_definitions(self): self.assertEqual( - self.cfgs[0].material.definitions, - test_data['existing_file'] - ) + self.cfgs[0].material.definitions, test_data['existing_file'] + ) self.assertRaises( - IOError, - getattr, self.cfgs[1].material, 'definitions' - ) - + IOError, getattr, self.cfgs[1].material, 'definitions' + ) def test_active(self): self.assertRaises( - RuntimeError, - getattr, self.cfgs[0].material, 'active' - ) + 
RuntimeError, getattr, self.cfgs[0].material, 'active' + ) self.assertEqual(self.cfgs[1].material.active, 'ruby') self.assertEqual(self.cfgs[2].material.active, 'CeO2') diff --git a/tests/config/test_root.py b/tests/config/test_root.py index b68ad2096..3e73887dc 100644 --- a/tests/config/test_root.py +++ b/tests/config/test_root.py @@ -7,8 +7,8 @@ from hexrd.hedm import config -reference_data = \ -""" +reference_data = ( + """ analysis_name: analysis #working_dir: # not set to test defaulting to cwd --- @@ -29,7 +29,9 @@ multiprocessing: -1000 --- multiprocessing: foo -""" % test_data +""" + % test_data +) class TestRootConfig(TestConfig): @@ -41,8 +43,8 @@ def get_reference_data(cls): def test_analysis_dir(self): self.assertEqual( str(self.cfgs[0].analysis_dir), - os.path.join(os.getcwd(), 'analysis') - ) + os.path.join(os.getcwd(), 'analysis'), + ) def test_analysis_name(self): self.assertEqual(self.cfgs[0].analysis_name, 'analysis') @@ -66,9 +68,12 @@ def test_working_dir(self): str(self.cfgs[7].working_dir), test_data['existing_path'] ) self.assertRaises( - IOError, setattr, self.cfgs[7], 'working_dir', - test_data['nonexistent_path'] - ) + IOError, + setattr, + self.cfgs[7], + 'working_dir', + test_data['nonexistent_path'], + ) @skipIf(mp.cpu_count() < 2, 'test requires at least two cores') def test_multiprocessing(self): @@ -76,11 +81,11 @@ def test_multiprocessing(self): self.assertEqual(self.cfgs[0].multiprocessing, ncpus - 1) self.assertEqual(self.cfgs[1].multiprocessing, ncpus - 1) self.assertEqual(self.cfgs[2].multiprocessing, ncpus) - self.assertEqual(self.cfgs[3].multiprocessing, ncpus//2) + self.assertEqual(self.cfgs[3].multiprocessing, ncpus // 2) self.assertEqual(self.cfgs[4].multiprocessing, 2) self.assertEqual(self.cfgs[5].multiprocessing, ncpus) self.assertEqual(self.cfgs[6].multiprocessing, 1) - self.assertEqual(self.cfgs[7].multiprocessing, ncpus-1) + self.assertEqual(self.cfgs[7].multiprocessing, ncpus - 1) self.cfgs[7].multiprocessing = 1 self.assertEqual(self.cfgs[7].multiprocessing, 1) self.cfgs[7].multiprocessing = 'all' @@ -89,11 +94,10 @@ def test_multiprocessing(self): self.assertEqual(self.cfgs[7].multiprocessing, 2) self.assertRaises( RuntimeError, setattr, self.cfgs[7], 'multiprocessing', 'foo' - ) + ) self.assertRaises( RuntimeError, setattr, self.cfgs[7], 'multiprocessing', -2 - ) - + ) class TestSingleConfig(TestConfig): diff --git a/tests/find_orientations_testing.py b/tests/find_orientations_testing.py index 7d7fa43b4..6e3042c7a 100755 --- a/tests/find_orientations_testing.py +++ b/tests/find_orientations_testing.py @@ -67,12 +67,21 @@ def compare_quaternion_lists(new_quats, ref_quats, tol=0.05): + "is greater than threshold" ) + # ============================================================================= # ETA-OMEGA MAPS # ============================================================================= -EOMap = namedtuple('EOMap', - ['data', 'eta', 'eta_edges', 'omega', 'omega_edges', - 'hkl_indices', 'plane_data'] +EOMap = namedtuple( + 'EOMap', + [ + 'data', + 'eta', + 'eta_edges', + 'omega', + 'omega_edges', + 'hkl_indices', + 'plane_data', + ], ) _keys = [ @@ -83,7 +92,7 @@ def compare_quaternion_lists(new_quats, ref_quats, tol=0.05): 'omegas', 'omeEdges', 'planeData_args', - 'planeData_hkls' + 'planeData_hkls', ] @@ -97,7 +106,7 @@ def load(npz): e['omegas'], e['omeEdges'], e['iHKLList'], - plane_data(e) + plane_data(e), ) @@ -121,103 +130,110 @@ def plane_data(e): class Comparison: - def __init__(self, e1, e2): - self.e1 = e1 - self.e2 = e2 - 
self.tol = 1.0e-6
-
-    def compare(self):
-        """Compare whether maps are same or not"""
-        same = self.eta()[0] and self.omega()[0] and self.data()[0]
-        return same
-
-    def eta(self):
-        """compare etas"""
-        eta1 = self.e1.eta
-        eta2 = self.e2.eta
-        l1, l2 = len(eta1), len(eta2)
-        if l1 != l2:
-            msg = "eta: lengths differ: %d and %d" % (l1, l2)
-            logging.info(msg)
-            return False, msg
-
-        nrmdiff = np.linalg.norm(eta1 - eta2)
-        if nrmdiff < self.tol:
-            return True, "eta: same"
-        else:
-            msg = "eta: norm of difference: %s" % nrmdiff
+    def __init__(self, e1, e2):
+        self.e1 = e1
+        self.e2 = e2
+        self.tol = 1.0e-6
+
+    def compare(self):
+        """Compare whether maps are same or not"""
+        same = self.eta()[0] and self.omega()[0] and self.data()[0]
+        return same
+
+    def eta(self):
+        """compare etas"""
+        eta1 = self.e1.eta
+        eta2 = self.e2.eta
+        l1, l2 = len(eta1), len(eta2)
+        if l1 != l2:
+            msg = "eta: lengths differ: %d and %d" % (l1, l2)
+            logging.info(msg)
+            return False, msg
+
+        nrmdiff = np.linalg.norm(eta1 - eta2)
+        if nrmdiff < self.tol:
+            return True, "eta: same"
+        else:
+            msg = "eta: norm of difference: %s" % nrmdiff
+            logging.info(msg)
+            return False, msg
+
+    def omega(self):
+        """compare omegas"""
+        omega1 = self.e1.omega
+        omega2 = self.e2.omega
+        l1, l2 = len(omega1), len(omega2)
+        if l1 != l2:
+            msg = "omega: lengths differ: %d and %d" % (l1, l2)
+            logging.info(msg)
+            return False, msg
+
+        nrmdiff = np.linalg.norm(omega1 - omega2)
+        if nrmdiff < self.tol:
+            return True, "omega: same"
+        else:
+            msg = "omega: norm of difference: %s" % nrmdiff
+            logging.info(msg)
+            return False, msg
+
+    def hkl_indices(self):
+        hkl1, hkl2 = self.e1.hkl_indices, self.e2.hkl_indices
+        n1, n2 = len(hkl1), len(hkl2)
+        if n1 != n2:
+            return False, "hkl: lengths differ: %d and %d" % (n1, n2)
+        for i in range(n1):
+            if hkl1[i] != hkl2[i]:
+                return False, "hkl: indices not the same"
+
+        return True, "hkl: same"
+
+    def data(self):
+        d1, d2 = self.e1.data, self.e2.data
+        if d1.shape != d2.shape:
+            msg = "data shapes do not match: %s, %s" % (d1.shape, d2.shape)
+            logging.info(msg)
+            return False, msg
+
+        for ind in range(d1.shape[0]):
+            d1i, d2i = d1[ind], d2[ind]
+            nnan1 = np.count_nonzero(np.isnan(d1i))
+            nnan2 = np.count_nonzero(np.isnan(d2i))
+            # print("number nans: ", nnan1, nnan2)
+            if nnan1 > 0:
+                d1i = np.nan_to_num(d1i)
+            if nnan2 > 0:
+                d2i = np.nan_to_num(d2i)
+
+            nnz1 = np.count_nonzero(d1i)
+            nnz2 = np.count_nonzero(d2i)
+            if nnz1 != nnz2:
+                msg = "data: map %d: number nonzero differ: %d, %d" % (
+                    ind,
+                    nnz1,
+                    nnz2,
+                )
                 logging.info(msg)
                 return False, msg
 
-    def omega(self):
-        """compare omegas"""
-        omega1 = self.e1.omega
-        omega2 = self.e2.omega
-        l1, l2 = len(omega1), len(omega2)
-        if l1 != l2:
-            msg = "omega: lengths differ: %d and %d" % (l1, l2)
             logging.info(msg)
             return False, msg
 
-        nrmdiff = np.linalg.norm(omega1 - omega2)
-        if nrmdiff < self.tol:
-            return True, "omega: same"
+            overlapping = d1i.astype(bool) | d2i.astype(bool)
+            nnz = np.count_nonzero(overlapping)
+            if nnz != nnz1:
+                msg = "data: map %d: overlaps differ: %d, %d" % (
+                    ind,
+                    nnz1,
+                    nnz,
+                )
                 logging.info(msg)
                 return False, msg
 
+            d1over = d1i[overlapping]
+            d2over = d2i[overlapping]
+            diff = np.linalg.norm(d1over - d2over)
+            if diff < self.tol:
+                return True, "data: same"
             else:
-                msg = "omega: norm of difference: %s" % nrmdiff
+                msg = "data: map %s: map values differ" % (ind)
                 logging.info(msg)
                 return False, msg
 
-    def hkl_indices(self):
-        hkl1, hkl2 = self.e1.hkl_indices, self.e2.hkl_indices
-        n1, n2 = len(hkl1), len(hkl2)
-        if n1 != n2:
-            return False, "hkl: lengths differ: %d and %d" % (n1, n2)
-        for i in range(n1):
-            
if hkl1[i] != hkl2[i]: - return False, "hkl: indices not the same" - - return True, "hkl: same" - - def data(self): - d1, d2 = self.e1.data, self.e2.data - if d1.shape != d2.shape: - msg = "data shapes do not match: " % (d1.shape, d2.shape) + msg = "data: map %s: map values differ" % (ind) logging.info(msg) return False, msg - for ind in range(d1.shape[0]): - d1i, d2i = d1[ind], d2[ind] - nnan1 = np.count_nonzero(np.isnan(d1i)) - nnan2 = np.count_nonzero(np.isnan(d2i)) - # print("number nans: ", nnan1, nnan2) - if nnan1 > 0: - d1i = np.nan_to_num(d1i) - if nnan2 > 0: - d2i = np.nan_to_num(d1i) - - nnz1 = np.count_nonzero(d1i) - nnz2 = np.count_nonzero(d2i) - if nnz1 != nnz2: - msg = "data: map %d: number nonzero differ: %d, %d" % (ind, nnz1, nnz2) - logging.info(msg) - return False, msg - - overlapping = d1i.astype(bool) | d2i.astype(bool) - nnz = np.count_nonzero(overlapping) - if nnz != nnz1: - msg = "data: map %d: overlaps differ: %d, %d" % (ind, nnz1, nnz) - logging.info(msg) - return False, msg - - d1over = d1i[overlapping] - d2over = d2i[overlapping] - diff = np.linalg.norm(d1over - d2over) - if diff < self.tol: - return True, "data: same" - else: - msg = "data: map %s: map values differ" % (ind) - logging.info(msg) - return False, msg - - - return True, "data: same" + return True, "data: same" diff --git a/tests/fit_grains_check.py b/tests/fit_grains_check.py index e37930706..085a82db7 100755 --- a/tests/fit_grains_check.py +++ b/tests/fit_grains_check.py @@ -15,8 +15,9 @@ from hexrd.core import rotations as rot -def compare_grain_fits(fit_grain_params, ref_grain_params, - mtol=1.e-4, ctol=1.e-3, vtol=1.e-4): +def compare_grain_fits( + fit_grain_params, ref_grain_params, mtol=1.0e-4, ctol=1.0e-3, vtol=1.0e-4 +): """ Executes comparison between reference and fit grain parameters for ff-HEDM for the same initial parameters. @@ -47,11 +48,10 @@ def compare_grain_fits(fit_grain_params, ref_grain_params, ii = 0 for fg, rg in zip(fit_grain_params, ref_grain_params): # test_orientation - quats = rot.quatOfExpMap( - np.vstack([fg[:3], rg[:3]]).T + quats = rot.quatOfExpMap(np.vstack([fg[:3], rg[:3]]).T) + ang, mis = rot.misorientation( + quats[:, 0].reshape(4, 1), quats[:, 1].reshape(4, 1) ) - ang, mis = rot.misorientation(quats[:, 0].reshape(4, 1), - quats[:, 1].reshape(4, 1)) if ang <= mtol: cresult = True else: @@ -64,10 +64,12 @@ def compare_grain_fits(fit_grain_params, ref_grain_params, return False # test strain - vmat_fit = mutil.symmToVecMV(np.linalg.inv(mutil.vecMVToSymm(fg[6:])), - scale=False) - vmat_ref = mutil.symmToVecMV(np.linalg.inv(mutil.vecMVToSymm(rg[6:])), - scale=False) + vmat_fit = mutil.symmToVecMV( + np.linalg.inv(mutil.vecMVToSymm(fg[6:])), scale=False + ) + vmat_ref = mutil.symmToVecMV( + np.linalg.inv(mutil.vecMVToSymm(rg[6:])), scale=False + ) if np.linalg.norm(vmat_fit - vmat_ref, ord=1) > vtol: print("stretch components for grain %d do not agree." 
% ii)
             return False
 
@@ -79,26 +81,33 @@
 if __name__ == '__main__':
     parser = argparse.ArgumentParser(
-        description="Montage of spot data for a specifed G-vector family")
+        description="Montage of spot data for a specified G-vector family"
+    )
 
-    parser.add_argument('cfg_file',
-                        help="yaml HEDM config filename",
-                        type=str)
-    parser.add_argument('gt_ref',
-                        help="reference grain table filename",
-                        type=str)
+    parser.add_argument('cfg_file', help="yaml HEDM config filename", type=str)
+    parser.add_argument(
+        'gt_ref', help="reference grain table filename", type=str
+    )
 
-    parser.add_argument('-m', '--misorientation',
-                        help="misorientation threshold",
-                        type=float, default=1.e-4)
+    parser.add_argument(
+        '-m',
+        '--misorientation',
+        help="misorientation threshold",
+        type=float,
+        default=1.0e-4,
+    )
 
-    parser.add_argument('-c', '--centroid',
-                        help="centroid threshold",
-                        type=float, default=1.e-3)
+    parser.add_argument(
+        '-c',
+        '--centroid',
+        help="centroid threshold",
+        type=float,
+        default=1.0e-3,
+    )
 
-    parser.add_argument('-v', '--stretch',
-                        help="stretch threshold",
-                        type=float, default=1.e-4)
+    parser.add_argument(
+        '-v', '--stretch', help="stretch threshold", type=float, default=1.0e-4
+    )
 
     args = parser.parse_args()
 
@@ -112,14 +121,19 @@ def compare_grain_fits(fit_grain_params, ref_grain_params,
     cfg = config.open(cfg_file)[0]
     grains_table = np.loadtxt(gt_ref, ndmin=2)
     ref_grain_params = grains_table[:, 3:15]
-    gresults = fit_grains(cfg,
-                          grains_table,
-                          show_progress=False,
-                          ids_to_refine=None,
-                          write_spots_files=False)
+    gresults = fit_grains(
+        cfg,
+        grains_table,
+        show_progress=False,
+        ids_to_refine=None,
+        write_spots_files=False,
+    )
     cresult = compare_grain_fits(
-        np.vstack([i[-1] for i in gresults]), ref_grain_params,
-        mtol=mtol, ctol=ctol, vtol=vtol
+        np.vstack([i[-1] for i in gresults]),
+        ref_grain_params,
+        mtol=mtol,
+        ctol=ctol,
+        vtol=vtol,
     )
     if cresult:
         print("test passed")
diff --git a/tests/imageseries/common.py b/tests/imageseries/common.py
index 540f8f614..c5c56d05a 100644
--- a/tests/imageseries/common.py
+++ b/tests/imageseries/common.py
@@ -13,31 +13,35 @@ class ImageSeriesTest(unittest.TestCase):
     # random array from randint
     # a = np.random.randint(20, size=(3, 5, 7))
     random_array = np.array(
-        [[[2, 4, 5, 0, 14, 16, 17],
-          [18, 17, 5, 19, 2, 8, 17],
-          [0, 16, 10, 18, 13, 16, 9],
-          [2, 15, 13, 14, 12, 19, 9],
-          [0, 3, 4, 11, 8, 8, 3]],
-
-         [[8, 17, 15, 0, 0, 5, 17],
-          [7, 4, 8, 17, 2, 5, 3],
-          [14, 1, 12, 4, 6, 19, 2],
-          [13, 7, 5, 6, 17, 17, 6],
-          [16, 4, 10, 3, 6, 0, 14]],
-
-         [[17, 3, 8, 3, 15, 6, 18],
-          [13, 1, 3, 5, 9, 11, 15],
-          [1, 11, 15, 1, 19, 2, 0],
-          [5, 0, 12, 11, 12, 10, 11],
-          [6, 4, 16, 2, 16, 9, 18]]]
+        [
+            [
+                [2, 4, 5, 0, 14, 16, 17],
+                [18, 17, 5, 19, 2, 8, 17],
+                [0, 16, 10, 18, 13, 16, 9],
+                [2, 15, 13, 14, 12, 19, 9],
+                [0, 3, 4, 11, 8, 8, 3],
+            ],
+            [
+                [8, 17, 15, 0, 0, 5, 17],
+                [7, 4, 8, 17, 2, 5, 3],
+                [14, 1, 12, 4, 6, 19, 2],
+                [13, 7, 5, 6, 17, 17, 6],
+                [16, 4, 10, 3, 6, 0, 14],
+            ],
+            [
+                [17, 3, 8, 3, 15, 6, 18],
+                [13, 1, 3, 5, 9, 11, 15],
+                [1, 11, 15, 1, 19, 2, 0],
+                [5, 0, 12, 11, 12, 10, 11],
+                [6, 4, 16, 2, 16, 9, 18],
+            ],
+        ]
     )
 
 
 def make_array_ims():
     """returns both the array and the array imageseries"""
-    is_a = imageseries.open(
-        None, 'array', data=random_array, meta=make_meta()
-    )
+    is_a = imageseries.open(None, 'array', data=random_array, meta=make_meta())
     return random_array, is_a
 
 
@@ -47,7 +51,7 @@ def make_meta():
 
 
 def make_omega_meta(n):
-    
return np.linspace((0,0), (1, 1), n) + return np.linspace((0, 0), (1, 1), n) def compare(ims1, ims2): @@ -57,8 +61,8 @@ def compare(ims1, ims2): if ims1.dtype != ims2.dtype: raise ValueError( - "types do not match: %s is not %s" % - (repr(ims1.dtype), repr(ims2.dtype)) + "types do not match: %s is not %s" + % (repr(ims1.dtype), repr(ims2.dtype)) ) maxdiff = 0.0 diff --git a/tests/imageseries/test_formats.py b/tests/imageseries/test_formats.py index bece458fc..a0a8cf4cb 100644 --- a/tests/imageseries/test_formats.py +++ b/tests/imageseries/test_formats.py @@ -37,56 +37,59 @@ def test_fmth5(self): is_h = imageseries.open(self.h5file, self.fmt, path=self.h5path) diff = compare(self.is_a, is_h) - self.assertAlmostEqual(diff, 0., "h5 reconstruction failed") + self.assertAlmostEqual(diff, 0.0, "h5 reconstruction failed") self.assertTrue(compare_meta(self.is_a, is_h)) def test_fmth5_nparray(self): """HDF5 format with numpy array metadata""" key = 'np-array' - npa = np.array([0,2.0,1.3]) + npa = np.array([0, 2.0, 1.3]) self.is_a.metadata[key] = npa imageseries.write(self.is_a, self.h5file, self.fmt, path=self.h5path) is_h = imageseries.open(self.h5file, self.fmt, path=self.h5path) meta = is_h.metadata diff = np.linalg.norm(meta[key] - npa) - self.assertAlmostEqual(diff, 0., "h5 numpy array metadata failed") + self.assertAlmostEqual(diff, 0.0, "h5 numpy array metadata failed") def test_fmth5_nocompress(self): """HDF5 options: no compression""" - imageseries.write(self.is_a, self.h5file, self.fmt, - path=self.h5path, gzip=0) + imageseries.write( + self.is_a, self.h5file, self.fmt, path=self.h5path, gzip=0 + ) is_h = imageseries.open(self.h5file, self.fmt, path=self.h5path) diff = compare(self.is_a, is_h) - self.assertAlmostEqual(diff, 0., "h5 reconstruction failed") + self.assertAlmostEqual(diff, 0.0, "h5 reconstruction failed") self.assertTrue(compare_meta(self.is_a, is_h)) def test_fmth5_compress_err(self): """HDF5 options: compression level out of range""" with self.assertRaises(ValueError): - imageseries.write(self.is_a, self.h5file, self.fmt, - path=self.h5path, gzip=10) + imageseries.write( + self.is_a, self.h5file, self.fmt, path=self.h5path, gzip=10 + ) def test_fmth5_chunk(self): """HDF5 options: chunk size""" - imageseries.write(self.is_a, self.h5file, self.fmt, - path=self.h5path, chunk_rows=0) + imageseries.write( + self.is_a, self.h5file, self.fmt, path=self.h5path, chunk_rows=0 + ) is_h = imageseries.open(self.h5file, self.fmt, path=self.h5path) diff = compare(self.is_a, is_h) - self.assertAlmostEqual(diff, 0., "h5 reconstruction failed") + self.assertAlmostEqual(diff, 0.0, "h5 reconstruction failed") self.assertTrue(compare_meta(self.is_a, is_h)) class TestFormatFrameCache(ImageSeriesFormatTest): def setUp(self): - self.fcfile = os.path.join(self.tmpdir, 'frame-cache.npz') + self.fcfile = os.path.join(self.tmpdir, 'frame-cache.npz') self.fmt = 'frame-cache' self.thresh = 0.5 self.style = 'npz' - self.cache_file='frame-cache.npz' + self.cache_file = 'frame-cache.npz' _, self.is_a = make_array_ims() def tearDown(self): @@ -94,44 +97,59 @@ def tearDown(self): def test_fmtfc(self): """save/load frame-cache format""" - imageseries.write(self.is_a, self.fcfile, self.fmt, style=self.style, - threshold=self.thresh, cache_file=self.cache_file) + imageseries.write( + self.is_a, + self.fcfile, + self.fmt, + style=self.style, + threshold=self.thresh, + cache_file=self.cache_file, + ) is_fc = imageseries.open(self.fcfile, self.fmt, style=self.style) diff = compare(self.is_a, is_fc) - 
self.assertAlmostEqual(diff, 0., "frame-cache reconstruction failed") + self.assertAlmostEqual(diff, 0.0, "frame-cache reconstruction failed") self.assertTrue(compare_meta(self.is_a, is_fc)) def test_fmtfc_nocache_file(self): """save/load frame-cache format with no cache_file arg""" imageseries.write( - self.is_a, self.fcfile, self.fmt, - threshold=self.thresh, style=self.style + self.is_a, + self.fcfile, + self.fmt, + threshold=self.thresh, + style=self.style, ) is_fc = imageseries.open(self.fcfile, self.fmt, style=self.style) diff = compare(self.is_a, is_fc) - self.assertAlmostEqual(diff, 0., "frame-cache reconstruction failed") + self.assertAlmostEqual(diff, 0.0, "frame-cache reconstruction failed") self.assertTrue(compare_meta(self.is_a, is_fc)) def test_fmtfc_nparray(self): """frame-cache format with numpy array metadata""" key = 'np-array' - npa = np.array([0,2.0,1.3]) + npa = np.array([0, 2.0, 1.3]) self.is_a.metadata[key] = npa - imageseries.write(self.is_a, self.fcfile, self.fmt, style=self.style, - threshold=self.thresh, cache_file=self.cache_file + imageseries.write( + self.is_a, + self.fcfile, + self.fmt, + style=self.style, + threshold=self.thresh, + cache_file=self.cache_file, ) is_fc = imageseries.open(self.fcfile, self.fmt, style=self.style) meta = is_fc.metadata diff = np.linalg.norm(meta[key] - npa) - self.assertAlmostEqual(diff, 0., - "frame-cache numpy array metadata failed") + self.assertAlmostEqual( + diff, 0.0, "frame-cache numpy array metadata failed" + ) class TestFormatFrameCache_FCH5(TestFormatFrameCache): def setUp(self): - self.fcfile = os.path.join(self.tmpdir, 'frame-cache.fch5') + self.fcfile = os.path.join(self.tmpdir, 'frame-cache.fch5') self.fmt = 'frame-cache' self.style = 'fch5' self.thresh = 0.5 @@ -143,7 +161,13 @@ def test_fmtfc_nested_metadata(self): metadata = {'int': 1, 'array': np.array([1, 2, 3])} self.is_a.metadata["key"] = metadata - imageseries.write(self.is_a, self.fcfile, self.fmt, style=self.style, - threshold=self.thresh, cache_file=self.cache_file) + imageseries.write( + self.is_a, + self.fcfile, + self.fmt, + style=self.style, + threshold=self.thresh, + cache_file=self.cache_file, + ) is_fc = imageseries.open(self.fcfile, self.fmt, style=self.style) self.assertTrue(compare_meta(self.is_a, is_fc)) diff --git a/tests/imageseries/test_omega.py b/tests/imageseries/test_omega.py index 51575196c..e8d74d0a1 100644 --- a/tests/imageseries/test_omega.py +++ b/tests/imageseries/test_omega.py @@ -5,6 +5,7 @@ from hexrd.core import imageseries from hexrd.core.imageseries.omega import OmegaSeriesError, OmegaImageSeries + class TestOmegaSeries(ImageSeriesTest): @staticmethod @@ -26,7 +27,7 @@ def test_nframes_mismatch(self): def test_negative_delta(self): om = np.zeros((3, 2)) - om[0,1] = -0.5 + om[0, 1] = -0.5 m = dict(omega=om, dtype=float) ims = self.make_ims(3, m) with self.assertRaises(OmegaSeriesError): @@ -34,10 +35,10 @@ def test_negative_delta(self): def test_one_wedge(self): nf = 5 - a = np.linspace(0, nf+1, nf+1) + a = np.linspace(0, nf + 1, nf + 1) om = np.zeros((nf, 2)) - om[:,0] = a[:-1] - om[:,1] = a[1:] + om[:, 0] = a[:-1] + om[:, 1] = a[1:] m = dict(omega=om, dtype=float) ims = self.make_ims(nf, m) oms = OmegaImageSeries(ims) @@ -45,10 +46,10 @@ def test_one_wedge(self): def test_two_wedges(self): nf = 5 - a = np.linspace(0, nf+1, nf+1) + a = np.linspace(0, nf + 1, nf + 1) om = np.zeros((nf, 2)) - om[:,0] = a[:-1] - om[:,1] = a[1:] + om[:, 0] = a[:-1] + om[:, 1] = a[1:] om[3:, :] += 0.1 m = dict(omega=om, dtype=float) ims = 
self.make_ims(nf, m) @@ -57,10 +58,10 @@ def test_two_wedges(self): def test_compare_omegas(self): nf = 5 - a = np.linspace(0, nf+1, nf+1) + a = np.linspace(0, nf + 1, nf + 1) om = np.zeros((nf, 2)) - om[:,0] = a[:-1] - om[:,1] = a[1:] + om[:, 0] = a[:-1] + om[:, 1] = a[1:] om[3:, :] += 0.1 m = dict(omega=om, dtype=float) ims = self.make_ims(nf, m) @@ -68,21 +69,21 @@ def test_compare_omegas(self): domega = om - oms.omegawedges.omegas dnorm = np.linalg.norm(domega) - msg='omegas from wedges do not match originals' - self.assertAlmostEqual(dnorm, 0., msg=msg) + msg = 'omegas from wedges do not match originals' + self.assertAlmostEqual(dnorm, 0.0, msg=msg) def test_wedge_delta(self): nf = 5 - a = np.linspace(0, nf+1, nf+1) + a = np.linspace(0, nf + 1, nf + 1) om = np.zeros((nf, 2)) - om[:,0] = a[:-1] - om[:,1] = a[1:] + om[:, 0] = a[:-1] + om[:, 1] = a[1:] om[3:, :] += 0.1 m = dict(omega=om, dtype=float) ims = self.make_ims(nf, m) oms = OmegaImageSeries(ims) - mydelta =om[nf - 1, 1] - om[nf - 1, 0] + mydelta = om[nf - 1, 1] - om[nf - 1, 0] d = oms.wedge(oms.nwedges - 1) self.assertAlmostEqual(d['delta'], mydelta) diff --git a/tests/imageseries/test_process.py b/tests/imageseries/test_process.py index 322117fce..81b25e1eb 100644 --- a/tests/imageseries/test_process.py +++ b/tests/imageseries/test_process.py @@ -5,6 +5,7 @@ from hexrd.core import imageseries from hexrd.core.imageseries import process, ImageSeries + class TestImageSeriesProcess(ImageSeriesTest): def _runfliptest(self, a, flip, aflip): @@ -14,7 +15,7 @@ def _runfliptest(self, a, flip, aflip): is_aflip = imageseries.open(None, 'array', data=aflip) diff = compare(is_aflip, is_p) msg = "flipped [%s] image series failed" % flip - self.assertAlmostEqual(diff, 0., msg=msg) + self.assertAlmostEqual(diff, 0.0, msg=msg) def test_process(self): """Processed image series""" @@ -22,7 +23,7 @@ def test_process(self): is_p = process.ProcessedImageSeries(is_a, []) diff = compare(is_a, is_p) msg = "processed image series failed to reproduce original" - self.assertAlmostEqual(diff, 0., msg) + self.assertAlmostEqual(diff, 0.0, msg) def test_process_flip_t(self): """Processed image series: flip transpose""" @@ -60,7 +61,7 @@ def test_process_flip_r90(self): self._runfliptest(a, flip, aflip) def test_process_flip_r270(self): - """Processed image series: flip clockwise 90 """ + """Processed image series: flip clockwise 90""" flip = 'cw90' a, _ = make_array_ims() aflip = np.transpose(a, (0, 2, 1))[:, :, ::-1] @@ -71,12 +72,12 @@ def test_process_dark(self): a, _ = make_array_ims() dark = np.ones_like(a[0]) is_a = imageseries.open(None, 'array', data=a) - apos = np.where(a >= 1, a-1, 0) + apos = np.where(a >= 1, a - 1, 0) is_a1 = imageseries.open(None, 'array', data=apos) ops = [('dark', dark)] is_p = process.ProcessedImageSeries(is_a, ops) diff = compare(is_a1, is_p) - self.assertAlmostEqual(diff, 0., msg="dark image failed") + self.assertAlmostEqual(diff, 0.0, msg="dark image failed") def test_process_framelist(self): a, _ = make_array_ims() @@ -87,10 +88,9 @@ def test_process_framelist(self): is_p = process.ProcessedImageSeries(is_a, ops, frame_list=frames) is_a2 = imageseries.open(None, 'array', data=a[tuple(frames), ...]) diff = compare(is_a2, is_p) - self.assertAlmostEqual(diff, 0., msg="frame list failed") + self.assertAlmostEqual(diff, 0.0, msg="frame list failed") self.assertEqual(len(is_p), len(is_p.metadata["omega"])) - def test_process_shape(self): a, _ = make_array_ims() is_a = imageseries.open(None, 'array', data=a) diff --git 
a/tests/imageseries/test_stats.py b/tests/imageseries/test_stats.py
index 76e3c0c8a..0e2b5d679 100644
--- a/tests/imageseries/test_stats.py
+++ b/tests/imageseries/test_stats.py
@@ -18,7 +18,7 @@ def test_stats_average(self):
         is_avg = stats.average(is_a)
         np_avg = np.average(a, axis=0).astype(np.float32)
         err = np.linalg.norm(np_avg - is_avg)
-        self.assertAlmostEqual(err, 0., msg="stats.average failed")
+        self.assertAlmostEqual(err, 0.0, msg="stats.average failed")
         self.assertEqual(is_avg.dtype, np.float32)
 
     def test_stats_median(self):
@@ -27,7 +27,7 @@
         ismed = stats.median(is_a)
         amed = np.median(a, axis=0)
         err = np.linalg.norm(amed - ismed)
-        self.assertAlmostEqual(err, 0., msg="stats.median failed")
+        self.assertAlmostEqual(err, 0.0, msg="stats.median failed")
         self.assertEqual(ismed.dtype, np.float32)
 
     def test_stats_max(self):
@@ -36,27 +36,25 @@
         ismax = stats.max(is_a)
         amax = np.max(a, axis=0)
         err = np.linalg.norm(amax - ismax)
-        self.assertAlmostEqual(err, 0., msg="stats.max failed")
+        self.assertAlmostEqual(err, 0.0, msg="stats.max failed")
         self.assertEqual(ismax.dtype, is_a.dtype)
 
-
     def test_stats_min(self):
         """imageseries.stats: min"""
         a, is_a = make_array_ims()
         ismin = stats.min(is_a)
         amin = np.min(a, axis=0)
         err = np.linalg.norm(amin - ismin)
-        self.assertAlmostEqual(err, 0., msg="stats.min failed")
+        self.assertAlmostEqual(err, 0.0, msg="stats.min failed")
         self.assertEqual(ismin.dtype, is_a.dtype)
 
-
     def test_stats_percentile(self):
         """imageseries.stats: percentile"""
         a, is_a = make_array_ims()
         isp90 = stats.percentile(is_a, 90)
         ap90 = np.percentile(a, 90, axis=0).astype(np.float32)
         err = np.linalg.norm(ap90 - isp90)
-        self.assertAlmostEqual(err, 0., msg="stats.percentile failed")
+        self.assertAlmostEqual(err, 0.0, msg="stats.percentile failed")
         self.assertEqual(isp90.dtype, np.float32)
 
 # These tests compare chunked operations (iterators) to non-chunked ops
@@ -71,13 +69,13 @@ def test_stats_average_chunked(self):
         for ismed1 in stats.average_iter(is_a, 1):
             pass
         err = np.linalg.norm(a_avg - ismed1)
-        self.assertAlmostEqual(err, 0., msg="stats.average failed (1 chunk)")
+        self.assertAlmostEqual(err, 0.0, msg="stats.average failed (1 chunk)")
 
         # Run with 2 chunks
         for ismed2 in stats.average_iter(is_a, 2):
             pass
         err = np.linalg.norm(a_avg - ismed2)
-        self.assertAlmostEqual(err, 0., msg="stats.average failed")
+        self.assertAlmostEqual(err, 0.0, msg="stats.average failed")
 
     def test_stats_median_chunked(self):
         """imageseries.stats: chunked median"""
@@ -89,20 +87,20 @@
         for ismed1 in stats.median_iter(is_a, 1):
             pass
         err = np.linalg.norm(a_med - ismed1)
-        self.assertAlmostEqual(err, 0., msg="stats.average failed (1 chunk)")
+        self.assertAlmostEqual(err, 0.0, msg="stats.median failed (1 chunk)")
 
         # Run with 2 chunks
         for ismed2 in stats.median_iter(is_a, 2):
             pass
         err = np.linalg.norm(a_med - ismed2)
-        self.assertAlmostEqual(err, 0., msg="stats.average failed (2 chunks)")
+        self.assertAlmostEqual(err, 0.0, msg="stats.median failed (2 chunks)")
 
         # Run with 3 chunks, with buffer
         for ismed3 in stats.median_iter(is_a, 3, use_buffer=True):
             pass
         err = np.linalg.norm(a_med - ismed3)
         self.assertAlmostEqual(
-            err, 0., msg="stats.average failed (3 chunks, buffer)"
+            err, 0.0, msg="stats.median failed (3 chunks, buffer)"
         )
 
         # Run with 3 chunks, no buffer
@@ -110,5 +108,5 @@
             pass
         err = np.linalg.norm(a_med - ismed3)
         self.assertAlmostEqual(
-            err, 0., msg="stats.average failed (3 chunks, no buffer)"
+            err, 0.0, msg="stats.median failed (3 chunks, no buffer)"
         )
diff --git a/tests/planedata/test_init.py b/tests/planedata/test_init.py
index 673f5a651..d102771b9 100644
--- a/tests/planedata/test_init.py
+++ b/tests/planedata/test_init.py
@@ -16,17 +16,9 @@ def test_init_with_data_and_from_copy():
     pd2 = PlaneData(hkls, pd)
     pd3 = PlaneData(None, pd)
     pd4 = PlaneData(
-        None,
-        pd,
-        doTThSort=False,
-        exclusions=[False, False, False]
-    )
-    pd5 = PlaneData(
-        None,
-        pd,
-        tThMax=6.0,
-        tThWidth=3.0
+        None, pd, doTThSort=False, exclusions=[False, False, False]
     )
+    pd5 = PlaneData(None, pd, tThMax=6.0, tThWidth=3.0)
 
     assert pd2.hkls.shape == pd3.hkls.shape and np.all(pd2.hkls == pd3.hkls)
     assert pd2.hkls.shape == pd4.hkls.shape and np.all(pd2.hkls == pd4.hkls)
diff --git a/tests/planedata/test_with_data.py b/tests/planedata/test_with_data.py
index 82a8d2ccf..bbb3876ba 100644
--- a/tests/planedata/test_with_data.py
+++ b/tests/planedata/test_with_data.py
@@ -24,8 +24,9 @@ def materials(test_data_dir):
     for mat_name in material_names:
         # Load {test_data_dir}/materials/{mat_name}.cif
         mat = Material(
-            mat_name, str(test_data_dir) + "/materials/" + mat_name + ".cif",
-            sgsetting=0
+            mat_name,
+            str(test_data_dir) + "/materials/" + mat_name + ".cif",
+            sgsetting=0,
         )
         mats[mat_name] = mat.planeData
     return mats
@@ -70,8 +71,9 @@ def test_plane_data_with_data(test_data_dir, materials):
     assertEqualNumpyArrays(pd.powder_intensity, obj['powder_intensity'])
     # With the identity symmetry, zero rotation may have some sign issues,
     # but the rotation matrix should be pretty much the exact same
-    assertEqualNumpyArrays(rotMatOfQuat(pd.getQSym()),
-                           rotMatOfQuat(obj['q_sym']))
+    assertEqualNumpyArrays(
+        rotMatOfQuat(pd.getQSym()), rotMatOfQuat(obj['q_sym'])
+    )
     assert pd.nHKLs == obj['nHKLs']
     assert pd.getNhklRef() == obj['nhklRef']
     assertEqualNumpyArrays(pd.getMultiplicity(), obj['multiplicity'])
diff --git a/tests/test_absorption_correction.py b/tests/test_absorption_correction.py
index c8fa5c840..26a764f8c 100644
--- a/tests/test_absorption_correction.py
+++ b/tests/test_absorption_correction.py
@@ -66,8 +66,7 @@ def test_absorption_correction(simulated_tardis_dir, test_data_dir):
     transmissions = instr.calc_transmission()
 
     # Normalize so that the max transmission across all detectors is 1
-    max_transmission = max(
-        [np.nanmax(v) for v in transmissions.values()])
+    max_transmission = max([np.nanmax(v) for v in transmissions.values()])
     transmissions = {k: v / max_transmission for k, v in transmissions.items()}
 
     # Now compare to our reference
diff --git a/tests/test_find_orientations.py b/tests/test_find_orientations.py
index e74ac2c60..5580f9aa5 100644
--- a/tests/test_find_orientations.py
+++ b/tests/test_find_orientations.py
@@ -8,7 +8,10 @@
 
 import coloredlogs
 
-from hexrd.hedm.findorientations import find_orientations, generate_eta_ome_maps
+from hexrd.hedm.findorientations import (
+    find_orientations,
+    generate_eta_ome_maps,
+)
 from hexrd.hedm import config
 
 # TODO: Check that this test is still sensible after PlaneData change.
@@ -22,7 +25,8 @@ handler = logging.StreamHandler(sys.stdout) handler.setLevel(logging.INFO) formatter = coloredlogs.ColoredFormatter( - '%(asctime)s,%(msecs)03d - %(name)s - %(levelname)s - %(message)s') + '%(asctime)s,%(msecs)03d - %(name)s - %(levelname)s - %(message)s' +) handler.setFormatter(formatter) root.addHandler(handler) @@ -71,8 +75,9 @@ def example_repo_config_with_eta_ome_maps(test_config, reference_eta_ome_maps): @pytest.fixture def reference_orientations_path(example_repo_results_path): - filename = \ + filename = ( 'accepted_orientations_results_mruby_composite_hexrd06_py27_ruby.dat' + ) return example_repo_results_path / filename @@ -96,13 +101,13 @@ def to_eomap(eta_ome_maps): eta_ome_maps.omegas, eta_ome_maps.omeEdges, eta_ome_maps.iHKLList, - plane_data(eta_ome_maps.planeData) + plane_data(eta_ome_maps.planeData), ) -def test_generate_eta_ome_maps(example_repo_include_path, - test_config, - reference_eta_ome_maps): +def test_generate_eta_ome_maps( + example_repo_include_path, test_config, reference_eta_ome_maps +): os.chdir(example_repo_include_path) eta_ome_maps = generate_eta_ome_maps(test_config, save=False) eta_ome_maps = to_eomap(eta_ome_maps) @@ -112,19 +117,20 @@ def test_generate_eta_ome_maps(example_repo_include_path, assert comparison.compare() -def test_find_orientations(example_repo_include_path, - example_repo_config_with_eta_ome_maps, - reference_orientations): +def test_find_orientations( + example_repo_include_path, + example_repo_config_with_eta_ome_maps, + reference_orientations, +): os.chdir(example_repo_include_path) - results = find_orientations( - example_repo_config_with_eta_ome_maps - ) + results = find_orientations(example_repo_config_with_eta_ome_maps) orientations = results['qbar'] try: - test_utils.compare_quaternion_lists(orientations.T, - reference_orientations) + test_utils.compare_quaternion_lists( + orientations.T, reference_orientations + ) except RuntimeError as err: pytest.fail(str(err)) diff --git a/tests/test_fit-grains.py b/tests/test_fit-grains.py index f7f2a67a0..7a537f3a6 100644 --- a/tests/test_fit-grains.py +++ b/tests/test_fit-grains.py @@ -21,7 +21,8 @@ handler = logging.StreamHandler(sys.stdout) handler.setLevel(logging.INFO) formatter = coloredlogs.ColoredFormatter( - '%(asctime)s,%(msecs)03d - %(name)s - %(levelname)s - %(message)s') + '%(asctime)s,%(msecs)03d - %(name)s - %(levelname)s - %(message)s' +) handler.setFormatter(formatter) root.addHandler(handler) @@ -73,21 +74,30 @@ def test_config(single_ge_config_path, single_ge_include_path): return conf -def test_fit_grains(single_ge_include_path, test_config, grains_file_path, - grains_reference_file_path): +def test_fit_grains( + single_ge_include_path, + test_config, + grains_file_path, + grains_reference_file_path, +): os.chdir(str(single_ge_include_path)) grains_table = np.loadtxt(grains_reference_file_path, ndmin=2) ref_grain_params = grains_table[:, 3:15] - gresults = fit_grains(test_config, - grains_table, - show_progress=False, - ids_to_refine=None, - write_spots_files=False) + gresults = fit_grains( + test_config, + grains_table, + show_progress=False, + ids_to_refine=None, + write_spots_files=False, + ) cresult = compare_grain_fits( - np.vstack([i[-1] for i in gresults]), ref_grain_params, - mtol=1.e-4, ctol=1.e-3, vtol=1.e-4 + np.vstack([i[-1] for i in gresults]), + ref_grain_params, + mtol=1.0e-4, + ctol=1.0e-3, + vtol=1.0e-4, ) assert cresult diff --git a/tests/test_graindata.py b/tests/test_graindata.py index ad582c2e1..f7dc27350 100644 --- 
a/tests/test_graindata.py +++ b/tests/test_graindata.py @@ -1,4 +1,5 @@ """Testing GrainData class""" + from pathlib import Path import pytest @@ -9,22 +10,24 @@ @pytest.fixture def exp90(): - return (np.pi/2) * np.identity(3) + return (np.pi / 2) * np.identity(3) @pytest.fixture def quats90(): - c45, s45 = np.cos(np.pi / 4), np.sin(np.pi /4) + c45, s45 = np.cos(np.pi / 4), np.sin(np.pi / 4) return [[c45, s45, 0, 0], [c45, 0, s45, 0], [c45, 0, 0, s45]] @pytest.fixture def rmats90(): - return np.array([ - [[1, 0, 0], [0, 0,- 1], [0, 1, 0]], - [[0, 0, 1], [0, 1, 0], [-1, 0, 0]], - [[0, -1, 0], [1, 0, 0], [0, 0, 1]] - ]) + return np.array( + [ + [[1, 0, 0], [0, 0, -1], [0, 1, 0]], + [[0, 0, 1], [0, 1, 0], [-1, 0, 0]], + [[0, -1, 0], [1, 0, 0], [0, 0, 1]], + ] + ) @pytest.fixture diff --git a/tests/test_material.py b/tests/test_material.py index a84179f18..d772684ab 100644 --- a/tests/test_material.py +++ b/tests/test_material.py @@ -5,7 +5,7 @@ from hexrd.core.material import Material, load_materials_hdf5 # Tolerance for comparing floats -FLOAT_TOL = 1.e-8 +FLOAT_TOL = 1.0e-8 # Use consistent units to simplify testing DEFAULT_LENGTH_UNIT = 'angstrom' @@ -92,14 +92,22 @@ def test_load_materials(test_materials_file): def test_remove_duplicate_atoms(test_material_file_duplicate_atoms): mats = load_materials_hdf5(test_material_file_duplicate_atoms) - apos_xtal1 = np.array([[0., 0., 0., 1.]]) - apos_xtal2 = np.array([[0., 0., 0., 0.5], - [0., 0., 0., 0.5]]) - apos_xtal3 = np.array([[0., 0., 0., 1./3.], - [0., 0., 0., 1./3.], - [0., 0., 0., 1./3.], - [0.5, 0., 0., 1.], - [0.5, 0.5, 0.25, 1.]]) + apos_xtal1 = np.array([[0.0, 0.0, 0.0, 1.0]]) + apos_xtal2 = np.array( + [ + [0.0, 0.0, 0.0, 0.5], + [0.0, 0.0, 0.0, 0.5], + ] + ) + apos_xtal3 = np.array( + [ + [0.0, 0.0, 0.0, 1.0 / 3.0], + [0.0, 0.0, 0.0, 1.0 / 3.0], + [0.0, 0.0, 0.0, 1.0 / 3.0], + [0.5, 0.0, 0.0, 1.0], + [0.5, 0.5, 0.25, 1.0], + ] + ) mats['xtal1'].unitcell.remove_duplicate_atoms() assert np.all(np.isclose(mats['xtal1'].atom_pos, apos_xtal1)) @@ -146,11 +154,11 @@ def test_sfac(self, default_material): pd = default_material.planeData pd.exclude() sfacmax_pd = pd.structFact.max() - sfac = pd.structFact/sfacmax_pd + sfac = pd.structFact / sfacmax_pd assert (sfac.min() < sfacmin) and (sfac.max() > sfacmax) pd.exclude(sfacmin=sfacmin, sfacmax=sfacmax) - sfac = pd.structFact/sfacmax_pd + sfac = pd.structFact / sfacmax_pd assert (sfac.min() >= sfacmin) and (sfac.max() <= sfacmax) def test_pint(self, default_material): @@ -160,9 +168,9 @@ def test_pint(self, default_material): pd = default_material.planeData pd.exclude() pintmax_pd = pd.powder_intensity.max() - pint = np.array(pd.powder_intensity)/pintmax_pd + pint = np.array(pd.powder_intensity) / pintmax_pd assert (pint.min() < pintmin) and (pint.max() > pintmax) pd.exclude(pintmin=pintmin, pintmax=pintmax) - pint = np.array(pd.powder_intensity)/pintmax_pd + pint = np.array(pd.powder_intensity) / pintmax_pd assert (pint.min() >= pintmin) and (pint.max() <= pintmax) diff --git a/tests/test_matrix_utils.py b/tests/test_matrix_utils.py index a7840025a..269c26017 100644 --- a/tests/test_matrix_utils.py +++ b/tests/test_matrix_utils.py @@ -11,21 +11,21 @@ def test_vec_mv_cob_matrix(): T = np.zeros((len(R), 6, 6), dtype='float64') sqr2 = np.sqrt(2) # Hardcoded implementation - T[:, 0, 0] = R[:, 0, 0]**2 - T[:, 0, 1] = R[:, 0, 1]**2 - T[:, 0, 2] = R[:, 0, 2]**2 + T[:, 0, 0] = R[:, 0, 0] ** 2 + T[:, 0, 1] = R[:, 0, 1] ** 2 + T[:, 0, 2] = R[:, 0, 2] ** 2 T[:, 0, 3] = sqr2 * R[:, 0, 1] * R[:, 0, 2] 
T[:, 0, 4] = sqr2 * R[:, 0, 0] * R[:, 0, 2] T[:, 0, 5] = sqr2 * R[:, 0, 0] * R[:, 0, 1] - T[:, 1, 0] = R[:, 1, 0]**2 - T[:, 1, 1] = R[:, 1, 1]**2 - T[:, 1, 2] = R[:, 1, 2]**2 + T[:, 1, 0] = R[:, 1, 0] ** 2 + T[:, 1, 1] = R[:, 1, 1] ** 2 + T[:, 1, 2] = R[:, 1, 2] ** 2 T[:, 1, 3] = sqr2 * R[:, 1, 1] * R[:, 1, 2] T[:, 1, 4] = sqr2 * R[:, 1, 0] * R[:, 1, 2] T[:, 1, 5] = sqr2 * R[:, 1, 0] * R[:, 1, 1] - T[:, 2, 0] = R[:, 2, 0]**2 - T[:, 2, 1] = R[:, 2, 1]**2 - T[:, 2, 2] = R[:, 2, 2]**2 + T[:, 2, 0] = R[:, 2, 0] ** 2 + T[:, 2, 1] = R[:, 2, 1] ** 2 + T[:, 2, 2] = R[:, 2, 2] ** 2 T[:, 2, 3] = sqr2 * R[:, 2, 1] * R[:, 2, 2] T[:, 2, 4] = sqr2 * R[:, 2, 0] * R[:, 2, 2] T[:, 2, 5] = sqr2 * R[:, 2, 0] * R[:, 2, 1] diff --git a/tests/test_polar_view.py b/tests/test_polar_view.py index 501ed00a1..f61050f8e 100644 --- a/tests/test_polar_view.py +++ b/tests/test_polar_view.py @@ -65,8 +65,7 @@ def test_polar_view( pixel_size = (0.01, 5.0) pv = PolarView(tth_range, instr, eta_min, eta_max, pixel_size) - img = pv.warp_image(img_dict, pad_with_nans=True, - do_interpolation=True) + img = pv.warp_image(img_dict, pad_with_nans=True, do_interpolation=True) # This is a masked array. Just fill it with nans. img = img.filled(np.nan) @@ -76,10 +75,17 @@ def test_polar_view( assert np.allclose(img, ref, equal_nan=True) # Also generate it using the cache - pv = PolarView(tth_range, instr, eta_min, eta_max, pixel_size, - cache_coordinate_map=True) - fast_img = pv.warp_image(img_dict, pad_with_nans=True, - do_interpolation=True) + pv = PolarView( + tth_range, + instr, + eta_min, + eta_max, + pixel_size, + cache_coordinate_map=True, + ) + fast_img = pv.warp_image( + img_dict, pad_with_nans=True, do_interpolation=True + ) # This should also be identical fast_img = fast_img.filled(np.nan) diff --git a/tests/test_powder.py b/tests/test_powder.py index e9e5f1dd3..84ec59c68 100644 --- a/tests/test_powder.py +++ b/tests/test_powder.py @@ -23,9 +23,7 @@ def ceria_examples_path(eiger_examples_path: Path) -> Path: @pytest.fixture def eiger_instrument(ceria_examples_path: Path) -> HEDMInstrument: - instr_path = ( - ceria_examples_path / 'eiger_ceria_calibrated_composite.hexrd' - ) + instr_path = ceria_examples_path / 'eiger_ceria_calibrated_composite.hexrd' with h5py.File(instr_path, 'r') as rf: return HEDMInstrument(rf) @@ -164,8 +162,7 @@ def test_simulate_powder_pattern_image( eta_max = 180 pixel_size = (0.1, 0.1) pv = PolarView(tth_range, instr, eta_min, eta_max, pixel_size) - img = pv.warp_image(img_dict, pad_with_nans=True, - do_interpolation=True) + img = pv.warp_image(img_dict, pad_with_nans=True, do_interpolation=True) lineout = img.mean(axis=0).filled(np.nan) diff --git a/tests/test_rotations.py b/tests/test_rotations.py index 325cca4f0..be7ae4d4e 100644 --- a/tests/test_rotations.py +++ b/tests/test_rotations.py @@ -1,4 +1,5 @@ """Test rotations module""" + import numpy as np import pytest @@ -13,8 +14,17 @@ def test_misorientations(): # their own members. # laue_groups = [ - "ci", "c2h", "d2h", "c4h", "d4h", "c3i", - "d3d", "c6h", "d6h", "th", "oh" + "ci", + "c2h", + "d2h", + "c4h", + "d4h", + "c3i", + "d3d", + "c6h", + "d6h", + "th", + "oh", ] for lg in laue_groups: print("group: ", lg) @@ -22,5 +32,5 @@ def test_misorientations(): q1 = qsym[:, -1:] ang, mis = rotations.misorientation(q1, qsym, (qsym,)) assert np.allclose(ang, 0.0) - assert np.allclose(mis[0, :], 1.) - assert np.allclose(mis[1:, :], 0.) 
+ assert np.allclose(mis[0, :], 1.0) + assert np.allclose(mis[1:, :], 0.0) diff --git a/tests/test_snip.py b/tests/test_snip.py index 44cec86b8..a90688df0 100644 --- a/tests/test_snip.py +++ b/tests/test_snip.py @@ -55,8 +55,7 @@ def test_snip1d( pixel_size = (0.1, 1.0) pv = PolarView(tth_range, instr, eta_min, eta_max, pixel_size) - img = pv.warp_image(img_dict, pad_with_nans=True, - do_interpolation=True) + img = pv.warp_image(img_dict, pad_with_nans=True, do_interpolation=True) snip_width = 100 numiter = 2 diff --git a/tests/test_utils_json.py b/tests/test_utils_json.py index 068906a7a..73061af74 100644 --- a/tests/test_utils_json.py +++ b/tests/test_utils_json.py @@ -2,7 +2,11 @@ import numpy as np -from hexrd.core.utils.json import NumpyDecoder, NumpyEncoder, NumpyToNativeEncoder +from hexrd.core.utils.json import ( + NumpyDecoder, + NumpyEncoder, + NumpyToNativeEncoder, +) def test_decode_encode(): @@ -46,16 +50,16 @@ def test_numpy_to_native(): output = json.loads(encoded) assert ( - isinstance(output['inside'], list) and - output['inside'] == to_test['inside'].tolist() + isinstance(output['inside'], list) + and output['inside'] == to_test['inside'].tolist() ) assert ( - isinstance(output['float'], float) and - output['float'] == to_test['float'].item() + isinstance(output['float'], float) + and output['float'] == to_test['float'].item() ) assert ( - isinstance(output['nested']['float'], list) and - output['nested']['float'] == to_test['nested']['float'].tolist() + isinstance(output['nested']['float'], list) + and output['nested']['float'] == to_test['nested']['float'].tolist() ) diff --git a/tests/test_utils_yaml.py b/tests/test_utils_yaml.py index 2b9a385a8..c901293da 100644 --- a/tests/test_utils_yaml.py +++ b/tests/test_utils_yaml.py @@ -20,26 +20,24 @@ def test_numpy_to_native(): output = yaml.safe_load(encoded) assert ( - isinstance(output['inside'], list) and - output['inside'] == to_test['inside'].tolist() + isinstance(output['inside'], list) + and output['inside'] == to_test['inside'].tolist() ) assert ( - isinstance(output['nested']['float16'], list) and - output['nested']['float16'] == to_test['nested']['float16'].tolist() + isinstance(output['nested']['float16'], list) + and output['nested']['float16'] + == to_test['nested']['float16'].tolist() ) assert ( - isinstance(output['float32'], float) and - output['float32'] == to_test['float32'].item() + isinstance(output['float32'], float) + and output['float32'] == to_test['float32'].item() ) assert ( - isinstance(output['float64'], float) and - output['float64'] == to_test['float64'].item() + isinstance(output['float64'], float) + and output['float64'] == to_test['float64'].item() ) assert ( - isinstance(output['int64'], int) and - output['int64'] == to_test['int64'].item() - ) - assert ( - isinstance(output['str'], str) and - output['str'] == to_test['str'] + isinstance(output['int64'], int) + and output['int64'] == to_test['int64'].item() ) + assert isinstance(output['str'], str) and output['str'] == to_test['str'] diff --git a/tests/test_wppf.py b/tests/test_wppf.py index 644383820..29ab61a39 100644 --- a/tests/test_wppf.py +++ b/tests/test_wppf.py @@ -63,8 +63,9 @@ def _fix_all_params(parameters: Parameters): param.vary = False -def test_wppf_rietveld(expt_spectrum, spline_picks, ceo2_material, - rietveld_params): +def test_wppf_rietveld( + expt_spectrum, spline_picks, ceo2_material, rietveld_params +): beam_wavelength = 0.15358835358711712 params = rietveld_params diff --git a/tests/transforms/common.py 
b/tests/transforms/common.py index 5f10d79af..3afa7b7a0 100644 --- a/tests/transforms/common.py +++ b/tests/transforms/common.py @@ -30,15 +30,15 @@ def convert_axis_angle_to_rmat(axis, angle): s = math.sin(angle) t = 1.0 - c - m[0, 0] = c + axis[0]*axis[0]*t - m[0, 1] = axis[0]*axis[1]*t - axis[2]*s - m[0, 2] = axis[0]*axis[2]*t + axis[1]*s - m[1, 0] = axis[0]*axis[1]*t + axis[2]*s - m[1, 1] = c + axis[1]*axis[1]*t - m[1, 2] = axis[1]*axis[2]*t - axis[0]*s - m[2, 0] = axis[0]*axis[2]*t - axis[1]*s - m[2, 1] = axis[1]*axis[2]*t + axis[0]*s - m[2, 2] = c + axis[2]*axis[2]*t + m[0, 0] = c + axis[0] * axis[0] * t + m[0, 1] = axis[0] * axis[1] * t - axis[2] * s + m[0, 2] = axis[0] * axis[2] * t + axis[1] * s + m[1, 0] = axis[0] * axis[1] * t + axis[2] * s + m[1, 1] = c + axis[1] * axis[1] * t + m[1, 2] = axis[1] * axis[2] * t - axis[0] * s + m[2, 0] = axis[0] * axis[2] * t - axis[1] * s + m[2, 1] = axis[1] * axis[2] * t + axis[0] * s + m[2, 2] = c + axis[2] * axis[2] * t return m @@ -68,9 +68,11 @@ def random_rotation_matrix(): r22 = 2 * (q0 * q0 + q3 * q3) - 1 # 3x3 rotation matrix + # fmt: off rot_matrix = np.array([[r00, r01, r02], [r10, r11, r12], [r20, r21, r22]]) + # fmt: on return rot_matrix diff --git a/tests/transforms/test_angles_to_dvec_from_file.py b/tests/transforms/test_angles_to_dvec_from_file.py index 1d9a12338..c4f8a7c0e 100644 --- a/tests/transforms/test_angles_to_dvec_from_file.py +++ b/tests/transforms/test_angles_to_dvec_from_file.py @@ -5,14 +5,14 @@ from __future__ import absolute_import import numpy as np from hexrd.core.transforms.new_capi.xf_new_capi import angles_to_dvec + # from common import random_rotation_matrix, random_unit_vectors def test_angles_to_dvec_from_file(test_data_dir): # Load the array from a file arr = np.load( - test_data_dir / 'test_correct_angles_to_dvec.npy', - allow_pickle=True + test_data_dir / 'test_correct_angles_to_dvec.npy', allow_pickle=True ) for obj in arr: @@ -21,7 +21,7 @@ def test_angles_to_dvec_from_file(test_data_dir): obj["beam_vec"], obj["eta_vec"], obj["chi"], - obj["rmat_c"] + obj["rmat_c"], ) assert np.allclose(result, obj["result"]) diff --git a/tests/transforms/test_angles_to_gvec_from_file.py b/tests/transforms/test_angles_to_gvec_from_file.py index a0c4eee5f..0273d7a40 100644 --- a/tests/transforms/test_angles_to_gvec_from_file.py +++ b/tests/transforms/test_angles_to_gvec_from_file.py @@ -6,14 +6,14 @@ from __future__ import absolute_import import numpy as np from hexrd.core.transforms.new_capi.xf_new_capi import angles_to_gvec + # from common import random_rotation_matrix, random_unit_vectors def test_angles_to_gvec_from_file(test_data_dir): # Load the array from a file arr = np.load( - test_data_dir / 'test_correct_angles_to_gvec.npy', - allow_pickle=True + test_data_dir / 'test_correct_angles_to_gvec.npy', allow_pickle=True ) for obj in arr: @@ -22,7 +22,7 @@ def test_angles_to_gvec_from_file(test_data_dir): obj["beam_vec"], obj["eta_vec"], obj["chi"], - obj["rmat_c"] + obj["rmat_c"], ) assert np.allclose(result, obj["result"]) diff --git a/tests/transforms/test_gvec_to_xy_from_file.py b/tests/transforms/test_gvec_to_xy_from_file.py index 1b402bf19..9f9bd5ebc 100644 --- a/tests/transforms/test_gvec_to_xy_from_file.py +++ b/tests/transforms/test_gvec_to_xy_from_file.py @@ -5,24 +5,26 @@ from __future__ import absolute_import import numpy as np from hexrd.core.transforms.new_capi.xf_new_capi import gvec_to_xy + # from common import random_rotation_matrix, random_unit_vectors def test_gvec_to_xy_from_file(test_data_dir): 
# Load the array from a file arr = np.load( - test_data_dir / 'test_correct_gvec_to_xy.npy', - allow_pickle=True + test_data_dir / 'test_correct_gvec_to_xy.npy', allow_pickle=True ) for obj in arr: - result = gvec_to_xy(obj["gvec_c"], - obj["rmat_d"], - obj["rmat_s"], - obj["rmat_c"], - obj["tvec_d"], - obj["tvec_s"], - obj["tvec_c"], - obj["beam_vec"]) + result = gvec_to_xy( + obj["gvec_c"], + obj["rmat_d"], + obj["rmat_s"], + obj["rmat_c"], + obj["tvec_d"], + obj["tvec_s"], + obj["tvec_c"], + obj["beam_vec"], + ) assert np.allclose(result, obj["result"], equal_nan=True) diff --git a/tests/transforms/test_make_beam_rmat_from_file.py b/tests/transforms/test_make_beam_rmat_from_file.py index 663c5505b..c3c8ea7bd 100644 --- a/tests/transforms/test_make_beam_rmat_from_file.py +++ b/tests/transforms/test_make_beam_rmat_from_file.py @@ -6,22 +6,19 @@ from __future__ import absolute_import import numpy as np from hexrd.core.transforms.new_capi.xf_new_capi import make_beam_rmat + # from common import random_unit_vectors def test_make_beam_rmat_from_file(test_data_dir): # Load the array from a file arr = np.load( - test_data_dir / 'test_correct_make_beam_rmat.npy', - allow_pickle=True + test_data_dir / 'test_correct_make_beam_rmat.npy', allow_pickle=True ) for obj in arr: - result = make_beam_rmat( - obj["bvec_l"], - obj["evec_l"] - ) + result = make_beam_rmat(obj["bvec_l"], obj["evec_l"]) assert np.allclose(result.T.dot(obj['bvec_l']), [0, 0, -1]) assert np.allclose(result.T.dot(obj['evec_l'])[1], 0) assert np.allclose(result, obj["result"]) diff --git a/tests/transforms/test_make_binary_rmat.py b/tests/transforms/test_make_binary_rmat.py index a13d1860f..ff809cb12 100644 --- a/tests/transforms/test_make_binary_rmat.py +++ b/tests/transforms/test_make_binary_rmat.py @@ -19,8 +19,8 @@ def test_make_binary_rmat(): # Two binary rmats should be the identity assert np.allclose(rmat @ rmat, np.eye(3)) assert np.allclose(rmat.T @ rmat, np.eye(3)), "It is orthogonal" - assert np.all((np.abs(rmat) - 1 < 1e-10) | (np.abs(rmat) < 1e-10)), "It is binary" - rmat_expected = rotMatOfQuat( - quatOfAngleAxis(np.pi, np.c_[axis]) - ) + assert np.all( + (np.abs(rmat) - 1 < 1e-10) | (np.abs(rmat) < 1e-10) + ), "It is binary" + rmat_expected = rotMatOfQuat(quatOfAngleAxis(np.pi, np.c_[axis])) assert np.allclose(rmat, rmat_expected) diff --git a/tests/transforms/test_make_detector_rmat_from_file.py b/tests/transforms/test_make_detector_rmat_from_file.py index cc418e988..7d8343ee1 100644 --- a/tests/transforms/test_make_detector_rmat_from_file.py +++ b/tests/transforms/test_make_detector_rmat_from_file.py @@ -12,17 +12,16 @@ def test_make_detector_rmat_from_file(test_data_dir): # Load the array from a file arr = np.load( test_data_dir / 'test_correct_make_detector_rmat.npy', - allow_pickle=True + allow_pickle=True, ) for obj in arr: - result = make_detector_rmat( - obj["tilt_angles"] - ) + result = make_detector_rmat(obj["tilt_angles"]) assert np.allclose(result, obj["result"]) + # def test_correct_make_detector_rmat(test_data_dir): # arr = []; diff --git a/tests/transforms/test_make_rmat_of_expmap_from_file.py b/tests/transforms/test_make_rmat_of_expmap_from_file.py index 9a2b749d9..678194a68 100644 --- a/tests/transforms/test_make_rmat_of_expmap_from_file.py +++ b/tests/transforms/test_make_rmat_of_expmap_from_file.py @@ -12,17 +12,16 @@ def test_make_rmat_of_expmap_from_file(test_data_dir): # Load the array from a file arr = np.load( test_data_dir / 'test_correct_make_rmat_of_expmap.npy', - allow_pickle=True + 
allow_pickle=True, ) for obj in arr: - result = make_rmat_of_expmap( - obj["expmap"] - ) + result = make_rmat_of_expmap(obj["expmap"]) assert np.allclose(result, obj["result"]) + # def test_correct_make_sample_rmat(test_data_dir): # arr = []; diff --git a/tests/transforms/test_make_sample_rmat_from_file.py b/tests/transforms/test_make_sample_rmat_from_file.py index abad45188..49657994e 100644 --- a/tests/transforms/test_make_sample_rmat_from_file.py +++ b/tests/transforms/test_make_sample_rmat_from_file.py @@ -10,19 +10,16 @@ def test_make_sample_rmat_from_file(test_data_dir): # Load the array from a file arr = np.load( - test_data_dir / 'test_correct_make_sample_rmat.npy', - allow_pickle=True + test_data_dir / 'test_correct_make_sample_rmat.npy', allow_pickle=True ) for obj in arr: - result = make_sample_rmat( - obj["chi"], - obj["omega"] - ) + result = make_sample_rmat(obj["chi"], obj["omega"]) assert np.allclose(result, obj["result"]) + # def test_correct_make_sample_rmat(test_data_dir): # arr = []; diff --git a/tests/transforms/test_quat_distance_from_file.py b/tests/transforms/test_quat_distance_from_file.py index 808f3dca9..b7d35e655 100644 --- a/tests/transforms/test_quat_distance_from_file.py +++ b/tests/transforms/test_quat_distance_from_file.py @@ -5,6 +5,7 @@ from __future__ import absolute_import import numpy as np from hexrd.core.transforms.new_capi.xf_new_capi import quat_distance + # from common import random_unit_vectors # from hexrd.core.rotations import quatOfLaueGroup @@ -12,17 +13,13 @@ def test_quat_distance_from_file(test_data_dir): # Load the array from a file arr = np.load( - test_data_dir / 'test_correct_quat_distance.npy', - allow_pickle=True + test_data_dir / 'test_correct_quat_distance.npy', allow_pickle=True ) for obj in arr: - result = quat_distance( - obj["q1"], - obj["q2"], - obj["q_sym"] - ) + result = quat_distance(obj["q1"], obj["q2"], obj["q_sym"]) assert np.allclose(result, obj["result"]) + # def test_correct_quat_distance(test_data_dir): # arr = []; diff --git a/tests/transforms/test_xy_to_gvec_from_file.py b/tests/transforms/test_xy_to_gvec_from_file.py index 0e366e6ec..1093d2981 100644 --- a/tests/transforms/test_xy_to_gvec_from_file.py +++ b/tests/transforms/test_xy_to_gvec_from_file.py @@ -5,14 +5,14 @@ from __future__ import absolute_import import numpy as np from hexrd.core.transforms.xfcapi import xy_to_gvec + # from common import random_rotation_matrix, random_unit_vectors def test_xy_to_gvec_from_file(test_data_dir): # Load the array from a file arr = np.load( - test_data_dir / 'test_correct_xy_to_gvec.npy', - allow_pickle=True + test_data_dir / 'test_correct_xy_to_gvec.npy', allow_pickle=True ) for obj in arr: @@ -23,7 +23,7 @@ def test_xy_to_gvec_from_file(test_data_dir): obj["tvec_d"], obj["tvec_s"], obj["tvec_c"], - obj["rmat_b"] + obj["rmat_b"], ) assert np.allclose(result[0], obj["result"][0]) From c59627a6a2f9301c89f1655f43f6a15d48c0b640 Mon Sep 17 00:00:00 2001 From: Kevin Welsh Date: Wed, 12 Feb 2025 13:02:20 -0500 Subject: [PATCH 09/19] remove circular imports --- hexrd/core/distortion/dexela_2923_quad.py | 1 - hexrd/extensions/.gitignore | 1 - hexrd/hed/xrdutil/phutil.py | 2 +- 3 files changed, 1 insertion(+), 3 deletions(-) delete mode 100644 hexrd/extensions/.gitignore diff --git a/hexrd/core/distortion/dexela_2923_quad.py b/hexrd/core/distortion/dexela_2923_quad.py index ab84da33e..2c9ae55e1 100644 --- a/hexrd/core/distortion/dexela_2923_quad.py +++ b/hexrd/core/distortion/dexela_2923_quad.py @@ -1,6 +1,5 @@ import numpy as np 
import numba -from hexrd.core import constants from .distortionabc import DistortionABC from .registry import _RegisterDistortionClass diff --git a/hexrd/extensions/.gitignore b/hexrd/extensions/.gitignore deleted file mode 100644 index 72e8ffc0d..000000000 --- a/hexrd/extensions/.gitignore +++ /dev/null @@ -1 +0,0 @@ -* diff --git a/hexrd/hed/xrdutil/phutil.py b/hexrd/hed/xrdutil/phutil.py index d6fc130f4..59aa0294c 100644 --- a/hexrd/hed/xrdutil/phutil.py +++ b/hexrd/hed/xrdutil/phutil.py @@ -13,7 +13,7 @@ from numba import njit from hexrd.core import constants as ct -from hexrd.hed.instrument import Detector +from hexrd.core.instrument import Detector from hexrd.core.transforms import xfcapi from hexrd.core.utils.concurrent import distribute_tasks From 697d55195f68571eb7caf3df1e4c82077a918ea2 Mon Sep 17 00:00:00 2001 From: Kevin Welsh Date: Mon, 31 Mar 2025 14:55:06 -0400 Subject: [PATCH 10/19] fix minor import issues --- hexrd/core/fitting/calibration/laue.py | 4 ++-- hexrd/hed/instrument/hedm_instrument.py | 2 +- hexrd/hedm/instrument/hedm_instrument.py | 6 +++++- hexrd/laue/instrument/hedm_instrument.py | 6 +++++- hexrd/powder/instrument/hedm_instrument.py | 6 +++++- 5 files changed, 18 insertions(+), 6 deletions(-) diff --git a/hexrd/core/fitting/calibration/laue.py b/hexrd/core/fitting/calibration/laue.py index 68eac602c..68256eb08 100644 --- a/hexrd/core/fitting/calibration/laue.py +++ b/hexrd/core/fitting/calibration/laue.py @@ -17,8 +17,8 @@ from hexrd.core.utils.hkl import hkl_to_str, str_to_hkl # TODO: Resolve extra-workflow-dependency -from hexrd.powder.fitting.calibration.calibrator import Calibrator -from hexrd.powder.fitting.calibration.lmfit_param_handling import ( +from .calibrator import Calibrator +from .lmfit_param_handling import ( create_grain_params, DEFAULT_EULER_CONVENTION, rename_to_avoid_collision, diff --git a/hexrd/hed/instrument/hedm_instrument.py b/hexrd/hed/instrument/hedm_instrument.py index 6880b1508..2845de0dd 100644 --- a/hexrd/hed/instrument/hedm_instrument.py +++ b/hexrd/hed/instrument/hedm_instrument.py @@ -82,7 +82,7 @@ from hexrd.powder.wppf import LeBail from hexrd.core.instrument.cylindrical_detector import CylindricalDetector -from hexrd.core.instrument.detector import beam_energy_DFLT, max_workers_DFLT +from hexrd.core.instrument.detector import beam_energy_DFLT, max_workers_DFLT, Detector from hexrd.core.instrument.planar_detector import PlanarDetector from skimage.draw import polygon diff --git a/hexrd/hedm/instrument/hedm_instrument.py b/hexrd/hedm/instrument/hedm_instrument.py index 5c52f282f..1eaacbe91 100644 --- a/hexrd/hedm/instrument/hedm_instrument.py +++ b/hexrd/hedm/instrument/hedm_instrument.py @@ -80,7 +80,11 @@ from hexrd.powder.wppf import LeBail from hexrd.core.instrument.cylindrical_detector import CylindricalDetector -from hexrd.core.instrument.detector import beam_energy_DFLT, max_workers_DFLT +from hexrd.core.instrument.detector import ( + beam_energy_DFLT, + max_workers_DFLT, + Detector, +) from hexrd.core.instrument.planar_detector import PlanarDetector from skimage.draw import polygon diff --git a/hexrd/laue/instrument/hedm_instrument.py b/hexrd/laue/instrument/hedm_instrument.py index 44915c515..520586804 100644 --- a/hexrd/laue/instrument/hedm_instrument.py +++ b/hexrd/laue/instrument/hedm_instrument.py @@ -80,7 +80,11 @@ from hexrd.powder.wppf import LeBail from hexrd.core.instrument.cylindrical_detector import CylindricalDetector -from hexrd.core.instrument.detector import beam_energy_DFLT, max_workers_DFLT +from 
hexrd.core.instrument.detector import ( + beam_energy_DFLT, + max_workers_DFLT, + Detector, +) from hexrd.core.instrument.planar_detector import PlanarDetector from skimage.draw import polygon diff --git a/hexrd/powder/instrument/hedm_instrument.py b/hexrd/powder/instrument/hedm_instrument.py index 44915c515..520586804 100644 --- a/hexrd/powder/instrument/hedm_instrument.py +++ b/hexrd/powder/instrument/hedm_instrument.py @@ -80,7 +80,11 @@ from hexrd.powder.wppf import LeBail from hexrd.core.instrument.cylindrical_detector import CylindricalDetector -from hexrd.core.instrument.detector import beam_energy_DFLT, max_workers_DFLT +from hexrd.core.instrument.detector import ( + beam_energy_DFLT, + max_workers_DFLT, + Detector, +) from hexrd.core.instrument.planar_detector import PlanarDetector from skimage.draw import polygon From 2de0acaa3e63d93e0464542df9a9de1436efab6e Mon Sep 17 00:00:00 2001 From: Kevin Welsh Date: Mon, 31 Mar 2025 17:04:38 -0400 Subject: [PATCH 11/19] Some files need to be recopied to core to update them. Also fixed some more imports --- hexrd/core/fitting/calibration/__init__.py | 14 +- hexrd/core/fitting/calibration/grain.py | 214 +++++++++ hexrd/core/fitting/calibration/instrument.py | 6 +- hexrd/core/fitting/calibration/laue.py | 131 +----- hexrd/core/fitting/calibration/multigrain.py | 443 ------------------ .../core/fitting/calibration/structureless.py | 2 +- hexrd/core/fitting/grains.py | 407 ++++++++++++++++ hexrd/hedm/fitting/calibration/__init__.py | 19 + hexrd/hedm/fitting/calibration/multigrain.py | 443 ------------------ 9 files changed, 669 insertions(+), 1010 deletions(-) create mode 100644 hexrd/core/fitting/calibration/grain.py delete mode 100644 hexrd/core/fitting/calibration/multigrain.py create mode 100644 hexrd/core/fitting/grains.py create mode 100644 hexrd/hedm/fitting/calibration/__init__.py delete mode 100644 hexrd/hedm/fitting/calibration/multigrain.py diff --git a/hexrd/core/fitting/calibration/__init__.py b/hexrd/core/fitting/calibration/__init__.py index 74c112ee5..7ad5cd25f 100644 --- a/hexrd/core/fitting/calibration/__init__.py +++ b/hexrd/core/fitting/calibration/__init__.py @@ -1,17 +1,9 @@ -# TODO: Resolve extra-core dependencies -# from ....powder.fitting.calibration.instrument import InstrumentCalibrator -# from ....laue.fitting.calibration.laue import LaueCalibrator -# from ....hedm.fitting.calibration.multigrain import calibrate_instrument_from_sx, generate_parameter_names -# from ....powder.fitting.calibration.powder import PowderCalibrator -# from ....powder.fitting.calibration.structureless import StructurelessCalibrator - -# These were temporarily copied over from the above imports from .instrument import InstrumentCalibrator +from .laue import LaueCalibrator +from .lmfit_param_handling import fix_detector_y from .powder import PowderCalibrator from .structureless import StructurelessCalibrator -from .multigrain import calibrate_instrument_from_sx, generate_parameter_names -from .laue import LaueCalibrator - +from .grain import GrainCalibrator # For backward-compatibility, since it used to be named this: StructureLessCalibrator = StructurelessCalibrator diff --git a/hexrd/core/fitting/calibration/grain.py b/hexrd/core/fitting/calibration/grain.py new file mode 100644 index 000000000..91e14aa45 --- /dev/null +++ b/hexrd/core/fitting/calibration/grain.py @@ -0,0 +1,214 @@ +import logging + +import numpy as np + +from hexrd.core import matrixutil as mutil +from hexrd.core.rotations import angularDifference +from 
hexrd.core.transforms import xfcapi + +from .abstract_grain import AbstractGrainCalibrator +from .lmfit_param_handling import ( + DEFAULT_EULER_CONVENTION, +) +from .. import grains as grainutil + +logger = logging.getLogger(__name__) + + +class GrainCalibrator(AbstractGrainCalibrator): + """This is for HEDM grain calibration""" + + type = 'grain' + + def __init__( + self, + instr, + material, + grain_params, + ome_period, + index=0, + default_refinements=None, + calibration_picks=None, + euler_convention=DEFAULT_EULER_CONVENTION, + ): + super().__init__( + instr, + material, + grain_params, + default_refinements, + calibration_picks, + euler_convention, + ) + self.ome_period = ome_period + self.index = index + + @property + def name(self): + return f'{self.material.name}_{self.index}' + + def autopick_points(self): + # We could call `pull_spots()` here to perform auto-picking. + raise NotImplementedError + + def _evaluate(self): + data_dict = self.data_dict + + # grab reflection data from picks input + pick_hkls_dict = {} + pick_xys_dict = {} + for det_key in self.instr.detectors: + # find valid reflections and recast hkls to int + xys = np.asarray(data_dict['pick_xys'][det_key], dtype=float) + hkls = np.asarray(data_dict['hkls'][det_key], dtype=int) + + valid_idx = ~np.isnan(xys[:, 0]) + + # fill local dicts + pick_hkls_dict[det_key] = [np.atleast_2d(hkls[valid_idx, :])] + pick_xys_dict[det_key] = [np.atleast_2d(xys[valid_idx, :])] + + return pick_hkls_dict, pick_xys_dict + + def residual(self): + pick_hkls_dict, pick_xys_dict = self._evaluate() + + return sxcal_obj_func( + [self.grain_params], + self.instr, + pick_xys_dict, + pick_hkls_dict, + self.bmatx, + self.ome_period, + ) + + def model(self): + pick_hkls_dict, pick_xys_dict = self._evaluate() + + return sxcal_obj_func( + [self.grain_params], + self.instr, + pick_xys_dict, + pick_hkls_dict, + self.bmatx, + self.ome_period, + sim_only=True, + ) + + +# Objective function for multigrain fitting +def sxcal_obj_func( + grain_params, instr, xyo_det, hkls_idx, bmat, ome_period, sim_only=False +): + ngrains = len(grain_params) + + # assign some useful params + wavelength = instr.beam_wavelength + bvec = instr.beam_vector + chi = instr.chi + tvec_s = instr.tvec + + # right now just stuck on the end and assumed + # to all be the same length... FIX THIS + xy_unwarped = {} + meas_omes = {} + calc_omes = {} + calc_xy = {} + + # loop over panels + npts_tot = 0 + for det_key, panel in instr.detectors.items(): + rmat_d = panel.rmat + tvec_d = panel.tvec + + xy_unwarped[det_key] = [] + meas_omes[det_key] = [] + calc_omes[det_key] = [] + calc_xy[det_key] = [] + + for ig, grain in enumerate(grain_params): + ghkls = hkls_idx[det_key][ig] + xyo = xyo_det[det_key][ig] + + npts_tot += len(xyo) + + xy_unwarped[det_key].append(xyo[:, :2]) + meas_omes[det_key].append(xyo[:, 2]) + if panel.distortion is not None: # do unwarping + xy_unwarped[det_key][ig] = panel.distortion.apply( + xy_unwarped[det_key][ig] + ) + + # transform G-vectors: + # 1) convert inv. 
stretch tensor from MV notation into 3x3 + # 2) take reciprocal lattice vectors from CRYSTAL to SAMPLE frame + # 3) apply stretch tensor + # 4) normalize reciprocal lattice vectors in SAMPLE frame + # 5) transform unit reciprocal lattice vectors back to CRYSTAL frame + rmat_c = xfcapi.make_rmat_of_expmap(grain[:3]) + tvec_c = grain[3:6] + vinv_s = grain[6:] + gvec_c = np.dot(bmat, ghkls.T) + vmat_s = mutil.vecMVToSymm(vinv_s) + ghat_s = mutil.unitVector(np.dot(vmat_s, np.dot(rmat_c, gvec_c))) + ghat_c = np.dot(rmat_c.T, ghat_s) + + match_omes, calc_omes_tmp = grainutil.matchOmegas( + xyo, + ghkls.T, + chi, + rmat_c, + bmat, + wavelength, + vInv=vinv_s, + beamVec=bvec, + omePeriod=ome_period, + ) + + rmat_s_arr = xfcapi.make_sample_rmat( + chi, np.ascontiguousarray(calc_omes_tmp) + ) + calc_xy_tmp = xfcapi.gvec_to_xy( + ghat_c.T, rmat_d, rmat_s_arr, rmat_c, tvec_d, tvec_s, tvec_c + ) + if np.any(np.isnan(calc_xy_tmp)): + logger.warning( + "infeasible parameters: may want to scale back " + "finite difference step size" + ) + + calc_omes[det_key].append(calc_omes_tmp) + calc_xy[det_key].append(calc_xy_tmp) + + # return values + if sim_only: + retval = {} + for det_key in calc_xy.keys(): + # ??? calc_xy is always 2-d + retval[det_key] = [] + for ig in range(ngrains): + retval[det_key].append( + np.vstack( + [calc_xy[det_key][ig].T, calc_omes[det_key][ig]] + ).T + ) + else: + meas_xy_all = [] + calc_xy_all = [] + meas_omes_all = [] + calc_omes_all = [] + for det_key in xy_unwarped.keys(): + meas_xy_all.append(np.vstack(xy_unwarped[det_key])) + calc_xy_all.append(np.vstack(calc_xy[det_key])) + meas_omes_all.append(np.hstack(meas_omes[det_key])) + calc_omes_all.append(np.hstack(calc_omes[det_key])) + meas_xy_all = np.vstack(meas_xy_all) + calc_xy_all = np.vstack(calc_xy_all) + meas_omes_all = np.hstack(meas_omes_all) + calc_omes_all = np.hstack(calc_omes_all) + + diff_vecs_xy = calc_xy_all - meas_xy_all + diff_ome = angularDifference(calc_omes_all, meas_omes_all) + retval = np.hstack( + [diff_vecs_xy, diff_ome.reshape(npts_tot, 1)] + ).flatten() + return retval diff --git a/hexrd/core/fitting/calibration/instrument.py b/hexrd/core/fitting/calibration/instrument.py index ee3f88297..c1344b989 100644 --- a/hexrd/core/fitting/calibration/instrument.py +++ b/hexrd/core/fitting/calibration/instrument.py @@ -11,7 +11,7 @@ update_instrument_from_params, validate_params_list, ) -from hexrd.core.fitting.calibration.relative_constraints import ( +from .relative_constraints import ( create_relative_constraints, RelativeConstraints, RelativeConstraintsType, @@ -30,7 +30,6 @@ def __init__( self, *args, engineering_constraints=None, - set_refinements_from_instrument_flags=True, euler_convention=DEFAULT_EULER_CONVENTION, relative_constraints_type=RelativeConstraintsType.none, ): @@ -63,9 +62,6 @@ def __init__( self.euler_convention = euler_convention self.params = self.make_lmfit_params() - if set_refinements_from_instrument_flags: - self.instr.set_calibration_flags_to_lmfit_params(self.params) - self.fitter = lmfit.Minimizer( self.minimizer_function, self.params, nan_policy='omit' ) diff --git a/hexrd/core/fitting/calibration/laue.py b/hexrd/core/fitting/calibration/laue.py index 68256eb08..855c43727 100644 --- a/hexrd/core/fitting/calibration/laue.py +++ b/hexrd/core/fitting/calibration/laue.py @@ -8,24 +8,26 @@ from skimage import filters from skimage.feature import blob_log -# TODO: Resolve extra-workflow-dependency from hexrd.hedm import xrdutil from hexrd.core.constants import fwhm_to_sigma from
hexrd.core.instrument import switch_xray_source -from hexrd.core.rotations import angleAxisOfRotMat, RotMatEuler from hexrd.core.transforms import xfcapi -from hexrd.core.utils.hkl import hkl_to_str, str_to_hkl -# TODO: Resolve extra-workflow-dependency -from .calibrator import Calibrator -from .lmfit_param_handling import ( - create_grain_params, - DEFAULT_EULER_CONVENTION, - rename_to_avoid_collision, -) +from .abstract_grain import AbstractGrainCalibrator +from .lmfit_param_handling import DEFAULT_EULER_CONVENTION -class LaueCalibrator(Calibrator): +class LaueCalibrator(AbstractGrainCalibrator): + """A Laue calibrator "is-a" specific case for a grain calibrator. + + Just like a grain calibrator, a Laue calibrator is calibrating + grain parameters. + + There are some unique properties for Laue, though, such as having a + varying energy range rather than a constant energy value. Also, we + do not utilize any omega periods. + """ + type = 'laue' def __init__( @@ -41,22 +43,23 @@ def __init__( euler_convention=DEFAULT_EULER_CONVENTION, xray_source: Optional[str] = None, ): - self.instr = instr - self.material = material - self.grain_params = grain_params - self.default_refinements = default_refinements + super().__init__( + instr, + material, + grain_params, + default_refinements, + calibration_picks, + euler_convention, + ) self.energy_cutoffs = [min_energy, max_energy] - self.euler_convention = euler_convention self.xray_source = xray_source - self.data_dict = None - if calibration_picks is not None: - self.calibration_picks = calibration_picks - self._tth_distortion = tth_distortion self._update_tth_distortion_panels() - self.param_names = [] + @property + def name(self): + return self.material.name @property def tth_distortion(self): @@ -78,63 +81,6 @@ def _update_tth_distortion_panels(self): for det_key, obj in self._tth_distortion.items(): obj.panel = self.instr.detectors[det_key] - def create_lmfit_params(self, current_params): - params = create_grain_params( - self.material.name, - self.grain_params_euler, - self.default_refinements, - ) - - # Ensure there are no name collisions - params, _ = rename_to_avoid_collision(params, current_params) - self.param_names = [x[0] for x in params] - - return params - - def update_from_lmfit_params(self, params_dict): - grain_params = [] - for i, name in enumerate(self.param_names): - grain_params.append(params_dict[name].value) - - self.grain_params_euler = np.asarray(grain_params) - - @property - def grain_params_euler(self): - # Grain parameters with orientation set using Euler angle convention - if self.euler_convention is None: - return self.grain_params - - grain_params = self.grain_params.copy() - rme = RotMatEuler(np.zeros(3), **self.euler_convention) - rme.rmat = xfcapi.make_rmat_of_expmap(grain_params[:3]) - grain_params[:3] = np.degrees(rme.angles) - return grain_params - - @grain_params_euler.setter - def grain_params_euler(self, v): - # Grain parameters with orientation set using Euler angle convention - grain_params = v.copy() - if self.euler_convention is not None: - rme = RotMatEuler( - np.zeros( - 3, - ), - **self.euler_convention - ) - rme.angles = np.radians(grain_params[:3]) - phi, n = angleAxisOfRotMat(rme.rmat) - grain_params[:3] = phi * n.flatten() - - self.grain_params = grain_params - - @property - def plane_data(self): - return self.material.planeData - - @property - def bmatx(self): - return self.plane_data.latVecOps['B'] - @property def energy_cutoffs(self): return self._energy_cutoffs @@ -147,35 +93,6 @@ def 
energy_cutoffs(self, x): self.plane_data.wavelength = self.energy_cutoffs[-1] self.plane_data.exclusions = None - @property - def calibration_picks(self): - # Convert this from our internal data dict format - picks = {} - for det_key in self.instr.detectors: - picks[det_key] = {} - - # find valid reflections and recast hkls to int - xys = self.data_dict['pick_xys'][det_key] - hkls = self.data_dict['hkls'][det_key] - - for hkl, xy in zip(hkls, xys): - picks[det_key][hkl_to_str(hkl)] = xy - - return picks - - @calibration_picks.setter - def calibration_picks(self, v): - # Convert this to our internal data dict format - data_dict = { - 'pick_xys': {}, - 'hkls': {}, - } - for det_key, det_picks in v.items(): - data_dict['hkls'][det_key] = [str_to_hkl(x) for x in det_picks] - data_dict['pick_xys'][det_key] = list(det_picks.values()) - - self.data_dict = data_dict - def autopick_points( self, raw_img_dict, diff --git a/hexrd/core/fitting/calibration/multigrain.py b/hexrd/core/fitting/calibration/multigrain.py deleted file mode 100644 index 9e7bf59a2..000000000 --- a/hexrd/core/fitting/calibration/multigrain.py +++ /dev/null @@ -1,443 +0,0 @@ -import logging -import os - -import numpy as np -from scipy.optimize import leastsq, least_squares - -from hexrd.core import constants as cnst -from hexrd.core import matrixutil as mutil -from hexrd.core import rotations -from hexrd.core.transforms import xfcapi - -from .. import grains as grainutil - -logger = logging.getLogger() -logger.setLevel('INFO') - -# grains -# fmt: off -grain_flags_DFLT = np.array( - [1, 1, 1, - 1, 0, 1, - 0, 0, 0, 0, 0, 0], - dtype=bool -) -# fmt: on - -ext_eta_tol = np.radians(5.0) # for HEDM cal, may make this a user param - - -def calibrate_instrument_from_sx( - instr, - grain_params, - bmat, - xyo_det, - hkls_idx, - param_flags=None, - grain_flags=None, - ome_period=None, - xtol=cnst.sqrt_epsf, - ftol=cnst.sqrt_epsf, - factor=10.0, - sim_only=False, - use_robust_lsq=False, -): - """ - arguments xyo_det, hkls_idx are DICTs over panels - - """ - grain_params = np.atleast_2d(grain_params) - ngrains = len(grain_params) - pnames = generate_parameter_names(instr, grain_params) - - # reset parameter flags for instrument as specified - if param_flags is None: - param_flags = instr.calibration_flags - else: - # will throw an AssertionError if wrong length - instr.calibration_flags = param_flags - - # re-map omegas if need be - if ome_period is not None: - for det_key in instr.detectors: - for ig in range(ngrains): - xyo_det[det_key][ig][:, 2] = rotations.mapAngle( - xyo_det[det_key][ig][:, 2], ome_period - ) - - # first grab the instrument parameters - # 7 global - # 6*num_panels for the detectors - # num_panels*ndp in case of distortion - plist_full = instr.calibration_parameters - - # now handle grains - # reset parameter flags for grains as specified - if grain_flags is None: - grain_flags = np.tile(grain_flags_DFLT, ngrains) - - plist_full = np.concatenate([plist_full, np.hstack(grain_params)]) - plf_copy = np.copy(plist_full) - - # concatenate refinement flags - refine_flags = np.hstack([param_flags, grain_flags]) - plist_fit = plist_full[refine_flags] - fit_args = ( - plist_full, - param_flags, - grain_flags, - instr, - xyo_det, - hkls_idx, - bmat, - ome_period, - ) - if sim_only: - return sxcal_obj_func( - plist_fit, - plist_full, - param_flags, - grain_flags, - instr, - xyo_det, - hkls_idx, - bmat, - ome_period, - sim_only=True, - ) - else: - logger.info("Set up to refine:") - for i in np.where(refine_flags)[0]: - 
logger.info("\t%s = %1.7e" % (pnames[i], plist_full[i])) - - # run optimization - if use_robust_lsq: - result = least_squares( - sxcal_obj_func, - plist_fit, - args=fit_args, - xtol=xtol, - ftol=ftol, - loss='soft_l1', - method='trf', - ) - x = result.x - resd = result.fun - mesg = result.message - ierr = result.status - else: - # do least squares problem - x, cov_x, infodict, mesg, ierr = leastsq( - sxcal_obj_func, - plist_fit, - args=fit_args, - factor=factor, - xtol=xtol, - ftol=ftol, - full_output=1, - ) - resd = infodict['fvec'] - if ierr not in [1, 2, 3, 4]: - raise RuntimeError(f"solution not found: {ierr=}") - else: - logger.info(f"optimization fininshed successfully with {ierr=}") - logger.info(mesg) - - # ??? output message handling? - fit_params = plist_full - fit_params[refine_flags] = x - - # run simulation with optimized results - sim_final = sxcal_obj_func( - x, - plist_full, - param_flags, - grain_flags, - instr, - xyo_det, - hkls_idx, - bmat, - ome_period, - sim_only=True, - ) - - # ??? reset instrument here? - instr.update_from_parameter_list(fit_params) - - # report final - logger.info("Optimization Reults:") - for i in np.where(refine_flags)[0]: - logger.info( - "\t%s = %1.7e --> %1.7e" - % (pnames[i], plf_copy[i], fit_params[i]) - ) - - return fit_params, resd, sim_final - - -def generate_parameter_names(instr, grain_params): - pnames = [ - '{:>24s}'.format('beam energy'), - '{:>24s}'.format('beam azimuth'), - '{:>24s}'.format('beam polar'), - '{:>24s}'.format('chi'), - '{:>24s}'.format('tvec_s[0]'), - '{:>24s}'.format('tvec_s[1]'), - '{:>24s}'.format('tvec_s[2]'), - ] - - for det_key, panel in instr.detectors.items(): - pnames += [ - '{:>24s}'.format('%s tilt[0]' % det_key), - '{:>24s}'.format('%s tilt[1]' % det_key), - '{:>24s}'.format('%s tilt[2]' % det_key), - '{:>24s}'.format('%s tvec[0]' % det_key), - '{:>24s}'.format('%s tvec[1]' % det_key), - '{:>24s}'.format('%s tvec[2]' % det_key), - ] - # now add distortion if there - if panel.distortion is not None: - for j in range(len(panel.distortion.params)): - pnames.append('{:>24s}'.format('%s dparam[%d]' % (det_key, j))) - - grain_params = np.atleast_2d(grain_params) - for ig, grain in enumerate(grain_params): - pnames += [ - '{:>24s}'.format('grain %d xi[0]' % ig), - '{:>24s}'.format('grain %d xi[1]' % ig), - '{:>24s}'.format('grain %d xi[2]' % ig), - '{:>24s}'.format('grain %d tvec_c[0]' % ig), - '{:>24s}'.format('grain %d tvec_c[1]' % ig), - '{:>24s}'.format('grain %d tvec_c[2]' % ig), - '{:>24s}'.format('grain %d vinv_s[0]' % ig), - '{:>24s}'.format('grain %d vinv_s[1]' % ig), - '{:>24s}'.format('grain %d vinv_s[2]' % ig), - '{:>24s}'.format('grain %d vinv_s[3]' % ig), - '{:>24s}'.format('grain %d vinv_s[4]' % ig), - '{:>24s}'.format('grain %d vinv_s[5]' % ig), - ] - - return pnames - - -def sxcal_obj_func( - plist_fit, - plist_full, - param_flags, - grain_flags, - instr, - xyo_det, - hkls_idx, - bmat, - ome_period, - sim_only=False, - return_value_flag=None, -): - """ """ - npi = len(instr.calibration_parameters) - NP_GRN = 12 - - # stack flags and force bool repr - refine_flags = np.array(np.hstack([param_flags, grain_flags]), dtype=bool) - - # fill out full parameter list - # !!! 
no scaling for now - plist_full[refine_flags] = plist_fit - - # instrument update - instr.update_from_parameter_list(plist_full) - - # assign some useful params - wavelength = instr.beam_wavelength - bvec = instr.beam_vector - chi = instr.chi - tvec_s = instr.tvec - - # right now just stuck on the end and assumed - # to all be the same length... FIX THIS - xy_unwarped = {} - meas_omes = {} - calc_omes = {} - calc_xy = {} - - # grain params - grain_params = plist_full[npi:] - if np.mod(len(grain_params), NP_GRN) != 0: - raise RuntimeError("parameter list length is not consistent") - ngrains = len(grain_params) // NP_GRN - grain_params = grain_params.reshape((ngrains, NP_GRN)) - - # loop over panels - npts_tot = 0 - for det_key, panel in instr.detectors.items(): - rmat_d = panel.rmat - tvec_d = panel.tvec - - xy_unwarped[det_key] = [] - meas_omes[det_key] = [] - calc_omes[det_key] = [] - calc_xy[det_key] = [] - - for ig, grain in enumerate(grain_params): - ghkls = hkls_idx[det_key][ig] - xyo = xyo_det[det_key][ig] - - npts_tot += len(xyo) - - xy_unwarped[det_key].append(xyo[:, :2]) - meas_omes[det_key].append(xyo[:, 2]) - if panel.distortion is not None: # do unwarping - xy_unwarped[det_key][ig] = panel.distortion.apply( - xy_unwarped[det_key][ig] - ) - - # transform G-vectors: - # 1) convert inv. stretch tensor from MV notation in to 3x3 - # 2) take reciprocal lattice vectors from CRYSTAL to SAMPLE frame - # 3) apply stretch tensor - # 4) normalize reciprocal lattice vectors in SAMPLE frame - # 5) transform unit reciprocal lattice vetors back to CRYSAL frame - rmat_c = xfcapi.make_rmat_of_expmap(grain[:3]) - tvec_c = grain[3:6] - vinv_s = grain[6:] - gvec_c = np.dot(bmat, ghkls.T) - vmat_s = mutil.vecMVToSymm(vinv_s) - ghat_s = mutil.unitVector(np.dot(vmat_s, np.dot(rmat_c, gvec_c))) - ghat_c = np.dot(rmat_c.T, ghat_s) - - match_omes, calc_omes_tmp = grainutil.matchOmegas( - xyo, - ghkls.T, - chi, - rmat_c, - bmat, - wavelength, - vInv=vinv_s, - beamVec=bvec, - omePeriod=ome_period, - ) - - rmat_s_arr = xfcapi.make_sample_rmat( - chi, np.ascontiguousarray(calc_omes_tmp) - ) - calc_xy_tmp = xfcapi.gvec_to_xy( - ghat_c.T, rmat_d, rmat_s_arr, rmat_c, tvec_d, tvec_s, tvec_c - ) - if np.any(np.isnan(calc_xy_tmp)): - logger.warning( - "infeasible parameters: may want to scale back " - "finite difference step size" - ) - - calc_omes[det_key].append(calc_omes_tmp) - calc_xy[det_key].append(calc_xy_tmp) - - # return values - if sim_only: - retval = {} - for det_key in calc_xy.keys(): - # ??? 
calc_xy is always 2-d - retval[det_key] = [] - for ig in range(ngrains): - retval[det_key].append( - np.vstack( - [calc_xy[det_key][ig].T, calc_omes[det_key][ig]] - ).T - ) - else: - meas_xy_all = [] - calc_xy_all = [] - meas_omes_all = [] - calc_omes_all = [] - for det_key in xy_unwarped.keys(): - meas_xy_all.append(np.vstack(xy_unwarped[det_key])) - calc_xy_all.append(np.vstack(calc_xy[det_key])) - meas_omes_all.append(np.hstack(meas_omes[det_key])) - calc_omes_all.append(np.hstack(calc_omes[det_key])) - meas_xy_all = np.vstack(meas_xy_all) - calc_xy_all = np.vstack(calc_xy_all) - meas_omes_all = np.hstack(meas_omes_all) - calc_omes_all = np.hstack(calc_omes_all) - - diff_vecs_xy = calc_xy_all - meas_xy_all - diff_ome = rotations.angularDifference(calc_omes_all, meas_omes_all) - retval = np.hstack( - [diff_vecs_xy, diff_ome.reshape(npts_tot, 1)] - ).flatten() - if return_value_flag == 1: - retval = sum(abs(retval)) - elif return_value_flag == 2: - denom = npts_tot - len(plist_fit) - 1.0 - if denom != 0: - nu_fac = 1.0 / denom - else: - nu_fac = 1.0 - nu_fac = 1 / (npts_tot - len(plist_fit) - 1.0) - retval = nu_fac * sum(retval**2) - return retval - - -def parse_reflection_tables(cfg, instr, grain_ids, refit_idx=None): - """ - make spot dictionaries - """ - hkls = {} - xyo_det = {} - idx_0 = {} - for det_key, panel in instr.detectors.items(): - hkls[det_key] = [] - xyo_det[det_key] = [] - idx_0[det_key] = [] - for ig, grain_id in enumerate(grain_ids): - spots_filename = os.path.join( - cfg.analysis_dir, - os.path.join(det_key, 'spots_%05d.out' % grain_id), - ) - - # load pull_spots output table - gtable = np.loadtxt(spots_filename, ndmin=2) - if len(gtable) == 0: - gtable = np.nan * np.ones((1, 17)) - - # apply conditions for accepting valid data - valid_reflections = gtable[:, 0] >= 0 # is indexed - not_saturated = gtable[:, 6] < panel.saturation_level - # throw away extremem etas - p90 = rotations.angularDifference(gtable[:, 8], cnst.piby2) - m90 = rotations.angularDifference(gtable[:, 8], -cnst.piby2) - accept_etas = np.logical_or(p90 > ext_eta_tol, m90 > ext_eta_tol) - logger.info(f"panel '{det_key}', grain {grain_id}") - logger.info( - f"{sum(valid_reflections)} of {len(gtable)} " - "reflections are indexed" - ) - logger.info( - f"{sum(not_saturated)} of {sum(valid_reflections)}" - " valid reflections be are below" - + f" saturation threshold of {panel.saturation_level}" - ) - logger.info( - f"{sum(accept_etas)} of {len(gtable)}" - " reflections be are greater than " - + f" {np.degrees(ext_eta_tol)} from the rotation axis" - ) - - # valid reflections index - if refit_idx is None: - idx = np.logical_and( - valid_reflections, - np.logical_and(not_saturated, accept_etas), - ) - idx_0[det_key].append(idx) - else: - idx = refit_idx[det_key][ig] - idx_0[det_key].append(idx) - logger.info( - f"input reflection specify {sum(idx)} of " - f"{len(gtable)} total valid reflections" - ) - - hkls[det_key].append(gtable[idx, 2:5]) - meas_omes = gtable[idx, 12].reshape(sum(idx), 1) - xyo_det[det_key].append(np.hstack([gtable[idx, -2:], meas_omes])) - return hkls, xyo_det, idx_0 diff --git a/hexrd/core/fitting/calibration/structureless.py b/hexrd/core/fitting/calibration/structureless.py index 5704b88a0..f417b666e 100644 --- a/hexrd/core/fitting/calibration/structureless.py +++ b/hexrd/core/fitting/calibration/structureless.py @@ -14,7 +14,7 @@ tth_parameter_prefixes, update_instrument_from_params, ) -from hexrd.core.fitting.calibration.relative_constraints import ( +from .relative_constraints import 
( create_relative_constraints, RelativeConstraints, RelativeConstraintsType, diff --git a/hexrd/core/fitting/grains.py b/hexrd/core/fitting/grains.py new file mode 100644 index 000000000..034bfe8c0 --- /dev/null +++ b/hexrd/core/fitting/grains.py @@ -0,0 +1,407 @@ +"""Grain fitting functions""" + +import numpy as np + +from scipy import optimize + +from hexrd.core import matrixutil as mutil + +from hexrd.core.transforms import xfcapi +from hexrd.core import constants +from hexrd.core import rotations + +from hexrd.hedm.xrdutil import extract_detector_transformation + +return_value_flag = None + +epsf = np.finfo(float).eps # ~2.2e-16 +sqrt_epsf = np.sqrt(epsf) # ~1.5e-8 + +bVec_ref = constants.beam_vec +eta_ref = constants.eta_vec +vInv_ref = np.r_[1.0, 1.0, 1.0, 0.0, 0.0, 0.0] + + +# for grain parameters +gFlag_ref = np.ones(12, dtype=bool) +gScl_ref = np.ones(12, dtype=bool) + + +def fitGrain( + gFull, + instrument, + reflections_dict, + bMat, + wavelength, + gFlag=gFlag_ref, + gScl=gScl_ref, + omePeriod=None, + factor=0.1, + xtol=sqrt_epsf, + ftol=sqrt_epsf, +): + """ + Perform least-squares optimization of grain parameters. + + Parameters + ---------- + gFull : TYPE + DESCRIPTION. + instrument : TYPE + DESCRIPTION. + reflections_dict : TYPE + DESCRIPTION. + bMat : TYPE + DESCRIPTION. + wavelength : TYPE + DESCRIPTION. + gFlag : TYPE, optional + DESCRIPTION. The default is gFlag_ref. + gScl : TYPE, optional + DESCRIPTION. The default is gScl_ref. + omePeriod : TYPE, optional + DESCRIPTION. The default is None. + factor : TYPE, optional + DESCRIPTION. The default is 0.1. + xtol : TYPE, optional + DESCRIPTION. The default is sqrt_epsf. + ftol : TYPE, optional + DESCRIPTION. The default is sqrt_epsf. + + Raises + ------ + RuntimeError + DESCRIPTION. + + Returns + ------- + retval : TYPE + DESCRIPTION. + + """ + # FIXME: will currently fail if omePeriod is specified + if omePeriod is not None: + # xyo_det[:, 2] = rotations.mapAngle(xyo_det[:, 2], omePeriod) + raise RuntimeError + + gFit = gFull[gFlag] + + fitArgs = ( + gFull, + gFlag, + instrument, + reflections_dict, + bMat, + wavelength, + omePeriod, + ) + results = optimize.leastsq( + objFuncFitGrain, + gFit, + args=fitArgs, + diag=1.0 / gScl[gFlag].flatten(), + factor=factor, + xtol=xtol, + ftol=ftol, + ) + + gFit_opt = results[0] + + retval = gFull + retval[gFlag] = gFit_opt + return retval + + +def objFuncFitGrain( + gFit, + gFull, + gFlag, + instrument, + reflections_dict, + bMat, + wavelength, + omePeriod, + simOnly=False, + return_value_flag=return_value_flag, +): + """ + Calculate residual between measured and simulated ff-HEDM G-vectors. + + gFull[0] = expMap_c[0] + gFull[1] = expMap_c[1] + gFull[2] = expMap_c[2] + gFull[3] = tVec_c[0] + gFull[4] = tVec_c[1] + gFull[5] = tVec_c[2] + gFull[6] = vInv_MV[0] + gFull[7] = vInv_MV[1] + gFull[8] = vInv_MV[2] + gFull[9] = vInv_MV[3] + gFull[10] = vInv_MV[4] + gFull[11] = vInv_MV[5] + + OLD CALL + objFuncFitGrain(gFit, gFull, gFlag, + detectorParams, + xyo_det, hkls_idx, bMat, wavelength, + bVec, eVec, + dFunc, dParams, + omePeriod, + simOnly=False, return_value_flag=return_value_flag) + + Parameters + ---------- + gFit : TYPE + DESCRIPTION. + gFull : TYPE + DESCRIPTION. + gFlag : TYPE + DESCRIPTION. + instrument : TYPE + DESCRIPTION. + reflections_dict : TYPE + DESCRIPTION. + bMat : TYPE + DESCRIPTION. + wavelength : TYPE + DESCRIPTION. + omePeriod : TYPE + DESCRIPTION. + simOnly : TYPE, optional + DESCRIPTION. The default is False. + return_value_flag : TYPE, optional + DESCRIPTION.
The default is return_value_flag. + + Raises + ------ + RuntimeError + DESCRIPTION. + + Returns + ------- + retval : TYPE + DESCRIPTION. + + """ + bVec = instrument.beam_vector + eVec = instrument.eta_vector + + # fill out parameters + gFull[gFlag] = gFit + + # map parameters to functional arrays + rMat_c = xfcapi.make_rmat_of_expmap(gFull[:3]) + tVec_c = gFull[3:6].reshape(3, 1) + vInv_s = gFull[6:] + vMat_s = mutil.vecMVToSymm(vInv_s) # NOTE: Inverse of V from F = V * R + + # loop over instrument panels + # CAVEAT: keeping track of key ordering in the "detectors" attribute of + # instrument here because I am not sure if instantiating them using + # dict.fromkeys() preserves the same order if using iteration... + # + calc_omes_dict = dict.fromkeys(instrument.detectors, []) + calc_xy_dict = dict.fromkeys(instrument.detectors) + meas_xyo_all = [] + det_keys_ordered = [] + for det_key, panel in instrument.detectors.items(): + det_keys_ordered.append(det_key) + + rMat_d, tVec_d, chi, tVec_s = extract_detector_transformation( + instrument.detector_parameters[det_key] + ) + + results = reflections_dict[det_key] + if len(results) == 0: + continue + + """ + extract data from results list fields: + refl_id, gvec_id, hkl, sum_int, max_int, pred_ang, meas_ang, meas_xy + + or array from spots tables: + 0:5 ID PID H K L + 5:7 sum(int) max(int) + 7:10 pred tth pred eta pred ome + 10:13 meas tth meas eta meas ome + 13:15 pred X pred Y + 15:17 meas X meas Y + """ + if isinstance(results, list): + # WARNING: hkls and derived vectors below must be columnwise; + # strictly necessary??? change affected APIs instead? + # + hkls = np.atleast_2d(np.vstack([x[2] for x in results])).T + + meas_xyo = np.atleast_2d( + np.vstack([np.r_[x[7], x[6][-1]] for x in results]) + ) + elif isinstance(results, np.ndarray): + hkls = np.atleast_2d(results[:, 2:5]).T + meas_xyo = np.atleast_2d(results[:, [15, 16, 12]]) + + # distortion handling + if panel.distortion is not None: + meas_omes = meas_xyo[:, 2] + xy_unwarped = panel.distortion.apply(meas_xyo[:, :2]) + meas_xyo = np.vstack([xy_unwarped.T, meas_omes]).T + + # append to meas_omes + meas_xyo_all.append(meas_xyo) + + # G-vectors: + # 1. calculate full g-vector components in CRYSTAL frame from B + # 2. rotate into SAMPLE frame and apply stretch + # 3. rotate back into CRYSTAL frame and normalize to unit magnitude + # IDEA: make a function for this sequence of operations with option for + # choosing output frame (i.e.
CRYSTAL vs SAMPLE vs LAB) + gVec_c = np.dot(bMat, hkls) + gVec_s = np.dot(vMat_s, np.dot(rMat_c, gVec_c)) + gHat_c = mutil.unitVector(np.dot(rMat_c.T, gVec_s)) + + # !!!: check that this operates on UNWARPED xy + match_omes, calc_omes = matchOmegas( + meas_xyo, + hkls, + chi, + rMat_c, + bMat, + wavelength, + vInv=vInv_s, + beamVec=bVec, + etaVec=eVec, + omePeriod=omePeriod, + ) + + # append to omes dict + calc_omes_dict[det_key] = calc_omes + + # TODO: try Numba implementations + rMat_s = xfcapi.make_sample_rmat(chi, calc_omes) + calc_xy = xfcapi.gvec_to_xy( + gHat_c.T, + rMat_d, + rMat_s, + rMat_c, + tVec_d, + tVec_s, + tVec_c, + beam_vec=bVec, + ) + + # append to xy dict + calc_xy_dict[det_key] = calc_xy + + # stack results to concatenated arrays + calc_omes_all = np.hstack([calc_omes_dict[k] for k in det_keys_ordered]) + tmp = [] + for k in det_keys_ordered: + if calc_xy_dict[k] is not None: + tmp.append(calc_xy_dict[k]) + calc_xy_all = np.vstack(tmp) + meas_xyo_all = np.vstack(meas_xyo_all) + + npts = len(meas_xyo_all) + if np.any(np.isnan(calc_xy)): + raise RuntimeError( + "infeasible pFull: may want to scale " + + "back finite difference step size" + ) + + # return values + if simOnly: + # return simulated values + if return_value_flag in [None, 1]: + retval = np.hstack([calc_xy_all, calc_omes_all.reshape(npts, 1)]) + else: + rd = dict.fromkeys(det_keys_ordered) + for det_key in det_keys_ordered: + rd[det_key] = { + 'calc_xy': calc_xy_dict[det_key], + 'calc_omes': calc_omes_dict[det_key], + } + retval = rd + else: + # return residual vector + # IDEA: try angles instead of xys? + diff_vecs_xy = calc_xy_all - meas_xyo_all[:, :2] + diff_ome = rotations.angularDifference( + calc_omes_all, meas_xyo_all[:, 2] + ) + retval = np.hstack([diff_vecs_xy, diff_ome.reshape(npts, 1)]).flatten() + if return_value_flag == 1: + # return scalar sum of absolute residuals + retval = sum(abs(retval)) + elif return_value_flag == 2: + # return DOF-normalized chisq + # TODO: check this calculation + denom = 3 * npts - len(gFit) - 1.0 + if denom != 0: + nu_fac = 1.0 / denom + else: + nu_fac = 1.0 + retval = nu_fac * sum(retval**2) + return retval + + +def matchOmegas( + xyo_det, + hkls_idx, + chi, + rMat_c, + bMat, + wavelength, + vInv=vInv_ref, + beamVec=bVec_ref, + etaVec=eta_ref, + omePeriod=None, +): + """ + For a given list of (x, y, ome) points, outputs the index into the results + from oscillAnglesOfHKLs, including the calculated omega values. + """ + # get omegas for rMat_s calculation + if omePeriod is not None: + meas_omes = rotations.mapAngle(xyo_det[:, 2], omePeriod) + else: + meas_omes = xyo_det[:, 2] + + oangs0, oangs1 = xfcapi.oscill_angles_of_hkls( + hkls_idx.T, + chi, + rMat_c, + bMat, + wavelength, + v_inv=vInv, + beam_vec=beamVec, + eta_vec=etaVec, + ) + if np.any(np.isnan(oangs0)): + # debugging + # TODO: remove this + # import pdb + # pdb.set_trace() + nanIdx = np.where(np.isnan(oangs0[:, 0]))[0] + errorString = "Infeasible parameters for hkls:\n" + for i in range(len(nanIdx)): + errorString += "%d %d %d\n" % tuple(hkls_idx[:, nanIdx[i]]) + errorString += "you may need to deselect this hkl family." + raise RuntimeError(errorString) + else: + # CAPI version gives vstacked angles...
must be (2, nhkls) + calc_omes = np.vstack([oangs0[:, 2], oangs1[:, 2]]) + if omePeriod is not None: + calc_omes = np.vstack( + [ + rotations.mapAngle(oangs0[:, 2], omePeriod), + rotations.mapAngle(oangs1[:, 2], omePeriod), + ] + ) + # do angular difference + diff_omes = rotations.angularDifference( + np.tile(meas_omes, (2, 1)), calc_omes + ) + match_omes = np.argsort(diff_omes, axis=0) == 0 + calc_omes = calc_omes.T.flatten()[match_omes.T.flatten()] + + return match_omes, calc_omes diff --git a/hexrd/hedm/fitting/calibration/__init__.py b/hexrd/hedm/fitting/calibration/__init__.py new file mode 100644 index 000000000..1d2652b41 --- /dev/null +++ b/hexrd/hedm/fitting/calibration/__init__.py @@ -0,0 +1,19 @@ +from .grain import GrainCalibrator +from hexrd.core.fitting.calibration import ( + fix_detector_y, + InstrumentCalibrator, + LaueCalibrator, + StructureLessCalibrator, + StructurelessCalibrator, + PowderCalibrator, +) + +__all__ = [ + 'fix_detector_y', + 'GrainCalibrator', + 'InstrumentCalibrator', + 'LaueCalibrator', + 'PowderCalibrator', + 'StructurelessCalibrator', + 'StructureLessCalibrator', +] diff --git a/hexrd/hedm/fitting/calibration/multigrain.py b/hexrd/hedm/fitting/calibration/multigrain.py deleted file mode 100644 index 9e7bf59a2..000000000 --- a/hexrd/hedm/fitting/calibration/multigrain.py +++ /dev/null @@ -1,443 +0,0 @@ -import logging -import os - -import numpy as np -from scipy.optimize import leastsq, least_squares - -from hexrd.core import constants as cnst -from hexrd.core import matrixutil as mutil -from hexrd.core import rotations -from hexrd.core.transforms import xfcapi - -from .. import grains as grainutil - -logger = logging.getLogger() -logger.setLevel('INFO') - -# grains -# fmt: off -grain_flags_DFLT = np.array( - [1, 1, 1, - 1, 0, 1, - 0, 0, 0, 0, 0, 0], - dtype=bool -) -# fmt: on - -ext_eta_tol = np.radians(5.0) # for HEDM cal, may make this a user param - - -def calibrate_instrument_from_sx( - instr, - grain_params, - bmat, - xyo_det, - hkls_idx, - param_flags=None, - grain_flags=None, - ome_period=None, - xtol=cnst.sqrt_epsf, - ftol=cnst.sqrt_epsf, - factor=10.0, - sim_only=False, - use_robust_lsq=False, -): - """ - arguments xyo_det, hkls_idx are DICTs over panels - - """ - grain_params = np.atleast_2d(grain_params) - ngrains = len(grain_params) - pnames = generate_parameter_names(instr, grain_params) - - # reset parameter flags for instrument as specified - if param_flags is None: - param_flags = instr.calibration_flags - else: - # will throw an AssertionError if wrong length - instr.calibration_flags = param_flags - - # re-map omegas if need be - if ome_period is not None: - for det_key in instr.detectors: - for ig in range(ngrains): - xyo_det[det_key][ig][:, 2] = rotations.mapAngle( - xyo_det[det_key][ig][:, 2], ome_period - ) - - # first grab the instrument parameters - # 7 global - # 6*num_panels for the detectors - # num_panels*ndp in case of distortion - plist_full = instr.calibration_parameters - - # now handle grains - # reset parameter flags for grains as specified - if grain_flags is None: - grain_flags = np.tile(grain_flags_DFLT, ngrains) - - plist_full = np.concatenate([plist_full, np.hstack(grain_params)]) - plf_copy = np.copy(plist_full) - - # concatenate refinement flags - refine_flags = np.hstack([param_flags, grain_flags]) - plist_fit = plist_full[refine_flags] - fit_args = ( - plist_full, - param_flags, - grain_flags, - instr, - xyo_det, - hkls_idx, - bmat, - ome_period, - ) - if sim_only: - return sxcal_obj_func( - plist_fit, - 
plist_full, - param_flags, - grain_flags, - instr, - xyo_det, - hkls_idx, - bmat, - ome_period, - sim_only=True, - ) - else: - logger.info("Set up to refine:") - for i in np.where(refine_flags)[0]: - logger.info("\t%s = %1.7e" % (pnames[i], plist_full[i])) - - # run optimization - if use_robust_lsq: - result = least_squares( - sxcal_obj_func, - plist_fit, - args=fit_args, - xtol=xtol, - ftol=ftol, - loss='soft_l1', - method='trf', - ) - x = result.x - resd = result.fun - mesg = result.message - ierr = result.status - else: - # do least squares problem - x, cov_x, infodict, mesg, ierr = leastsq( - sxcal_obj_func, - plist_fit, - args=fit_args, - factor=factor, - xtol=xtol, - ftol=ftol, - full_output=1, - ) - resd = infodict['fvec'] - if ierr not in [1, 2, 3, 4]: - raise RuntimeError(f"solution not found: {ierr=}") - else: - logger.info(f"optimization fininshed successfully with {ierr=}") - logger.info(mesg) - - # ??? output message handling? - fit_params = plist_full - fit_params[refine_flags] = x - - # run simulation with optimized results - sim_final = sxcal_obj_func( - x, - plist_full, - param_flags, - grain_flags, - instr, - xyo_det, - hkls_idx, - bmat, - ome_period, - sim_only=True, - ) - - # ??? reset instrument here? - instr.update_from_parameter_list(fit_params) - - # report final - logger.info("Optimization Reults:") - for i in np.where(refine_flags)[0]: - logger.info( - "\t%s = %1.7e --> %1.7e" - % (pnames[i], plf_copy[i], fit_params[i]) - ) - - return fit_params, resd, sim_final - - -def generate_parameter_names(instr, grain_params): - pnames = [ - '{:>24s}'.format('beam energy'), - '{:>24s}'.format('beam azimuth'), - '{:>24s}'.format('beam polar'), - '{:>24s}'.format('chi'), - '{:>24s}'.format('tvec_s[0]'), - '{:>24s}'.format('tvec_s[1]'), - '{:>24s}'.format('tvec_s[2]'), - ] - - for det_key, panel in instr.detectors.items(): - pnames += [ - '{:>24s}'.format('%s tilt[0]' % det_key), - '{:>24s}'.format('%s tilt[1]' % det_key), - '{:>24s}'.format('%s tilt[2]' % det_key), - '{:>24s}'.format('%s tvec[0]' % det_key), - '{:>24s}'.format('%s tvec[1]' % det_key), - '{:>24s}'.format('%s tvec[2]' % det_key), - ] - # now add distortion if there - if panel.distortion is not None: - for j in range(len(panel.distortion.params)): - pnames.append('{:>24s}'.format('%s dparam[%d]' % (det_key, j))) - - grain_params = np.atleast_2d(grain_params) - for ig, grain in enumerate(grain_params): - pnames += [ - '{:>24s}'.format('grain %d xi[0]' % ig), - '{:>24s}'.format('grain %d xi[1]' % ig), - '{:>24s}'.format('grain %d xi[2]' % ig), - '{:>24s}'.format('grain %d tvec_c[0]' % ig), - '{:>24s}'.format('grain %d tvec_c[1]' % ig), - '{:>24s}'.format('grain %d tvec_c[2]' % ig), - '{:>24s}'.format('grain %d vinv_s[0]' % ig), - '{:>24s}'.format('grain %d vinv_s[1]' % ig), - '{:>24s}'.format('grain %d vinv_s[2]' % ig), - '{:>24s}'.format('grain %d vinv_s[3]' % ig), - '{:>24s}'.format('grain %d vinv_s[4]' % ig), - '{:>24s}'.format('grain %d vinv_s[5]' % ig), - ] - - return pnames - - -def sxcal_obj_func( - plist_fit, - plist_full, - param_flags, - grain_flags, - instr, - xyo_det, - hkls_idx, - bmat, - ome_period, - sim_only=False, - return_value_flag=None, -): - """ """ - npi = len(instr.calibration_parameters) - NP_GRN = 12 - - # stack flags and force bool repr - refine_flags = np.array(np.hstack([param_flags, grain_flags]), dtype=bool) - - # fill out full parameter list - # !!! 
no scaling for now - plist_full[refine_flags] = plist_fit - - # instrument update - instr.update_from_parameter_list(plist_full) - - # assign some useful params - wavelength = instr.beam_wavelength - bvec = instr.beam_vector - chi = instr.chi - tvec_s = instr.tvec - - # right now just stuck on the end and assumed - # to all be the same length... FIX THIS - xy_unwarped = {} - meas_omes = {} - calc_omes = {} - calc_xy = {} - - # grain params - grain_params = plist_full[npi:] - if np.mod(len(grain_params), NP_GRN) != 0: - raise RuntimeError("parameter list length is not consistent") - ngrains = len(grain_params) // NP_GRN - grain_params = grain_params.reshape((ngrains, NP_GRN)) - - # loop over panels - npts_tot = 0 - for det_key, panel in instr.detectors.items(): - rmat_d = panel.rmat - tvec_d = panel.tvec - - xy_unwarped[det_key] = [] - meas_omes[det_key] = [] - calc_omes[det_key] = [] - calc_xy[det_key] = [] - - for ig, grain in enumerate(grain_params): - ghkls = hkls_idx[det_key][ig] - xyo = xyo_det[det_key][ig] - - npts_tot += len(xyo) - - xy_unwarped[det_key].append(xyo[:, :2]) - meas_omes[det_key].append(xyo[:, 2]) - if panel.distortion is not None: # do unwarping - xy_unwarped[det_key][ig] = panel.distortion.apply( - xy_unwarped[det_key][ig] - ) - - # transform G-vectors: - # 1) convert inv. stretch tensor from MV notation into 3x3 - # 2) take reciprocal lattice vectors from CRYSTAL to SAMPLE frame - # 3) apply stretch tensor - # 4) normalize reciprocal lattice vectors in SAMPLE frame - # 5) transform unit reciprocal lattice vectors back to CRYSTAL frame - rmat_c = xfcapi.make_rmat_of_expmap(grain[:3]) - tvec_c = grain[3:6] - vinv_s = grain[6:] - gvec_c = np.dot(bmat, ghkls.T) - vmat_s = mutil.vecMVToSymm(vinv_s) - ghat_s = mutil.unitVector(np.dot(vmat_s, np.dot(rmat_c, gvec_c))) - ghat_c = np.dot(rmat_c.T, ghat_s) - - match_omes, calc_omes_tmp = grainutil.matchOmegas( - xyo, - ghkls.T, - chi, - rmat_c, - bmat, - wavelength, - vInv=vinv_s, - beamVec=bvec, - omePeriod=ome_period, - ) - - rmat_s_arr = xfcapi.make_sample_rmat( - chi, np.ascontiguousarray(calc_omes_tmp) - ) - calc_xy_tmp = xfcapi.gvec_to_xy( - ghat_c.T, rmat_d, rmat_s_arr, rmat_c, tvec_d, tvec_s, tvec_c - ) - if np.any(np.isnan(calc_xy_tmp)): - logger.warning( - "infeasible parameters: may want to scale back " - "finite difference step size" - ) - - calc_omes[det_key].append(calc_omes_tmp) - calc_xy[det_key].append(calc_xy_tmp) - - # return values - if sim_only: - retval = {} - for det_key in calc_xy.keys(): - # ???
calc_xy is always 2-d - retval[det_key] = [] - for ig in range(ngrains): - retval[det_key].append( - np.vstack( - [calc_xy[det_key][ig].T, calc_omes[det_key][ig]] - ).T - ) - else: - meas_xy_all = [] - calc_xy_all = [] - meas_omes_all = [] - calc_omes_all = [] - for det_key in xy_unwarped.keys(): - meas_xy_all.append(np.vstack(xy_unwarped[det_key])) - calc_xy_all.append(np.vstack(calc_xy[det_key])) - meas_omes_all.append(np.hstack(meas_omes[det_key])) - calc_omes_all.append(np.hstack(calc_omes[det_key])) - meas_xy_all = np.vstack(meas_xy_all) - calc_xy_all = np.vstack(calc_xy_all) - meas_omes_all = np.hstack(meas_omes_all) - calc_omes_all = np.hstack(calc_omes_all) - - diff_vecs_xy = calc_xy_all - meas_xy_all - diff_ome = rotations.angularDifference(calc_omes_all, meas_omes_all) - retval = np.hstack( - [diff_vecs_xy, diff_ome.reshape(npts_tot, 1)] - ).flatten() - if return_value_flag == 1: - retval = sum(abs(retval)) - elif return_value_flag == 2: - denom = npts_tot - len(plist_fit) - 1.0 - if denom != 0: - nu_fac = 1.0 / denom - else: - nu_fac = 1.0 - retval = nu_fac * sum(retval**2) - return retval - - -def parse_reflection_tables(cfg, instr, grain_ids, refit_idx=None): - """ - make spot dictionaries - """ - hkls = {} - xyo_det = {} - idx_0 = {} - for det_key, panel in instr.detectors.items(): - hkls[det_key] = [] - xyo_det[det_key] = [] - idx_0[det_key] = [] - for ig, grain_id in enumerate(grain_ids): - spots_filename = os.path.join( - cfg.analysis_dir, - os.path.join(det_key, 'spots_%05d.out' % grain_id), - ) - - # load pull_spots output table - gtable = np.loadtxt(spots_filename, ndmin=2) - if len(gtable) == 0: - gtable = np.nan * np.ones((1, 17)) - - # apply conditions for accepting valid data - valid_reflections = gtable[:, 0] >= 0 # is indexed - not_saturated = gtable[:, 6] < panel.saturation_level - # throw away extreme etas - p90 = rotations.angularDifference(gtable[:, 8], cnst.piby2) - m90 = rotations.angularDifference(gtable[:, 8], -cnst.piby2) - accept_etas = np.logical_or(p90 > ext_eta_tol, m90 > ext_eta_tol) - logger.info(f"panel '{det_key}', grain {grain_id}") - logger.info( - f"{sum(valid_reflections)} of {len(gtable)} " - "reflections are indexed" - ) - logger.info( - f"{sum(not_saturated)} of {sum(valid_reflections)}" - " valid reflections are below" - + f" saturation threshold of {panel.saturation_level}" - ) - logger.info( - f"{sum(accept_etas)} of {len(gtable)}" - " reflections are greater than " - + f" {np.degrees(ext_eta_tol)} from the rotation axis" - ) - - # valid reflections index - if refit_idx is None: - idx = np.logical_and( - valid_reflections, - np.logical_and(not_saturated, accept_etas), - ) - idx_0[det_key].append(idx) - else: - idx = refit_idx[det_key][ig] - idx_0[det_key].append(idx) - logger.info( - f"input reflections specify {sum(idx)} of " - f"{len(gtable)} total valid reflections" - ) - - hkls[det_key].append(gtable[idx, 2:5]) - meas_omes = gtable[idx, 12].reshape(sum(idx), 1) - xyo_det[det_key].append(np.hstack([gtable[idx, -2:], meas_omes])) - return hkls, xyo_det, idx_0 From 57ba9ea695eed4d1052cb51876d5a8475993cdbc Mon Sep 17 00:00:00 2001 From: Kevin Welsh Date: Tue, 1 Apr 2025 13:45:18 -0400 Subject: [PATCH 12/19] Move file_table into the package + fix pickled imports --- file_table.tsv => hexrd/file_table.tsv | 17 ++++ hexrd/module_map.py | 82 ++++++++++++-------- setup.py | 2 +- 3 files changed, 55 insertions(+), 46 deletions(-) rename file_table.tsv => hexrd/file_table.tsv
(96%) diff --git a/file_table.tsv b/hexrd/file_table.tsv similarity index 96% rename from file_table.tsv rename to hexrd/file_table.tsv index 9e1e39e4f..c44a8f060 100644 --- a/file_table.tsv +++ b/hexrd/file_table.tsv @@ -142,6 +142,13 @@ hexrd/fitting/calibration/multigrain.py hexrd/hedm/fitting/calibration/multigrai hexrd/fitting/calibration/calibrator.py hexrd/powder/fitting/calibration/calibrator.py hexrd/fitting/calibration/lmfit_param_handling.py hexrd/powder/fitting/calibration/lmfit_param_handling.py hexrd/fitting/calibration/instrument.py hexrd/powder/fitting/calibration/instrument.py +hexrd/fitting/calibration/laue.py hexrd/core/fitting/calibration/laue.py +hexrd/fitting/calibration/structureless.py hexrd/core/fitting/calibration/structureless.py +hexrd/fitting/calibration/powder.py hexrd/core/fitting/calibration/powder.py +hexrd/fitting/calibration/multigrain.py hexrd/core/fitting/calibration/multigrain.py +hexrd/fitting/calibration/calibrator.py hexrd/core/fitting/calibration/calibrator.py +hexrd/fitting/calibration/lmfit_param_handling.py hexrd/core/fitting/calibration/lmfit_param_handling.py +hexrd/fitting/calibration/instrument.py hexrd/core/fitting/calibration/instrument.py hexrd/ipfcolor/sphere_sector.py hexrd/hedm/ipfcolor/sphere_sector.py hexrd/ipfcolor/colorspace.py hexrd/hedm/ipfcolor/colorspace.py hexrd/ipfcolor/__init__.py hexrd/hedm/ipfcolor/__init__.py @@ -408,6 +415,10 @@ hexrd\core\instrument\__init__.py hexrd\core\instrument\__init__.py hexrd\core\instrument\constants.py hexrd\core\instrument\constants.py hexrd\core\instrument\cylindrical_detector.py hexrd\core\instrument\cylindrical_detector.py hexrd\core\instrument\detector.py hexrd\core\instrument\detector.py +hexrd\core\instrument\detector.py hexrd\hed\instrument\detector.py +hexrd\core\instrument\detector.py hexrd\hedm\instrument\detector.py +hexrd\core\instrument\detector.py hexrd\laue\instrument\detector.py +hexrd\core\instrument\detector.py hexrd\powder\instrument\detector.py hexrd\core\instrument\detector_coatings.py hexrd\core\instrument\detector_coatings.py hexrd\core\instrument\hedm_instrument.py hexrd\core\instrument\hedm_instrument.py hexrd\core\instrument\physics_package.py hexrd\core\instrument\physics_package.py @@ -531,3 +542,9 @@ hexrd\transforms\new_capi hexrd\core\transforms\new_capi hexrd\transforms\cpp_sublibrary hexrd\core\transforms\cpp_sublibrary hexrd\transforms\cpp_sublibrary\src hexrd\core\transforms\cpp_sublibrary\src hexrd\convolution\src hexrd\core\convolution\src +hexrd\distortion\dexela_2923_quad.py hexrd\core\distortion\dexela_2923_quad.py +hexrd\resources\instrument_templates\rigaku.hexrd hexrd\core\resources\instrument_templates\rigaku.hexrd +hexrd\transforms\cpp_sublibrary\src\transforms.cpp hexrd\core\transforms\cpp_sublibrary\src\transforms.cpp +hexrd\utils\panel_buffer.py hexrd\core\utils\panel_buffer.py +tests\test_distortion.py tests\test_distortion.py +tests\transforms\test_make_binary_rmat.py tests\transforms\test_make_binary_rmat.py diff --git a/hexrd/module_map.py b/hexrd/module_map.py index d5efdbd7e..af6535586 100644 --- a/hexrd/module_map.py +++ b/hexrd/module_map.py @@ -29,7 +29,7 @@ def path_to_module(path: Path) -> str: return path.as_posix().replace("/", ".") -HEXRD_PACKAGE_PATH = Path(__file__).parent.parent +HEXRD_PACKAGE_PATH = Path(__file__).parent file_map: dict[Path, list[Path]] = defaultdict(list) with open(HEXRD_PACKAGE_PATH / "file_table.tsv", "r") as f: for line in f: @@ -47,11 +47,15 @@ def path_to_module(path: Path) -> str: if old_path.suffix not 
in ("", ".py") or not "hexrd" in old_path.parts: continue old_module_path = path_to_module(old_path) - # TODO: This just picks one. We should probably pick the right one? We should know the right one after - # We finish the refactor. + # Default to pick the core module if it exists. Otherwise pick the first one. + selected_path = new_paths[0] + for new_path in new_paths: + if 'core' in new_path.parts: + selected_path = new_path + break module_map[old_module_path] = ( - path_to_module(new_paths[0]), - HEXRD_PACKAGE_PATH / new_paths[0], + path_to_module(selected_path), + HEXRD_PACKAGE_PATH.parent / selected_path, ) @@ -106,49 +110,37 @@ def parent(self): return self._parent -class ModuleAliasFinder(importlib.abc.MetaPathFinder): +class ModuleAliasImporter(importlib.abc.MetaPathFinder, importlib.abc.Loader): def find_spec(self, fullname, path, target=None): if fullname in module_map: mapped_module, mapped_fp = module_map[fullname] - - if mapped_fp.name != "__init__.py": - parent = mapped_module.rsplit(".", 1)[0] - else: - parent = mapped_module - - # Need to set these to be the exact same module so that class comparison - # works correctly if you are comparing classes that are imported one way with classes - # that are imported the mapped way. - sys.modules[fullname] = importlib.import_module(mapped_module) - - # We have to totally change the structure of the package, so we need a custom submodule for ModuleSpec - # ModuleSpec.parent is used for relative imports. - if mapped_fp.is_file(): - spec = ModuleSpecWithParent( - mapped_module, - importlib.machinery.SourceFileLoader( - mapped_module, mapped_fp.as_posix() - ), - origin=mapped_fp.as_posix(), - parent=parent, - is_package=mapped_fp.name == "__init__.py", - ) - # Need to set this, since ModuleSpec doesn't by defualt. - # This tells importlib to set __file__, which is used by a few things in here. - spec.has_location = True - else: - spec = ModuleSpecWithParent( - mapped_module, - importlib.machinery.NamespaceLoader( - mapped_module, - list(mapped_fp.parts), - path_finder=importlib.machinery.PathFinder.find_spec, # type: ignore - ), - parent=parent, - is_package=True, - ) - return spec + # We only want to remap modules that go somewhere else. + # If we are already trying to import something that exists, let + # the other importers take care of it so we don't just loop forever. + if fullname == mapped_module: + return None + + return importlib.machinery.ModuleSpec( + fullname, + self, + origin=mapped_fp.as_posix(), + is_package=mapped_fp.name == "__init__.py", + ) return None + def load_module(self, fullname): + """ + This is a deprecated implementation path, but it is a lot easier to do override it this way + than to override it with create and exec_module. + """ + if fullname not in module_map: + raise ImportError(f"Module {fullname} not found in module_map") + + mapped_module, _mapped_fp = module_map[fullname] + sys.modules[fullname] = importlib.import_module(mapped_module) + return sys.modules[fullname] + -sys.meta_path.append(ModuleAliasFinder()) +# We need to redirect __all__ attempts to import hexrd things into our own +# handler. 
+sys.meta_path.insert(0, ModuleAliasImporter()) diff --git a/setup.py b/setup.py index 84d3c246c..dd567034c 100644 --- a/setup.py +++ b/setup.py @@ -225,7 +225,7 @@ def get_extension_modules(): ext_modules=ext_modules, packages=find_packages(), include_package_data=True, - package_data={'': ['Anomalous.h5']}, + package_data={'': ['Anomalous.h5', 'file_table.tsv']}, python_requires='>=3.9', install_requires=install_reqs, ) From fac76ae1c2abcb02cea533734723919ac5867ddc Mon Sep 17 00:00:00 2001 From: Zack Singer Date: Wed, 4 Jun 2025 23:04:46 -0400 Subject: [PATCH 13/19] Major duplicate cleanup --- hexrd/core/instrument/detector.py | 3 +- hexrd/core/instrument/hedm_instrument.py | 3 +- hexrd/core/projections/polar.py | 4 +- hexrd/core/utils/panel_buffer.py | 4 - hexrd/file_table.tsv | 12 - hexrd/hed/instrument/__init__.py | 2 +- hexrd/hed/instrument/detector.py | 2127 -------------- hexrd/hed/instrument/hedm_instrument.py | 3006 ------------------- hexrd/hed/xrdutil/__init__.py | 1 + hexrd/hed/xrdutil/phutil.py | 4 +- hexrd/hed/xrdutil/utils.py | 1185 +------- hexrd/hedm/config/__init__.py | 4 +- hexrd/hedm/config/dumper.py | 64 - hexrd/hedm/config/fitgrains.py | 2 +- hexrd/hedm/config/instrument.py | 63 - hexrd/hedm/config/loader.py | 25 - hexrd/hedm/config/root.py | 204 -- hexrd/hedm/config/utils.py | 82 - hexrd/hedm/instrument/__init__.py | 4 +- hexrd/hedm/instrument/detector.py | 2128 -------------- hexrd/hedm/instrument/hedm_instrument.py | 3012 -------------------- hexrd/hedm/instrument/physics_package.py | 302 -- hexrd/hedm/material/crystallography.py | 2261 --------------- hexrd/hedm/material/unitcell.py | 2056 ------------- hexrd/hedm/xrdutil/__init__.py | 8 +- hexrd/hedm/xrdutil/utils.py | 505 +--- hexrd/laue/instrument/__init__.py | 4 +- hexrd/laue/instrument/detector.py | 2128 -------------- hexrd/laue/instrument/hedm_instrument.py | 3007 ------------------- hexrd/laue/material/crystallography.py | 2260 --------------- hexrd/laue/xrdutil/utils.py | 1333 +-------- hexrd/powder/instrument/__init__.py | 2 +- hexrd/powder/instrument/detector.py | 2127 -------------- hexrd/powder/instrument/hedm_instrument.py | 3007 ------------------- hexrd/powder/material/crystallography.py | 2260 --------------- tests/calibration/test_2xrs_calibration.py | 2 +- tests/calibration/test_calibration.py | 2 +- tests/calibration/test_laue_auto_pick.py | 7 +- tests/calibration/test_powder_auto_pick.py | 8 +- tests/config/test_material.py | 2 +- tests/planedata/test_with_data.py | 2 +- tests/test_absorption_correction.py | 4 +- tests/test_find_orientations.py | 2 +- 43 files changed, 81 insertions(+), 33147 deletions(-) delete mode 100644 hexrd/hed/instrument/detector.py delete mode 100644 hexrd/hed/instrument/hedm_instrument.py create mode 100644 hexrd/hed/xrdutil/__init__.py delete mode 100644 hexrd/hedm/config/dumper.py delete mode 100644 hexrd/hedm/config/instrument.py delete mode 100644 hexrd/hedm/config/loader.py delete mode 100644 hexrd/hedm/config/root.py delete mode 100644 hexrd/hedm/config/utils.py delete mode 100644 hexrd/hedm/instrument/detector.py delete mode 100644 hexrd/hedm/instrument/hedm_instrument.py delete mode 100644 hexrd/hedm/instrument/physics_package.py delete mode 100644 hexrd/hedm/material/crystallography.py delete mode 100644 hexrd/hedm/material/unitcell.py delete mode 100644 hexrd/laue/instrument/detector.py delete mode 100644 hexrd/laue/instrument/hedm_instrument.py delete mode 100644 hexrd/laue/material/crystallography.py delete mode 100644 hexrd/powder/instrument/detector.py 
delete mode 100644 hexrd/powder/instrument/hedm_instrument.py delete mode 100644 hexrd/powder/material/crystallography.py diff --git a/hexrd/core/instrument/detector.py b/hexrd/core/instrument/detector.py index 858fa21d7..ec98ed824 100644 --- a/hexrd/core/instrument/detector.py +++ b/hexrd/core/instrument/detector.py @@ -18,6 +18,7 @@ # TODO: Resolve extra-core-dependency from hexrd.hedm import xrdutil +from hexrd.hed.xrdutil import _project_on_detector_plane from hexrd.core.rotations import mapAngle from hexrd.core.material import crystallography @@ -1583,7 +1584,7 @@ def simulate_rotation_series( allAngs[:, 2] = mapAngle(allAngs[:, 2], ome_period) # find points that fall on the panel - det_xy, rMat_s, on_plane = xrdutil._project_on_detector_plane( + det_xy, rMat_s, on_plane = _project_on_detector_plane( allAngs, self.rmat, rMat_c, diff --git a/hexrd/core/instrument/hedm_instrument.py b/hexrd/core/instrument/hedm_instrument.py index bfd56af9d..fcaeb5693 100644 --- a/hexrd/core/instrument/hedm_instrument.py +++ b/hexrd/core/instrument/hedm_instrument.py @@ -69,6 +69,7 @@ # TODO: Resolve extra-core-dependency from hexrd.hedm import xrdutil +from hexrd.hed.xrdutil import _project_on_detector_plane from hexrd.core.material.crystallography import PlaneData from hexrd.core import constants as ct from hexrd.core.rotations import mapAngle @@ -1757,7 +1758,7 @@ def pull_spots( ) # find vertices that all fall on the panel - det_xy, rmats_s, on_plane = xrdutil._project_on_detector_plane( + det_xy, rmats_s, on_plane = _project_on_detector_plane( np.hstack([patch_vertices, ome_dupl]), panel.rmat, rMat_c, diff --git a/hexrd/core/projections/polar.py b/hexrd/core/projections/polar.py index fc89e9b7b..58d333b14 100644 --- a/hexrd/core/projections/polar.py +++ b/hexrd/core/projections/polar.py @@ -4,7 +4,7 @@ from hexrd.core.material.crystallography import PlaneData # TODO: Resolve extra-core-dependency -from hexrd.hedm.xrdutil.utils import ( +from hexrd.hed.xrdutil.utils import ( _project_on_detector_cylinder, _project_on_detector_plane, ) @@ -36,7 +36,7 @@ def __init__( as defined by the active hkls and the tThWidth (or strainMag). If array_like, the input must be (2, ) specifying the [min, max] 2theta values explicitly in degrees. - instrument : hexrd.hedm.instrument.HEDMInstrument + instrument : hexrd.core.instrument.HEDMInstrument The instrument object. eta_min : scalar, optional The minimum azimuthal extent in degrees. The default is 0.
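A note on the module aliasing machinery from PATCH 12 above: `ModuleAliasImporter` works because importlib falls back to a loader's deprecated `load_module()` whenever the loader defines neither `create_module()` nor `exec_module()`, and because registering the target module in `sys.modules` under the old name makes both import paths hand back the very same module object. The following is a minimal, self-contained sketch of that pattern, not hexrd code; the `old_json` alias is a made-up stand-in so the snippet can run against the stdlib:

    import importlib
    import importlib.abc
    import importlib.machinery
    import sys

    # Hypothetical alias table: old dotted path -> new dotted path.
    ALIASES = {"old_json": "json"}

    class AliasImporter(importlib.abc.MetaPathFinder, importlib.abc.Loader):
        def find_spec(self, fullname, path, target=None):
            mapped = ALIASES.get(fullname)
            # Decline names we do not remap (and identity mappings), so the
            # regular finders handle them and we cannot recurse forever.
            if mapped is None or mapped == fullname:
                return None
            return importlib.machinery.ModuleSpec(fullname, self)

        def load_module(self, fullname):
            # Import the real module and register it under the old name too,
            # so classes compare identical no matter which path imported them.
            module = importlib.import_module(ALIASES[fullname])
            sys.modules[fullname] = module
            return module

    sys.meta_path.insert(0, AliasImporter())

    import old_json  # resolved by AliasImporter to the stdlib json module
    assert old_json is sys.modules["json"]

Because the returned spec's loader only implements `load_module()`, importlib routes the import through the backward-compatible path, which is exactly the trade-off the docstring in the patch calls out.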
diff --git a/hexrd/core/utils/panel_buffer.py b/hexrd/core/utils/panel_buffer.py index 512f9d3d4..f88b408d7 100644 --- a/hexrd/core/utils/panel_buffer.py +++ b/hexrd/core/utils/panel_buffer.py @@ -1,10 +1,6 @@ import numpy as np from hexrd.core.instrument.detector import Detector -from hexrd.hed.instrument.detector import Detector -from hexrd.hedm.instrument.detector import Detector -from hexrd.powder.instrument.detector import Detector -from hexrd.laue.instrument.detector import Detector def panel_buffer_as_2d_array(panel: Detector) -> np.ndarray: diff --git a/hexrd/file_table.tsv b/hexrd/file_table.tsv index c44a8f060..ead815f99 100644 --- a/hexrd/file_table.tsv +++ b/hexrd/file_table.tsv @@ -24,9 +24,7 @@ hexrd/cli/pickle23.py hexrd/hedm/cli/pickle23.py hexrd/cli/documentation.py hexrd/hedm/cli/documentation.py hexrd/config/fitgrains.py hexrd/hedm/config/fitgrains.py hexrd/config/material.py hexrd/core/config/material.py -hexrd/config/root.py hexrd/hedm/config/root.py hexrd/config/root.py hexrd/core/config/root.py -hexrd/config/dumper.py hexrd/hedm/config/dumper.py hexrd/config/dumper.py hexrd/core/config/dumper.py hexrd/config/loader.py hexrd/hedm/config/loader.py hexrd/config/loader.py hexrd/core/config/loader.py @@ -34,7 +32,6 @@ hexrd/config/__init__.py hexrd/hedm/config/__init__.py hexrd/config/__init__.py hexrd/core/config/__init__.py hexrd/config/findorientations.py hexrd/hedm/config/findorientations.py hexrd/config/config.py hexrd/core/config/config.py -hexrd/config/utils.py hexrd/hedm/config/utils.py hexrd/config/utils.py hexrd/core/config/utils.py hexrd/config/instrument.py hexrd/hedm/config/instrument.py hexrd/config/instrument.py hexrd/core/config/instrument.py @@ -88,11 +85,7 @@ hexrd/wppf/phase.py hexrd/powder/wppf/phase.py hexrd/material/mksupport.py hexrd/core/material/mksupport.py hexrd/material/symmetry.py hexrd/core/material/symmetry.py hexrd/material/material.py hexrd/core/material/material.py -hexrd/material/crystallography.py hexrd/hedm/material/crystallography.py -hexrd/material/crystallography.py hexrd/laue/material/crystallography.py -hexrd/material/crystallography.py hexrd/powder/material/crystallography.py hexrd/material/crystallography.py hexrd/core/material/crystallography.py -hexrd/material/unitcell.py hexrd/hedm/material/unitcell.py hexrd/material/unitcell.py hexrd/core/material/unitcell.py hexrd/material/__init__.py hexrd/core/material/__init__.py hexrd/material/symbols.py hexrd/core/material/symbols.py @@ -116,13 +109,8 @@ hexrd/instrument/hedm_instrument.py hexrd/hedm/instrument/hedm_instrument.py hexrd/instrument/hedm_instrument.py hexrd/laue/instrument/hedm_instrument.py hexrd/instrument/hedm_instrument.py hexrd/powder/instrument/hedm_instrument.py hexrd/instrument/hedm_instrument.py hexrd/core/instrument/hedm_instrument.py -hexrd/instrument/physics_package.py hexrd/hedm/instrument/physics_package.py hexrd/instrument/physics_package.py hexrd/core/instrument/physics_package.py hexrd/instrument/detector.py hexrd/core/instrument/detector.py -hexrd/instrument/detector.py hexrd/hed/instrument/detector.py -hexrd/instrument/detector.py hexrd/hedm/instrument/detector.py -hexrd/instrument/detector.py hexrd/powder/instrument/detector.py -hexrd/instrument/detector.py hexrd/laue/instrument/detector.py hexrd/instrument/cylindrical_detector.py hexrd/core/instrument/cylindrical_detector.py hexrd/instrument/__init__.py hexrd/core/instrument/__init__.py hexrd/instrument/planar_detector.py hexrd/core/instrument/planar_detector.py diff --git 
a/hexrd/hed/instrument/__init__.py b/hexrd/hed/instrument/__init__.py index b5414013c..396a0d078 100644 --- a/hexrd/hed/instrument/__init__.py +++ b/hexrd/hed/instrument/__init__.py @@ -10,4 +10,4 @@ unwrap_dict_to_h5, unwrap_h5_to_dict, ) -from .detector import Detector +from hexrd.core.instrument.detector import Detector diff --git a/hexrd/hed/instrument/detector.py b/hexrd/hed/instrument/detector.py deleted file mode 100644 index 37cb8fcfe..000000000 --- a/hexrd/hed/instrument/detector.py +++ /dev/null @@ -1,2127 +0,0 @@ -from abc import abstractmethod -import copy -import os -from typing import Optional - -from hexrd.core.instrument.constants import ( - COATING_DEFAULT, - FILTER_DEFAULTS, - PHOSPHOR_DEFAULT, -) -from hexrd.core.instrument.physics_package import AbstractPhysicsPackage -import numpy as np -import numba - -from hexrd.core import constants as ct -from hexrd.core import distortion as distortion_pkg -from hexrd.core import matrixutil as mutil - -from hexrd.hedm import xrdutil -from hexrd.core.rotations import mapAngle - -from hexrd.core.material import crystallography -from hexrd.core.material.crystallography import PlaneData - -from hexrd.core.transforms.xfcapi import ( - xy_to_gvec, - gvec_to_xy, - make_beam_rmat, - make_rmat_of_expmap, - oscill_angles_of_hkls, - angles_to_dvec, -) - -from hexrd.core.utils.decorators import memoize -from hexrd.core.gridutil import cellIndices -from hexrd.core.instrument import detector_coatings -from hexrd.core.material.utils import ( - calculate_linear_absorption_length, - calculate_incoherent_scattering, -) - -distortion_registry = distortion_pkg.Registry() - -max_workers_DFLT = max(1, os.cpu_count() - 1) - -beam_energy_DFLT = 65.351 - -# Memoize these, so each detector can avoid re-computing if nothing -# has changed. -_lorentz_factor = memoize(crystallography.lorentz_factor) -_polarization_factor = memoize(crystallography.polarization_factor) - - -class Detector: - """ - Base class for 2D detectors with functions and properties - common to planar and cylindrical detectors. This class - will be inherited by both those classes. - """ - - __pixelPitchUnit = 'mm' - - # Abstract methods that must be redefined in derived classes - @property - @abstractmethod - def detector_type(self): - raise NotImplementedError - - @abstractmethod - def cart_to_angles( - self, - xy_data, - rmat_s=None, - tvec_s=None, - tvec_c=None, - apply_distortion=False, - ): - """ - Transform cartesian coordinates to angular. - - Parameters - ---------- - xy_data : TYPE - The (n, 2) array of n (x, y) coordinates to be transformed in - either the raw or ideal cartesian plane (see `apply_distortion` - kwarg below). - rmat_s : array_like, optional - The (3, 3) COB matrix for the sample frame. The default is None. - tvec_s : array_like, optional - The (3, ) translation vector for the sample frame. - The default is None. - tvec_c : array_like, optional - The (3, ) translation vector for the crystal frame. - The default is None. - apply_distortion : bool, optional - If True, apply distortion to the input cartesian coordinates. - The default is False. - - Returns - ------- - tth_eta : TYPE - DESCRIPTION. - g_vec : TYPE - DESCRIPTION. - - """ - raise NotImplementedError - - @abstractmethod - def angles_to_cart( - self, - tth_eta, - rmat_s=None, - tvec_s=None, - rmat_c=None, - tvec_c=None, - apply_distortion=False, - ): - """ - Transform angular coordinates to cartesian.
- - Parameters - ---------- - tth_eta : array_like - The (n, 2) array of n (tth, eta) coordinates to be transformed. - rmat_s : array_like, optional - The (3, 3) COB matrix for the sample frame. The default is None. - tvec_s : array_like, optional - The (3, ) translation vector for the sample frame. - The default is None. - rmat_c : array_like, optional - (3, 3) COB matrix for the crystal frame. - The default is None. - tvec_c : array_like, optional - The (3, ) translation vector for the crystal frame. - The default is None. - apply_distortion : bool, optional - If True, apply distortion to take cartesian coordinates to the - "warped" configuration. The default is False. - - Returns - ------- - xy_det : array_like - The (n, 2) array of the n input coordinates in the cartesian frame. - - """ - raise NotImplementedError - - @abstractmethod - def cart_to_dvecs(self, xy_data): - """Convert cartesian coordinates to dvectors""" - raise NotImplementedError - - @abstractmethod - def pixel_angles(self, origin=ct.zeros_3): - raise NotImplementedError - - @abstractmethod - def pixel_tth_gradient(self, origin=ct.zeros_3): - raise NotImplementedError - - @abstractmethod - def pixel_eta_gradient(self, origin=ct.zeros_3): - raise NotImplementedError - - @abstractmethod - def calc_filter_coating_transmission(self, energy): - pass - - @property - @abstractmethod - def beam_position(self): - """ - returns the coordinates of the beam in the cartesian detector - frame {Xd, Yd, Zd}. NaNs if no intersection. - """ - raise NotImplementedError - - @property - def extra_config_kwargs(self): - return {} - - # End of abstract methods - - def __init__( - self, - rows=2048, - cols=2048, - pixel_size=(0.2, 0.2), - tvec=np.r_[0.0, 0.0, -1000.0], - tilt=ct.zeros_3, - name='default', - bvec=ct.beam_vec, - xrs_dist=None, - evec=ct.eta_vec, - saturation_level=None, - panel_buffer=None, - tth_distortion=None, - roi=None, - group=None, - distortion=None, - max_workers=max_workers_DFLT, - detector_filter: Optional[detector_coatings.Filter] = None, - detector_coating: Optional[detector_coatings.Coating] = None, - phosphor: Optional[detector_coatings.Phosphor] = None, - ): - """ - Instantiate a PlanarDetector object. - - Parameters - ---------- - rows : TYPE, optional - DESCRIPTION. The default is 2048. - cols : TYPE, optional - DESCRIPTION. The default is 2048. - pixel_size : TYPE, optional - DESCRIPTION. The default is (0.2, 0.2). - tvec : TYPE, optional - DESCRIPTION. The default is np.r_[0., 0., -1000.]. - tilt : TYPE, optional - DESCRIPTION. The default is ct.zeros_3. - name : TYPE, optional - DESCRIPTION. The default is 'default'. - bvec : TYPE, optional - DESCRIPTION. The default is ct.beam_vec. - evec : TYPE, optional - DESCRIPTION. The default is ct.eta_vec. - saturation_level : TYPE, optional - DESCRIPTION. The default is None. - panel_buffer : TYPE, optional - If a scalar or len(2) array_like, the interpretation is a border - in mm. If an array with shape (nrows, ncols), interpretation is a - boolean with True marking valid pixels. The default is None. - roi : TYPE, optional - DESCRIPTION. The default is None. - group : TYPE, optional - DESCRIPTION. The default is None. - distortion : TYPE, optional - DESCRIPTION. The default is None. - detector_filter : detector_coatings.Filter, optional - filter specifications including material type, - density and thickness. Used for absorption correction - calculations.
- detector_coating : detector_coatings.Coating, optional - coating specifications including material type, - density and thickness. Used for absorption correction - calculations. - phosphor : detector_coatings.Phosphor, optional - phosphor specifications including material type, - density and thickness. Used for absorption correction - calculations. - - Returns - ------- - None. - - """ - self._name = name - - self._rows = rows - self._cols = cols - - self._pixel_size_row = pixel_size[0] - self._pixel_size_col = pixel_size[1] - - self._saturation_level = saturation_level - - self._panel_buffer = panel_buffer - - self._tth_distortion = tth_distortion - - if roi is None: - self._roi = roi - else: - assert len(roi) == 2, "roi is set via (start_row, start_col)" - self._roi = ( - (roi[0], roi[0] + self._rows), - (roi[1], roi[1] + self._cols), - ) - - self._tvec = np.array(tvec).flatten() - self._tilt = np.array(tilt).flatten() - - self._bvec = np.array(bvec).flatten() - self._xrs_dist = xrs_dist - - self._evec = np.array(evec).flatten() - - self._distortion = distortion - - self.max_workers = max_workers - - self.group = group - - if detector_filter is None: - detector_filter = detector_coatings.Filter( - **FILTER_DEFAULTS.TARDIS - ) - self.filter = detector_filter - - if detector_coating is None: - detector_coating = detector_coatings.Coating(**COATING_DEFAULT) - self.coating = detector_coating - - if phosphor is None: - phosphor = detector_coatings.Phosphor(**PHOSPHOR_DEFAULT) - self.phosphor = phosphor - - # detector ID - @property - def name(self): - return self._name - - @name.setter - def name(self, s): - assert isinstance(s, str), "requires string input" - self._name = s - - @property - def lmfit_name(self): - # lmfit requires underscores instead of dashes - return self.name.replace('-', '_') - - # properties for physical size of rectangular detector - @property - def rows(self): - return self._rows - - @rows.setter - def rows(self, x): - assert isinstance(x, int) - self._rows = x - - @property - def cols(self): - return self._cols - - @cols.setter - def cols(self, x): - assert isinstance(x, int) - self._cols = x - - @property - def pixel_size_row(self): - return self._pixel_size_row - - @pixel_size_row.setter - def pixel_size_row(self, x): - self._pixel_size_row = float(x) - - @property - def pixel_size_col(self): - return self._pixel_size_col - - @pixel_size_col.setter - def pixel_size_col(self, x): - self._pixel_size_col = float(x) - - @property - def pixel_area(self): - return self.pixel_size_row * self.pixel_size_col - - @property - def saturation_level(self): - return self._saturation_level - - @saturation_level.setter - def saturation_level(self, x): - if x is not None: - assert np.isreal(x) - self._saturation_level = x - - @property - def panel_buffer(self): - return self._panel_buffer - - @panel_buffer.setter - def panel_buffer(self, x): - """if not None, a buffer in mm (x, y)""" - if x is not None: - assert len(x) == 2 or x.ndim == 2 - self._panel_buffer = x - - @property - def tth_distortion(self): - return self._tth_distortion - - @tth_distortion.setter - def tth_distortion(self, x): - """if not None, a buffer in mm (x, y)""" - if x is not None: - assert x.ndim == 2 and x.shape == self.shape - self._tth_distortion = x - - @property - def roi(self): - return self._roi - - @roi.setter - def roi(self, vertex_array): - """ - !!! 
vertex array must be (r0, c0) - """ - if vertex_array is not None: - assert ( - len(vertex_array) == 2 - ), "roi is set via (start_row, start_col)" - self._roi = ( - (vertex_array[0], vertex_array[0] + self.rows), - (vertex_array[1], vertex_array[1] + self.cols), - ) - - @property - def row_dim(self): - return self.rows * self.pixel_size_row - - @property - def col_dim(self): - return self.cols * self.pixel_size_col - - @property - def row_pixel_vec(self): - return self.pixel_size_row * ( - 0.5 * (self.rows - 1) - np.arange(self.rows) - ) - - @property - def row_edge_vec(self): - return _row_edge_vec(self.rows, self.pixel_size_row) - - @property - def col_pixel_vec(self): - return self.pixel_size_col * ( - np.arange(self.cols) - 0.5 * (self.cols - 1) - ) - - @property - def col_edge_vec(self): - return _col_edge_vec(self.cols, self.pixel_size_col) - - @property - def corner_ul(self): - return np.r_[-0.5 * self.col_dim, 0.5 * self.row_dim] - - @property - def corner_ll(self): - return np.r_[-0.5 * self.col_dim, -0.5 * self.row_dim] - - @property - def corner_lr(self): - return np.r_[0.5 * self.col_dim, -0.5 * self.row_dim] - - @property - def corner_ur(self): - return np.r_[0.5 * self.col_dim, 0.5 * self.row_dim] - - @property - def shape(self): - return (self.rows, self.cols) - - @property - def tvec(self): - return self._tvec - - @tvec.setter - def tvec(self, x): - x = np.array(x).flatten() - assert len(x) == 3, 'input must have length = 3' - self._tvec = x - - @property - def tilt(self): - return self._tilt - - @tilt.setter - def tilt(self, x): - assert len(x) == 3, 'input must have length = 3' - self._tilt = np.array(x).squeeze() - - @property - def bvec(self): - return self._bvec - - @bvec.setter - def bvec(self, x): - x = np.array(x).flatten() - assert ( - len(x) == 3 and sum(x * x) > 1 - ct.sqrt_epsf - ), 'input must have length = 3 and have unit magnitude' - self._bvec = x - - @property - def xrs_dist(self): - return self._xrs_dist - - @xrs_dist.setter - def xrs_dist(self, x): - assert x is None or np.isscalar( - x - ), f"'source_distance' must be None or scalar; you input '{x}'" - self._xrs_dist = x - - @property - def evec(self): - return self._evec - - @evec.setter - def evec(self, x): - x = np.array(x).flatten() - assert ( - len(x) == 3 and sum(x * x) > 1 - ct.sqrt_epsf - ), 'input must have length = 3 and have unit magnitude' - self._evec = x - - @property - def distortion(self): - return self._distortion - - @distortion.setter - def distortion(self, x): - if x is not None: - registry = distortion_registry.distortion_registry - check_arg = np.zeros(len(registry), dtype=bool) - for i, dcls in enumerate(registry.values()): - check_arg[i] = isinstance(x, dcls) - assert np.any(check_arg), 'input distortion is not in registry!' - self._distortion = x - - @property - def rmat(self): - return make_rmat_of_expmap(self.tilt) - - @property - def normal(self): - return self.rmat[:, 2] - - # ...memoize??? - @property - def pixel_coords(self): - pix_i, pix_j = np.meshgrid( - self.row_pixel_vec, self.col_pixel_vec, indexing='ij' - ) - return pix_i, pix_j - - # ========================================================================= - # METHODS - # ========================================================================= - - def pixel_Q( - self, energy: np.floating, origin: np.ndarray = ct.zeros_3 - ) -> np.ndarray: - '''get the equivalent momentum transfer - for the angles. 
- - Parameters - ---------- - energy: float - incident photon energy in keV - origin: np.ndarray - origin of diffraction volume - - Returns - ------- - np.ndarray - pixel wise Q in A^-1 - - ''' - lam = ct.keVToAngstrom(energy) - tth, _ = self.pixel_angles(origin=origin) - return 4.0 * np.pi * np.sin(tth * 0.5) / lam - - def pixel_compton_energy_loss( - self, - energy: np.floating, - origin: np.ndarray = ct.zeros_3, - ) -> np.ndarray: - '''inelastic compton scattering leads - to energy loss of the incident photons. - compute the final energy of the photons - for each pixel. - - Parameters - ---------- - energy: float - incident photon energy in keV - origin: np.ndarray - origin of diffraction volume - - Returns - ------- - np.ndarray - pixel wise energy of inelastically - scattered photons in keV - ''' - energy = np.asarray(energy) - tth, _ = self.pixel_angles() - ang_fact = 1 - np.cos(tth) - beta = energy / ct.cRestmasskeV - return energy / (1 + beta * ang_fact) - - def pixel_compton_attenuation_length( - self, - energy: np.floating, - density: np.floating, - formula: str, - origin: np.ndarray = ct.zeros_3, - ) -> np.ndarray: - '''each pixel intercepts inelastically - scattered photons of different energy. - the attenuation length and the transmission - for these photons are different. this function - calculates the attenuation length for each pixel - on the detector. - - Parameters - ---------- - energy: float - incident photon energy in keV - density: float - density of material in g/cc - formula: str - formula of the material scattering - origin: np.ndarray - origin of diffraction volume - - Returns - ------- - np.ndarray - pixel wise attenuation length of compton - scattered photons - ''' - pixel_energy = self.pixel_compton_energy_loss(energy) - - pixel_attenuation_length = calculate_linear_absorption_length( - density, - formula, - pixel_energy.flatten(), - ) - return pixel_attenuation_length.reshape(self.shape) - - def compute_compton_scattering_intensity( - self, - energy: np.floating, - rMat_s: np.array, - physics_package: AbstractPhysicsPackage, - origin: np.array = ct.zeros_3, - ) -> tuple[np.ndarray, np.ndarray, np.ndarray]: - '''compute the theoretical compton scattering - signal on the detector. this value is corrected - for the transmission of compton scattered photons - and normalized before being subtracted from the - raw intensity - - Parameters - ----------- - energy: float - energy of incident photon - rMat_s: np.ndarray - rotation matrix of sample orientation - physics_package: AbstractPhysicsPackage - physics package information - Returns - ------- - compton_intensity: np.ndarray - transmission corrected compton scattering - intensity - ''' - - q = self.pixel_Q(energy) - inc_s = calculate_incoherent_scattering( - physics_package.sample_material, q.flatten() - ).reshape(self.shape) - - inc_w = calculate_incoherent_scattering( - physics_package.window_material, q.flatten() - ).reshape(self.shape) - - t_s = self.calc_compton_physics_package_transmission( - energy, rMat_s, physics_package - ) - - t_w = self.calc_compton_window_transmission( - energy, rMat_s, physics_package - ) - - return inc_s * t_s + inc_w * t_w, t_s, t_w - - def polarization_factor(self, f_hor, f_vert, unpolarized=False): - """ - Calculate the polarization factor for every pixel. - - Parameters - ---------- - f_hor : float - the fraction of horizontal polarization. for XFELs - this is close to 1. - f_vert : TYPE - the fraction of vertical polarization, which is ~0 for XFELs.
- - Raises - ------ - RuntimeError - DESCRIPTION. - - Returns - ------- - TYPE - DESCRIPTION. - - """ - s = f_hor + f_vert - if np.abs(s - 1) > ct.sqrt_epsf: - msg = ( - "sum of fraction of " - "horizontal and vertical polarizations " - "must be equal to 1." - ) - raise RuntimeError(msg) - - if f_hor < 0 or f_vert < 0: - msg = ( - "fraction of polarization in horizontal " - "or vertical directions can't be negative." - ) - raise RuntimeError(msg) - - tth, eta = self.pixel_angles() - kwargs = { - 'tth': tth, - 'eta': eta, - 'f_hor': f_hor, - 'f_vert': f_vert, - 'unpolarized': unpolarized, - } - - return _polarization_factor(**kwargs) - - def lorentz_factor(self): - """ - calculate the lorentz factor for every pixel - - Parameters - ---------- - None - - Raises - ------ - None - - Returns - ------- - numpy.ndarray - returns an array the same size as the detector panel - with each element containing the lorentz factor of the - corresponding pixel - """ - tth, eta = self.pixel_angles() - return _lorentz_factor(tth) - - def config_dict( - self, - chi=0, - tvec=ct.zeros_3, - beam_energy=beam_energy_DFLT, - beam_vector=ct.beam_vec, - sat_level=None, - panel_buffer=None, - style='yaml', - ): - """ - Return a dictionary of detector parameters. - - Optional instrument level parameters. This is a convenience function - to work with the APIs in several functions in xrdutil. - - Parameters - ---------- - chi : float, optional - DESCRIPTION. The default is 0. - tvec : array_like (3,), optional - DESCRIPTION. The default is ct.zeros_3. - beam_energy : float, optional - DESCRIPTION. The default is beam_energy_DFLT. - beam_vector : array_like (3,), optional - DESCRIPTION. The default is ct.beam_vec. - sat_level : scalar, optional - DESCRIPTION. The default is None. - panel_buffer : scalar, array_like (2,), optional - DESCRIPTION. The default is None. - - Returns - ------- - config_dict : dict - DESCRIPTION.
- - """ - assert style.lower() in ['yaml', 'hdf5'], ( - "style must be either 'yaml', or 'hdf5'; you gave '%s'" % style - ) - - config_dict = {} - - # ===================================================================== - # DETECTOR PARAMETERS - # ===================================================================== - # transform and pixels - # - # assign local vars; listify if necessary - tilt = self.tilt - translation = self.tvec - roi = ( - None - if self.roi is None - else np.array([self.roi[0][0], self.roi[1][0]]).flatten() - ) - if style.lower() == 'yaml': - tilt = tilt.tolist() - translation = translation.tolist() - tvec = tvec.tolist() - roi = None if roi is None else roi.tolist() - - det_dict = dict( - detector_type=self.detector_type, - transform=dict( - tilt=tilt, - translation=translation, - ), - pixels=dict( - rows=int(self.rows), - columns=int(self.cols), - size=[float(self.pixel_size_row), float(self.pixel_size_col)], - ), - ) - - if roi is not None: - # Only add roi if it is not None - det_dict['pixels']['roi'] = roi - - if self.group is not None: - # Only add group if it is not None - det_dict['group'] = self.group - - # distortion - if self.distortion is not None: - dparams = self.distortion.params - if style.lower() == 'yaml': - dparams = dparams.tolist() - dist_d = dict( - function_name=self.distortion.maptype, parameters=dparams - ) - det_dict['distortion'] = dist_d - - # saturation level - if sat_level is None: - sat_level = self.saturation_level - det_dict['saturation_level'] = float(sat_level) - - # panel buffer - if panel_buffer is None: - # could be none, a 2-element list, or a 2-d array (rows, cols) - panel_buffer = copy.deepcopy(self.panel_buffer) - # !!! now we have to do some style-dependent munging of panel_buffer - if isinstance(panel_buffer, np.ndarray): - if panel_buffer.ndim == 1: - assert len(panel_buffer) == 2, "length of 1-d buffer must be 2" - # if here is a 2-element array - if style.lower() == 'yaml': - panel_buffer = panel_buffer.tolist() - elif panel_buffer.ndim == 2: - if style.lower() == 'yaml': - # !!! can't practically write array-like buffers to YAML - # so forced to clobber - print("clobbering panel buffer array in yaml-ready output") - panel_buffer = [0.0, 0.0] - else: - raise RuntimeError( - "panel buffer ndim must be 1 or 2; you specified %d" - % panel_buffer.ndmin - ) - elif panel_buffer is None: - # still None on self - # !!! this gets handled by unwrap_dict_to_h5 now - - # if style.lower() == 'hdf5': - # # !!! can't write None to hdf5; substitute with zeros - # panel_buffer = np.r_[0., 0.] - pass - det_dict['buffer'] = panel_buffer - - det_dict.update(self.extra_config_kwargs) - - # ===================================================================== - # SAMPLE STAGE PARAMETERS - # ===================================================================== - stage_dict = dict(chi=chi, translation=tvec) - - # ===================================================================== - # BEAM PARAMETERS - # ===================================================================== - # !!! 
make_reflection_patches is still using the vector - # azim, pola = calc_angles_from_beam_vec(beam_vector) - # beam_dict = dict( - # energy=beam_energy, - # vector=dict( - # azimuth=azim, - # polar_angle=pola - # ) - # ) - beam_dict = dict(energy=beam_energy, vector=beam_vector) - - config_dict['detector'] = det_dict - config_dict['oscillation_stage'] = stage_dict - config_dict['beam'] = beam_dict - - return config_dict - - def cartToPixel(self, xy_det, pixels=False, apply_distortion=False): - """ - Converts cartesian coordinates to pixel coordinates - - Parameters - ---------- - xy_det : array_like - The (n, 2) vstacked array of (x, y) pairs in the reference - cartesian frame (possibly subject to distortion). - pixels : bool, optional - If True, return discrete pixel indices; otherwise fractional pixel - coordinates are returned. The default is False. - apply_distortion : bool, optional - If True, apply self.distortion to the input (if applicable). - The default is False. - - Returns - ------- - ij_det : array_like - The (n, 2) array of vstacked (i, j) coordinates in the pixel - reference frame where i is the (slow) row dimension and j is the - (fast) column dimension. - - """ - xy_det = np.atleast_2d(xy_det) - if apply_distortion and self.distortion is not None: - xy_det = self.distortion.apply(xy_det) - - npts = len(xy_det) - - tmp_ji = xy_det - np.tile(self.corner_ul, (npts, 1)) - i_pix = -tmp_ji[:, 1] / self.pixel_size_row - 0.5 - j_pix = tmp_ji[:, 0] / self.pixel_size_col - 0.5 - - ij_det = np.vstack([i_pix, j_pix]).T - if pixels: - # Hide any runtime warnings in this conversion. Their output values - # will certainly be off the detector, which is fine. - with np.errstate(invalid='ignore'): - ij_det = np.array(np.round(ij_det), dtype=int) - - return ij_det - - def pixelToCart(self, ij_det): - """ - Convert a vstacked array or list of [i,j] pixel indices - (or UL corner-based points) to (x,y) in the - cartesian frame {Xd, Yd, Zd} - """ - ij_det = np.atleast_2d(ij_det) - - x = (ij_det[:, 1] + 0.5) * self.pixel_size_col + self.corner_ll[0] - y = ( - self.rows - ij_det[:, 0] - 0.5 - ) * self.pixel_size_row + self.corner_ll[1] - return np.vstack([x, y]).T - - def angularPixelSize(self, xy, rMat_s=None, tVec_s=None, tVec_c=None): - """ - Notes - ----- - !!! assumes xy are in raw (distorted) frame, if applicable - """ - # munge kwargs - if rMat_s is None: - rMat_s = ct.identity_3x3 - if tVec_s is None: - tVec_s = ct.zeros_3x1 - if tVec_c is None: - tVec_c = ct.zeros_3x1 - - # FIXME: perhaps not necessary, but safe... - xy = np.atleast_2d(xy) - - ''' - # --------------------------------------------------------------------- - # TODO: needs testing and memoized gradient arrays!
- # --------------------------------------------------------------------- - # need origin arg - origin = np.dot(rMat_s, tVec_c).flatten() + tVec_s.flatten() - - # get pixel indices - i_crds = cellIndices(self.row_edge_vec, xy[:, 1]) - j_crds = cellIndices(self.col_edge_vec, xy[:, 0]) - - ptth_grad = self.pixel_tth_gradient(origin=origin)[i_crds, j_crds] - peta_grad = self.pixel_eta_gradient(origin=origin)[i_crds, j_crds] - - return np.vstack([ptth_grad, peta_grad]).T - ''' - # call xrdutil function - ang_ps = xrdutil.angularPixelSize( - xy, - (self.pixel_size_row, self.pixel_size_col), - self.rmat, - rMat_s, - self.tvec, - tVec_s, - tVec_c, - distortion=self.distortion, - beamVec=self.bvec, - etaVec=self.evec, - ) - return ang_ps - - def clip_to_panel(self, xy, buffer_edges=True): - """ - if self.roi is not None, uses it by default - - TODO: check if need shape kwarg - TODO: optimize ROI search better than list comprehension below - TODO: panel_buffer can be a 2-d boolean mask, but needs testing - - """ - xy = np.atleast_2d(xy) - - ''' - # !!! THIS LOGIC IS OBSOLETE - if self.roi is not None: - ij_crds = self.cartToPixel(xy, pixels=True) - ii, jj = polygon(self.roi[:, 0], self.roi[:, 1], - shape=(self.rows, self.cols)) - on_panel_rows = [i in ii for i in ij_crds[:, 0]] - on_panel_cols = [j in jj for j in ij_crds[:, 1]] - on_panel = np.logical_and(on_panel_rows, on_panel_cols) - else: - ''' - xlim = 0.5 * self.col_dim - ylim = 0.5 * self.row_dim - if buffer_edges and self.panel_buffer is not None: - if self.panel_buffer.ndim == 2: - pix = self.cartToPixel(xy, pixels=True) - - roff = np.logical_or(pix[:, 0] < 0, pix[:, 0] >= self.rows) - coff = np.logical_or(pix[:, 1] < 0, pix[:, 1] >= self.cols) - - idx = np.logical_or(roff, coff) - - on_panel = np.full(pix.shape[0], False) - valid_pix = pix[~idx, :] - on_panel[~idx] = self.panel_buffer[ - valid_pix[:, 0], valid_pix[:, 1] - ] - else: - xlim -= self.panel_buffer[0] - ylim -= self.panel_buffer[1] - on_panel_x = np.logical_and( - xy[:, 0] >= -xlim, xy[:, 0] <= xlim - ) - on_panel_y = np.logical_and( - xy[:, 1] >= -ylim, xy[:, 1] <= ylim - ) - on_panel = np.logical_and(on_panel_x, on_panel_y) - elif not buffer_edges or self.panel_buffer is None: - on_panel_x = np.logical_and(xy[:, 0] >= -xlim, xy[:, 0] <= xlim) - on_panel_y = np.logical_and(xy[:, 1] >= -ylim, xy[:, 1] <= ylim) - on_panel = np.logical_and(on_panel_x, on_panel_y) - return xy[on_panel, :], on_panel - - def interpolate_nearest(self, xy, img, pad_with_nans=True): - """ - TODO: revisit normalization in here? - - """ - is_2d = img.ndim == 2 - right_shape = img.shape[0] == self.rows and img.shape[1] == self.cols - assert ( - is_2d and right_shape - ), "input image must be 2-d with shape (%d, %d)" % ( - self.rows, - self.cols, - ) - - # initialize output with nans - if pad_with_nans: - int_xy = np.nan * np.ones(len(xy)) - else: - int_xy = np.zeros(len(xy)) - - # clip away points too close to or off the edges of the detector - xy_clip, on_panel = self.clip_to_panel(xy, buffer_edges=True) - - # get pixel indices of clipped points - i_src = cellIndices(self.row_pixel_vec, xy_clip[:, 1]) - j_src = cellIndices(self.col_pixel_vec, xy_clip[:, 0]) - - # next interpolate across cols - int_vals = img[i_src, j_src] - int_xy[on_panel] = int_vals - return int_xy - - def interpolate_bilinear( - self, - xy, - img, - pad_with_nans=True, - clip_to_panel=True, - on_panel: Optional[np.ndarray] = None, - ): - """ - Interpolate an image array at the specified cartesian points. 
- - Parameters - ---------- - xy : array_like, (n, 2) - Array of cartesian coordinates in the image plane at which - to evaluate intensity. - img : array_like - 2-dimensional image array. - pad_with_nans : bool, optional - Toggle for assigning NaN to points that fall off the detector. - The default is True. - on_panel : np.ndarray, optional - If you want to skip clip_to_panel() for performance reasons, - just provide an array of which pixels are on the panel. - - Returns - ------- - int_xy : array_like, (n,) - The array of interpolated intensities at each of the n input - coordinates. - - Notes - ----- - TODO: revisit normalization in here? - """ - - is_2d = img.ndim == 2 - right_shape = img.shape[0] == self.rows and img.shape[1] == self.cols - assert ( - is_2d and right_shape - ), "input image must be 2-d with shape (%d, %d)" % ( - self.rows, - self.cols, - ) - - # initialize output with nans - if pad_with_nans: - int_xy = np.nan * np.ones(len(xy)) - else: - int_xy = np.zeros(len(xy)) - - if on_panel is None: - # clip away points too close to or off the edges of the detector - xy_clip, on_panel = self.clip_to_panel(xy, buffer_edges=True) - else: - xy_clip = xy[on_panel] - - # grab fractional pixel indices of clipped points - ij_frac = self.cartToPixel(xy_clip) - - # get floors/ceils from array of pixel _centers_ - # and fix indices running off the pixel centers - # !!! notice we already clipped points to the panel! - i_floor = cellIndices(self.row_pixel_vec, xy_clip[:, 1]) - i_floor_img = _fix_indices(i_floor, 0, self.rows - 1) - - j_floor = cellIndices(self.col_pixel_vec, xy_clip[:, 0]) - j_floor_img = _fix_indices(j_floor, 0, self.cols - 1) - - # ceilings from floors - i_ceil = i_floor + 1 - i_ceil_img = _fix_indices(i_ceil, 0, self.rows - 1) - - j_ceil = j_floor + 1 - j_ceil_img = _fix_indices(j_ceil, 0, self.cols - 1) - - # first interpolate at top/bottom rows - row_floor_int = (j_ceil - ij_frac[:, 1]) * img[ - i_floor_img, j_floor_img - ] + (ij_frac[:, 1] - j_floor) * img[i_floor_img, j_ceil_img] - row_ceil_int = (j_ceil - ij_frac[:, 1]) * img[ - i_ceil_img, j_floor_img - ] + (ij_frac[:, 1] - j_floor) * img[i_ceil_img, j_ceil_img] - - # next interpolate across cols - int_vals = (i_ceil - ij_frac[:, 0]) * row_floor_int + ( - ij_frac[:, 0] - i_floor - ) * row_ceil_int - int_xy[on_panel] = int_vals - return int_xy - - def make_powder_rings( - self, - pd, - merge_hkls=False, - delta_tth=None, - delta_eta=10.0, - eta_period=None, - eta_list=None, - rmat_s=ct.identity_3x3, - tvec_s=ct.zeros_3, - tvec_c=ct.zeros_3, - full_output=False, - tth_distortion=None, - ): - """ - Generate points on Debye-Scherrer rings over the detector. - - !!! it is assuming that rmat_s is built from (chi, ome) as is the case - for HEDM! - - Parameters - ---------- - pd : TYPE - DESCRIPTION. - merge_hkls : TYPE, optional - DESCRIPTION. The default is False. - delta_tth : TYPE, optional - DESCRIPTION. The default is None. - delta_eta : TYPE, optional - DESCRIPTION. The default is 10.. - eta_period : TYPE, optional - DESCRIPTION. The default is None. - eta_list : TYPE, optional - DESCRIPTION. The default is None. - rmat_s : TYPE, optional - DESCRIPTION. The default is ct.identity_3x3. - tvec_s : TYPE, optional - DESCRIPTION. The default is ct.zeros_3. - tvec_c : TYPE, optional - DESCRIPTION. The default is ct.zeros_3. - full_output : TYPE, optional - DESCRIPTION. The default is False. - tth_distortion : special class, optional - Special distortion class. The default is None.
- - Raises - ------ - RuntimeError - DESCRIPTION. - - Returns - ------- - TYPE - DESCRIPTION. - - """ - if tth_distortion is not None: - tnorms = mutil.rowNorm(np.vstack([tvec_s, tvec_c])) - assert ( - np.all(tnorms) < ct.sqrt_epsf - ), "If using a distortion function, translations must be zero" - - # in case you want to give it tth angles directly - if isinstance(pd, PlaneData): - pd = PlaneData(None, pd) - if delta_tth is not None: - pd.tThWidth = np.radians(delta_tth) - else: - delta_tth = np.degrees(pd.tThWidth) - - # !!! conversions, meh... - del_eta = np.radians(delta_eta) - - # do merging if asked - if merge_hkls: - _, tth_ranges = pd.getMergedRanges(cullDupl=True) - tth = np.average(tth_ranges, axis=1) - else: - tth_ranges = pd.getTThRanges() - tth = pd.getTTh() - tth_pm = tth_ranges - np.tile(tth, (2, 1)).T - sector_vertices = np.vstack( - [ - [ - i[0], - -del_eta, - i[0], - del_eta, - i[1], - del_eta, - i[1], - -del_eta, - 0.0, - 0.0, - ] - for i in tth_pm - ] - ) - else: - # Okay, we have an array-like tth specification - tth = np.array(pd).flatten() - if delta_tth is None: - raise RuntimeError( - "If supplying a 2theta list as first arg, " - + "must supply a delta_tth" - ) - tth_pm = 0.5 * delta_tth * np.r_[-1.0, 1.0] - tth_ranges = np.radians([i + tth_pm for i in tth]) # !!! units - sector_vertices = np.tile( - 0.5 - * np.radians( - [ - -delta_tth, - -delta_eta, - -delta_tth, - delta_eta, - delta_tth, - delta_eta, - delta_tth, - -delta_eta, - 0.0, - 0.0, - ] - ), - (len(tth), 1), - ) - # !! conversions, meh... - tth = np.radians(tth) - del_eta = np.radians(delta_eta) - - # for generating rings, make eta vector in correct period - if eta_period is None: - eta_period = (-np.pi, np.pi) - - if eta_list is None: - neta = int(360.0 / float(delta_eta)) - # this is the vector of ETA EDGES - eta_edges = mapAngle( - np.radians(delta_eta * np.linspace(0.0, neta, num=neta + 1)) - + eta_period[0], - eta_period, - ) - - # get eta bin centers from edges - """ - # !!! this way is probably overkill, since we have delta eta - eta_centers = np.average( - np.vstack([eta[:-1], eta[1:]), - axis=0) - """ - # !!! should be safe as eta_edges are monotonic - eta_centers = eta_edges[:-1] + 0.5 * del_eta - else: - eta_centers = np.radians(eta_list).flatten() - neta = len(eta_centers) - eta_edges = ( - np.tile(eta_centers, (2, 1)) - + np.tile(0.5 * del_eta * np.r_[-1, 1], (neta, 1)).T - ).T.flatten() - - # get chi and ome from rmat_s - # !!! API ambiguity - # !!! this assumes rmat_s was made from the composition - # !!! rmat_s = R(Xl, chi) * R(Yl, ome) - ome = np.arctan2(rmat_s[0, 2], rmat_s[0, 0]) - - # make list of angle tuples - angs = [ - np.vstack([i * np.ones(neta), eta_centers, ome * np.ones(neta)]) - for i in tth - ] - - # need xy coords and pixel sizes - valid_ang = [] - valid_xy = [] - map_indices = [] - npp = 5 # [ll, ul, ur, lr, center] - for i_ring in range(len(angs)): - # expand angles to patch vertices - these_angs = angs[i_ring].T - - # push to vertices to see who falls off - # FIXME: clipping is not checking if masked regions are on the - # patch interior - patch_vertices = ( - np.tile(these_angs[:, :2], (1, npp)) - + np.tile(sector_vertices[i_ring], (neta, 1)) - ).reshape(npp * neta, 2) - - # find vertices that all fall on the panel - # !!!
not API ambiguity regarding rmat_s above - all_xy = self.angles_to_cart( - patch_vertices, - rmat_s=rmat_s, - tvec_s=tvec_s, - rmat_c=None, - tvec_c=tvec_c, - apply_distortion=True, - ) - - _, on_panel = self.clip_to_panel(all_xy) - - # all vertices must be on... - - patch_is_on = np.all(on_panel.reshape(neta, npp), axis=1) - patch_xys = all_xy.reshape(neta, 5, 2)[patch_is_on] - - # !!! Have to apply after clipping, distortion can get wonky near - # the edeg of the panel, and it is assumed to be <~1 deg - # !!! The tth_ranges are NOT correct! - if tth_distortion is not None: - patch_valid_angs = tth_distortion.apply( - self.angles_to_cart(these_angs[patch_is_on, :2]), - return_nominal=True, - ) - patch_valid_xys = self.angles_to_cart( - patch_valid_angs, apply_distortion=True - ) - else: - patch_valid_angs = these_angs[patch_is_on, :2] - patch_valid_xys = patch_xys[:, -1, :].squeeze() - - # form output arrays - valid_ang.append(patch_valid_angs) - valid_xy.append(patch_valid_xys) - map_indices.append(patch_is_on) - # ??? is this option necessary? - if full_output: - return valid_ang, valid_xy, tth_ranges, map_indices, eta_edges - else: - return valid_ang, valid_xy, tth_ranges - - def map_to_plane(self, pts, rmat, tvec): - """ - Map detctor points to specified plane. - - Parameters - ---------- - pts : TYPE - DESCRIPTION. - rmat : TYPE - DESCRIPTION. - tvec : TYPE - DESCRIPTION. - - Returns - ------- - TYPE - DESCRIPTION. - - Notes - ----- - by convention: - - n * (u*pts_l - tvec) = 0 - - [pts]_l = rmat*[pts]_m + tvec - - """ - # arg munging - pts = np.atleast_2d(pts) - npts = len(pts) - - # map plane normal & translation vector, LAB FRAME - nvec_map_lab = rmat[:, 2].reshape(3, 1) - tvec_map_lab = np.atleast_2d(tvec).reshape(3, 1) - tvec_d_lab = np.atleast_2d(self.tvec).reshape(3, 1) - - # put pts as 3-d in panel CS and transform to 3-d lab coords - pts_det = np.hstack([pts, np.zeros((npts, 1))]) - pts_lab = np.dot(self.rmat, pts_det.T) + tvec_d_lab - - # scaling along pts vectors to hit map plane - u = np.dot(nvec_map_lab.T, tvec_map_lab) / np.dot( - nvec_map_lab.T, pts_lab - ) - - # pts on map plane, in LAB FRAME - pts_map_lab = np.tile(u, (3, 1)) * pts_lab - - return np.dot(rmat.T, pts_map_lab - tvec_map_lab)[:2, :].T - - def simulate_rotation_series( - self, - plane_data, - grain_param_list, - eta_ranges=[ - (-np.pi, np.pi), - ], - ome_ranges=[ - (-np.pi, np.pi), - ], - ome_period=(-np.pi, np.pi), - chi=0.0, - tVec_s=ct.zeros_3, - wavelength=None, - ): - """ - Simulate a monochromatic rotation series for a list of grains. - - Parameters - ---------- - plane_data : TYPE - DESCRIPTION. - grain_param_list : TYPE - DESCRIPTION. - eta_ranges : TYPE, optional - DESCRIPTION. The default is [(-np.pi, np.pi), ]. - ome_ranges : TYPE, optional - DESCRIPTION. The default is [(-np.pi, np.pi), ]. - ome_period : TYPE, optional - DESCRIPTION. The default is (-np.pi, np.pi). - chi : TYPE, optional - DESCRIPTION. The default is 0.. - tVec_s : TYPE, optional - DESCRIPTION. The default is ct.zeros_3. - wavelength : TYPE, optional - DESCRIPTION. The default is None. - - Returns - ------- - valid_ids : TYPE - DESCRIPTION. - valid_hkls : TYPE - DESCRIPTION. - valid_angs : TYPE - DESCRIPTION. - valid_xys : TYPE - DESCRIPTION. - ang_pixel_size : TYPE - DESCRIPTION. 
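Referring back to map_to_plane above: the scale factor u solves the stated plane convention n . (u * pts_l - tvec) = 0 for each lab-frame point, i.e. u = (n . tvec) / (n . pts_l). A tiny numeric sketch with assumed example values (not taken from the module):

    import numpy as np

    n = np.array([0.0, 0.0, 1.0])    # map-plane normal, lab frame (rmat[:, 2])
    t = np.array([0.0, 0.0, -10.0])  # map-plane origin, lab frame (tvec)
    p = np.array([1.0, 2.0, -20.0])  # a detector point in lab coordinates

    u = n.dot(t) / n.dot(p)          # scaling along the ray 0 -> p
    p_map = u * p                    # intersection with the map plane
    assert np.isclose(n.dot(p_map - t), 0.0)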
- - """ - # grab B-matrix from plane data - bMat = plane_data.latVecOps['B'] - - # reconcile wavelength - # * added sanity check on exclusions here; possible to - # * make some reflections invalid (NaN) - if wavelength is None: - wavelength = plane_data.wavelength - else: - if plane_data.wavelength != wavelength: - plane_data.wavelength = ct.keVToAngstrom(wavelength) - assert not np.any( - np.isnan(plane_data.getTTh()) - ), "plane data exclusions incompatible with wavelength" - - # vstacked G-vector id, h, k, l - full_hkls = xrdutil._fetch_hkls_from_planedata(plane_data) - - """ LOOP OVER GRAINS """ - valid_ids = [] - valid_hkls = [] - valid_angs = [] - valid_xys = [] - ang_pixel_size = [] - for gparm in grain_param_list: - - # make useful parameters - rMat_c = make_rmat_of_expmap(gparm[:3]) - tVec_c = gparm[3:6] - vInv_s = gparm[6:] - - # All possible bragg conditions as vstacked [tth, eta, ome] - # for each omega solution - angList = np.vstack( - oscill_angles_of_hkls( - full_hkls[:, 1:], - chi, - rMat_c, - bMat, - wavelength, - v_inv=vInv_s, - beam_vec=self.bvec, - ) - ) - - # filter by eta and omega ranges - # ??? get eta range from detector? - allAngs, allHKLs = xrdutil._filter_hkls_eta_ome( - full_hkls, angList, eta_ranges, ome_ranges - ) - allAngs[:, 2] = mapAngle(allAngs[:, 2], ome_period) - - # find points that fall on the panel - det_xy, rMat_s, on_plane = xrdutil._project_on_detector_plane( - allAngs, - self.rmat, - rMat_c, - chi, - self.tvec, - tVec_c, - tVec_s, - self.distortion, - self.bvec, - ) - xys_p, on_panel = self.clip_to_panel(det_xy) - valid_xys.append(xys_p) - - # filter angs and hkls that are on the detector plane - # !!! check this -- seems unnecessary but the results of - # _project_on_detector_plane() can have len < the input? - # the output of _project_on_detector_plane has been modified to - # hand back the index array to remedy this JVB 2020-05-27 - if np.any(~on_plane): - allAngs = np.atleast_2d(allAngs[on_plane, :]) - allHKLs = np.atleast_2d(allHKLs[on_plane, :]) - - # grab hkls and gvec ids for this panel - valid_hkls.append(allHKLs[on_panel, 1:]) - valid_ids.append(allHKLs[on_panel, 0]) - - # reflection angles (voxel centers) and pixel size in (tth, eta) - valid_angs.append(allAngs[on_panel, :]) - ang_pixel_size.append(self.angularPixelSize(xys_p)) - return valid_ids, valid_hkls, valid_angs, valid_xys, ang_pixel_size - - def simulate_laue_pattern( - self, - crystal_data, - minEnergy=5.0, - maxEnergy=35.0, - rmat_s=None, - tvec_s=None, - grain_params=None, - beam_vec=None, - ): - """ """ - if isinstance(crystal_data, PlaneData): - - plane_data = crystal_data - - # grab the expanded list of hkls from plane_data - hkls = np.hstack(plane_data.getSymHKLs()) - - # and the unit plane normals (G-vectors) in CRYSTAL FRAME - gvec_c = np.dot(plane_data.latVecOps['B'], hkls) - - # Filter out g-vectors going in the wrong direction. `gvec_to_xy()` used - # to do this, but not anymore. - to_keep = np.dot(gvec_c.T, self.bvec) <= 0 - - hkls = hkls[:, to_keep] - gvec_c = gvec_c[:, to_keep] - elif len(crystal_data) == 2: - # !!! 
should clean this up - hkls = np.array(crystal_data[0]) - bmat = crystal_data[1] - gvec_c = np.dot(bmat, hkls) - else: - raise RuntimeError( - f'argument list not understood: {crystal_data=}' - ) - nhkls_tot = hkls.shape[1] - - # parse energy ranges - # TODO: allow for spectrum parsing - multipleEnergyRanges = False - if hasattr(maxEnergy, '__len__'): - assert len(maxEnergy) == len( - minEnergy - ), 'energy cutoff ranges must have the same length' - multipleEnergyRanges = True - lmin = [] - lmax = [] - for i in range(len(maxEnergy)): - lmin.append(ct.keVToAngstrom(maxEnergy[i])) - lmax.append(ct.keVToAngstrom(minEnergy[i])) - else: - lmin = ct.keVToAngstrom(maxEnergy) - lmax = ct.keVToAngstrom(minEnergy) - - # parse grain parameters kwarg - if grain_params is None: - grain_params = np.atleast_2d( - np.hstack([np.zeros(6), ct.identity_6x1]) - ) - n_grains = len(grain_params) - - # sample rotation - if rmat_s is None: - rmat_s = ct.identity_3x3 - - # dummy translation vector... make input - if tvec_s is None: - tvec_s = ct.zeros_3 - - # beam vector - if beam_vec is None: - beam_vec = ct.beam_vec - - # ========================================================================= - # LOOP OVER GRAINS - # ========================================================================= - - # pre-allocate output arrays - xy_det = np.nan * np.ones((n_grains, nhkls_tot, 2)) - hkls_in = np.nan * np.ones((n_grains, 3, nhkls_tot)) - angles = np.nan * np.ones((n_grains, nhkls_tot, 2)) - dspacing = np.nan * np.ones((n_grains, nhkls_tot)) - energy = np.nan * np.ones((n_grains, nhkls_tot)) - for iG, gp in enumerate(grain_params): - rmat_c = make_rmat_of_expmap(gp[:3]) - tvec_c = gp[3:6].reshape(3, 1) - vInv_s = mutil.vecMVToSymm(gp[6:].reshape(6, 1)) - - # stretch them: V^(-1) * R * Gc - gvec_s_str = np.dot(vInv_s, np.dot(rmat_c, gvec_c)) - ghat_c_str = mutil.unitVector(np.dot(rmat_c.T, gvec_s_str)) - - # project - dpts = gvec_to_xy( - ghat_c_str.T, - self.rmat, - rmat_s, - rmat_c, - self.tvec, - tvec_s, - tvec_c, - beam_vec=beam_vec, - ) - - # check intersections with detector plane - canIntersect = ~np.isnan(dpts[:, 0]) - npts_in = sum(canIntersect) - - if np.any(canIntersect): - dpts = dpts[canIntersect, :].reshape(npts_in, 2) - dhkl = hkls[:, canIntersect].reshape(3, npts_in) - - rmat_b = make_beam_rmat(beam_vec, ct.eta_vec) - # back to angles - tth_eta, gvec_l = xy_to_gvec( - dpts, - self.rmat, - rmat_s, - self.tvec, - tvec_s, - tvec_c, - rmat_b=rmat_b, - ) - tth_eta = np.vstack(tth_eta).T - - # warp measured points - if self.distortion is not None: - dpts = self.distortion.apply_inverse(dpts) - - # plane spacings and energies - dsp = 1.0 / mutil.rowNorm(gvec_s_str[:, canIntersect].T) - wlen = 2 * dsp * np.sin(0.5 * tth_eta[:, 0]) - - # clip to detector panel - _, on_panel = self.clip_to_panel(dpts, buffer_edges=True) - - if multipleEnergyRanges: - validEnergy = np.zeros(len(wlen), dtype=bool) - for i in range(len(lmin)): - in_energy_range = np.logical_and( - wlen >= lmin[i], wlen <= lmax[i] - ) - validEnergy = validEnergy | in_energy_range - else: - validEnergy = np.logical_and(wlen >= lmin, wlen <= lmax) - - # index for valid reflections - keepers = np.where(np.logical_and(on_panel, validEnergy))[0] - - # assign output arrays - xy_det[iG][keepers, :] = dpts[keepers, :] - hkls_in[iG][:, keepers] = dhkl[:, keepers] - angles[iG][keepers, :] = tth_eta[keepers, :] - dspacing[iG, keepers] = dsp[keepers] - energy[iG, keepers] = ct.keVToAngstrom(wlen[keepers]) - return xy_det, hkls_in, angles, dspacing, energy - - 
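Two side notes on the arithmetic in this neighborhood. First, ct.keVToAngstrom is used both to turn the energy cutoffs into wavelength bounds and to turn the surviving wavelengths back into energies; that works because E[keV] * lambda[Angstrom] = hc ~= 12.398, so the conversion is its own inverse. A tiny sketch (the constant is quoted from CODATA, not read from this module):

    HC_KEV_ANGSTROM = 12.39842  # h*c in keV * Angstrom

    def kev_to_angstrom(x):
        # maps keV -> Angstrom and, equally, Angstrom -> keV
        return HC_KEV_ANGSTROM / x

    assert abs(kev_to_angstrom(kev_to_angstrom(35.0)) - 35.0) < 1e-12

Second, the closed form used by calc_transmission_sample below is the depth-averaged Beer-Lambert factor for a beam entering a layer at obliquity sec(a) and leaving at sec(b). A numerical cross-check under assumed example values:

    import numpy as np

    mu, t, seca, secb = 0.01, 50.0, 1.0, 1.3  # 1/um, um, dimensionless
    x = mu * t
    closed = (np.exp(-x * seca) - np.exp(-x * secb)) / (x * (secb - seca))

    # average exp(-mu*z*seca - mu*(t - z)*secb) over depth z in [0, t]
    z = np.linspace(0.0, t, 20001)
    f = np.exp(-mu * z * seca - mu * (t - z) * secb)
    numeric = ((f[:-1] + f[1:]) * np.diff(z)).sum() / (2.0 * t)

    assert np.isclose(closed, numeric, rtol=1e-6)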
@staticmethod
-    def update_memoization_sizes(all_panels):
-        funcs = [
-            _polarization_factor,
-            _lorentz_factor,
-        ]
-
-        min_size = len(all_panels)
-        return Detector.increase_memoization_sizes(funcs, min_size)
-
-    @staticmethod
-    def increase_memoization_sizes(funcs, min_size):
-        for f in funcs:
-            cache_info = f.cache_info()
-            if cache_info['maxsize'] < min_size:
-                f.set_cache_maxsize(min_size)
-
-    def calc_physics_package_transmission(
-        self,
-        energy: np.floating,
-        rMat_s: np.array,
-        physics_package: AbstractPhysicsPackage,
-    ) -> np.float64:
-        """Get the transmission from the physics package;
-        HED and HEDM samples need to be considered separately.
-        """
-        bvec = self.bvec
-        sample_normal = np.dot(rMat_s, [0.0, 0.0, np.sign(bvec[2])])
-        seca = 1.0 / np.dot(bvec, sample_normal)
-
-        tth, eta = self.pixel_angles()
-        angs = np.vstack(
-            (tth.flatten(), eta.flatten(), np.zeros(tth.flatten().shape))
-        ).T
-
-        dvecs = angles_to_dvec(angs, beam_vec=bvec)
-
-        cosb = np.dot(dvecs, sample_normal)
-        '''angles for which secb <= 0 or close are diffracted beams
-        almost parallel to the sample surface or backscattered; we
-        can mask out these values by setting secb to nan
-        '''
-        mask = np.logical_or(
-            cosb < 0,
-            np.isclose(
-                cosb,
-                0.0,
-                atol=5e-2,
-            ),
-        )
-        cosb[mask] = np.nan
-        secb = 1.0 / cosb.reshape(self.shape)
-
-        T_sample = self.calc_transmission_sample(
-            seca, secb, energy, physics_package
-        )
-        T_window = self.calc_transmission_window(secb, energy, physics_package)
-
-        transmission_physics_package = T_sample * T_window
-        return transmission_physics_package
-
-    def calc_compton_physics_package_transmission(
-        self,
-        energy: np.floating,
-        rMat_s: np.array,
-        physics_package: AbstractPhysicsPackage,
-    ) -> np.ndarray:
-        '''calculate the attenuation of inelastically
-        scattered photons. since these photons lose energy,
-        the attenuation length is angle dependent, hence a
-        routine separate from elastic absorption.
-        '''
-        bvec = self.bvec
-        sample_normal = np.dot(rMat_s, [0.0, 0.0, np.sign(bvec[2])])
-        seca = 1.0 / np.dot(bvec, sample_normal)
-
-        tth, eta = self.pixel_angles()
-        angs = np.vstack(
-            (tth.flatten(), eta.flatten(), np.zeros(tth.flatten().shape))
-        ).T
-
-        dvecs = angles_to_dvec(angs, beam_vec=bvec)
-
-        cosb = np.dot(dvecs, sample_normal)
-        '''angles for which secb <= 0 or close are diffracted beams
-        almost parallel to the sample surface or backscattered; we
-        can mask out these values by setting secb to nan
-        '''
-        mask = np.logical_or(
-            cosb < 0,
-            np.isclose(
-                cosb,
-                0.0,
-                atol=5e-2,
-            ),
-        )
-        cosb[mask] = np.nan
-        secb = 1.0 / cosb.reshape(self.shape)
-
-        T_sample = self.calc_compton_transmission(
-            seca, secb, energy, physics_package, 'sample'
-        )
-        T_window = self.calc_compton_transmission_window(
-            secb, energy, physics_package
-        )
-
-        return T_sample * T_window
-
-    def calc_compton_window_transmission(
-        self,
-        energy: np.floating,
-        rMat_s: np.ndarray,
-        physics_package: AbstractPhysicsPackage,
-    ) -> np.ndarray:
-        '''calculate the attenuation of inelastically
-        scattered photons from the window only.
-        since these photons lose energy, the attenuation length
-        is angle dependent, hence a routine separate from
-        elastic absorption.
-        '''
-        bvec = self.bvec
-        sample_normal = np.dot(rMat_s, [0.0, 0.0, np.sign(bvec[2])])
-        seca = 1.0 / np.dot(bvec, sample_normal)
-
-        tth, eta = self.pixel_angles()
-        angs = np.vstack(
-            (tth.flatten(), eta.flatten(), np.zeros(tth.flatten().shape))
-        ).T
-
-        dvecs = angles_to_dvec(angs, beam_vec=bvec)
-
-        cosb = np.dot(dvecs, sample_normal)
-        '''angles for which secb <= 0 or close are diffracted beams
-        almost parallel to the sample surface or backscattered; we
-        can mask out these values by setting secb to nan
-        '''
-        mask = np.logical_or(
-            cosb < 0,
-            np.isclose(
-                cosb,
-                0.0,
-                atol=5e-2,
-            ),
-        )
-        cosb[mask] = np.nan
-        secb = 1.0 / cosb.reshape(self.shape)
-
-        T_window = self.calc_compton_transmission(
-            seca, secb, energy, physics_package, 'window'
-        )
-        T_sample = self.calc_compton_transmission_sample(
-            seca, energy, physics_package
-        )
-
-        return T_sample * T_window
-
-    def calc_transmission_sample(
-        self,
-        seca: np.array,
-        secb: np.array,
-        energy: np.floating,
-        physics_package: AbstractPhysicsPackage,
-    ) -> np.array:
-        thickness_s = physics_package.sample_thickness  # in microns
-        if np.isclose(thickness_s, 0):
-            return np.ones(self.shape)
-
-        # in microns^-1
-        mu_s = 1.0 / physics_package.sample_absorption_length(energy)
-        x = mu_s * thickness_s
-        pre = 1.0 / x / (secb - seca)
-        num = np.exp(-x * seca) - np.exp(-x * secb)
-        return pre * num
-
-    def calc_transmission_window(
-        self,
-        secb: np.array,
-        energy: np.floating,
-        physics_package: AbstractPhysicsPackage,
-    ) -> np.array:
-        material_w = physics_package.window_material
-        thickness_w = physics_package.window_thickness  # in microns
-        if material_w is None or np.isclose(thickness_w, 0):
-            return np.ones(self.shape)
-
-        # in microns^-1
-        mu_w = 1.0 / physics_package.window_absorption_length(energy)
-        return np.exp(-thickness_w * mu_w * secb)
-
-    def calc_compton_transmission(
-        self,
-        seca: np.ndarray,
-        secb: np.ndarray,
-        energy: np.floating,
-        physics_package: AbstractPhysicsPackage,
-        pp_layer: str,
-    ) -> np.ndarray:
-
-        if pp_layer == 'sample':
-            formula = physics_package.sample_material
-            density = physics_package.sample_density
-            thickness = physics_package.sample_thickness
-            mu = 1.0 / physics_package.sample_absorption_length(energy)
-            mu_prime = 1.0 / self.pixel_compton_attenuation_length(
-                energy,
-                density,
-                formula,
-            )
-        elif pp_layer == 'window':
-            formula = physics_package.window_material
-            if formula is None:
-                return np.ones(self.shape)
-
-            density = physics_package.window_density
-            thickness = physics_package.window_thickness
-            # incident-side attenuation uses the window's own
-            # absorption length
-            mu = 1.0 / physics_package.window_absorption_length(energy)
-            mu_prime = 1.0 / self.pixel_compton_attenuation_length(
-                energy, density, formula
-            )
-
-        if thickness <= 0:
-            return np.ones(self.shape)
-
-        x1 = mu * thickness * seca
-        x2 = mu_prime * thickness * secb
-        num = np.exp(-x1) - np.exp(-x2)
-        return -num / (x1 - x2)
-
-    def calc_compton_transmission_sample(
-        self,
-        seca: np.ndarray,
-        energy: np.floating,
-        physics_package: AbstractPhysicsPackage,
-    ) -> np.ndarray:
-        thickness_s = physics_package.sample_thickness  # in microns
-
-        mu_s = 1.0 / physics_package.sample_absorption_length(energy)
-        return np.exp(-mu_s * thickness_s * seca)
-
-    def calc_compton_transmission_window(
-        self,
-        secb: np.ndarray,
-        energy: np.floating,
-        physics_package: AbstractPhysicsPackage,
-    ) -> np.ndarray:
-        formula = physics_package.window_material
-        if formula is None:
-            return np.ones(self.shape)
-
-        density = physics_package.window_density  # in g/cc
-        thickness_w =
physics_package.window_thickness # in microns - - mu_w_prime = 1.0 / self.pixel_compton_attenuation_length( - energy, density, formula - ) - return np.exp(-mu_w_prime * thickness_w * secb) - - def calc_effective_pinhole_area( - self, physics_package: AbstractPhysicsPackage - ) -> np.array: - """get the effective pinhole area correction""" - if np.isclose(physics_package.pinhole_diameter, 0) or np.isclose( - physics_package.pinhole_thickness, 0 - ): - return np.ones(self.shape) - - hod = ( - physics_package.pinhole_thickness - / physics_package.pinhole_diameter - ) - bvec = self.bvec - - tth, eta = self.pixel_angles() - angs = np.vstack( - (tth.flatten(), eta.flatten(), np.zeros(tth.flatten().shape)) - ).T - dvecs = angles_to_dvec(angs, beam_vec=bvec) - - cth = -dvecs[:, 2].reshape(self.shape) - tanth = np.tan(np.arccos(cth)) - f = hod * tanth - f[np.abs(f) > 1.0] = np.nan - asinf = np.arcsin(f) - return 2 / np.pi * cth * (np.pi / 2 - asinf - f * np.cos(asinf)) - - def calc_transmission_generic( - self, - secb: np.array, - thickness: np.floating, - absorption_length: np.floating, - ) -> np.array: - if np.isclose(thickness, 0): - return np.ones(self.shape) - - mu = 1.0 / absorption_length # in microns^-1 - return np.exp(-thickness * mu * secb) - - def calc_transmission_phosphor( - self, - secb: np.array, - thickness: np.floating, - readout_length: np.floating, - absorption_length: np.floating, - energy: np.floating, - pre_U0: np.floating, - ) -> np.array: - if np.isclose(thickness, 0): - return np.ones(self.shape) - - f1 = absorption_length * thickness - f2 = absorption_length * readout_length - arg = secb + 1 / f2 - return pre_U0 * energy * ((1.0 - np.exp(-f1 * arg)) / arg) - - -# ============================================================================= -# UTILITY METHODS -# ============================================================================= - - -def _fix_indices(idx, lo, hi): - nidx = np.array(idx) - off_lo = nidx < lo - off_hi = nidx > hi - nidx[off_lo] = lo - nidx[off_hi] = hi - return nidx - - -def _row_edge_vec(rows, pixel_size_row): - return pixel_size_row * (0.5 * rows - np.arange(rows + 1)) - - -def _col_edge_vec(cols, pixel_size_col): - return pixel_size_col * (np.arange(cols + 1) - 0.5 * cols) - - -# FIXME find a better place for this, and maybe include loop over pixels -@numba.njit(nogil=True, cache=True) -def _solid_angle_of_triangle(vtx_list): - norms = np.sqrt(np.sum(vtx_list * vtx_list, axis=1)) - norms_prod = norms[0] * norms[1] * norms[2] - scalar_triple_product = np.dot( - vtx_list[0], np.cross(vtx_list[2], vtx_list[1]) - ) - denominator = ( - norms_prod - + norms[0] * np.dot(vtx_list[1], vtx_list[2]) - + norms[1] * np.dot(vtx_list[2], vtx_list[0]) - + norms[2] * np.dot(vtx_list[0], vtx_list[1]) - ) - - return 2.0 * np.arctan2(scalar_triple_product, denominator) diff --git a/hexrd/hed/instrument/hedm_instrument.py b/hexrd/hed/instrument/hedm_instrument.py deleted file mode 100644 index 2845de0dd..000000000 --- a/hexrd/hed/instrument/hedm_instrument.py +++ /dev/null @@ -1,3006 +0,0 @@ -# -*- coding: utf-8 -*- -# ============================================================================= -# Copyright (c) 2012, Lawrence Livermore National Security, LLC. -# Produced at the Lawrence Livermore National Laboratory. -# Written by Joel Bernier and others. -# LLNL-CODE-529294. -# All rights reserved. -# -# This file is part of HEXRD. For details on dowloading the source, -# see the file COPYING. -# -# Please also see the file LICENSE. 
-# -# This program is free software; you can redistribute it and/or modify it under -# the terms of the GNU Lesser General Public License (as published by the Free -# Software Foundation) version 2.1 dated February 1999. -# -# This program is distributed in the hope that it will be useful, but -# WITHOUT ANY WARRANTY; without even the IMPLIED WARRANTY OF MERCHANTABILITY -# or FITNESS FOR A PARTICULAR PURPOSE. See the terms and conditions of the -# GNU General Public License for more details. -# -# You should have received a copy of the GNU Lesser General Public -# License along with this program (see file LICENSE); if not, write to -# the Free Software Foundation, Inc., 59 Temple Place, Suite 330, -# Boston, MA 02111-1307 USA or visit . -# ============================================================================= -""" -Created on Fri Dec 9 13:05:27 2016 - -@author: bernier2 -""" -from contextlib import contextmanager -import copy -import logging -import os -from concurrent.futures import ProcessPoolExecutor, ThreadPoolExecutor -from functools import partial -from typing import Optional - -from tqdm import tqdm - -import yaml - -import h5py - -import numpy as np - -from io import IOBase - -from scipy import ndimage -from scipy.linalg import logm -from skimage.measure import regionprops - -from hexrd.core import constants -from hexrd.core.imageseries import ImageSeries -from hexrd.core.imageseries.process import ProcessedImageSeries -from hexrd.core.imageseries.omega import OmegaImageSeries -from hexrd.core.fitting.utils import fit_ring -from hexrd.core.gridutil import make_tolerance_grid -from hexrd.core import matrixutil as mutil -from hexrd.core.transforms.xfcapi import ( - angles_to_gvec, - gvec_to_xy, - make_sample_rmat, - make_rmat_of_expmap, - unit_vector, -) - -# TODO: Resolve extra-workflow dependency -from hexrd.hedm import xrdutil -from hexrd.hedm.material.crystallography import PlaneData -from hexrd.core import constants as ct -from hexrd.core.rotations import mapAngle -from hexrd.core import distortion as distortion_pkg -from hexrd.core.utils.concurrent import distribute_tasks -from hexrd.core.utils.hdf5 import unwrap_dict_to_h5, unwrap_h5_to_dict -from hexrd.core.utils.yaml import NumpyToNativeDumper -from hexrd.core.valunits import valWUnit - -# TODO: Resolve extra-workflow-dependency -from hexrd.powder.wppf import LeBail - -from hexrd.core.instrument.cylindrical_detector import CylindricalDetector -from hexrd.core.instrument.detector import beam_energy_DFLT, max_workers_DFLT, Detector -from hexrd.core.instrument.planar_detector import PlanarDetector - -from skimage.draw import polygon -from skimage.util import random_noise -from hexrd.powder.wppf import wppfsupport - -try: - from fast_histogram import histogram1d - - fast_histogram = True -except ImportError: - from numpy import histogram as histogram1d - - fast_histogram = False - -logger = logging.getLogger() -logger.setLevel('INFO') - -# ============================================================================= -# PARAMETERS -# ============================================================================= - -instrument_name_DFLT = 'instrument' - -beam_vec_DFLT = ct.beam_vec -source_distance_DFLT = np.inf - -eta_vec_DFLT = ct.eta_vec - -panel_id_DFLT = 'generic' -nrows_DFLT = 2048 -ncols_DFLT = 2048 -pixel_size_DFLT = (0.2, 0.2) - -tilt_params_DFLT = np.zeros(3) -t_vec_d_DFLT = np.r_[0.0, 0.0, -1000.0] - -chi_DFLT = 0.0 -t_vec_s_DFLT = np.zeros(3) - -multi_ims_key = ct.shared_ims_key -ims_classes = (ImageSeries, 
ProcessedImageSeries, OmegaImageSeries) - -buffer_key = 'buffer' -distortion_key = 'distortion' - -# ============================================================================= -# UTILITY METHODS -# ============================================================================= - - -def generate_chunks( - nrows, ncols, base_nrows, base_ncols, row_gap=0, col_gap=0 -): - """ - Generate chunking data for regularly tiled composite detectors. - - Parameters - ---------- - nrows : int - DESCRIPTION. - ncols : int - DESCRIPTION. - base_nrows : int - DESCRIPTION. - base_ncols : int - DESCRIPTION. - row_gap : int, optional - DESCRIPTION. The default is 0. - col_gap : int, optional - DESCRIPTION. The default is 0. - - Returns - ------- - rects : array_like - The (nrows*ncols, ) list of ROI specs (see Notes). - labels : array_like - The (nrows*ncols, ) list of ROI (i, j) matrix indexing labels 'i_j'. - - Notes - ----- - ProcessedImageSeries needs a (2, 2) array for the 'rect' kwarg: - [[row_start, row_stop], - [col_start, col_stop]] - """ - row_starts = np.array([i * (base_nrows + row_gap) for i in range(nrows)]) - col_starts = np.array([i * (base_ncols + col_gap) for i in range(ncols)]) - rr = np.vstack([row_starts, row_starts + base_nrows]) - cc = np.vstack([col_starts, col_starts + base_ncols]) - rects = [] - labels = [] - for i in range(nrows): - for j in range(ncols): - this_rect = np.array([[rr[0, i], rr[1, i]], [cc[0, j], cc[1, j]]]) - rects.append(this_rect) - labels.append('%d_%d' % (i, j)) - return rects, labels - - -def chunk_instrument(instr, rects, labels, use_roi=False): - """ - Generate chunked config fro regularly tiled composite detectors. - - Parameters - ---------- - instr : TYPE - DESCRIPTION. - rects : TYPE - DESCRIPTION. - labels : TYPE - DESCRIPTION. - - Returns - ------- - new_icfg_dict : TYPE - DESCRIPTION. - - """ - icfg_dict = instr.write_config() - new_icfg_dict = dict( - beam=icfg_dict['beam'], - oscillation_stage=icfg_dict['oscillation_stage'], - detectors={}, - ) - for panel_id, panel in instr.detectors.items(): - pcfg_dict = panel.config_dict(instr.chi, instr.tvec)['detector'] - - for pnum, pdata in enumerate(zip(rects, labels)): - rect, label = pdata - panel_name = f'{panel_id}_{label}' - - row_col_dim = np.diff(rect) # (2, 1) - shape = tuple(row_col_dim.flatten()) - center = rect[:, 0].reshape(2, 1) + 0.5 * row_col_dim - - sp_tvec = np.concatenate( - [panel.pixelToCart(center.T).flatten(), np.zeros(1)] - ) - - tvec = np.dot(panel.rmat, sp_tvec) + panel.tvec - - # new config dict - tmp_cfg = copy.deepcopy(pcfg_dict) - - # fix sizes - tmp_cfg['pixels']['rows'] = shape[0] - tmp_cfg['pixels']['columns'] = shape[1] - if use_roi: - tmp_cfg['pixels']['roi'] = (rect[0][0], rect[1][0]) - - # update tvec - tmp_cfg['transform']['translation'] = tvec.tolist() - - new_icfg_dict['detectors'][panel_name] = copy.deepcopy(tmp_cfg) - - if panel.panel_buffer is not None: - if panel.panel_buffer.ndim == 2: # have a mask array! - submask = panel.panel_buffer[ - rect[0, 0] : rect[0, 1], rect[1, 0] : rect[1, 1] - ] - new_icfg_dict['detectors'][panel_name]['buffer'] = submask - return new_icfg_dict - - -def _parse_imgser_dict(imgser_dict, det_key, roi=None): - """ - Associates a dict of imageseries to the target panel(s). - - Parameters - ---------- - imgser_dict : dict - The input dict of imageseries. Either `det_key` is in imgser_dict, or - the shared key is. Entries can be an ImageSeries object or a 2- or 3-d - ndarray of images. - det_key : str - The target detector key. 
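A worked example of generate_chunks above, using illustrative subpanel dimensions (a Dexela-like 2x2 tiling with no gaps; the numbers are not taken from any shipped config):

    rects, labels = generate_chunks(2, 2, 1944, 1536)
    # labels -> ['0_0', '0_1', '1_0', '1_1']
    # rects[1] -> [[0, 1944], [1536, 3072]], i.e. the row span and
    # col span of tile (0, 1), ready for ProcessedImageSeries 'rect'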
-    roi : tuple or None, optional
-        The roi of the target images. Format is
-        ((row_start, row_stop), (col_start, col_stop)).
-        The stops are used in the normal sense of a slice. The default is None.
-
-    Raises
-    ------
-    RuntimeError
-        If neither `det_key` nor the shared key is in the input imgser_dict;
-        also, if the shared key is specified but the roi is None.
-
-    Returns
-    -------
-    ims : hexrd.core.imageseries
-        The desired imageseries object.
-
-    """
-    # grab imageseries for this detector
-    try:
-        ims = imgser_dict[det_key]
-    except KeyError:
-        matched_det_keys = [det_key in k for k in imgser_dict]
-        if multi_ims_key in imgser_dict:
-            images_in = imgser_dict[multi_ims_key]
-        elif np.any(matched_det_keys):
-            if sum(matched_det_keys) != 1:
-                raise RuntimeError(f"multiple entries found for '{det_key}'")
-            # use boolean array to index the proper key
-            # !!! these should be in the same order
-            img_keys = np.asarray(list(imgser_dict.keys()))
-            matched_det_key = img_keys[matched_det_keys][0]  # !!! only one
-            images_in = imgser_dict[matched_det_key]
-        else:
-            raise RuntimeError(
-                f"neither '{det_key}' nor '{multi_ims_key}' found "
-                + "in imageseries input"
-            )
-
-        # have images now
-        if roi is None:
-            raise RuntimeError(
-                "roi must be specified to use shared imageseries"
-            )
-
-        if isinstance(images_in, ims_classes):
-            # input is an imageseries of some kind
-            ims = ProcessedImageSeries(
-                images_in,
-                [
-                    ('rectangle', roi),
-                ],
-            )
-            if isinstance(images_in, OmegaImageSeries):
-                # if it was an OmegaImageSeries, must re-cast
-                ims = OmegaImageSeries(ims)
-        elif isinstance(images_in, np.ndarray):
-            # 2- or 3-d array of images
-            ndim = images_in.ndim
-            if ndim == 2:
-                ims = images_in[roi[0][0] : roi[0][1], roi[1][0] : roi[1][1]]
-            elif ndim == 3:
-                nrows = roi[0][1] - roi[0][0]
-                ncols = roi[1][1] - roi[1][0]
-                n_images = len(images_in)
-                ims = np.empty((n_images, nrows, ncols), dtype=images_in.dtype)
-                for i, image in enumerate(images_in):
-                    ims[i, :, :] = image[
-                        roi[0][0] : roi[0][1], roi[1][0] : roi[1][1]
-                    ]
-            else:
-                raise RuntimeError(
-                    f"image input dim must be 2 or 3; you gave {ndim}"
-                )
-    return ims
-
-
-def calc_beam_vec(azim, pola):
-    """
-    Calculate unit beam propagation vector from
-    spherical coordinate spec in DEGREES.
-
-    ...MAY CHANGE; THIS IS ALSO LOCATED IN XRDUTIL!
- """ - tht = np.radians(azim) - phi = np.radians(pola) - bv = np.r_[ - np.sin(phi) * np.cos(tht), np.cos(phi), np.sin(phi) * np.sin(tht) - ] - return -bv - - -def calc_angles_from_beam_vec(bvec): - """ - Return the azimuth and polar angle from a beam - vector - """ - bvec = np.atleast_1d(bvec).flatten() - nvec = unit_vector(-bvec) - azim = float(np.degrees(np.arctan2(nvec[2], nvec[0]))) - pola = float(np.degrees(np.arccos(nvec[1]))) - return azim, pola - - -def migrate_instrument_config(instrument_config): - """utility function to generate old instrument config dictionary""" - cfg_list = [] - for detector_id in instrument_config['detectors']: - cfg_list.append( - dict( - detector=instrument_config['detectors'][detector_id], - oscillation_stage=instrument_config['oscillation_stage'], - ) - ) - return cfg_list - - -def angle_in_range(angle, ranges, ccw=True, units='degrees'): - """ - Return the index of the first wedge the angle is found in - - WARNING: always clockwise; assumes wedges are not overlapping - """ - tau = 360.0 - if units.lower() == 'radians': - tau = 2 * np.pi - w = np.nan - for i, wedge in enumerate(ranges): - amin = wedge[0] - amax = wedge[1] - check = amin + np.mod(angle - amin, tau) - if check < amax: - w = i - break - return w - - -# ???: move to gridutil? -def centers_of_edge_vec(edges): - assert np.asarray(edges).ndim == 1, "edges must be 1-d" - return np.average(np.vstack([edges[:-1], edges[1:]]), axis=0) - - -def max_tth(instr): - """ - Return the maximum Bragg angle (in radians) subtended by the instrument. - - Parameters - ---------- - instr : hexrd.hedm.instrument.HEDMInstrument instance - the instrument class to evalutate. - - Returns - ------- - tth_max : float - The maximum observable Bragg angle by the instrument in radians. - """ - tth_max = 0.0 - for det in instr.detectors.values(): - ptth, peta = det.pixel_angles() - tth_max = max(np.max(ptth), tth_max) - return tth_max - - -def pixel_resolution(instr): - """ - Return the minimum, median, and maximum angular - resolution of the instrument. - - Parameters - ---------- - instr : HEDMInstrument instance - An instrument. - - Returns - ------- - tth_stats : float - min/median/max tth resolution in radians. - eta_stats : TYPE - min/median/max eta resolution in radians. - - """ - max_tth = np.inf - max_eta = np.inf - min_tth = -np.inf - min_eta = -np.inf - ang_ps_full = [] - for panel in instr.detectors.values(): - angps = panel.angularPixelSize( - np.stack(panel.pixel_coords, axis=0) - .reshape(2, np.cumprod(panel.shape)[-1]) - .T - ) - ang_ps_full.append(angps) - max_tth = min(max_tth, np.min(angps[:, 0])) - max_eta = min(max_eta, np.min(angps[:, 1])) - min_tth = max(min_tth, np.max(angps[:, 0])) - min_eta = max(min_eta, np.max(angps[:, 1])) - med_tth, med_eta = np.median(np.vstack(ang_ps_full), axis=0).flatten() - return (min_tth, med_tth, max_tth), (min_eta, med_eta, max_eta) - - -def max_resolution(instr): - """ - Return the maximum angular resolution of the instrument. - - Parameters - ---------- - instr : HEDMInstrument instance - An instrument. - - Returns - ------- - max_tth : float - Maximum tth resolution in radians. - max_eta : TYPE - maximum eta resolution in radians. 
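The two beam-vector helpers above are inverses of each other on their useful domain (polar angle in (0, 180), azimuth in (-180, 180]). A hypothetical round-trip check, assuming both functions are in scope:

    import numpy as np

    bv = calc_beam_vec(30.0, 85.0)              # azimuth, polar in degrees
    assert np.isclose(np.linalg.norm(bv), 1.0)  # unit propagation vector
    assert np.allclose(calc_angles_from_beam_vec(bv), (30.0, 85.0))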
- - """ - max_tth = np.inf - max_eta = np.inf - for panel in instr.detectors.values(): - angps = panel.angularPixelSize( - np.stack(panel.pixel_coords, axis=0) - .reshape(2, np.cumprod(panel.shape)[-1]) - .T - ) - max_tth = min(max_tth, np.min(angps[:, 0])) - max_eta = min(max_eta, np.min(angps[:, 1])) - return max_tth, max_eta - - -def _gaussian_dist(x, cen, fwhm): - sigm = fwhm / (2 * np.sqrt(2 * np.log(2))) - return np.exp(-0.5 * (x - cen) ** 2 / sigm**2) - - -def _sigma_to_fwhm(sigm): - return sigm * ct.sigma_to_fwhm - - -def _fwhm_to_sigma(fwhm): - return fwhm / ct.sigma_to_fwhm - - -# ============================================================================= -# CLASSES -# ============================================================================= - - -class HEDMInstrument(object): - """ - Abstraction of XRD instrument. - - * Distortion needs to be moved to a class with registry; tuple unworkable - * where should reference eta be defined? currently set to default config - """ - - def __init__( - self, - instrument_config=None, - image_series=None, - eta_vector=None, - instrument_name=None, - tilt_calibration_mapping=None, - max_workers=max_workers_DFLT, - physics_package=None, - active_beam_name: Optional[str] = None, - ): - self._id = instrument_name_DFLT - - self._active_beam_name = active_beam_name - self._beam_dict = {} - - if eta_vector is None: - self._eta_vector = eta_vec_DFLT - else: - self._eta_vector = eta_vector - - self.max_workers = max_workers - - self.physics_package = physics_package - - if instrument_config is None: - # Default instrument - if instrument_name is not None: - self._id = instrument_name - self._num_panels = 1 - self._create_default_beam() - - # FIXME: must add cylindrical - self._detectors = dict( - panel_id_DFLT=PlanarDetector( - rows=nrows_DFLT, - cols=ncols_DFLT, - pixel_size=pixel_size_DFLT, - tvec=t_vec_d_DFLT, - tilt=tilt_params_DFLT, - bvec=self.beam_vector, - xrs_dist=self.source_distance, - evec=self._eta_vector, - distortion=None, - roi=None, - group=None, - max_workers=self.max_workers, - ), - ) - - self._tvec = t_vec_s_DFLT - self._chi = chi_DFLT - else: - if isinstance(instrument_config, h5py.File): - tmp = {} - unwrap_h5_to_dict(instrument_config, tmp) - instrument_config = tmp['instrument'] - elif not isinstance(instrument_config, dict): - raise RuntimeError( - "instrument_config must be either an HDF5 file object" - + "or a dictionary. You gave a %s" - % type(instrument_config) - ) - if instrument_name is None: - if 'id' in instrument_config: - self._id = instrument_config['id'] - else: - self._id = instrument_name - - self._num_panels = len(instrument_config['detectors']) - - if instrument_config.get('physics_package', None) is not None: - self.physics_package = instrument_config['physics_package'] - - xrs_config = instrument_config['beam'] - is_single_beam = 'energy' in xrs_config and 'vector' in xrs_config - if is_single_beam: - # Assume single beam. 
Load the same way as multibeam - self._create_default_beam() - xrs_config = {self.active_beam_name: xrs_config} - - # Multi beam load - for beam_name, beam in xrs_config.items(): - self._beam_dict[beam_name] = { - 'energy': beam['energy'], - 'vector': calc_beam_vec( - beam['vector']['azimuth'], - beam['vector']['polar_angle'], - ), - 'distance': beam.get('source_distance', np.inf), - } - - # Set the active beam name if not set already - if self._active_beam_name is None: - self._active_beam_name = next(iter(self._beam_dict)) - - # now build detector dict - detectors_config = instrument_config['detectors'] - det_dict = dict.fromkeys(detectors_config) - for det_id, det_info in detectors_config.items(): - det_group = det_info.get('group') # optional detector group - pixel_info = det_info['pixels'] - affine_info = det_info['transform'] - detector_type = det_info.get('detector_type', 'planar') - filter = det_info.get('filter', None) - coating = det_info.get('coating', None) - phosphor = det_info.get('phosphor', None) - try: - saturation_level = det_info['saturation_level'] - except KeyError: - saturation_level = 2**16 - shape = (pixel_info['rows'], pixel_info['columns']) - - panel_buffer = None - if buffer_key in det_info: - det_buffer = det_info[buffer_key] - if det_buffer is not None: - if isinstance(det_buffer, np.ndarray): - if det_buffer.ndim == 2: - if det_buffer.shape != shape: - msg = ( - f'Buffer shape for {det_id} ' - f'({det_buffer.shape}) does not match ' - f'detector shape ({shape})' - ) - raise BufferShapeMismatchError(msg) - else: - assert len(det_buffer) == 2 - panel_buffer = det_buffer - elif isinstance(det_buffer, list): - panel_buffer = np.asarray(det_buffer) - elif np.isscalar(det_buffer): - panel_buffer = det_buffer * np.ones(2) - else: - raise RuntimeError( - "panel buffer spec invalid for %s" % det_id - ) - - # optional roi - roi = pixel_info.get('roi') - - # handle distortion - distortion = None - if distortion_key in det_info: - distortion_cfg = det_info[distortion_key] - if distortion_cfg is not None: - try: - func_name = distortion_cfg['function_name'] - dparams = distortion_cfg['parameters'] - distortion = distortion_pkg.get_mapping( - func_name, dparams - ) - except KeyError: - raise RuntimeError( - "problem with distortion specification" - ) - if detector_type.lower() not in DETECTOR_TYPES: - msg = f'Unknown detector type: {detector_type}' - raise NotImplementedError(msg) - - DetectorClass = DETECTOR_TYPES[detector_type.lower()] - kwargs = dict( - name=det_id, - rows=pixel_info['rows'], - cols=pixel_info['columns'], - pixel_size=pixel_info['size'], - panel_buffer=panel_buffer, - saturation_level=saturation_level, - tvec=affine_info['translation'], - tilt=affine_info['tilt'], - bvec=self.beam_vector, - xrs_dist=self.source_distance, - evec=self._eta_vector, - distortion=distortion, - roi=roi, - group=det_group, - max_workers=self.max_workers, - detector_filter=filter, - detector_coating=coating, - phosphor=phosphor, - ) - - if DetectorClass is CylindricalDetector: - # Add cylindrical detector kwargs - kwargs['radius'] = det_info.get('radius', 49.51) - - det_dict[det_id] = DetectorClass(**kwargs) - - self._detectors = det_dict - - self._tvec = np.r_[ - instrument_config['oscillation_stage']['translation'] - ] - self._chi = instrument_config['oscillation_stage']['chi'] - - # grab angles from beam vec - # !!! these are in DEGREES! 
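For reference, the loader above accepts either the classic single-beam 'beam' section or a named multi-beam mapping, and the single-beam form is wrapped as {active_beam_name: beam} before parsing. Both layouts sketched with illustrative values only:

    single_beam = {
        'energy': 65.0,                                    # keV
        'vector': {'azimuth': 90.0, 'polar_angle': 90.0},  # degrees
    }
    multi_beam = {
        'XRS1': {
            'energy': 65.0,
            'vector': {'azimuth': 90.0, 'polar_angle': 90.0},
            'source_distance': 1000.0,  # optional; defaults to np.inf
        },
    }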
-        azim, pola = calc_angles_from_beam_vec(self.beam_vector)
-
-        self.update_memoization_sizes()
-
-    @property
-    def mean_detector_center(self) -> np.ndarray:
-        """Return the mean center for all detectors"""
-        centers = np.array([panel.tvec for panel in self.detectors.values()])
-        return centers.sum(axis=0) / len(centers)
-
-    def mean_group_center(self, group: str) -> np.ndarray:
-        """Return the mean center for detectors belonging to a group"""
-        centers = np.array(
-            [x.tvec for x in self.detectors_in_group(group).values()]
-        )
-        return centers.sum(axis=0) / len(centers)
-
-    @property
-    def detector_groups(self) -> list[str]:
-        groups = []
-        for panel in self.detectors.values():
-            group = panel.group
-            if group is not None and group not in groups:
-                groups.append(group)
-
-        return groups
-
-    def detectors_in_group(self, group: str) -> dict[str, Detector]:
-        return {k: v for k, v in self.detectors.items() if v.group == group}
-
-    # properties for physical size of rectangular detector
-    @property
-    def id(self):
-        return self._id
-
-    @property
-    def num_panels(self):
-        return self._num_panels
-
-    @property
-    def detectors(self):
-        return self._detectors
-
-    @property
-    def detector_parameters(self):
-        pdict = {}
-        for key, panel in self.detectors.items():
-            pdict[key] = panel.config_dict(
-                self.chi,
-                self.tvec,
-                beam_energy=self.beam_energy,
-                beam_vector=self.beam_vector,
-                style='hdf5',
-            )
-        return pdict
-
-    @property
-    def tvec(self):
-        return self._tvec
-
-    @tvec.setter
-    def tvec(self, x):
-        x = np.array(x).flatten()
-        assert len(x) == 3, 'input must have length = 3'
-        self._tvec = x
-
-    @property
-    def chi(self):
-        return self._chi
-
-    @chi.setter
-    def chi(self, x):
-        self._chi = float(x)
-
-    @property
-    def beam_energy(self) -> float:
-        return self.active_beam['energy']
-
-    @beam_energy.setter
-    def beam_energy(self, x: float):
-        self.active_beam['energy'] = float(x)
-        self.beam_dict_modified()
-
-    @property
-    def beam_wavelength(self):
-        return ct.keVToAngstrom(self.beam_energy)
-
-    @property
-    def has_multi_beam(self) -> bool:
-        return len(self.beam_dict) > 1
-
-    @property
-    def beam_dict(self) -> dict:
-        return self._beam_dict
-
-    def _create_default_beam(self):
-        name = 'XRS1'
-        self._beam_dict[name] = {
-            'energy': beam_energy_DFLT,
-            'vector': beam_vec_DFLT.copy(),
-            'distance': np.inf,
-        }
-
-        if self._active_beam_name is None:
-            self._active_beam_name = name
-
-    @property
-    def beam_names(self) -> list[str]:
-        return list(self.beam_dict)
-
-    def xrs_beam_energy(self, beam_name: Optional[str]) -> float:
-        if beam_name is None:
-            beam_name = self.active_beam_name
-
-        return self.beam_dict[beam_name]['energy']
-
-    @property
-    def active_beam_name(self) -> str:
-        return self._active_beam_name
-
-    @active_beam_name.setter
-    def active_beam_name(self, name: str):
-        # validate the requested name, not the current one
-        if name not in self.beam_dict:
-            raise RuntimeError(
-                f'"{name}" is not present in "{self.beam_names}"'
-            )
-
-        self._active_beam_name = name
-
-        # Update anything beam related where we need to
-        self._update_panel_beams()
-
-    def beam_dict_modified(self):
-        # A function to call to indicate that the beam dict was modified.
-        # Update anything beam related where we need to
-        self._update_panel_beams()
-
-    @property
-    def active_beam(self) -> dict:
-        return self.beam_dict[self.active_beam_name]
-
-    def _update_panel_beams(self):
-        # FIXME: maybe we shouldn't store these on the panels?
-        # Might be hard to fix, though...
- for panel in self.detectors.values(): - panel.bvec = self.beam_vector - panel.xrs_dist = self.source_distance - - @property - def beam_vector(self) -> np.ndarray: - return self.active_beam['vector'] - - @beam_vector.setter - def beam_vector(self, x: np.ndarray): - x = np.array(x).flatten() - if len(x) == 3: - assert ( - sum(x * x) > 1 - ct.sqrt_epsf - ), 'input must have length = 3 and have unit magnitude' - bvec = x - elif len(x) == 2: - bvec = calc_beam_vec(*x) - else: - raise RuntimeError("input must be a unit vector or angle pair") - - # Modify the beam vector for the active beam dict - self.active_beam['vector'] = bvec - self.beam_dict_modified() - - @property - def source_distance(self): - return self.active_beam['distance'] - - @source_distance.setter - def source_distance(self, x): - assert np.isscalar( - x - ), f"'source_distance' must be a scalar; you input '{x}'" - self.active_beam['distance'] = x - self.beam_dict_modified() - - @property - def eta_vector(self): - return self._eta_vector - - @eta_vector.setter - def eta_vector(self, x): - x = np.array(x).flatten() - assert ( - len(x) == 3 and sum(x * x) > 1 - ct.sqrt_epsf - ), 'input must have length = 3 and have unit magnitude' - self._eta_vector = x - # ...maybe change dictionary item behavior for 3.x compatibility? - for detector_id in self.detectors: - panel = self.detectors[detector_id] - panel.evec = self._eta_vector - - # ========================================================================= - # METHODS - # ========================================================================= - - def write_config(self, file=None, style='yaml', calibration_dict={}): - """WRITE OUT YAML FILE""" - # initialize output dictionary - assert style.lower() in ['yaml', 'hdf5'], ( - "style must be either 'yaml', or 'hdf5'; you gave '%s'" % style - ) - - par_dict = {} - - par_dict['id'] = self.id - - # Multi beam writer - beam_dict = {} - for beam_name, beam in self.beam_dict.items(): - azim, polar = calc_angles_from_beam_vec(beam['vector']) - beam_dict[beam_name] = { - 'energy': beam['energy'], - 'vector': { - 'azimuth': azim, - 'polar_angle': polar, - }, - } - if beam['distance'] != np.inf: - beam_dict[beam_name]['source_distance'] = beam['distance'] - - if len(beam_dict) == 1: - # Just write it out a single beam (classical way) - beam_dict = next(iter(beam_dict.values())) - - par_dict['beam'] = beam_dict - - if calibration_dict: - par_dict['calibration_crystal'] = calibration_dict - - ostage = dict(chi=self.chi, translation=self.tvec.tolist()) - par_dict['oscillation_stage'] = ostage - - det_dict = dict.fromkeys(self.detectors) - for det_name, detector in self.detectors.items(): - # grab panel config - # !!! don't need beam or tvec - # !!! 
have vetted style - pdict = detector.config_dict( - chi=self.chi, - tvec=self.tvec, - beam_energy=self.beam_energy, - beam_vector=self.beam_vector, - style=style, - ) - det_dict[det_name] = pdict['detector'] - par_dict['detectors'] = det_dict - - # handle output file if requested - if file is not None: - if style.lower() == 'yaml': - with open(file, 'w') as f: - yaml.dump(par_dict, stream=f, Dumper=NumpyToNativeDumper) - else: - - def _write_group(file): - instr_grp = file.create_group('instrument') - unwrap_dict_to_h5(instr_grp, par_dict, asattr=False) - - # hdf5 - if isinstance(file, str): - with h5py.File(file, 'w') as f: - _write_group(f) - elif isinstance(file, h5py.File): - _write_group(file) - else: - raise TypeError("Unexpected file type.") - - return par_dict - - def extract_polar_maps( - self, - plane_data, - imgser_dict, - active_hkls=None, - threshold=None, - tth_tol=None, - eta_tol=0.25, - ): - """ - Extract eta-omega maps from an imageseries. - - Quick and dirty way to histogram angular patch data for make - pole figures suitable for fiber generation - - TODO: streamline projection code - TODO: normalization - !!!: images must be non-negative! - !!!: plane_data is NOT a copy! - """ - if tth_tol is not None: - plane_data.tThWidth = np.radians(tth_tol) - else: - tth_tol = np.degrees(plane_data.tThWidth) - - # make rings clipped to panel - # !!! eta_idx has the same length as plane_data.exclusions - # each entry are the integer indices into the bins - # !!! eta_edges is the list of eta bin EDGES; same for all - # detectors, so calculate it once - # !!! grab first panel - panel = next(iter(self.detectors.values())) - pow_angs, pow_xys, tth_ranges, eta_idx, eta_edges = ( - panel.make_powder_rings( - plane_data, - merge_hkls=False, - delta_eta=eta_tol, - full_output=True, - ) - ) - - if active_hkls is not None: - assert hasattr( - active_hkls, '__len__' - ), "active_hkls must be an iterable with __len__" - - # need to re-cast for element-wise operations - active_hkls = np.array(active_hkls) - - # these are all active reflection unique hklIDs - active_hklIDs = plane_data.getHKLID(plane_data.hkls, master=True) - - # find indices - idx = np.zeros_like(active_hkls, dtype=int) - for i, input_hklID in enumerate(active_hkls): - try: - idx[i] = np.where(active_hklIDs == input_hklID)[0] - except ValueError: - raise RuntimeError(f"hklID '{input_hklID}' is invalid") - tth_ranges = tth_ranges[idx] - - delta_eta = eta_edges[1] - eta_edges[0] - ncols_eta = len(eta_edges) - 1 - - ring_maps_panel = dict.fromkeys(self.detectors) - for i_d, det_key in enumerate(self.detectors): - print("working on detector '%s'..." 
% det_key) - - # grab panel - panel = self.detectors[det_key] - # native_area = panel.pixel_area # pixel ref area - - # pixel angular coords for the detector panel - ptth, peta = panel.pixel_angles() - - # grab imageseries for this detector - ims = _parse_imgser_dict(imgser_dict, det_key, roi=panel.roi) - - # grab omegas from imageseries and squawk if missing - try: - omegas = ims.metadata['omega'] - except KeyError: - raise RuntimeError( - f"imageseries for '{det_key}' has no omega info" - ) - - # initialize maps and assing by row (omega/frame) - nrows_ome = len(omegas) - - # init map with NaNs - shape = (len(tth_ranges), nrows_ome, ncols_eta) - ring_maps = np.full(shape, np.nan) - - # Generate ring parameters once, and re-use them for each image - ring_params = [] - for tthr in tth_ranges: - kwargs = { - 'tthr': tthr, - 'ptth': ptth, - 'peta': peta, - 'eta_edges': eta_edges, - 'delta_eta': delta_eta, - } - ring_params.append(_generate_ring_params(**kwargs)) - - # Divide up the images among processes - tasks = distribute_tasks(len(ims), self.max_workers) - func = partial( - _run_histograms, - ims=ims, - tth_ranges=tth_ranges, - ring_maps=ring_maps, - ring_params=ring_params, - threshold=threshold, - ) - - max_workers = self.max_workers - if max_workers == 1 or len(tasks) == 1: - # Just execute it serially. - for task in tasks: - func(task) - else: - with ThreadPoolExecutor(max_workers=max_workers) as executor: - # Evaluate the results via `list()`, so that if an - # exception is raised in a thread, it will be re-raised - # and visible to the user. - list(executor.map(func, tasks)) - - ring_maps_panel[det_key] = ring_maps - - return ring_maps_panel, eta_edges - - def extract_line_positions( - self, - plane_data, - imgser_dict, - tth_tol=None, - eta_tol=1.0, - npdiv=2, - eta_centers=None, - collapse_eta=True, - collapse_tth=False, - do_interpolation=True, - do_fitting=False, - tth_distortion=None, - fitting_kwargs=None, - ): - """ - Perform annular interpolation on diffraction images. - - Provides data for extracting the line positions from powder diffraction - images, pole figure patches from imageseries, or Bragg peaks from - Laue diffraction images. - - Parameters - ---------- - plane_data : hexrd.crystallography.PlaneData object or array_like - Object determining the 2theta positions for the integration - sectors. If PlaneData, this will be all non-excluded reflections, - subject to merging within PlaneData.tThWidth. If array_like, - interpreted as a list of 2theta angles IN DEGREES. - imgser_dict : dict - Dictionary of powder diffraction images, one for each detector. - tth_tol : scalar, optional - The radial (i.e. 2theta) width of the integration sectors - IN DEGREES. This arg is required if plane_data is array_like. - The default is None. - eta_tol : scalar, optional - The azimuthal (i.e. eta) width of the integration sectors - IN DEGREES. The default is 1. - npdiv : int, optional - The number of oversampling pixel subdivision (see notes). - The default is 2. - eta_centers : array_like, optional - The desired azimuthal sector centers. The default is None. If - None, then bins are distrubted sequentially from (-180, 180). - collapse_eta : bool, optional - Flag for summing sectors in eta. The default is True. - collapse_tth : bool, optional - Flag for summing sectors in 2theta. The default is False. - do_interpolation : bool, optional - If True, perform bilinear interpolation. The default is True. 
-        do_fitting : bool, optional
-            If True, then perform spectrum fitting, and append the results
-            to the returned data. collapse_eta must also be True for this
-            to have any effect. The default is False.
-        tth_distortion : special class, optional
-            for the special case of pinhole camera distortions. See
-            hexrd.hedm.xrdutil.phutil.SampleLayerDistortion (only type
-            supported)
-        fitting_kwargs : dict, optional
-            kwargs passed to hexrd.core.fitting.utils.fit_ring if do_fitting
-            is True
-
-        Raises
-        ------
-        RuntimeError
-            DESCRIPTION.
-
-        Returns
-        -------
-        panel_data : dict
-            Dictionary over the detectors with the following structure:
-                [list over (merged) 2theta ranges]
-                    [list over valid eta sectors]
-                        [angle data,
-                         bin intensities,
-                         fitting results]
-
-        Notes
-        -----
-        TODO: May change the array_like input units to degrees.
-        TODO: rename function.
-
-        """
-
-        if fitting_kwargs is None:
-            fitting_kwargs = {}
-
-        # =====================================================================
-        # LOOP OVER DETECTORS
-        # =====================================================================
-        logger.info("Interpolating ring data")
-        pbar_dets = partial(
-            tqdm,
-            total=self.num_panels,
-            desc="Detector",
-            position=self.num_panels,
-        )
-
-        # Split up the workers among the detectors
-        max_workers_per_detector = max(1, self.max_workers // self.num_panels)
-
-        kwargs = {
-            'plane_data': plane_data,
-            'tth_tol': tth_tol,
-            'eta_tol': eta_tol,
-            'eta_centers': eta_centers,
-            'npdiv': npdiv,
-            'collapse_tth': collapse_tth,
-            'collapse_eta': collapse_eta,
-            'do_interpolation': do_interpolation,
-            'do_fitting': do_fitting,
-            'fitting_kwargs': fitting_kwargs,
-            'tth_distortion': tth_distortion,
-            'max_workers': max_workers_per_detector,
-        }
-        func = partial(_extract_detector_line_positions, **kwargs)
-
-        def make_instr_cfg(panel):
-            return panel.config_dict(
-                chi=self.chi,
-                tvec=self.tvec,
-                beam_energy=self.beam_energy,
-                beam_vector=self.beam_vector,
-                style='hdf5',
-            )
-
-        images = []
-        for detector_id, panel in self.detectors.items():
-            images.append(
-                _parse_imgser_dict(imgser_dict, detector_id, roi=panel.roi)
-            )
-
-        panels = [self.detectors[k] for k in self.detectors]
-        instr_cfgs = [make_instr_cfg(x) for x in panels]
-        pbp_array = np.arange(self.num_panels)
-        iter_args = zip(panels, instr_cfgs, images, pbp_array)
-        with ProcessPoolExecutor(
-            mp_context=constants.mp_context, max_workers=self.num_panels
-        ) as executor:
-            results = list(pbar_dets(executor.map(func, iter_args)))
-
-        panel_data = {}
-        for det, res in zip(self.detectors, results):
-            panel_data[det] = res
-
-        return panel_data
-
-    def simulate_powder_pattern(
-        self, mat_list, params=None, bkgmethod=None, origin=None, noise=None
-    ):
-        """
-        Generate powder diffraction images from specified materials.
-
-        Parameters
-        ----------
-        mat_list : array_like (n, )
-            List of Material classes.
-        params : dict, optional
-            Dictionary of LeBail parameters (see Notes). The default is None.
-        bkgmethod : dict, optional
-            Background function specification. The default is None.
-        origin : array_like (3,), optional
-            Vector describing the origin of the diffraction volume.
-            The default is None, which is equivalent to [0, 0, 0].
-        noise : str, optional
-            Flag describing type of noise to be applied. The default is None.
-
-        Returns
-        -------
-        img_dict : dict
-            Dictionary of diffraction images over the detectors.
-
-        Notes
-        -----
-        TODO: add more controls for noise function.
-        TODO: modify hooks to LeBail parameters.
- TODO: add optional volume fraction weights for phases in mat_list - """ - """ - >> @AUTHOR: Saransh Singh, Lanwrence Livermore National Lab, - saransh1@llnl.gov - >> @DATE: 01/22/2021 SS 1.0 original - >> @DETAILS: adding hook to WPPF class. this changes the input list - significantly - """ - if origin is None: - origin = self.tvec - origin = np.asarray(origin).squeeze() - assert len(origin) == 3, "origin must be a 3-element sequence" - - if bkgmethod is None: - bkgmethod = {'chebyshev': 3} - - ''' - if params is none, fill in some sane default values - only the first value is used. the rest of the values are - the upper, lower bounds and vary flag for refinement which - are not used but required for interfacing with WPPF - - zero_error : zero shift error - U, V, W : Cagliotti parameters - P, X, Y : Lorentzian parameters - eta1, eta2, eta3 : Mixing parameters - ''' - if params is None: - # params = {'zero_error': [0.0, -1., 1., True], - # 'U': [2e-1, -1., 1., True], - # 'V': [2e-2, -1., 1., True], - # 'W': [2e-2, -1., 1., True], - # 'X': [2e-1, -1., 1., True], - # 'Y': [2e-1, -1., 1., True] - # } - params = wppfsupport._generate_default_parameters_LeBail( - mat_list, - 1, - bkgmethod, - ) - ''' - use the material list to obtain the dictionary of initial intensities - we need to make sure that the intensities are properly scaled by the - lorentz polarization factor. since the calculation is done in the - LeBail class, all that means is the initial intensity needs that factor - in there - ''' - img_dict = dict.fromkeys(self.detectors) - - # find min and max tth over all panels - tth_mi = np.inf - tth_ma = 0.0 - ptth_dict = dict.fromkeys(self.detectors) - for det_key, panel in self.detectors.items(): - ptth, peta = panel.pixel_angles(origin=origin) - tth_mi = min(tth_mi, ptth.min()) - tth_ma = max(tth_ma, ptth.max()) - ptth_dict[det_key] = ptth - - ''' - now make a list of two theta and dummy ones for the experimental - spectrum this is never really used so any values should be okay. We - could also pas the integrated detector image if we would like to - simulate some realistic background. But thats for another day. - ''' - # convert angles to degrees because thats what the WPPF expects - tth_mi = np.degrees(tth_mi) - tth_ma = np.degrees(tth_ma) - - # get tth angular resolution for instrument - ang_res = max_resolution(self) - - # !!! 
calc nsteps by oversampling - nsteps = int(np.ceil(2 * (tth_ma - tth_mi) / np.degrees(ang_res[0]))) - - # evaulation vector for LeBail - tth = np.linspace(tth_mi, tth_ma, nsteps) - - expt = np.vstack([tth, np.ones_like(tth)]).T - - wavelength = [ - valWUnit('lp', 'length', self.beam_wavelength, 'angstrom'), - 1.0, - ] - - ''' - now go through the material list and get the intensity dictionary - ''' - intensity = {} - for mat in mat_list: - - multiplicity = mat.planeData.getMultiplicity() - - tth = mat.planeData.getTTh() - - LP = ( - (1 + np.cos(tth) ** 2) - / np.cos(0.5 * tth) - / np.sin(0.5 * tth) ** 2 - ) - - intensity[mat.name] = {} - intensity[mat.name]['synchrotron'] = ( - mat.planeData.structFact * LP * multiplicity - ) - - kwargs = { - 'expt_spectrum': expt, - 'params': params, - 'phases': mat_list, - 'wavelength': {'synchrotron': wavelength}, - 'bkgmethod': bkgmethod, - 'intensity_init': intensity, - 'peakshape': 'pvtch', - } - - self.WPPFclass = LeBail(**kwargs) - - self.simulated_spectrum = self.WPPFclass.spectrum_sim - self.background = self.WPPFclass.background - - ''' - now that we have the simulated intensities, its time to get the - two theta for the detector pixels and interpolate what the intensity - for each pixel should be - ''' - - img_dict = dict.fromkeys(self.detectors) - for det_key, panel in self.detectors.items(): - ptth = ptth_dict[det_key] - - img = np.interp( - np.degrees(ptth), - self.simulated_spectrum.x, - self.simulated_spectrum.y + self.background.y, - ) - - if noise is None: - img_dict[det_key] = img - - else: - # Rescale to be between 0 and 1 so random_noise() will work - prev_max = img.max() - img /= prev_max - - if noise.lower() == 'poisson': - im_noise = random_noise(img, mode='poisson', clip=True) - mi = im_noise.min() - ma = im_noise.max() - if ma > mi: - im_noise = (im_noise - mi) / (ma - mi) - - elif noise.lower() == 'gaussian': - im_noise = random_noise(img, mode='gaussian', clip=True) - - elif noise.lower() == 'salt': - im_noise = random_noise(img, mode='salt') - - elif noise.lower() == 'pepper': - im_noise = random_noise(img, mode='pepper') - - elif noise.lower() == 's&p': - im_noise = random_noise(img, mode='s&p') - - elif noise.lower() == 'speckle': - im_noise = random_noise(img, mode='speckle', clip=True) - - # Now scale back up - img_dict[det_key] = im_noise * prev_max - - return img_dict - - def simulate_laue_pattern( - self, - crystal_data, - minEnergy=5.0, - maxEnergy=35.0, - rmat_s=None, - grain_params=None, - ): - """ - Simulate Laue diffraction over the instrument. - - Parameters - ---------- - crystal_data : TYPE - DESCRIPTION. - minEnergy : TYPE, optional - DESCRIPTION. The default is 5.. - maxEnergy : TYPE, optional - DESCRIPTION. The default is 35.. - rmat_s : TYPE, optional - DESCRIPTION. The default is None. - grain_params : TYPE, optional - DESCRIPTION. The default is None. - - Returns - ------- - results : TYPE - DESCRIPTION. - - xy_det, hkls_in, angles, dspacing, energy - - TODO: revisit output; dict, or concatenated list? 
- """ - results = dict.fromkeys(self.detectors) - for det_key, panel in self.detectors.items(): - results[det_key] = panel.simulate_laue_pattern( - crystal_data, - minEnergy=minEnergy, - maxEnergy=maxEnergy, - rmat_s=rmat_s, - tvec_s=self.tvec, - grain_params=grain_params, - beam_vec=self.beam_vector, - ) - return results - - def simulate_rotation_series( - self, - plane_data, - grain_param_list, - eta_ranges=[ - (-np.pi, np.pi), - ], - ome_ranges=[ - (-np.pi, np.pi), - ], - ome_period=(-np.pi, np.pi), - wavelength=None, - ): - """ - Simulate a monochromatic rotation series over the instrument. - - Parameters - ---------- - plane_data : TYPE - DESCRIPTION. - grain_param_list : TYPE - DESCRIPTION. - eta_ranges : TYPE, optional - DESCRIPTION. The default is [(-np.pi, np.pi), ]. - ome_ranges : TYPE, optional - DESCRIPTION. The default is [(-np.pi, np.pi), ]. - ome_period : TYPE, optional - DESCRIPTION. The default is (-np.pi, np.pi). - wavelength : TYPE, optional - DESCRIPTION. The default is None. - - Returns - ------- - results : TYPE - DESCRIPTION. - - TODO: revisit output; dict, or concatenated list? - """ - results = dict.fromkeys(self.detectors) - for det_key, panel in self.detectors.items(): - results[det_key] = panel.simulate_rotation_series( - plane_data, - grain_param_list, - eta_ranges=eta_ranges, - ome_ranges=ome_ranges, - ome_period=ome_period, - chi=self.chi, - tVec_s=self.tvec, - wavelength=wavelength, - ) - return results - - def pull_spots( - self, - plane_data, - grain_params, - imgser_dict, - tth_tol=0.25, - eta_tol=1.0, - ome_tol=1.0, - npdiv=2, - threshold=10, - eta_ranges=[ - (-np.pi, np.pi), - ], - ome_period=None, - dirname='results', - filename=None, - output_format='text', - return_spot_list=False, - quiet=True, - check_only=False, - interp='nearest', - ): - """ - Exctract reflection info from a rotation series. - - Input must be encoded as an OmegaImageseries object. - - Parameters - ---------- - plane_data : TYPE - DESCRIPTION. - grain_params : TYPE - DESCRIPTION. - imgser_dict : TYPE - DESCRIPTION. - tth_tol : TYPE, optional - DESCRIPTION. The default is 0.25. - eta_tol : TYPE, optional - DESCRIPTION. The default is 1.. - ome_tol : TYPE, optional - DESCRIPTION. The default is 1.. - npdiv : TYPE, optional - DESCRIPTION. The default is 2. - threshold : TYPE, optional - DESCRIPTION. The default is 10. - eta_ranges : TYPE, optional - DESCRIPTION. The default is [(-np.pi, np.pi), ]. - ome_period : TYPE, optional - DESCRIPTION. The default is (-np.pi, np.pi). - dirname : TYPE, optional - DESCRIPTION. The default is 'results'. - filename : TYPE, optional - DESCRIPTION. The default is None. - output_format : TYPE, optional - DESCRIPTION. The default is 'text'. - return_spot_list : TYPE, optional - DESCRIPTION. The default is False. - quiet : TYPE, optional - DESCRIPTION. The default is True. - check_only : TYPE, optional - DESCRIPTION. The default is False. - interp : TYPE, optional - DESCRIPTION. The default is 'nearest'. - - Returns - ------- - compl : TYPE - DESCRIPTION. - output : TYPE - DESCRIPTION. - - """ - # grain parameters - rMat_c = make_rmat_of_expmap(grain_params[:3]) - tVec_c = grain_params[3:6] - - # grab omega ranges from first imageseries - # - # WARNING: all imageseries AND all wedges within are assumed to have - # the same omega values; put in a check that they are all the same??? 
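A minimal sketch of the check suggested in the comment above (illustrative only; it assumes each value of imgser_dict is an OmegaImageSeries exposing omegawedges.wedges as dicts with 'ostart' and 'ostop' keys, exactly as used just below):

# verify every imageseries carries the same omega wedges
ref_wedges = [
    (w['ostart'], w['ostop'])
    for w in next(iter(imgser_dict.values())).omegawedges.wedges
]
for det_key, oims in imgser_dict.items():
    wedges = [(w['ostart'], w['ostop']) for w in oims.omegawedges.wedges]
    if not np.allclose(wedges, ref_wedges):
        raise RuntimeError(f"omega wedges of '{det_key}' differ from the rest")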
- oims0 = next(iter(imgser_dict.values())) - ome_ranges = [ - np.radians([i['ostart'], i['ostop']]) - for i in oims0.omegawedges.wedges - ] - if ome_period is None: - ims = next(iter(imgser_dict.values())) - ostart = ims.omega[0, 0] - ome_period = np.radians(ostart + np.r_[0.0, 360.0]) - - # delta omega in DEGREES grabbed from first imageseries in the dict - delta_ome = oims0.omega[0, 1] - oims0.omega[0, 0] - - # make omega grid for frame expansion around reference frame - # in DEGREES - ndiv_ome, ome_del = make_tolerance_grid( - delta_ome, - ome_tol, - 1, - adjust_window=True, - ) - - # generate structuring element for connected component labeling - if ndiv_ome == 1: - label_struct = ndimage.generate_binary_structure(2, 2) - else: - label_struct = ndimage.generate_binary_structure(3, 3) - - # simulate rotation series - sim_results = self.simulate_rotation_series( - plane_data, - [ - grain_params, - ], - eta_ranges=eta_ranges, - ome_ranges=ome_ranges, - ome_period=ome_period, - ) - - # patch vertex generator (global for instrument) - tol_vec = 0.5 * np.radians( - [ - -tth_tol, - -eta_tol, - -tth_tol, - eta_tol, - tth_tol, - eta_tol, - tth_tol, - -eta_tol, - ] - ) - - # prepare output if requested - if filename is not None and output_format.lower() == 'hdf5': - this_filename = os.path.join(dirname, filename) - writer = GrainDataWriter_h5( - os.path.join(dirname, filename), - self.write_config(), - grain_params, - ) - - # ===================================================================== - # LOOP OVER PANELS - # ===================================================================== - iRefl = 0 - next_invalid_peak_id = -100 - compl = [] - output = dict.fromkeys(self.detectors) - for detector_id, panel in self.detectors.items(): - # initialize text-based output writer - if filename is not None and output_format.lower() == 'text': - output_dir = os.path.join(dirname, detector_id) - os.makedirs(output_dir, exist_ok=True) - this_filename = os.path.join(output_dir, filename) - writer = PatchDataWriter(this_filename) - - # grab panel - instr_cfg = panel.config_dict( - self.chi, - self.tvec, - beam_energy=self.beam_energy, - beam_vector=self.beam_vector, - style='hdf5', - ) - native_area = panel.pixel_area # pixel ref area - - # pull out the OmegaImageSeries for this panel from input dict - ome_imgser = _parse_imgser_dict( - imgser_dict, detector_id, roi=panel.roi - ) - - # extract simulation results - sim_results_p = sim_results[detector_id] - hkl_ids = sim_results_p[0][0] - hkls_p = sim_results_p[1][0] - ang_centers = sim_results_p[2][0] - xy_centers = sim_results_p[3][0] - ang_pixel_size = sim_results_p[4][0] - - # now verify that full patch falls on detector... - # ???: strictly necessary? - # - # patch vertex array from sim - nangs = len(ang_centers) - patch_vertices = ( - np.tile(ang_centers[:, :2], (1, 4)) - + np.tile(tol_vec, (nangs, 1)) - ).reshape(4 * nangs, 2) - ome_dupl = np.tile(ang_centers[:, 2], (4, 1)).T.reshape( - len(patch_vertices), 1 - ) - - # find vertices that all fall on the panel - det_xy, rmats_s, on_plane = xrdutil._project_on_detector_plane( - np.hstack([patch_vertices, ome_dupl]), - panel.rmat, - rMat_c, - self.chi, - panel.tvec, - tVec_c, - self.tvec, - panel.distortion, - ) - _, on_panel = panel.clip_to_panel(det_xy, buffer_edges=True) - - # all vertices must be on... - patch_is_on = np.all(on_panel.reshape(nangs, 4), axis=1) - patch_xys = det_xy.reshape(nangs, 4, 2)[patch_is_on] - - # re-filter... 
- hkl_ids = hkl_ids[patch_is_on] - hkls_p = hkls_p[patch_is_on, :] - ang_centers = ang_centers[patch_is_on, :] - xy_centers = xy_centers[patch_is_on, :] - ang_pixel_size = ang_pixel_size[patch_is_on, :] - - # TODO: add polygon testing right here! - # done - if check_only: - patch_output = [] - for i_pt, angs in enumerate(ang_centers): - # the evaluation omegas; - # expand about the central value using tol vector - ome_eval = np.degrees(angs[2]) + ome_del - - # ...vectorize the omega_to_frame function to avoid loop? - frame_indices = [ - ome_imgser.omega_to_frame(ome)[0] for ome in ome_eval - ] - if -1 in frame_indices: - if not quiet: - msg = """ - window for (%d %d %d) falls outside omega range - """ % tuple( - hkls_p[i_pt, :] - ) - print(msg) - continue - else: - these_vertices = patch_xys[i_pt] - ijs = panel.cartToPixel(these_vertices) - ii, jj = polygon(ijs[:, 0], ijs[:, 1]) - contains_signal = False - for i_frame in frame_indices: - contains_signal = contains_signal or np.any( - ome_imgser[i_frame][ii, jj] > threshold - ) - compl.append(contains_signal) - patch_output.append((ii, jj, frame_indices)) - else: - # make the tth,eta patches for interpolation - patches = xrdutil.make_reflection_patches( - instr_cfg, - ang_centers[:, :2], - ang_pixel_size, - omega=ang_centers[:, 2], - tth_tol=tth_tol, - eta_tol=eta_tol, - rmat_c=rMat_c, - tvec_c=tVec_c, - npdiv=npdiv, - quiet=True, - ) - - # GRAND LOOP over reflections for this panel - patch_output = [] - for i_pt, patch in enumerate(patches): - - # strip relevant objects out of current patch - vtx_angs, vtx_xy, conn, areas, xy_eval, ijs = patch - - prows, pcols = areas.shape - nrm_fac = areas / float(native_area) - nrm_fac = nrm_fac / np.min(nrm_fac) - - # grab hkl info - hkl = hkls_p[i_pt, :] - hkl_id = hkl_ids[i_pt] - - # edge arrays - tth_edges = vtx_angs[0][0, :] - delta_tth = tth_edges[1] - tth_edges[0] - eta_edges = vtx_angs[1][:, 0] - delta_eta = eta_edges[1] - eta_edges[0] - - # need to reshape eval pts for interpolation - xy_eval = np.vstack( - [xy_eval[0].flatten(), xy_eval[1].flatten()] - ).T - - # the evaluation omegas; - # expand about the central value using tol vector - ome_eval = np.degrees(ang_centers[i_pt, 2]) + ome_del - - # ???: vectorize the omega_to_frame function to avoid loop? - frame_indices = [ - ome_imgser.omega_to_frame(ome)[0] for ome in ome_eval - ] - - if -1 in frame_indices: - if not quiet: - msg = """ - window for (%d%d%d) falls outside omega range - """ % tuple( - hkl - ) - print(msg) - continue - else: - # initialize spot data parameters - # !!! 
maybe change these to nan to not fuck up writer - peak_id = next_invalid_peak_id - sum_int = np.nan - max_int = np.nan - meas_angs = np.nan * np.ones(3) - meas_xy = np.nan * np.ones(2) - - # quick check for intensity - contains_signal = False - patch_data_raw = [] - for i_frame in frame_indices: - tmp = ome_imgser[i_frame][ijs[0], ijs[1]] - contains_signal = contains_signal or np.any( - tmp > threshold - ) - patch_data_raw.append(tmp) - patch_data_raw = np.stack(patch_data_raw, axis=0) - compl.append(contains_signal) - - if contains_signal: - # initialize patch data array for intensities - if interp.lower() == 'bilinear': - patch_data = np.zeros( - (len(frame_indices), prows, pcols) - ) - for i, i_frame in enumerate(frame_indices): - patch_data[i] = panel.interpolate_bilinear( - xy_eval, - ome_imgser[i_frame], - pad_with_nans=False, - ).reshape( - prows, pcols - ) # * nrm_fac - elif interp.lower() == 'nearest': - patch_data = patch_data_raw # * nrm_fac - else: - msg = ( - "interpolation option " - + "'%s' not understood" - ) - raise RuntimeError(msg % interp) - - # now have interpolated patch data... - labels, num_peaks = ndimage.label( - patch_data > threshold, structure=label_struct - ) - slabels = np.arange(1, num_peaks + 1) - - if num_peaks > 0: - peak_id = iRefl - props = regionprops(labels, patch_data) - coms = np.vstack( - [x.weighted_centroid for x in props] - ) - if num_peaks > 1: - center = np.r_[patch_data.shape] * 0.5 - center_t = np.tile(center, (num_peaks, 1)) - com_diff = coms - center_t - closest_peak_idx = np.argmin( - np.sum(com_diff**2, axis=1) - ) - else: - closest_peak_idx = 0 - coms = coms[closest_peak_idx] - # meas_omes = \ - # ome_edges[0] + (0.5 + coms[0])*delta_ome - meas_omes = ome_eval[0] + coms[0] * delta_ome - meas_angs = np.hstack( - [ - tth_edges[0] - + (0.5 + coms[2]) * delta_tth, - eta_edges[0] - + (0.5 + coms[1]) * delta_eta, - mapAngle( - np.radians(meas_omes), ome_period - ), - ] - ) - - # intensities - # - summed is 'integrated' over interpolated - # data - # - max is max of raw input data - sum_int = np.sum( - patch_data[ - labels == slabels[closest_peak_idx] - ] - ) - max_int = np.max( - patch_data_raw[ - labels == slabels[closest_peak_idx] - ] - ) - # ???: Should this only use labeled pixels? - # Those are segmented from interpolated data, - # not raw; likely ok in most cases. - - # need MEASURED xy coords - # FIXME: overload angles_to_cart? - gvec_c = angles_to_gvec( - meas_angs, - chi=self.chi, - rmat_c=rMat_c, - beam_vec=self.beam_vector, - ) - rMat_s = make_sample_rmat( - self.chi, meas_angs[2] - ) - meas_xy = gvec_to_xy( - gvec_c, - panel.rmat, - rMat_s, - rMat_c, - panel.tvec, - self.tvec, - tVec_c, - beam_vec=self.beam_vector, - ) - if panel.distortion is not None: - meas_xy = panel.distortion.apply_inverse( - np.atleast_2d(meas_xy) - ).flatten() - # FIXME: why is this suddenly necessary??? - meas_xy = meas_xy.squeeze() - else: - patch_data = patch_data_raw - - if peak_id < 0: - # The peak is invalid. - # Decrement the next invalid peak ID. 
- next_invalid_peak_id -= 1 - - # write output - if filename is not None: - if output_format.lower() == 'text': - writer.dump_patch( - peak_id, - hkl_id, - hkl, - sum_int, - max_int, - ang_centers[i_pt], - meas_angs, - xy_centers[i_pt], - meas_xy, - ) - elif output_format.lower() == 'hdf5': - xyc_arr = xy_eval.reshape( - prows, pcols, 2 - ).transpose(2, 0, 1) - writer.dump_patch( - detector_id, - iRefl, - peak_id, - hkl_id, - hkl, - tth_edges, - eta_edges, - np.radians(ome_eval), - xyc_arr, - ijs, - frame_indices, - patch_data, - ang_centers[i_pt], - xy_centers[i_pt], - meas_angs, - meas_xy, - ) - - if return_spot_list: - # Full output - xyc_arr = xy_eval.reshape( - prows, pcols, 2 - ).transpose(2, 0, 1) - _patch_output = [ - detector_id, - iRefl, - peak_id, - hkl_id, - hkl, - tth_edges, - eta_edges, - np.radians(ome_eval), - xyc_arr, - ijs, - frame_indices, - patch_data, - ang_centers[i_pt], - xy_centers[i_pt], - meas_angs, - meas_xy, - ] - else: - # Trimmed output - _patch_output = [ - peak_id, - hkl_id, - hkl, - sum_int, - max_int, - ang_centers[i_pt], - meas_angs, - meas_xy, - ] - patch_output.append(_patch_output) - iRefl += 1 - output[detector_id] = patch_output - if filename is not None and output_format.lower() == 'text': - writer.close() - if filename is not None and output_format.lower() == 'hdf5': - writer.close() - return compl, output - - def update_memoization_sizes(self): - # Resize all known memoization functions to have a cache at least - # the size of the number of detectors. - all_panels = list(self.detectors.values()) - PlanarDetector.update_memoization_sizes(all_panels) - CylindricalDetector.update_memoization_sizes(all_panels) - - def calc_transmission( - self, rMat_s: np.ndarray = None - ) -> dict[str, np.ndarray]: - """calculate the transmission from the - filter and polymer coating. the inverse of this - number is the intensity correction that needs - to be applied. 
Actual computation is done inside
- the detector class.
- """
- if rMat_s is None:
- rMat_s = ct.identity_3x3
-
- energy = self.beam_energy
- transmissions = {}
- for det_name, det in self.detectors.items():
- transmission_filter, transmission_phosphor = (
- det.calc_filter_coating_transmission(energy)
- )
-
- transmission = transmission_filter * transmission_phosphor
-
- if self.physics_package is not None:
- transmission_physics_package = (
- det.calc_physics_package_transmission(
- energy, rMat_s, self.physics_package
- )
- )
- effective_pinhole_area = det.calc_effective_pinhole_area(
- self.physics_package
- )
-
- transmission = (
- transmission
- * transmission_physics_package
- * effective_pinhole_area
- )
-
- transmissions[det_name] = transmission
- return transmissions
-
-
-# =============================================================================
-# UTILITIES
-# =============================================================================
-
-
-class PatchDataWriter(object):
- """Class for dumping Bragg reflection data."""
-
- def __init__(self, filename):
- self._delim = ' '
- # fmt: off
- header_items = (
- '# ID', 'PID',
- 'H', 'K', 'L',
- 'sum(int)', 'max(int)',
- 'pred tth', 'pred eta', 'pred ome',
- 'meas tth', 'meas eta', 'meas ome',
- 'pred X', 'pred Y',
- 'meas X', 'meas Y'
- )
- self._header = self._delim.join([
- self._delim.join(np.tile('{:<6}', 5)).format(*header_items[:5]),
- self._delim.join(np.tile('{:<12}', 2)).format(*header_items[5:7]),
- self._delim.join(np.tile('{:<23}', 10)).format(*header_items[7:17])
- ])
-
- # fmt: on
- if isinstance(filename, IOBase):
- self.fid = filename
- else:
- self.fid = open(filename, 'w')
- print(self._header, file=self.fid)
-
- def __del__(self):
- self.close()
-
- def close(self):
- self.fid.close()
-
- def dump_patch(
- self, peak_id, hkl_id, hkl, spot_int, max_int, pangs, mangs, pxy, mxy
- ):
- """
- !!! maybe need to check that last four inputs are arrays
- """
- if mangs is None:
- spot_int = np.nan
- max_int = np.nan
- mangs = np.nan * np.ones(3)
- mxy = np.nan * np.ones(2)
-
- res = (
- [int(peak_id), int(hkl_id)]
- + np.array(hkl, dtype=int).tolist()
- + [spot_int, max_int]
- + pangs.tolist()
- + mangs.tolist()
- + pxy.tolist()
- + mxy.tolist()
- )
-
- output_str = self._delim.join(
- [
- self._delim.join(np.tile('{:<6d}', 5)).format(*res[:5]),
- self._delim.join(np.tile('{:<12e}', 2)).format(*res[5:7]),
- self._delim.join(np.tile('{:<23.16e}', 10)).format(*res[7:]),
- ]
- )
- print(output_str, file=self.fid)
- return output_str
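For orientation, a minimal sketch of reading the table this writer produces (illustrative only; the filename is hypothetical and not fixed by this class):

import numpy as np

# One '# ...' header line, then 17 whitespace-delimited columns per
# reflection: ID, PID, H, K, L, sum(int), max(int), three predicted and
# three measured angles, then predicted and measured XY.
spots = np.loadtxt('spots_results.out', ndmin=2)  # '#' header is skipped
valid = spots[spots[:, 0] >= 0]  # negative IDs mark invalid/unfit peaks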
-
-
-class GrainDataWriter(object):
- """Class for dumping grain data."""
-
- def __init__(self, filename=None, array=None):
- """Writes to either a file or a numpy array.
-
- Array must be initialized with number of rows to be written.
- """
- if filename is None and array is None:
- raise RuntimeError(
- 'GrainDataWriter must be specified with filename or array'
- )
-
- self.array = None
- self.fid = None
-
- # array supersedes filename
- if array is not None:
- assert (
- array.shape[1] == 21
- ), f'grain data table must have 21 columns not {array.shape[1]}'
- self.array = array
- self._array_row = 0
- return
-
- self._delim = ' '
- # fmt: off
- header_items = (
- '# grain ID', 'completeness', 'chi^2',
- 'exp_map_c[0]', 'exp_map_c[1]', 'exp_map_c[2]',
- 't_vec_c[0]', 't_vec_c[1]', 't_vec_c[2]',
- 'inv(V_s)[0,0]', 'inv(V_s)[1,1]', 'inv(V_s)[2,2]',
- 'inv(V_s)[1,2]*sqrt(2)',
- 'inv(V_s)[0,2]*sqrt(2)',
- 'inv(V_s)[0,1]*sqrt(2)',
- 'ln(V_s)[0,0]', 'ln(V_s)[1,1]', 'ln(V_s)[2,2]',
- 'ln(V_s)[1,2]', 'ln(V_s)[0,2]', 'ln(V_s)[0,1]'
- )
- self._header = self._delim.join(
- [self._delim.join(
- np.tile('{:<12}', 3)
- ).format(*header_items[:3]),
- self._delim.join(
- np.tile('{:<23}', len(header_items) - 3)
- ).format(*header_items[3:])]
- )
- # fmt: on
- if isinstance(filename, IOBase):
- self.fid = filename
- else:
- self.fid = open(filename, 'w')
- print(self._header, file=self.fid)
-
- def __del__(self):
- self.close()
-
- def close(self):
- if self.fid is not None:
- self.fid.close()
-
- def dump_grain(self, grain_id, completeness, chisq, grain_params):
- assert (
- len(grain_params) == 12
- ), "len(grain_params) must be 12, not %d" % len(grain_params)
-
- # extract strain
- emat = logm(np.linalg.inv(mutil.vecMVToSymm(grain_params[6:])))
- evec = mutil.symmToVecMV(emat, scale=False)
-
- res = (
- [int(grain_id), completeness, chisq]
- + grain_params.tolist()
- + evec.tolist()
- )
-
- if self.array is not None:
- row = self._array_row
- assert (
- row < self.array.shape[0]
- ), f'invalid row {row} in array table'
- self.array[row] = res
- self._array_row += 1
- return res
-
- # (else) format and write to file
- output_str = self._delim.join(
- [
- self._delim.join(['{:<12d}', '{:<12f}', '{:<12e}']).format(
- *res[:3]
- ),
- self._delim.join(np.tile('{:<23.16e}', len(res) - 3)).format(
- *res[3:]
- ),
- ]
- )
- print(output_str, file=self.fid)
- return output_str
-
-
-class GrainDataWriter_h5(object):
- """Class for dumping grain results to an HDF5 archive. 
- - TODO: add material spec - """ - - def __init__(self, filename, instr_cfg, grain_params, use_attr=False): - if isinstance(filename, h5py.File): - self.fid = filename - else: - self.fid = h5py.File(filename + ".hdf5", "w") - icfg = dict(instr_cfg) - - # add instrument groups and attributes - self.instr_grp = self.fid.create_group('instrument') - unwrap_dict_to_h5(self.instr_grp, icfg, asattr=use_attr) - - # add grain group - self.grain_grp = self.fid.create_group('grain') - rmat_c = make_rmat_of_expmap(grain_params[:3]) - tvec_c = np.array(grain_params[3:6]).flatten() - vinv_s = np.array(grain_params[6:]).flatten() - vmat_s = np.linalg.inv(mutil.vecMVToSymm(vinv_s)) - - if use_attr: # attribute version - self.grain_grp.attrs.create('rmat_c', rmat_c) - self.grain_grp.attrs.create('tvec_c', tvec_c) - self.grain_grp.attrs.create('inv(V)_s', vinv_s) - self.grain_grp.attrs.create('vmat_s', vmat_s) - else: # dataset version - self.grain_grp.create_dataset('rmat_c', data=rmat_c) - self.grain_grp.create_dataset('tvec_c', data=tvec_c) - self.grain_grp.create_dataset('inv(V)_s', data=vinv_s) - self.grain_grp.create_dataset('vmat_s', data=vmat_s) - - data_key = 'reflection_data' - self.data_grp = self.fid.create_group(data_key) - - for det_key in self.instr_grp['detectors'].keys(): - self.data_grp.create_group(det_key) - - # FIXME: throws exception when called after close method - # def __del__(self): - # self.close() - - def close(self): - self.fid.close() - - def dump_patch( - self, - panel_id, - i_refl, - peak_id, - hkl_id, - hkl, - tth_edges, - eta_edges, - ome_centers, - xy_centers, - ijs, - frame_indices, - spot_data, - pangs, - pxy, - mangs, - mxy, - gzip=1, - ): - """ - to be called inside loop over patches - - default GZIP level for data arrays is 1 - """ - fi = np.array(frame_indices, dtype=int) - - panel_grp = self.data_grp[panel_id] - spot_grp = panel_grp.create_group("spot_%05d" % i_refl) - spot_grp.attrs.create('peak_id', int(peak_id)) - spot_grp.attrs.create('hkl_id', int(hkl_id)) - spot_grp.attrs.create('hkl', np.array(hkl, dtype=int)) - spot_grp.attrs.create('predicted_angles', pangs) - spot_grp.attrs.create('predicted_xy', pxy) - if mangs is None: - mangs = np.nan * np.ones(3) - spot_grp.attrs.create('measured_angles', mangs) - if mxy is None: - mxy = np.nan * np.ones(3) - spot_grp.attrs.create('measured_xy', mxy) - - # get centers crds from edge arrays - # FIXME: export full coordinate arrays, or just center vectors??? - # - # ome_crd, eta_crd, tth_crd = np.meshgrid( - # ome_centers, - # centers_of_edge_vec(eta_edges), - # centers_of_edge_vec(tth_edges), - # indexing='ij') - # - # ome_dim, eta_dim, tth_dim = spot_data.shape - - # !!! 
for now just exporting center vectors for spot_data - tth_crd = centers_of_edge_vec(tth_edges) - eta_crd = centers_of_edge_vec(eta_edges) - - shuffle_data = True # reduces size by 20% - spot_grp.create_dataset( - 'tth_crd', - data=tth_crd, - compression="gzip", - compression_opts=gzip, - shuffle=shuffle_data, - ) - spot_grp.create_dataset( - 'eta_crd', - data=eta_crd, - compression="gzip", - compression_opts=gzip, - shuffle=shuffle_data, - ) - spot_grp.create_dataset( - 'ome_crd', - data=ome_centers, - compression="gzip", - compression_opts=gzip, - shuffle=shuffle_data, - ) - spot_grp.create_dataset( - 'xy_centers', - data=xy_centers, - compression="gzip", - compression_opts=gzip, - shuffle=shuffle_data, - ) - spot_grp.create_dataset( - 'ij_centers', - data=ijs, - compression="gzip", - compression_opts=gzip, - shuffle=shuffle_data, - ) - spot_grp.create_dataset( - 'frame_indices', - data=fi, - compression="gzip", - compression_opts=gzip, - shuffle=shuffle_data, - ) - spot_grp.create_dataset( - 'intensities', - data=spot_data, - compression="gzip", - compression_opts=gzip, - shuffle=shuffle_data, - ) - return - - -class GenerateEtaOmeMaps(object): - """ - eta-ome map class derived from new image_series and YAML config - - ...for now... - - must provide: - - self.dataStore - self.planeData - self.iHKLList - self.etaEdges # IN RADIANS - self.omeEdges # IN RADIANS - self.etas # IN RADIANS - self.omegas # IN RADIANS - - """ - - def __init__( - self, - image_series_dict, - instrument, - plane_data, - active_hkls=None, - eta_step=0.25, - threshold=None, - ome_period=(0, 360), - ): - """ - image_series must be OmegaImageSeries class - instrument_params must be a dict (loaded from yaml spec) - active_hkls must be a list (required for now) - - FIXME: get rid of omega period; should get it from imageseries - """ - - self._planeData = plane_data - - # ???: change name of iHKLList? - # ???: can we change the behavior of iHKLList? - if active_hkls is None: - self._iHKLList = plane_data.getHKLID(plane_data.hkls, master=True) - n_rings = len(self._iHKLList) - else: - assert hasattr( - active_hkls, '__len__' - ), "active_hkls must be an iterable with __len__" - self._iHKLList = active_hkls - n_rings = len(active_hkls) - - # grab a det key and corresponding imageseries (first will do) - # !!! assuming that the imageseries for all panels - # have the same length and omegas - det_key, this_det_ims = next(iter(image_series_dict.items())) - - # handle omegas - # !!! for multi wedge, enforncing monotonicity - # !!! wedges also cannot overlap or span more than 360 - omegas_array = this_det_ims.metadata['omega'] # !!! DEGREES - delta_ome = omegas_array[0][-1] - omegas_array[0][0] - frame_mask = None - ome_period = omegas_array[0, 0] + np.r_[0.0, 360.0] # !!! be careful - if this_det_ims.omegawedges.nwedges > 1: - delta_omes = [ - (i['ostop'] - i['ostart']) / i['nsteps'] - for i in this_det_ims.omegawedges.wedges - ] - check_wedges = mutil.uniqueVectors( - np.atleast_2d(delta_omes), tol=1e-6 - ).squeeze() - assert ( - check_wedges.size == 1 - ), "all wedges must have the same delta omega to 1e-6" - # grab representative delta ome - # !!! assuming positive delta consistent with OmegaImageSeries - delta_ome = delta_omes[0] - - # grab full-range start/stop - # !!! be sure to map to the same period to enable arithmatic - # ??? safer to do this way rather than just pulling from - # the omegas attribute? - owedges = this_det_ims.omegawedges.wedges - ostart = owedges[0]['ostart'] # !!! 
DEGREES - ostop = float( - mapAngle(owedges[-1]['ostop'], ome_period, units='degrees') - ) - # compute total nsteps - # FIXME: need check for roundoff badness - nsteps = int((ostop - ostart) / delta_ome) - ome_edges_full = np.linspace( - ostart, ostop, num=nsteps + 1, endpoint=True - ) - omegas_array = np.vstack( - [ome_edges_full[:-1], ome_edges_full[1:]] - ).T - ome_centers = np.average(omegas_array, axis=1) - - # use OmegaImageSeries method to determine which bins have data - # !!! this array has -1 outside a wedge - # !!! again assuming the valid frame order increases monotonically - frame_mask = np.array( - [ - this_det_ims.omega_to_frame(ome)[0] != -1 - for ome in ome_centers - ] - ) - - # ???: need to pass a threshold? - eta_mapping, etas = instrument.extract_polar_maps( - plane_data, - image_series_dict, - active_hkls=active_hkls, - threshold=threshold, - tth_tol=None, - eta_tol=eta_step, - ) - - # for convenience grab map shape from first - map_shape = next(iter(eta_mapping.values())).shape[1:] - - # pack all detectors with masking - # FIXME: add omega masking - data_store = [] - for i_ring in range(n_rings): - # first handle etas - full_map = np.zeros(map_shape, dtype=float) - nan_mask_full = np.zeros( - (len(eta_mapping), map_shape[0], map_shape[1]) - ) - i_p = 0 - for det_key, eta_map in eta_mapping.items(): - nan_mask = ~np.isnan(eta_map[i_ring]) - nan_mask_full[i_p] = nan_mask - full_map[nan_mask] += eta_map[i_ring][nan_mask] - i_p += 1 - re_nan_these = np.sum(nan_mask_full, axis=0) == 0 - full_map[re_nan_these] = np.nan - - # now omegas - if frame_mask is not None: - # !!! must expand row dimension to include - # skipped omegas - tmp = np.ones((len(frame_mask), map_shape[1])) * np.nan - tmp[frame_mask, :] = full_map - full_map = tmp - data_store.append(full_map) - self._dataStore = data_store - - # set required attributes - self._omegas = mapAngle( - np.radians(np.average(omegas_array, axis=1)), - np.radians(ome_period), - ) - self._omeEdges = mapAngle( - np.radians(np.r_[omegas_array[:, 0], omegas_array[-1, 1]]), - np.radians(ome_period), - ) - - # !!! must avoid the case where omeEdges[0] = omeEdges[-1] for the - # indexer to work properly - if abs(self._omeEdges[0] - self._omeEdges[-1]) <= ct.sqrt_epsf: - # !!! 
SIGNED delta ome
- del_ome = np.radians(omegas_array[0, 1] - omegas_array[0, 0])
- self._omeEdges[-1] = self._omeEdges[-2] + del_ome
-
- # handle etas
- # WARNING: unlike the omegas in imageseries metadata,
- # these are in RADIANS and represent bin centers
- self._etaEdges = etas
- self._etas = self._etaEdges[:-1] + 0.5 * np.radians(eta_step)
-
- @property
- def dataStore(self):
- return self._dataStore
-
- @property
- def planeData(self):
- return self._planeData
-
- @property
- def iHKLList(self):
- return np.atleast_1d(self._iHKLList).flatten()
-
- @property
- def etaEdges(self):
- return self._etaEdges
-
- @property
- def omeEdges(self):
- return self._omeEdges
-
- @property
- def etas(self):
- return self._etas
-
- @property
- def omegas(self):
- return self._omegas
-
- def save(self, filename):
- xrdutil.EtaOmeMaps.save_eta_ome_maps(self, filename)
-
-
-def _generate_ring_params(tthr, ptth, peta, eta_edges, delta_eta):
- # mark pixels in the spec'd tth range
- pixels_in_tthr = np.logical_and(ptth >= tthr[0], ptth <= tthr[1])
-
- # catch case where ring isn't on detector
- if not np.any(pixels_in_tthr):
- return None
-
- pixel_ids = np.where(pixels_in_tthr)
-
- # grab relevant eta coords using histogram
- pixel_etas = peta[pixel_ids]
- reta_hist = histogram(pixel_etas, eta_edges)
- bins_on_detector = np.where(reta_hist)[0]
-
- return pixel_etas, eta_edges, pixel_ids, bins_on_detector
-
-
-def run_fast_histogram(x, bins, weights=None):
- return histogram1d(x, len(bins) - 1, (bins[0], bins[-1]), weights=weights)
-
-
-def run_numpy_histogram(x, bins, weights=None):
- # NOTE: np.histogram returns (counts, edges); keep the counts only
- return np.histogram(x, bins=bins, weights=weights)[0]
-
-
-histogram = run_fast_histogram if fast_histogram else run_numpy_histogram
-
-
-def _run_histograms(rows, ims, tth_ranges, ring_maps, ring_params, threshold):
- for i_row in range(*rows):
- image = ims[i_row]
-
- # handle threshold if specified
- if threshold is not None:
- # !!! NaNs get preserved
- image = np.array(image)
- image[image < threshold] = 0.0
-
- for i_r, tthr in enumerate(tth_ranges):
- this_map = ring_maps[i_r]
- params = ring_params[i_r]
- if not params:
- # We are supposed to skip this ring...
- continue
-
- # Unpack the params
- pixel_etas, eta_edges, pixel_ids, bins_on_detector = params
- result = histogram(pixel_etas, eta_edges, weights=image[pixel_ids])
-
- # Note that this preserves nan values for bins not on the detector.
- this_map[i_row, bins_on_detector] = result[bins_on_detector]
-
-
-def _extract_detector_line_positions(
- iter_args,
- plane_data,
- tth_tol,
- eta_tol,
- eta_centers,
- npdiv,
- collapse_tth,
- collapse_eta,
- do_interpolation,
- do_fitting,
- fitting_kwargs,
- tth_distortion,
- max_workers,
-):
- panel, instr_cfg, images, pbp = iter_args
-
- if images.ndim == 2:
- images = np.tile(images, (1, 1, 1))
- elif images.ndim != 3:
- raise RuntimeError("images must be 2- or 3-d")
-
- # make rings
- # !!! adding tth_distortion pass-through; comes in as dict over panels
- tth_distr_cls = None
- if tth_distortion is not None:
- tth_distr_cls = tth_distortion[panel.name]
-
- pow_angs, pow_xys, tth_ranges = panel.make_powder_rings(
- plane_data,
- merge_hkls=True,
- delta_tth=tth_tol,
- delta_eta=eta_tol,
- eta_list=eta_centers,
- tth_distortion=tth_distr_cls,
- )
-
- tth_tols = np.degrees(np.hstack([i[1] - i[0] for i in tth_ranges]))
-
- # !!! 
this is only needed if doing fitting - if isinstance(plane_data, PlaneData): - tth_idx, tth_ranges = plane_data.getMergedRanges(cullDupl=True) - tth_ref = plane_data.getTTh() - tth0 = [np.degrees(tth_ref[i]) for i in tth_idx] - else: - tth0 = plane_data - - # ================================================================= - # LOOP OVER RING SETS - # ================================================================= - pbar_rings = partial( - tqdm, total=len(pow_angs), desc="Ringset", position=pbp - ) - - kwargs = { - 'instr_cfg': instr_cfg, - 'panel': panel, - 'eta_tol': eta_tol, - 'npdiv': npdiv, - 'collapse_tth': collapse_tth, - 'collapse_eta': collapse_eta, - 'images': images, - 'do_interpolation': do_interpolation, - 'do_fitting': do_fitting, - 'fitting_kwargs': fitting_kwargs, - 'tth_distortion': tth_distr_cls, - } - func = partial(_extract_ring_line_positions, **kwargs) - iter_arg = zip(pow_angs, pow_xys, tth_tols, tth0) - with ProcessPoolExecutor( - mp_context=constants.mp_context, max_workers=max_workers - ) as executor: - return list(pbar_rings(executor.map(func, iter_arg))) - - -def _extract_ring_line_positions( - iter_args, - instr_cfg, - panel, - eta_tol, - npdiv, - collapse_tth, - collapse_eta, - images, - do_interpolation, - do_fitting, - fitting_kwargs, - tth_distortion, -): - """ - Extracts data for a single Debye-Scherrer ring . - - Parameters - ---------- - iter_args : tuple - (angs [radians], - xys [mm], - tth_tol [deg], - this_tth0 [deg]) - instr_cfg : TYPE - DESCRIPTION. - panel : TYPE - DESCRIPTION. - eta_tol : TYPE - DESCRIPTION. - npdiv : TYPE - DESCRIPTION. - collapse_tth : TYPE - DESCRIPTION. - collapse_eta : TYPE - DESCRIPTION. - images : TYPE - DESCRIPTION. - do_interpolation : TYPE - DESCRIPTION. - do_fitting : TYPE - DESCRIPTION. - fitting_kwargs : TYPE - DESCRIPTION. - tth_distortion : TYPE - DESCRIPTION. - - Yields - ------ - patch_data : TYPE - DESCRIPTION. - - """ - # points are already checked to fall on detector - angs, xys, tth_tol, this_tth0 = iter_args - - # SS 01/31/25 noticed some nans in xys even after clipping - # going to do another round of masking to get rid of those - nan_mask = ~np.logical_or(np.isnan(xys), np.isnan(angs)) - nan_mask = np.logical_or.reduce(nan_mask, 1) - if angs.ndim > 1 and xys.ndim > 1: - angs = angs[nan_mask, :] - xys = xys[nan_mask, :] - - n_images = len(images) - native_area = panel.pixel_area - - # make the tth,eta patches for interpolation - patches = xrdutil.make_reflection_patches( - instr_cfg, - angs, - panel.angularPixelSize(xys), - tth_tol=tth_tol, - eta_tol=eta_tol, - npdiv=npdiv, - quiet=True, - ) - - # loop over patches - # FIXME: fix initialization - if collapse_tth: - patch_data = np.zeros((len(angs), n_images)) - else: - patch_data = [] - for i_p, patch in enumerate(patches): - # strip relevant objects out of current patch - vtx_angs, vtx_xys, conn, areas, xys_eval, ijs = patch - - # need to reshape eval pts for interpolation - xy_eval = np.vstack([xys_eval[0].flatten(), xys_eval[1].flatten()]).T - - _, on_panel = panel.clip_to_panel(xy_eval) - - if np.any(~on_panel): - continue - - if collapse_tth: - ang_data = (vtx_angs[0][0, [0, -1]], vtx_angs[1][[0, -1], 0]) - elif collapse_eta: - # !!! 
yield the tth bin centers - tth_centers = np.average( - np.vstack([vtx_angs[0][0, :-1], vtx_angs[0][0, 1:]]), axis=0 - ) - ang_data = (tth_centers, angs[i_p][-1]) - if do_fitting: - fit_data = [] - else: - ang_data = vtx_angs - - prows, pcols = areas.shape - area_fac = areas / float(native_area) - - # interpolate - if not collapse_tth: - ims_data = [] - for j_p in np.arange(len(images)): - # catch interpolation type - image = images[j_p] - if do_interpolation: - p_img = ( - panel.interpolate_bilinear( - xy_eval, - image, - ).reshape(prows, pcols) - * area_fac - ) - else: - p_img = image[ijs[0], ijs[1]] * area_fac - - # catch flat spectrum data, which will cause - # fitting to fail. - # ???: best here, or make fitting handle it? - mxval = np.max(p_img) - mnval = np.min(p_img) - if mxval == 0 or (1.0 - mnval / mxval) < 0.01: - continue - - # catch collapsing options - if collapse_tth: - patch_data[i_p, j_p] = np.average(p_img) - # ims_data.append(np.sum(p_img)) - else: - if collapse_eta: - lineout = np.average(p_img, axis=0) - ims_data.append(lineout) - if do_fitting: - if tth_distortion is not None: - # must correct tth0 - tmp = tth_distortion.apply( - panel.angles_to_cart( - np.vstack( - [ - np.radians(this_tth0), - np.tile( - ang_data[-1], len(this_tth0) - ), - ] - ).T - ), - return_nominal=True, - ) - pk_centers = np.degrees(tmp[:, 0]) - else: - pk_centers = this_tth0 - kwargs = { - 'tth_centers': np.degrees(tth_centers), - 'lineout': lineout, - 'tth_pred': pk_centers, - **fitting_kwargs, - } - result = fit_ring(**kwargs) - fit_data.append(result) - else: - ims_data.append(p_img) - if not collapse_tth: - output = [ang_data, ims_data] - if do_fitting: - output.append(fit_data) - patch_data.append(output) - - return patch_data - - -DETECTOR_TYPES = { - 'planar': PlanarDetector, - 'cylindrical': CylindricalDetector, -} - - -class BufferShapeMismatchError(RuntimeError): - # This is raised when the buffer shape does not match the detector shape - pass - - -@contextmanager -def switch_xray_source(instr: HEDMInstrument, xray_source: Optional[str]): - if xray_source is None: - # If the x-ray source is None, leave it as the current active one - yield - return - - prev_beam_name = instr.active_beam_name - instr.active_beam_name = xray_source - try: - yield - finally: - instr.active_beam_name = prev_beam_name diff --git a/hexrd/hed/xrdutil/__init__.py b/hexrd/hed/xrdutil/__init__.py new file mode 100644 index 000000000..15fb5638f --- /dev/null +++ b/hexrd/hed/xrdutil/__init__.py @@ -0,0 +1 @@ +from .utils import _project_on_detector_plane, _project_on_detector_cylinder diff --git a/hexrd/hed/xrdutil/phutil.py b/hexrd/hed/xrdutil/phutil.py index 59aa0294c..8fd772976 100644 --- a/hexrd/hed/xrdutil/phutil.py +++ b/hexrd/hed/xrdutil/phutil.py @@ -255,7 +255,7 @@ def tth_corr_map_sample_layer( Parameters ---------- - instrument : hexrd.hedm.instrument.HEDMInstrument + instrument : hexrd.core.instrument.HEDMInstrument The pionhole camera instrument object. layer_standoff : scalar The sample layer standoff from the upstream face of the pinhole @@ -372,7 +372,7 @@ def tth_corr_map_pinhole(instrument, pinhole_thickness, pinhole_radius): Parameters ---------- - instrument : hexrd.hedm.instrument.HEDMInstrument + instrument : hexrd.core.instrument.HEDMInstrument The pionhole camera instrument object. 
pinhole_thickness : scalar The thickenss (height) of the pinhole (cylinder) in mm diff --git a/hexrd/hed/xrdutil/utils.py b/hexrd/hed/xrdutil/utils.py index 073064818..52d86bb3e 100644 --- a/hexrd/hed/xrdutil/utils.py +++ b/hexrd/hed/xrdutil/utils.py @@ -27,29 +27,14 @@ # ============================================================ -from typing import Optional, Union, Any, Generator - # TODO: Resolve extra-workflow dependency -from hexrd.hedm.material.crystallography import PlaneData from hexrd.core.distortion.distortionabc import DistortionABC -import numba import numpy as np -import numba from hexrd.core import constants -from hexrd.core import matrixutil as mutil -from hexrd.core import rotations as rot -from hexrd.core import gridutil as gutil - -from hexrd.hedm.material.crystallography import processWavelength, PlaneData - +from hexrd.core.material.crystallography import processWavelength, PlaneData from hexrd.core.transforms import xfcapi -from hexrd.core.valunits import valWUnit - -from hexrd.core import distortion as distortion_pkg - -from hexrd.core.deprecation import deprecated simlp = 'hexrd.hedm.instrument.hedm_instrument.HEDMInstrument.simulate_laue_pattern' @@ -72,471 +57,12 @@ nans_1x2 = np.nan * np.ones((1, 2)) -# ============================================================================= -# CLASSES -# ============================================================================= - - -class EtaOmeMaps(object): - """ - find-orientations loads pickled eta-ome data, but CollapseOmeEta is not - pickleable, because it holds a list of ReadGE, each of which holds a - reference to an open file object, which is not pickleable. - """ - - def __init__(self, ome_eta_archive: str): - ome_eta: np.ndarray = np.load(ome_eta_archive, allow_pickle=True) - - planeData_args = ome_eta['planeData_args'] - planeData_hkls = ome_eta['planeData_hkls'] - self.planeData = PlaneData(planeData_hkls, *planeData_args) - self.planeData.exclusions = ome_eta['planeData_excl'] - self.dataStore = ome_eta['dataStore'] - self.iHKLList = ome_eta['iHKLList'] - self.etaEdges = ome_eta['etaEdges'] - self.omeEdges = ome_eta['omeEdges'] - self.etas = ome_eta['etas'] - self.omegas = ome_eta['omegas'] - - def save_eta_ome_maps(self, filename: str) -> None: - """ - eta_ome.dataStore - eta_ome.planeData - eta_ome.iHKLList - eta_ome.etaEdges - eta_ome.omeEdges - eta_ome.etas - eta_ome.omegas - """ - args = np.array(self.planeData.getParams(), dtype=object)[:4] - args[2] = valWUnit('wavelength', 'length', args[2], 'angstrom') - hkls = np.vstack([i['hkl'] for i in self.planeData.hklDataList]).T - save_dict = { - 'dataStore': self.dataStore, - 'etas': self.etas, - 'etaEdges': self.etaEdges, - 'iHKLList': self.iHKLList, - 'omegas': self.omegas, - 'omeEdges': self.omeEdges, - 'planeData_args': args, - 'planeData_hkls': hkls, - 'planeData_excl': self.planeData.exclusions, - } - np.savez_compressed(filename, **save_dict) - - # ============================================================================= # FUNCTIONS # ============================================================================= - -def _zproject(x: np.ndarray, y: np.ndarray): - return np.cos(x) * np.sin(y) - np.sin(x) * np.cos(y) - - -def zproject_sph_angles( - invecs: np.ndarray, - chi: float = 0.0, - method: str = 'stereographic', - source: str = 'd', - use_mask: bool = False, - invert_z: bool = False, - rmat: Optional[np.ndarray] = None, -) -> Union[np.ndarray, tuple[np.ndarray, np.ndarray]]: - """ - Projects spherical angles to 2-d mapping. 
-
- Parameters
- ----------
- invecs : array_like
- The (n, 3) array of input points, interpreted via the 'source' kwarg.
- chi : scalar, optional
- The inclination angle of the sample frame. The default is 0.0.
- method : str, optional
- Mapping type spec, either 'stereographic' or 'equal-area'.
- The default is 'stereographic'.
- source : str, optional
- The type specifier of the input vectors, either 'd', 'q', or 'g'.
- 'd' signifies unit diffraction vectors as (2theta, eta, omega),
- 'q' specifies unit scattering vectors as (2theta, eta, omega),
- 'g' specifies unit vectors in the sample frame as (x, y, z).
- The default is 'd'.
- use_mask : bool, optional
- If True, trim points not on the +z hemisphere (polar angles > 90).
- The default is False.
- invert_z : bool, optional
- If True, invert the Z-coordinates of the unit vectors calculated from
- the input angles. The default is False.
- rmat : numpy.ndarray, shape=(3, 3), optional
- Array representing a change of basis (rotation) to apply to the
- calculated unit vectors. The default is None.
-
- Raises
- ------
- RuntimeError
- If method not in ('stereographic', 'equal-area').
-
- Returns
- -------
- numpy.ndarray or tuple
- If use_mask = False, then the array of n mapped input points with shape
- (n, 2). If use_mask = True, then the first element is the ndarray of
- mapped points with shape (<=n, 2), and the second is a bool array with
- shape (n,) marking the points that fell on the upper hemisphere.
-
- Notes
- -----
- CAVEAT: +Z axis projections only!!!
- TODO: check mask application.
- """
- assert isinstance(source, str), "source kwarg must be a string"
-
- invecs = np.atleast_2d(invecs)
- if source.lower() == 'd':
- spts_s = xfcapi.angles_to_dvec(invecs, chi=chi)
- elif source.lower() == 'q':
- spts_s = xfcapi.angles_to_gvec(invecs, chi=chi)
- elif source.lower() == 'g':
- spts_s = invecs
-
- if rmat is not None:
- spts_s = np.dot(spts_s, rmat.T)
-
- if invert_z:
- spts_s[:, 2] = -spts_s[:, 2]
-
- # filter based on hemisphere
- if use_mask:
- pzi = spts_s[:, 2] <= 0
- spts_s = spts_s[pzi, :]
-
- if method.lower() == 'stereographic':
- ppts = np.vstack(
- [
- spts_s[:, 0] / (1.0 - spts_s[:, 2]),
- spts_s[:, 1] / (1.0 - spts_s[:, 2]),
- ]
- ).T
- elif method.lower() == 'equal-area':
- chords = spts_s + np.tile([0, 0, 1], (len(spts_s), 1))
- scl = np.tile(mutil.rowNorm(chords), (2, 1)).T
- ucrd = mutil.unitVector(
- np.hstack([chords[:, :2], np.zeros((len(spts_s), 1))]).T
- )
-
- ppts = ucrd[:2, :].T * scl
- else:
- raise RuntimeError(f"method '{method}' not recognized")
-
- if use_mask:
- return ppts, pzi
- else:
- return ppts
-
-
-def make_polar_net(
- ndiv: int = 24, projection: str = 'stereographic', max_angle: float = 120.0
-) -> np.ndarray:
- """
- TODO: options for generating net boundaries; fixed to Z proj. 
- """ - ndiv_tth = int(np.floor(0.5 * ndiv)) + 1 - wtths = np.radians( - np.linspace(0, 1, num=ndiv_tth, endpoint=True) * max_angle - ) - wetas = np.radians(np.linspace(-1, 1, num=ndiv + 1, endpoint=True) * 180.0) - weta_gen = np.radians(np.linspace(-1, 1, num=181, endpoint=True) * 180.0) - pts = [] - for eta in wetas: - net_ang = np.vstack( - [[wtths[0], wtths[-1]], np.tile(eta, 2), np.zeros(2)] - ).T - pts.append(zproject_sph_angles(net_ang, method=projection, source='d')) - pts.append(np.nan * np.ones((1, 2))) - for tth in wtths[1:]: - net_ang = np.vstack( - [tth * np.ones_like(weta_gen), weta_gen, np.zeros_like(weta_gen)] - ).T - pts.append(zproject_sph_angles(net_ang, method=projection, source='d')) - pts.append(nans_1x2) - - return np.vstack(pts) - - validateAngleRanges = xfcapi.validate_angle_ranges - -@deprecated(removal_date='2025-01-01') -def simulateOmeEtaMaps( - omeEdges, - etaEdges, - planeData, - expMaps, - chi=0.0, - etaTol=None, - omeTol=None, - etaRanges=None, - omeRanges=None, - bVec=constants.beam_vec, - eVec=constants.eta_vec, - vInv=constants.identity_6x1, -): - """ - Simulate spherical maps. - - Parameters - ---------- - omeEdges : TYPE - DESCRIPTION. - etaEdges : TYPE - DESCRIPTION. - planeData : TYPE - DESCRIPTION. - expMaps : (3, n) ndarray - DESCRIPTION. - chi : TYPE, optional - DESCRIPTION. The default is 0.. - etaTol : TYPE, optional - DESCRIPTION. The default is None. - omeTol : TYPE, optional - DESCRIPTION. The default is None. - etaRanges : TYPE, optional - DESCRIPTION. The default is None. - omeRanges : TYPE, optional - DESCRIPTION. The default is None. - bVec : TYPE, optional - DESCRIPTION. The default is [0, 0, -1]. - eVec : TYPE, optional - DESCRIPTION. The default is [1, 0, 0]. - vInv : TYPE, optional - DESCRIPTION. The default is [1, 1, 1, 0, 0, 0]. - - Returns - ------- - eta_ome : TYPE - DESCRIPTION. - - Notes - ----- - all angular info is entered in degrees - - ??? might want to creat module-level angluar unit flag - ??? 
might want to allow resvers delta omega - - """ - # convert to radians - etaEdges = np.radians(np.sort(etaEdges)) - omeEdges = np.radians(np.sort(omeEdges)) - - omeIndices = list(range(len(omeEdges))) - etaIndices = list(range(len(etaEdges))) - - i_max = omeIndices[-1] - j_max = etaIndices[-1] - - etaMin = etaEdges[0] - etaMax = etaEdges[-1] - omeMin = omeEdges[0] - omeMax = omeEdges[-1] - if omeRanges is None: - omeRanges = [ - [omeMin, omeMax], - ] - - if etaRanges is None: - etaRanges = [ - [etaMin, etaMax], - ] - - # signed deltas IN RADIANS - del_ome = omeEdges[1] - omeEdges[0] - del_eta = etaEdges[1] - etaEdges[0] - - delOmeSign = np.sign(del_eta) - - # tolerances are in degrees (easier) - if omeTol is None: - omeTol = abs(del_ome) - else: - omeTol = np.radians(omeTol) - if etaTol is None: - etaTol = abs(del_eta) - else: - etaTol = np.radians(etaTol) - - # pixel dialtions - dpix_ome = round(omeTol / abs(del_ome)) - dpix_eta = round(etaTol / abs(del_eta)) - - i_dil, j_dil = np.meshgrid( - np.arange(-dpix_ome, dpix_ome + 1), np.arange(-dpix_eta, dpix_eta + 1) - ) - - # get symmetrically expanded hkls from planeData - sym_hkls = planeData.getSymHKLs() - nhkls = len(sym_hkls) - - # make things C-contiguous for use in xfcapi functions - expMaps = np.array(expMaps.T, order='C') - nOrs = len(expMaps) - - bMat = np.array(planeData.latVecOps['B'], order='C') - wlen = planeData.wavelength - - bVec = np.array(bVec.flatten(), order='C') - eVec = np.array(eVec.flatten(), order='C') - vInv = np.array(vInv.flatten(), order='C') - - eta_ome = np.zeros((nhkls, max(omeIndices), max(etaIndices)), order='C') - for iHKL in range(nhkls): - these_hkls = np.ascontiguousarray(sym_hkls[iHKL].T, dtype=float) - for iOr in range(nOrs): - rMat_c = xfcapi.make_rmat_of_expmap(expMaps[iOr, :]) - angList = np.vstack( - xfcapi.oscill_angles_of_hkls( - these_hkls, - chi, - rMat_c, - bMat, - wlen, - beam_vec=bVec, - eta_vec=eVec, - v_inv=vInv, - ) - ) - if not np.all(np.isnan(angList)): - # - angList[:, 1] = rot.mapAngle( - angList[:, 1], [etaEdges[0], etaEdges[0] + 2 * np.pi] - ) - angList[:, 2] = rot.mapAngle( - angList[:, 2], [omeEdges[0], omeEdges[0] + 2 * np.pi] - ) - # - # do eta ranges - angMask_eta = np.zeros(len(angList), dtype=bool) - for etas in etaRanges: - angMask_eta = np.logical_or( - angMask_eta, - xfcapi.validate_angle_ranges( - angList[:, 1], etas[0], etas[1] - ), - ) - - # do omega ranges - ccw = True - angMask_ome = np.zeros(len(angList), dtype=bool) - for omes in omeRanges: - if omes[1] - omes[0] < 0: - ccw = False - angMask_ome = np.logical_or( - angMask_ome, - xfcapi.validate_angle_ranges( - angList[:, 2], omes[0], omes[1], ccw=ccw - ), - ) - - # mask angles list, hkls - angMask = np.logical_and(angMask_eta, angMask_ome) - - culledTTh = angList[angMask, 0] - culledEta = angList[angMask, 1] - culledOme = angList[angMask, 2] - - for iTTh in range(len(culledTTh)): - culledEtaIdx = np.where(etaEdges - culledEta[iTTh] > 0)[0] - if len(culledEtaIdx) > 0: - culledEtaIdx = culledEtaIdx[0] - 1 - if culledEtaIdx < 0: - culledEtaIdx = None - else: - culledEtaIdx = None - culledOmeIdx = np.where(omeEdges - culledOme[iTTh] > 0)[0] - if len(culledOmeIdx) > 0: - if delOmeSign > 0: - culledOmeIdx = culledOmeIdx[0] - 1 - else: - culledOmeIdx = culledOmeIdx[-1] - if culledOmeIdx < 0: - culledOmeIdx = None - else: - culledOmeIdx = None - - if culledEtaIdx is not None and culledOmeIdx is not None: - if dpix_ome > 0 or dpix_eta > 0: - i_sup = omeIndices[culledOmeIdx] + np.array( - [i_dil.flatten()], dtype=int - ) - 
j_sup = etaIndices[culledEtaIdx] + np.array( - [j_dil.flatten()], dtype=int - ) - - # catch shit that falls off detector... - # maybe make this fancy enough to wrap at 2pi? - idx_mask = np.logical_and( - np.logical_and(i_sup >= 0, i_sup < i_max), - np.logical_and(j_sup >= 0, j_sup < j_max), - ) - eta_ome[iHKL, i_sup[idx_mask], j_sup[idx_mask]] = ( - 1.0 - ) - else: - eta_ome[ - iHKL, - omeIndices[culledOmeIdx], - etaIndices[culledEtaIdx], - ] = 1.0 - return eta_ome - - -def _fetch_hkls_from_planedata(pd: PlaneData): - return np.hstack(pd.getSymHKLs(withID=True)).T - - -def _filter_hkls_eta_ome( - hkls: np.ndarray, - angles: np.ndarray, - eta_range: list[tuple[float]], - ome_range: list[tuple[float]], - return_mask: bool = False, -) -> Union[ - tuple[np.ndarray, np.ndarray], tuple[np.ndarray, np.ndarray, np.ndarray] -]: - """ - given a set of hkls and angles, filter them by the - eta and omega ranges - """ - angMask_eta = np.zeros(len(angles), dtype=bool) - for etas in eta_range: - angMask_eta = np.logical_or( - angMask_eta, - xfcapi.validate_angle_ranges(angles[:, 1], etas[0], etas[1]), - ) - - ccw = True - angMask_ome = np.zeros(len(angles), dtype=bool) - for omes in ome_range: - if omes[1] - omes[0] < 0: - ccw = False - angMask_ome = np.logical_or( - angMask_ome, - xfcapi.validate_angle_ranges( - angles[:, 2], omes[0], omes[1], ccw=ccw - ), - ) - - angMask = np.logical_and(angMask_eta, angMask_ome) - - allAngs = angles[angMask, :] - allHKLs = np.vstack([hkls, hkls])[angMask, :] - - if return_mask: - return allAngs, allHKLs, angMask - else: - return allAngs, allHKLs - - def _project_on_detector_plane( allAngs: np.ndarray, rMat_d: np.ndarray, @@ -629,57 +155,6 @@ def _project_on_detector_cylinder( return det_xy, rMat_ss, valid_mask -def _dvecToDetectorXYcylinder( - dVec_cs: np.ndarray, - tVec_d: np.ndarray, - caxis: np.ndarray, - paxis: np.ndarray, - radius: float, - physical_size: np.ndarray, - angle_extent: float, - tVec_s: np.ndarray = constants.zeros_3x1, - tVec_c: np.ndarray = constants.zeros_3x1, - rmat_s: np.ndarray = constants.identity_3x3, -) -> tuple[np.ndarray, np.ndarray]: - - cvec = _unitvec_to_cylinder( - dVec_cs, - caxis, - paxis, - radius, - tVec_d, - tVec_s=tVec_s, - tVec_c=tVec_c, - rmat_s=rmat_s, - ) - - cvec_det, valid_mask = _clip_to_cylindrical_detector( - cvec, - tVec_d, - caxis, - paxis, - radius, - physical_size, - angle_extent, - tVec_s=tVec_s, - tVec_c=tVec_c, - rmat_s=rmat_s, - ) - - xy_det = _dewarp_from_cylinder( - cvec_det, - tVec_d, - caxis, - paxis, - radius, - tVec_s=tVec_s, - tVec_c=tVec_c, - rmat_s=rmat_s, - ) - - return xy_det, valid_mask - - def _unitvec_to_cylinder( uvw: np.ndarray, caxis: np.ndarray, @@ -900,630 +375,52 @@ def _warp_to_cylinder( return res -def _dvec_to_angs( - dvecs: np.ndarray, bvec: np.ndarray, evec: np.ndarray -) -> tuple[np.ndarray, np.ndarray]: - """ - convert diffraction vectors to (tth, eta) - angles in the 'eta' frame - dvecs is assumed to have (nx3) shape - """ - num = dvecs.shape[0] - exb = np.cross(evec, bvec) - exb = exb / np.linalg.norm(exb) - bxexb = np.cross(bvec, exb) - bxexb = bxexb / np.linalg.norm(bxexb) - - dp = np.dot(bvec, dvecs.T) - dp[np.abs(dp) > 1.0] = np.sign(dp[np.abs(dp) > 1.0]) - tth = np.arccos(dp) - - dvecs_p = dvecs - np.tile(dp, [3, 1]).T * np.tile(bvec, [num, 1]) - - dpx = np.dot(bxexb, dvecs_p.T) - dpy = np.dot(exb, dvecs_p.T) - eta = np.arctan2(dpy, dpx) - - return tth, eta - - -def simulateGVecs( - pd: PlaneData, - detector_params: np.ndarray, - grain_params: np.ndarray, - ome_range: 
list[tuple[float]] = [ - (-np.pi, np.pi), - ], - ome_period: tuple[float] = (-np.pi, np.pi), - eta_range: list[tuple[float]] = [ - (-np.pi, np.pi), - ], - panel_dims: list[tuple[float]] = [(-204.8, -204.8), (204.8, 204.8)], - pixel_pitch: tuple[float] = (0.2, 0.2), - distortion: DistortionABC = None, - beam_vector: np.ndarray = constants.beam_vec, -) -> tuple[np.ndarray, np.ndarray, np.ndarray, np.ndarray, np.ndarray]: - """ - returns valid_ids, valid_hkl, valid_ang, valid_xy, ang_ps - - panel_dims are [(xmin, ymin), (xmax, ymax)] in mm - - pixel_pitch is [row_size, column_size] in mm - - simulate the monochormatic scattering for a specified - - - space group - - wavelength - - orientation - - strain - - position - - detector parameters - - oscillation axis tilt (chi) - - subject to - - - omega (oscillation) ranges (list of (min, max) tuples) - - eta (azimuth) ranges - - pd................a hexrd.crystallography.PlaneData instance - detector_params...a (10,) ndarray containing the tilt angles (3), - translation (3), chi (1), and sample frame translation - (3) parameters - grain_params......a (12,) ndarray containing the exponential map (3), - translation (3), and inverse stretch tensor compnents - in Mandel-Voigt notation (6). - - * currently only one panel is supported, but this will likely change soon - """ - bMat = pd.latVecOps['B'] - wlen = pd.wavelength - full_hkls = _fetch_hkls_from_planedata(pd) - - # extract variables for convenience - rMat_d = xfcapi.make_detector_rmat(detector_params[:3]) - tVec_d = np.ascontiguousarray(detector_params[3:6]) - chi = detector_params[6] - tVec_s = np.ascontiguousarray(detector_params[7:10]) - rMat_c = xfcapi.make_rmat_of_expmap(grain_params[:3]) - tVec_c = np.ascontiguousarray(grain_params[3:6]) - vInv_s = np.ascontiguousarray(grain_params[6:12]) - beam_vector = np.ascontiguousarray(beam_vector) - - # first find valid G-vectors - angList = np.vstack( - xfcapi.oscill_angles_of_hkls( - full_hkls[:, 1:], - chi, - rMat_c, - bMat, - wlen, - v_inv=vInv_s, - beam_vec=beam_vector, - ) - ) - allAngs, allHKLs = _filter_hkls_eta_ome( - full_hkls, angList, eta_range, ome_range - ) - - if len(allAngs) == 0: - valid_ids = [] - valid_hkl = [] - valid_ang = [] - valid_xy = [] - ang_ps = [] - else: - # ??? preallocate for speed? - det_xy, rMat_ss, _ = _project_on_detector_plane( - allAngs, - rMat_d, - rMat_c, - chi, - tVec_d, - tVec_c, - tVec_s, - distortion, - beamVec=beam_vector, - ) - - on_panel = np.logical_and( - np.logical_and( - det_xy[:, 0] >= panel_dims[0][0], - det_xy[:, 0] <= panel_dims[1][0], - ), - np.logical_and( - det_xy[:, 1] >= panel_dims[0][1], - det_xy[:, 1] <= panel_dims[1][1], - ), - ) - - op_idx = np.where(on_panel)[0] - - valid_ang = allAngs[op_idx, :] - valid_ang[:, 2] = xfcapi.mapAngle(valid_ang[:, 2], ome_period) - valid_ids = allHKLs[op_idx, 0] - valid_hkl = allHKLs[op_idx, 1:] - valid_xy = det_xy[op_idx, :] - ang_ps = angularPixelSize( - valid_xy, - pixel_pitch, - rMat_d, - # Provide only the first sample rotation matrix to angularPixelSize - # Perhaps this is something that can be improved in the future? 
- rMat_ss[0], - tVec_d, - tVec_s, - tVec_c, - distortion=distortion, - beamVec=beam_vector, - ) - - return valid_ids, valid_hkl, valid_ang, valid_xy, ang_ps - - -@deprecated(new_func=simlp, removal_date='2025-01-01') -def simulateLauePattern( - hkls, - bMat, - rmat_d, - tvec_d, - panel_dims, - panel_buffer=5, - minEnergy=8, - maxEnergy=24, - rmat_s=np.eye(3), - grain_params=None, - distortion=None, - beamVec=None, -): - - if beamVec is None: - beamVec = constants.beam_vec - - # parse energy ranges - multipleEnergyRanges = False - if hasattr(maxEnergy, '__len__'): - assert len(maxEnergy) == len( - minEnergy - ), 'energy cutoff ranges must have the same length' - multipleEnergyRanges = True - lmin = [processWavelength(e) for e in maxEnergy] - lmax = [processWavelength(e) for e in minEnergy] - else: - lmin = processWavelength(maxEnergy) - lmax = processWavelength(minEnergy) - - # process crystal rmats and inverse stretches - if grain_params is None: - grain_params = np.atleast_2d( - [0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 1.0, 1.0, 1.0, 0.0, 0.0, 0.0] - ) - - n_grains = len(grain_params) - - # dummy translation vector... make input - tvec_s = np.zeros((3, 1)) - - # number of hkls - nhkls_tot = hkls.shape[1] - - # unit G-vectors in crystal frame - ghat_c = mutil.unitVector(np.dot(bMat, hkls)) - - # pre-allocate output arrays - xy_det = np.nan * np.ones((n_grains, nhkls_tot, 2)) - hkls_in = np.nan * np.ones((n_grains, 3, nhkls_tot)) - angles = np.nan * np.ones((n_grains, nhkls_tot, 2)) - dspacing = np.nan * np.ones((n_grains, nhkls_tot)) - energy = np.nan * np.ones((n_grains, nhkls_tot)) - - """ - LOOP OVER GRAINS - """ - - for iG, gp in enumerate(grain_params): - rmat_c = xfcapi.make_rmat_of_expmap(gp[:3]) - tvec_c = gp[3:6].reshape(3, 1) - vInv_s = mutil.vecMVToSymm(gp[6:].reshape(6, 1)) - - # stretch them: V^(-1) * R * Gc - ghat_s_str = mutil.unitVector(np.dot(vInv_s, np.dot(rmat_c, ghat_c))) - ghat_c_str = np.dot(rmat_c.T, ghat_s_str) - - # project - dpts = xfcapi.gvec_to_xy( - ghat_c_str.T, - rmat_d, - rmat_s, - rmat_c, - tvec_d, - tvec_s, - tvec_c, - beam_vec=beamVec, - ).T - - # check intersections with detector plane - canIntersect = ~np.isnan(dpts[0, :]) - npts_in = sum(canIntersect) - - if np.any(canIntersect): - dpts = dpts[:, canIntersect].reshape(2, npts_in) - dhkl = hkls[:, canIntersect].reshape(3, npts_in) - - rmat_b = xfcapi.make_beam_rmat(beamVec, constants.eta_vec) - - # back to angles - tth_eta, gvec_l = xfcapi.xy_to_gvec( - dpts.T, rmat_d, rmat_s, tvec_d, tvec_s, tvec_c, rmat_b=rmat_b - ) - tth_eta = np.vstack(tth_eta).T - - # warp measured points - if distortion is not None: - dpts = distortion.apply_inverse(dpts) - - # plane spacings and energies - dsp = 1.0 / mutil.columnNorm(np.dot(bMat, dhkl)) - wlen = 2 * dsp * np.sin(0.5 * tth_eta[:, 0]) - - # find on spatial extent of detector - xTest = np.logical_and( - dpts[0, :] >= -0.5 * panel_dims[1] + panel_buffer, - dpts[0, :] <= 0.5 * panel_dims[1] - panel_buffer, - ) - yTest = np.logical_and( - dpts[1, :] >= -0.5 * panel_dims[0] + panel_buffer, - dpts[1, :] <= 0.5 * panel_dims[0] - panel_buffer, - ) - - onDetector = np.logical_and(xTest, yTest) - if multipleEnergyRanges: - validEnergy = np.zeros(len(wlen), dtype=bool) - for i in range(len(lmin)): - validEnergy = validEnergy | np.logical_and( - wlen >= lmin[i], wlen <= lmax[i] - ) - else: - validEnergy = np.logical_and(wlen >= lmin, wlen <= lmax) - - # index for valid reflections - keepers = np.where(np.logical_and(onDetector, validEnergy))[0] - - # assign output arrays - 
xy_det[iG][keepers, :] = dpts[:, keepers].T - hkls_in[iG][:, keepers] = dhkl[:, keepers] - angles[iG][keepers, :] = tth_eta[keepers, :] - dspacing[iG, keepers] = dsp[keepers] - energy[iG, keepers] = processWavelength(wlen[keepers]) - return xy_det, hkls_in, angles, dspacing, energy - - -@numba.njit(nogil=True, cache=True) -def _expand_pixels( - original: np.ndarray, w: float, h: float, result: np.ndarray -) -> np.ndarray: - hw = 0.5 * w - hh = 0.5 * h - for el in range(len(original)): - x, y = original[el, 0], original[el, 1] - result[el * 4 + 0, 0] = x - hw - result[el * 4 + 0, 1] = y - hh - result[el * 4 + 1, 0] = x + hw - result[el * 4 + 1, 1] = y - hh - result[el * 4 + 2, 0] = x + hw - result[el * 4 + 2, 1] = y + hh - result[el * 4 + 3, 0] = x - hw - result[el * 4 + 3, 1] = y + hh - - return result - - -@numba.njit(nogil=True, cache=True) -def _compute_max( - tth: np.ndarray, eta: np.ndarray, result: np.ndarray -) -> np.ndarray: - period = 2.0 * np.pi - hperiod = np.pi - for el in range(0, len(tth), 4): - max_tth = np.abs(tth[el + 0] - tth[el + 3]) - eta_diff = eta[el + 0] - eta[el + 3] - max_eta = np.abs(np.remainder(eta_diff + hperiod, period) - hperiod) - for i in range(3): - curr_tth = np.abs(tth[el + i] - tth[el + i + 1]) - eta_diff = eta[el + i] - eta[el + i + 1] - curr_eta = np.abs( - np.remainder(eta_diff + hperiod, period) - hperiod - ) - max_tth = np.maximum(curr_tth, max_tth) - max_eta = np.maximum(curr_eta, max_eta) - result[el // 4, 0] = max_tth - result[el // 4, 1] = max_eta - - return result - - -def angularPixelSize( - xy_det: np.ndarray, - xy_pixelPitch: tuple[float], - rMat_d: np.ndarray, - rMat_s: np.ndarray, +def _dvecToDetectorXYcylinder( + dVec_cs: np.ndarray, tVec_d: np.ndarray, - tVec_s: np.ndarray, - tVec_c: np.ndarray, - distortion: DistortionABC = None, - beamVec: np.ndarray = None, - etaVec: np.ndarray = None, -) -> np.ndarray: - """ - Calculate angular pixel sizes on a detector. - - * choices to beam vector and eta vector specs have been supressed - * assumes xy_det in UNWARPED configuration - """ - xy_det = np.atleast_2d(xy_det) - if distortion is not None: # !!! check this logic - xy_det = distortion.apply(xy_det) - if beamVec is None: - beamVec = constants.beam_vec - if etaVec is None: - etaVec = constants.eta_vec - - # Verify that rMat_s is only 2D (a single matrix). - # Arrays of matrices were previously provided, which `xy_to_gvec` - # cannot currently handle. 
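A minimal sketch of the expected call, mirroring `simulateGVecs` above (the names are taken from that context; only the first matrix of the returned sample-rotation stack is passed):

    ang_ps = angularPixelSize(
        valid_xy, pixel_pitch, rMat_d, rMat_ss[0], tVec_d, tVec_s, tVec_c,
        distortion=distortion, beamVec=beam_vector,
    )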
- if rMat_s.ndim != 2: - msg = ( - f'rMat_s should have 2 dimensions, but has {rMat_s.ndim} ' - 'dimensions instead' - ) - raise ValueError(msg) - - xy_expanded = np.empty((len(xy_det) * 4, 2), dtype=xy_det.dtype) - xy_expanded = _expand_pixels( - xy_det, xy_pixelPitch[0], xy_pixelPitch[1], xy_expanded - ) - - rmat_b = xfcapi.make_beam_rmat(beamVec, etaVec) + caxis: np.ndarray, + paxis: np.ndarray, + radius: float, + physical_size: np.ndarray, + angle_extent: float, + tVec_s: np.ndarray = constants.zeros_3x1, + tVec_c: np.ndarray = constants.zeros_3x1, + rmat_s: np.ndarray = constants.identity_3x3, +) -> tuple[np.ndarray, np.ndarray]: - gvec_space, _ = xfcapi.xy_to_gvec( - xy_expanded, - rMat_d, - rMat_s, + cvec = _unitvec_to_cylinder( + dVec_cs, + caxis, + paxis, + radius, tVec_d, - tVec_s, - tVec_c, - rmat_b=rmat_b, + tVec_s=tVec_s, + tVec_c=tVec_c, + rmat_s=rmat_s, ) - result = np.empty_like(xy_det) - return _compute_max(gvec_space[0], gvec_space[1], result) - - -def make_reflection_patches( - instr_cfg: dict[str, Any], - tth_eta: np.ndarray, - ang_pixel_size: np.ndarray, - omega: Optional[np.ndarray] = None, - tth_tol: float = 0.2, - eta_tol: float = 1.0, - rmat_c: np.ndarray = np.eye(3), - tvec_c: np.ndarray = np.zeros((3, 1)), - npdiv: int = 1, - quiet: bool = False, # TODO: Remove this parameter - it isn't used - compute_areas_func: np.ndarray = gutil.compute_areas, -) -> Generator[ - tuple[ - np.ndarray, np.ndarray, np.ndarray, np.ndarray, np.ndarray, np.ndarray - ], - None, - None, -]: - """Make angular patches on a detector. - - panel_dims are [(xmin, ymin), (xmax, ymax)] in mm - - pixel_pitch is [row_size, column_size] in mm - - FIXME: DISTORTION HANDING IS STILL A KLUDGE!!! - - patches are: - - delta tth - d ------------- ... ------------- - e | x | x | x | ... | x | x | x | - l ------------- ... ------------- - t . - a . - . - e ------------- ... ------------- - t | x | x | x | ... | x | x | x | - a ------------- ... 
------------- - - outputs are: - (tth_vtx, eta_vtx), - (x_vtx, y_vtx), - connectivity, - subpixel_areas, - (x_center, y_center), - (i_row, j_col) - """ - # detector quantities - rmat_d = xfcapi.make_rmat_of_expmap( - np.r_[instr_cfg['detector']['transform']['tilt']] + cvec_det, valid_mask = _clip_to_cylindrical_detector( + cvec, + tVec_d, + caxis, + paxis, + radius, + physical_size, + angle_extent, + tVec_s=tVec_s, + tVec_c=tVec_c, + rmat_s=rmat_s, ) - tvec_d = np.r_[instr_cfg['detector']['transform']['translation']] - pixel_size = instr_cfg['detector']['pixels']['size'] - frame_nrows = instr_cfg['detector']['pixels']['rows'] - frame_ncols = instr_cfg['detector']['pixels']['columns'] - - panel_dims = ( - -0.5 * np.r_[frame_ncols * pixel_size[1], frame_nrows * pixel_size[0]], - 0.5 * np.r_[frame_ncols * pixel_size[1], frame_nrows * pixel_size[0]], - ) - row_edges = ( - np.arange(frame_nrows + 1)[::-1] * pixel_size[1] + panel_dims[0][1] + xy_det = _dewarp_from_cylinder( + cvec_det, + tVec_d, + caxis, + paxis, + radius, + tVec_s=tVec_s, + tVec_c=tVec_c, + rmat_s=rmat_s, ) - col_edges = np.arange(frame_ncols + 1) * pixel_size[0] + panel_dims[0][0] - - # handle distortion - distortion = None - if distortion_key in instr_cfg['detector']: - distortion_cfg = instr_cfg['detector'][distortion_key] - if distortion_cfg is not None: - try: - func_name = distortion_cfg['function_name'] - dparams = distortion_cfg['parameters'] - distortion = distortion_pkg.get_mapping(func_name, dparams) - except KeyError: - raise RuntimeError("problem with distortion specification") - - # sample frame - chi = instr_cfg['oscillation_stage']['chi'] - tvec_s = np.r_[instr_cfg['oscillation_stage']['translation']] - bvec = np.r_[instr_cfg['beam']['vector']] - - # data to loop - # ??? WOULD IT BE CHEAPER TO CARRY ZEROS OR USE CONDITIONAL? - if omega is None: - full_angs = np.hstack([tth_eta, np.zeros((len(tth_eta), 1))]) - else: - full_angs = np.hstack([tth_eta, omega.reshape(len(tth_eta), 1)]) - - for angs, pix in zip(full_angs, ang_pixel_size): - # calculate bin edges for patch based on local angular pixel size - # tth - ntths, tth_edges = gutil.make_tolerance_grid( - bin_width=np.degrees(pix[0]), - window_width=tth_tol, - num_subdivisions=npdiv, - ) - - # eta - netas, eta_edges = gutil.make_tolerance_grid( - bin_width=np.degrees(pix[1]), - window_width=eta_tol, - num_subdivisions=npdiv, - ) - - # FOR ANGULAR MESH - conn = gutil.cellConnectivity(netas, ntths, origin='ll') - - # meshgrid args are (cols, rows), a.k.a (fast, slow) - m_tth, m_eta = np.meshgrid(tth_edges, eta_edges) - npts_patch = m_tth.size - - # calculate the patch XY coords from the (tth, eta) angles - # !!! will CHEAT and ignore the small perturbation the different - # omega angle values causes and simply use the central value - gVec_angs_vtx = np.tile(angs, (npts_patch, 1)) + np.radians( - np.vstack( - [m_tth.flatten(), m_eta.flatten(), np.zeros(npts_patch)] - ).T - ) - - xy_eval_vtx, _, _ = _project_on_detector_plane( - gVec_angs_vtx, - rmat_d, - rmat_c, - chi, - tvec_d, - tvec_c, - tvec_s, - distortion, - beamVec=bvec, - ) - - areas = compute_areas_func(xy_eval_vtx, conn) - - # EVALUATION POINTS - # !!! 
for lack of a better option will use centroids - tth_eta_cen = gutil.cellCentroids( - np.atleast_2d(gVec_angs_vtx[:, :2]), conn - ) - - gVec_angs = np.hstack( - [tth_eta_cen, np.tile(angs[2], (len(tth_eta_cen), 1))] - ) - - xy_eval, _, _ = _project_on_detector_plane( - gVec_angs, - rmat_d, - rmat_c, - chi, - tvec_d, - tvec_c, - tvec_s, - distortion, - beamVec=bvec, - ) - - row_indices = gutil.cellIndices(row_edges, xy_eval[:, 1]) - col_indices = gutil.cellIndices(col_edges, xy_eval[:, 0]) - - yield ( - ( - ( - gVec_angs_vtx[:, 0].reshape(m_tth.shape), - gVec_angs_vtx[:, 1].reshape(m_tth.shape), - ), - ( - xy_eval_vtx[:, 0].reshape(m_tth.shape), - xy_eval_vtx[:, 1].reshape(m_tth.shape), - ), - conn, - areas.reshape(netas, ntths), - ( - xy_eval[:, 0].reshape(netas, ntths), - xy_eval[:, 1].reshape(netas, ntths), - ), - ( - row_indices.reshape(netas, ntths), - col_indices.reshape(netas, ntths), - ), - ) - ) - - -def extract_detector_transformation( - detector_params: Union[dict[str, Any], np.ndarray], -) -> tuple[np.ndarray, np.ndarray, float, np.ndarray]: - """ - Construct arrays from detector parameters. - - goes from 10 vector of detector parames OR instrument config dictionary - (from YAML spec) to affine transformation arrays - - Parameters - ---------- - detector_params : TYPE - DESCRIPTION. - Returns - ------- - rMat_d : TYPE - DESCRIPTION. - tVec_d : TYPE - DESCRIPTION. - chi : TYPE - DESCRIPTION. - tVec_s : TYPE - DESCRIPTION. - - """ - # extract variables for convenience - if isinstance(detector_params, dict): - rMat_d = xfcapi.make_rmat_of_expmap( - np.array(detector_params['detector']['transform']['tilt']) - ) - tVec_d = np.r_[detector_params['detector']['transform']['translation']] - chi = detector_params['oscillation_stage']['chi'] - tVec_s = np.r_[detector_params['oscillation_stage']['translation']] - else: - assert len( - detector_params >= 10 - ), "list of detector parameters must have length >= 10" - rMat_d = xfcapi.make_rmat_of_expmap(detector_params[:3]) - tVec_d = np.ascontiguousarray(detector_params[3:6]) - chi = detector_params[6] - tVec_s = np.ascontiguousarray(detector_params[7:10]) - return rMat_d, tVec_d, chi, tVec_s + return xy_det, valid_mask diff --git a/hexrd/hedm/config/__init__.py b/hexrd/hedm/config/__init__.py index c46374847..5a51db4e0 100644 --- a/hexrd/hedm/config/__init__.py +++ b/hexrd/hedm/config/__init__.py @@ -2,12 +2,10 @@ import yaml -from . import root -from . import utils # The following were moved to core -from hexrd.core.config import config +from hexrd.core.config import config, root, utils from hexrd.core.config import material """ diff --git a/hexrd/hedm/config/dumper.py b/hexrd/hedm/config/dumper.py deleted file mode 100644 index 4067ed93c..000000000 --- a/hexrd/hedm/config/dumper.py +++ /dev/null @@ -1,64 +0,0 @@ -import yaml -import numpy as np -from pathlib import Path - - -def _dict_path_by_id(d, value, path=()): - if id(d) == value: - return path - elif isinstance(d, dict): - for k, v in d.items(): - p = _dict_path_by_id(v, value, path + (k,)) - if p is not None: - return p - elif isinstance(d, list): - for i, v in enumerate(d): - p = _dict_path_by_id(v, value, path + (str(i),)) - if p is not None: - return p - - return None - - -class NumPyIncludeDumper(yaml.Dumper): - """ - A yaml.Dumper implementation that will dump numpy.ndarray's. The arrays are - saved using numpy.save(...) in path generate from the values path in the - YAML document, relative to the location of the YAML document. 
For example - - "foo": - "bar": ndarray - - The ndarray would be saved in foo/bar.npy. - - """ - - def __init__(self, stream, **kwargs): - super().__init__(stream, **kwargs) - - self._basedir = Path(stream.name).parent - self._dct = None - - def ndarray_representer(self, data): - path = _dict_path_by_id(self._dct, id(data)) - path = Path(*path) - if path is None: - raise ValueError("Unable to determine array path.") - - array_path = self._basedir / path.with_suffix('.npy') - array_path.parent.mkdir(parents=True, exist_ok=True) - - np.save(array_path, data) - relative_array_path = array_path.relative_to(self._basedir) - - return self.represent_scalar('!include', str(relative_array_path)) - - # We need intercept the dict so we can lookup the paths to ndarray's - def represent(self, data): - self._dct = data - return super().represent(data) - - -NumPyIncludeDumper.add_representer( - np.ndarray, NumPyIncludeDumper.ndarray_representer -) diff --git a/hexrd/hedm/config/fitgrains.py b/hexrd/hedm/config/fitgrains.py index 432a01048..fe4ade161 100644 --- a/hexrd/hedm/config/fitgrains.py +++ b/hexrd/hedm/config/fitgrains.py @@ -2,7 +2,7 @@ import os from hexrd.core.config.config import Config -from .utils import get_exclusion_parameters +from hexrd.core.config.utils import get_exclusion_parameters logger = logging.getLogger('hexrd.config') diff --git a/hexrd/hedm/config/instrument.py b/hexrd/hedm/config/instrument.py deleted file mode 100644 index 35fbebe4b..000000000 --- a/hexrd/hedm/config/instrument.py +++ /dev/null @@ -1,63 +0,0 @@ -import h5py -import yaml - -from hexrd.core.config.config import Config -from .loader import NumPyIncludeLoader - -from hexrd.core import instrument - - -class Instrument(Config): - """Handle HEDM instrument config.""" - - def __init__(self, cfg, instr_file=None): - super().__init__(cfg) - self._configuration = instr_file - self._max_workers = self._cfg.multiprocessing - - # Note: instrument is instantiated with a yaml dictionary; use self - # to instantiate classes based on this one - @property - def configuration(self): - """Return the YAML config filename.""" - return self._configuration - - @property - def hedm(self): - """Return the HEDMInstrument class.""" - if not hasattr(self, '_hedm'): - if self.configuration is None: - raise RuntimeError("No instrument file was given") - - try: - icfg = h5py.File(self.configuration, 'r') - except OSError: - with open(self.configuration, 'r') as f: - icfg = yaml.load(f, Loader=NumPyIncludeLoader) - - kwargs = { - 'instrument_config': icfg, - 'max_workers': self._max_workers, - } - self._hedm = instrument.HEDMInstrument(**kwargs) - return self._hedm - - @hedm.setter - def hedm(self, icfg_fname): - """Set the HEDMInstrument class.""" - try: - icfg = h5py.File(icfg_fname, 'r') - except OSError: - with open(icfg_fname, 'r') as f: - icfg = yaml.load(f, Loader=NumPyIncludeLoader) - - kwargs = { - 'instrument_config': icfg, - 'max_workers': self._max_workers, - } - self._hedm = instrument.HEDMInstrument(**kwargs) - - @property - def detector_dict(self): - """Return dictionary of detectors.""" - return self.hedm.detectors diff --git a/hexrd/hedm/config/loader.py b/hexrd/hedm/config/loader.py deleted file mode 100644 index 4d378d859..000000000 --- a/hexrd/hedm/config/loader.py +++ /dev/null @@ -1,25 +0,0 @@ -import yaml -from pathlib import Path -import numpy as np - - -class NumPyIncludeLoader(yaml.SafeLoader): - """ - A yaml.Loader implemenation that allows !include . This - allows the loading of npy files into the YAML document. 
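A minimal usage sketch (assuming a `config.yml` that sits next to the .npy files it references and contains, e.g., `arr: !include arr.npy`):

    with open('config.yml', 'r') as f:
        cfg = yaml.load(f, Loader=NumPyIncludeLoader)
    # cfg['arr'] is the ndarray loaded from arr.npy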
- """ - - def __init__(self, stream): - self._basedir = Path(stream.name).parent - - super(NumPyIncludeLoader, self).__init__(stream) - - def include(self, node): - file_path = self._basedir / self.construct_scalar(node) - - a = np.load(file_path) - - return a - - -NumPyIncludeLoader.add_constructor('!include', NumPyIncludeLoader.include) diff --git a/hexrd/hedm/config/root.py b/hexrd/hedm/config/root.py deleted file mode 100644 index c8254d8a9..000000000 --- a/hexrd/hedm/config/root.py +++ /dev/null @@ -1,204 +0,0 @@ -import os -from pathlib import Path -import logging -import multiprocessing as mp - -from hexrd.core.constants import shared_ims_key -from hexrd.core import imageseries - -from hexrd.core.config.config import Config -from .instrument import Instrument -from .findorientations import FindOrientationsConfig -from .fitgrains import FitGrainsConfig -from hexrd.core.config.material import MaterialConfig - -logger = logging.getLogger('hexrd.config') - - -class RootConfig(Config): - - @property - def working_dir(self): - """Working directory, either specified in file or current directory - - If the directory is not specified in the config file, then it will - default to the current working directory. If it is specified, the - directory must exist, or it will throw an IOError. - """ - wdir = Path(self.get('working_dir', default=Path.cwd())) - if not wdir.exists(): - raise IOError(f'"working_dir": {str(wdir)} does not exist') - return wdir - - @working_dir.setter - def working_dir(self, val): - val = Path(val) - if not val.is_dir(): - raise IOError('"working_dir": "%s" does not exist' % str(val)) - self.set('working_dir', val) - - @property - def analysis_name(self): - """Name of the analysis - - This will be used to set up the output directory. The name can - contain slash ("/") characters, which will generate a subdirectory - structure in the `analysis_dir`. - """ - return str(self.get('analysis_name', default='analysis')) - - @analysis_name.setter - def analysis_name(self, val): - self.set('analysis_name', val) - - @property - def analysis_dir(self): - """Analysis directory, where output files go - - The name is derived from `working_dir` and `analysis_name`. This - property returns a Path object. The directory and any intermediate - directories can be created with the `mkdir()` method, e.g. - - >>> analysis_dir.mkdir(parents=True, exist_ok=True) - """ - adir = Path(self.working_dir) / self.analysis_name - return adir - - @property - def analysis_id(self): - return '_'.join( - [ - self.analysis_name.strip().replace(' ', '-'), - self.material.active.strip().replace(' ', '-'), - ] - ) - - @property - def new_file_placement(self): - """Use new file placements for find-orientations and fit-grains - - The new file placement rules put several files in the `analysis_dir` - instead of the `working_dir`. 
- """ - return self.get('new_file_placement', default=False) - - @property - def find_orientations(self): - return FindOrientationsConfig(self) - - @property - def fit_grains(self): - if not hasattr(self, "_fitgrain_config"): - self._fitgrain_config = FitGrainsConfig(self) - return self._fitgrain_config - - @property - def instrument(self): - if not hasattr(self, '_instr_config'): - instr_file = self.get('instrument', None) - if instr_file is not None: - instr_file = self.check_filename(instr_file, self.working_dir) - self._instr_config = Instrument(self, instr_file) - return self._instr_config - - @instrument.setter - def instrument(self, instr_config): - self._instr_config = instr_config - - @property - def material(self): - if not hasattr(self, '_material_config'): - self._material_config = MaterialConfig(self) - - if self.instrument.configuration is not None: - # !!! must make matl beam energy consistent with the instrument - beam_energy = self.instrument.hedm.beam_energy - self._material_config.beam_energy = beam_energy - - return self._material_config - - @material.setter - def material(self, material_config): - self._material_config = material_config - - @property - def multiprocessing(self): - # determine number of processes to run in parallel - multiproc = self.get('multiprocessing', default=-1) - ncpus = mp.cpu_count() - if multiproc == 'all': - res = ncpus - elif multiproc == 'half': - temp = ncpus // 2 - res = temp if temp else 1 - elif isinstance(multiproc, int): - if multiproc >= 0: - if multiproc > ncpus: - logger.warning( - 'Resuested %s processes, %d available', - multiproc, - ncpus, - ) - res = ncpus - else: - res = multiproc if multiproc else 1 - else: - temp = ncpus + multiproc - if temp < 1: - logger.warning( - 'Cannot use less than 1 process, requested %d of %d', - temp, - ncpus, - ) - res = 1 - else: - res = temp - else: - temp = ncpus - 1 - logger.warning("Invalid value %s for multiprocessing", multiproc) - res = temp - return res - - @multiprocessing.setter - def multiprocessing(self, val): - isint = isinstance(val, int) - if val in ('half', 'all', -1): - self.set('multiprocessing', val) - elif isint and val >= 0 and val <= mp.cpu_count(): - self.set('multiprocessing', int(val)) - else: - raise RuntimeError( - '"multiprocessing": must be 1:%d, got %s' - % (mp.cpu_count(), val) - ) - - @property - def image_series(self): - """Return the imageseries dictionary.""" - if not hasattr(self, '_image_dict'): - self._image_dict = dict() - fmt = self.get('image_series:format') - imsdata = self.get('image_series:data') - for ispec in imsdata: - fname = self.check_filename(ispec['file'], self.working_dir) - args = ispec['args'] - ims = imageseries.open(fname, fmt, **args) - oms = imageseries.omega.OmegaImageSeries(ims) - try: - panel = ispec['panel'] - if isinstance(panel, (tuple, list)): - panel = '_'.join(panel) - elif panel is None: - panel = shared_ims_key - except KeyError: - try: - panel = oms.metadata['panel'] - except KeyError: - panel = shared_ims_key - self._image_dict[panel] = oms - - return self._image_dict - - @image_series.setter - def image_series(self, ims_dict): - self._image_dict = ims_dict diff --git a/hexrd/hedm/config/utils.py b/hexrd/hedm/config/utils.py deleted file mode 100644 index 4733a6efe..000000000 --- a/hexrd/hedm/config/utils.py +++ /dev/null @@ -1,82 +0,0 @@ -from collections import namedtuple -import copy -import warnings - - -ExclusionParameters = namedtuple( - 'ExclusionParameters', - [ - "dmin", - "dmax", - "tthmin", - "tthmax", - "sfacmin", 
- "sfacmax", - "pintmin", - "pintmax", - ], -) - - -class Null: - pass - - -null = Null() - - -def merge_dicts(a, b): - """Return a merged dict, updating values from `a` with values from `b`.""" - # need to pass a deep copy of a at the top level only: - return _merge_dicts(copy.deepcopy(a), b) - - -def _merge_dicts(a, b): - for k, v in b.items(): - if isinstance(v, dict): - if a.get(k) is None: - # happens in cases where all but section head is commented - a[k] = {} - _merge_dicts(a[k], v) - else: - if v is None and a.get(k) is not None: - # entire section commented out. Inherit, don't overwrite - pass - else: - a[k] = v - return a - - -def get_exclusion_parameters(cfg, prefix): - """Return flag use saved parameters and exclusion parameters""" - # - yaml_key = lambda s: ":".join((prefix, s)) - # - # Check for value from old spec for "sfacmin"; use that if it is given, - # but if the new spec is also there, it will override. Likewise for - # "tth_max", as used in fit_grains. - # -- Should add a deprecated warning if min_sfac_ratio is used - # - sfmin_dflt = cfg.get(yaml_key("min_sfac_ratio"), None) - if sfmin_dflt is not None: - warnings.warn( - '"min_sfac_ratio" is deprecated, use "sfacmin" instead', - DeprecationWarning, - ) - # Default for reset_exclusions is True so that old config files will - # produce the same behavior. - reset_exclusions = cfg.get(yaml_key("reset_exclusions"), True) - - return ( - reset_exclusions, - ExclusionParameters( - dmin=cfg.get(yaml_key("dmin"), None), - dmax=cfg.get(yaml_key("dmax"), None), - tthmin=cfg.get(yaml_key("tthmin"), None), - tthmax=cfg.get(yaml_key("tthmax"), None), - sfacmin=cfg.get(yaml_key("sfacmin"), sfmin_dflt), - sfacmax=cfg.get(yaml_key("sfacmax"), None), - pintmin=cfg.get(yaml_key("pintmin"), None), - pintmax=cfg.get(yaml_key("pintmax"), None), - ), - ) diff --git a/hexrd/hedm/instrument/__init__.py b/hexrd/hedm/instrument/__init__.py index b5414013c..024003040 100644 --- a/hexrd/hedm/instrument/__init__.py +++ b/hexrd/hedm/instrument/__init__.py @@ -1,4 +1,4 @@ -from .hedm_instrument import ( +from hexrd.core.instrument.hedm_instrument import ( calc_angles_from_beam_vec, calc_beam_vec, centers_of_edge_vec, @@ -10,4 +10,4 @@ unwrap_dict_to_h5, unwrap_h5_to_dict, ) -from .detector import Detector +from hexrd.core.instrument.detector import Detector diff --git a/hexrd/hedm/instrument/detector.py b/hexrd/hedm/instrument/detector.py deleted file mode 100644 index 858fa21d7..000000000 --- a/hexrd/hedm/instrument/detector.py +++ /dev/null @@ -1,2128 +0,0 @@ -from abc import abstractmethod -import copy -import os -from typing import Optional - -from hexrd.core.instrument.constants import ( - COATING_DEFAULT, - FILTER_DEFAULTS, - PHOSPHOR_DEFAULT, -) -from hexrd.core.instrument.physics_package import AbstractPhysicsPackage -import numpy as np -import numba - -from hexrd.core import constants as ct -from hexrd.core import distortion as distortion_pkg -from hexrd.core import matrixutil as mutil - -# TODO: Resolve extra-core-dependency -from hexrd.hedm import xrdutil -from hexrd.core.rotations import mapAngle - -from hexrd.core.material import crystallography -from hexrd.core.material.crystallography import PlaneData - -from hexrd.core.transforms.xfcapi import ( - xy_to_gvec, - gvec_to_xy, - make_beam_rmat, - make_rmat_of_expmap, - oscill_angles_of_hkls, - angles_to_dvec, -) - -from hexrd.core.utils.decorators import memoize -from hexrd.core.gridutil import cellIndices -from hexrd.core.instrument import detector_coatings -from 
hexrd.core.material.utils import ( - calculate_linear_absorption_length, - calculate_incoherent_scattering, -) - -distortion_registry = distortion_pkg.Registry() - -max_workers_DFLT = max(1, os.cpu_count() - 1) - -beam_energy_DFLT = 65.351 - -# Memoize these, so each detector can avoid re-computing if nothing -# has changed. -_lorentz_factor = memoize(crystallography.lorentz_factor) -_polarization_factor = memoize(crystallography.polarization_factor) - - -class Detector: - """ - Base class for 2D detectors with functions and properties - common to planar and cylindrical detectors. This class - will be inherited by both those classes. - """ - - __pixelPitchUnit = 'mm' - - # Abstract methods that must be redefined in derived classes - @property - @abstractmethod - def detector_type(self): - raise NotImplementedError - - @abstractmethod - def cart_to_angles( - self, - xy_data, - rmat_s=None, - tvec_s=None, - tvec_c=None, - apply_distortion=False, - ): - """ - Transform cartesian coordinates to angular. - - Parameters - ---------- - xy_data : TYPE - The (n, 2) array of n (x, y) coordinates to be transformed in - either the raw or ideal cartesian plane (see `apply_distortion` - kwarg below). - rmat_s : array_like, optional - The (3, 3) COB matrix for the sample frame. The default is None. - tvec_s : array_like, optional - The (3, ) translation vector for the sample frame. - The default is None. - tvec_c : array_like, optional - The (3, ) translation vector for the crystal frame. - The default is None. - apply_distortion : bool, optional - If True, apply distortion to the inpout cartesian coordinates. - The default is False. - - Returns - ------- - tth_eta : TYPE - DESCRIPTION. - g_vec : TYPE - DESCRIPTION. - - """ - raise NotImplementedError - - @abstractmethod - def angles_to_cart( - self, - tth_eta, - rmat_s=None, - tvec_s=None, - rmat_c=None, - tvec_c=None, - apply_distortion=False, - ): - """ - Transform angular coordinates to cartesian. - - Parameters - ---------- - tth_eta : array_like - The (n, 2) array of n (tth, eta) coordinates to be transformed. - rmat_s : array_like, optional - The (3, 3) COB matrix for the sample frame. The default is None. - tvec_s : array_like, optional - The (3, ) translation vector for the sample frame. - The default is None. - rmat_c : array_like, optional - (3, 3) COB matrix for the crystal frame. - The default is None. - tvec_c : array_like, optional - The (3, ) translation vector for the crystal frame. - The default is None. - apply_distortion : bool, optional - If True, apply distortion to take cartesian coordinates to the - "warped" configuration. The default is False. - - Returns - ------- - xy_det : array_like - The (n, 2) array on the n input coordinates in the . - - """ - raise NotImplementedError - - @abstractmethod - def cart_to_dvecs(self, xy_data): - """Convert cartesian coordinates to dvectors""" - raise NotImplementedError - - @abstractmethod - def pixel_angles(self, origin=ct.zeros_3): - raise NotImplementedError - - @abstractmethod - def pixel_tth_gradient(self, origin=ct.zeros_3): - raise NotImplementedError - - @abstractmethod - def pixel_eta_gradient(self, origin=ct.zeros_3): - raise NotImplementedError - - @abstractmethod - def calc_filter_coating_transmission(self, energy): - pass - - @property - @abstractmethod - def beam_position(self): - """ - returns the coordinates of the beam in the cartesian detector - frame {Xd, Yd, Zd}. NaNs if no intersection. 
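A usage sketch: since NaNs signal a miss, `np.all(np.isfinite(panel.beam_position))` is one way to check that the beam actually intersects a concrete panel.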
- """ - raise NotImplementedError - - @property - def extra_config_kwargs(self): - return {} - - # End of abstract methods - - def __init__( - self, - rows=2048, - cols=2048, - pixel_size=(0.2, 0.2), - tvec=np.r_[0.0, 0.0, -1000.0], - tilt=ct.zeros_3, - name='default', - bvec=ct.beam_vec, - xrs_dist=None, - evec=ct.eta_vec, - saturation_level=None, - panel_buffer=None, - tth_distortion=None, - roi=None, - group=None, - distortion=None, - max_workers=max_workers_DFLT, - detector_filter: Optional[detector_coatings.Filter] = None, - detector_coating: Optional[detector_coatings.Coating] = None, - phosphor: Optional[detector_coatings.Phosphor] = None, - ): - """ - Instantiate a PlanarDetector object. - - Parameters - ---------- - rows : TYPE, optional - DESCRIPTION. The default is 2048. - cols : TYPE, optional - DESCRIPTION. The default is 2048. - pixel_size : TYPE, optional - DESCRIPTION. The default is (0.2, 0.2). - tvec : TYPE, optional - DESCRIPTION. The default is np.r_[0., 0., -1000.]. - tilt : TYPE, optional - DESCRIPTION. The default is ct.zeros_3. - name : TYPE, optional - DESCRIPTION. The default is 'default'. - bvec : TYPE, optional - DESCRIPTION. The default is ct.beam_vec. - evec : TYPE, optional - DESCRIPTION. The default is ct.eta_vec. - saturation_level : TYPE, optional - DESCRIPTION. The default is None. - panel_buffer : TYPE, optional - If a scalar or len(2) array_like, the interpretation is a border - in mm. If an array with shape (nrows, ncols), interpretation is a - boolean with True marking valid pixels. The default is None. - roi : TYPE, optional - DESCRIPTION. The default is None. - group : TYPE, optional - DESCRIPTION. The default is None. - distortion : TYPE, optional - DESCRIPTION. The default is None. - detector_filter : detector_coatings.Filter, optional - filter specifications including material type, - density and thickness. Used for absorption correction - calculations. - detector_coating : detector_coatings.Coating, optional - coating specifications including material type, - density and thickness. Used for absorption correction - calculations. - phosphor : detector_coatings.Phosphor, optional - phosphor specifications including material type, - density and thickness. Used for absorption correction - calculations. - - Returns - ------- - None. 
- - """ - self._name = name - - self._rows = rows - self._cols = cols - - self._pixel_size_row = pixel_size[0] - self._pixel_size_col = pixel_size[1] - - self._saturation_level = saturation_level - - self._panel_buffer = panel_buffer - - self._tth_distortion = tth_distortion - - if roi is None: - self._roi = roi - else: - assert len(roi) == 2, "roi is set via (start_row, start_col)" - self._roi = ( - (roi[0], roi[0] + self._rows), - (roi[1], roi[1] + self._cols), - ) - - self._tvec = np.array(tvec).flatten() - self._tilt = np.array(tilt).flatten() - - self._bvec = np.array(bvec).flatten() - self._xrs_dist = xrs_dist - - self._evec = np.array(evec).flatten() - - self._distortion = distortion - - self.max_workers = max_workers - - self.group = group - - if detector_filter is None: - detector_filter = detector_coatings.Filter( - **FILTER_DEFAULTS.TARDIS - ) - self.filter = detector_filter - - if detector_coating is None: - detector_coating = detector_coatings.Coating(**COATING_DEFAULT) - self.coating = detector_coating - - if phosphor is None: - phosphor = detector_coatings.Phosphor(**PHOSPHOR_DEFAULT) - self.phosphor = phosphor - - # detector ID - @property - def name(self): - return self._name - - @name.setter - def name(self, s): - assert isinstance(s, str), "requires string input" - self._name = s - - @property - def lmfit_name(self): - # lmfit requires underscores instead of dashes - return self.name.replace('-', '_') - - # properties for physical size of rectangular detector - @property - def rows(self): - return self._rows - - @rows.setter - def rows(self, x): - assert isinstance(x, int) - self._rows = x - - @property - def cols(self): - return self._cols - - @cols.setter - def cols(self, x): - assert isinstance(x, int) - self._cols = x - - @property - def pixel_size_row(self): - return self._pixel_size_row - - @pixel_size_row.setter - def pixel_size_row(self, x): - self._pixel_size_row = float(x) - - @property - def pixel_size_col(self): - return self._pixel_size_col - - @pixel_size_col.setter - def pixel_size_col(self, x): - self._pixel_size_col = float(x) - - @property - def pixel_area(self): - return self.pixel_size_row * self.pixel_size_col - - @property - def saturation_level(self): - return self._saturation_level - - @saturation_level.setter - def saturation_level(self, x): - if x is not None: - assert np.isreal(x) - self._saturation_level = x - - @property - def panel_buffer(self): - return self._panel_buffer - - @panel_buffer.setter - def panel_buffer(self, x): - """if not None, a buffer in mm (x, y)""" - if x is not None: - assert len(x) == 2 or x.ndim == 2 - self._panel_buffer = x - - @property - def tth_distortion(self): - return self._tth_distortion - - @tth_distortion.setter - def tth_distortion(self, x): - """if not None, a buffer in mm (x, y)""" - if x is not None: - assert x.ndim == 2 and x.shape == self.shape - self._tth_distortion = x - - @property - def roi(self): - return self._roi - - @roi.setter - def roi(self, vertex_array): - """ - !!! 
vertex array must be (r0, c0) - """ - if vertex_array is not None: - assert ( - len(vertex_array) == 2 - ), "roi is set via (start_row, start_col)" - self._roi = ( - (vertex_array[0], vertex_array[0] + self.rows), - (vertex_array[1], vertex_array[1] + self.cols), - ) - - @property - def row_dim(self): - return self.rows * self.pixel_size_row - - @property - def col_dim(self): - return self.cols * self.pixel_size_col - - @property - def row_pixel_vec(self): - return self.pixel_size_row * ( - 0.5 * (self.rows - 1) - np.arange(self.rows) - ) - - @property - def row_edge_vec(self): - return _row_edge_vec(self.rows, self.pixel_size_row) - - @property - def col_pixel_vec(self): - return self.pixel_size_col * ( - np.arange(self.cols) - 0.5 * (self.cols - 1) - ) - - @property - def col_edge_vec(self): - return _col_edge_vec(self.cols, self.pixel_size_col) - - @property - def corner_ul(self): - return np.r_[-0.5 * self.col_dim, 0.5 * self.row_dim] - - @property - def corner_ll(self): - return np.r_[-0.5 * self.col_dim, -0.5 * self.row_dim] - - @property - def corner_lr(self): - return np.r_[0.5 * self.col_dim, -0.5 * self.row_dim] - - @property - def corner_ur(self): - return np.r_[0.5 * self.col_dim, 0.5 * self.row_dim] - - @property - def shape(self): - return (self.rows, self.cols) - - @property - def tvec(self): - return self._tvec - - @tvec.setter - def tvec(self, x): - x = np.array(x).flatten() - assert len(x) == 3, 'input must have length = 3' - self._tvec = x - - @property - def tilt(self): - return self._tilt - - @tilt.setter - def tilt(self, x): - assert len(x) == 3, 'input must have length = 3' - self._tilt = np.array(x).squeeze() - - @property - def bvec(self): - return self._bvec - - @bvec.setter - def bvec(self, x): - x = np.array(x).flatten() - assert ( - len(x) == 3 and sum(x * x) > 1 - ct.sqrt_epsf - ), 'input must have length = 3 and have unit magnitude' - self._bvec = x - - @property - def xrs_dist(self): - return self._xrs_dist - - @xrs_dist.setter - def xrs_dist(self, x): - assert x is None or np.isscalar( - x - ), f"'source_distance' must be None or scalar; you input '{x}'" - self._xrs_dist = x - - @property - def evec(self): - return self._evec - - @evec.setter - def evec(self, x): - x = np.array(x).flatten() - assert ( - len(x) == 3 and sum(x * x) > 1 - ct.sqrt_epsf - ), 'input must have length = 3 and have unit magnitude' - self._evec = x - - @property - def distortion(self): - return self._distortion - - @distortion.setter - def distortion(self, x): - if x is not None: - registry = distortion_registry.distortion_registry - check_arg = np.zeros(len(registry), dtype=bool) - for i, dcls in enumerate(registry.values()): - check_arg[i] = isinstance(x, dcls) - assert np.any(check_arg), 'input distortion is not in registry!' - self._distortion = x - - @property - def rmat(self): - return make_rmat_of_expmap(self.tilt) - - @property - def normal(self): - return self.rmat[:, 2] - - # ...memoize??? - @property - def pixel_coords(self): - pix_i, pix_j = np.meshgrid( - self.row_pixel_vec, self.col_pixel_vec, indexing='ij' - ) - return pix_i, pix_j - - # ========================================================================= - # METHODS - # ========================================================================= - - def pixel_Q( - self, energy: np.floating, origin: np.ndarray = ct.zeros_3 - ) -> np.ndarray: - '''get the equivalent momentum transfer - for the angles. 
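Concretely, with lam = ct.keVToAngstrom(energy) and tth the pixel 2-theta, this evaluates Q = 4 * pi * sin(tth / 2) / lam.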
-
-        Parameters
-        ----------
-        energy: float
-            incident photon energy in keV
-        origin: np.ndarray
-            origin of diffraction volume
-
-        Returns
-        -------
-        np.ndarray
-            pixel wise Q in A^-1
-
-        '''
-        lam = ct.keVToAngstrom(energy)
-        tth, _ = self.pixel_angles(origin=origin)
-        return 4.0 * np.pi * np.sin(tth * 0.5) / lam
-
-    def pixel_compton_energy_loss(
-        self,
-        energy: np.floating,
-        origin: np.ndarray = ct.zeros_3,
-    ) -> np.ndarray:
-        '''inelastic compton scattering leads
-        to energy loss of the incident photons.
-        compute the final energy of the photons
-        for each pixel.
-
-        Parameters
-        ----------
-        energy: float
-            incident photon energy in keV
-        origin: np.ndarray
-            origin of diffraction volume
-
-        Returns
-        -------
-        np.ndarray
-            pixel wise energy of inelastically
-            scattered photons in keV
-        '''
-        energy = np.asarray(energy)
-        tth, _ = self.pixel_angles()
-        ang_fact = 1 - np.cos(tth)
-        beta = energy / ct.cRestmasskeV
-        return energy / (1 + beta * ang_fact)
-
-    def pixel_compton_attenuation_length(
-        self,
-        energy: np.floating,
-        density: np.floating,
-        formula: str,
-        origin: np.ndarray = ct.zeros_3,
-    ) -> np.ndarray:
-        '''each pixel intercepts inelastically
-        scattered photons of different energy.
-        the attenuation length and the transmission
-        for these photons are different. this function
-        calculates the attenuation length for each pixel
-        on the detector.
-
-        Parameters
-        ----------
-        energy: float
-            incident photon energy in keV
-        density: float
-            density of material in g/cc
-        formula: str
-            formula of the material scattering
-        origin: np.ndarray
-            origin of diffraction volume
-
-        Returns
-        -------
-        np.ndarray
-            pixel wise attenuation length of compton
-            scattered photons
-        '''
-        pixel_energy = self.pixel_compton_energy_loss(energy)
-
-        pixel_attenuation_length = calculate_linear_absorption_length(
-            density,
-            formula,
-            pixel_energy.flatten(),
-        )
-        return pixel_attenuation_length.reshape(self.shape)
-
-    def compute_compton_scattering_intensity(
-        self,
-        energy: np.floating,
-        rMat_s: np.array,
-        physics_package: AbstractPhysicsPackage,
-        origin: np.array = ct.zeros_3,
-    ) -> tuple[np.ndarray, np.ndarray, np.ndarray]:
-        '''compute the theoretical compton scattering
-        signal on the detector. this value is corrected
-        for the transmission of compton scattered photons
-        and normalized before being subtracted from the
-        raw intensity
-
-        Parameters
-        ----------
-        energy: float
-            energy of incident photon
-        rMat_s: np.ndarray
-            rotation matrix of sample orientation
-        physics_package: AbstractPhysicsPackage
-            physics package information
-
-        Returns
-        -------
-        compton_intensity: np.ndarray
-            transmission corrected compton scattering
-            intensity
-        '''
-
-        q = self.pixel_Q(energy)
-        inc_s = calculate_incoherent_scattering(
-            physics_package.sample_material, q.flatten()
-        ).reshape(self.shape)
-
-        inc_w = calculate_incoherent_scattering(
-            physics_package.window_material, q.flatten()
-        ).reshape(self.shape)
-
-        t_s = self.calc_compton_physics_package_transmission(
-            energy, rMat_s, physics_package
-        )
-
-        t_w = self.calc_compton_window_transmission(
-            energy, rMat_s, physics_package
-        )
-
-        return inc_s * t_s + inc_w * t_w, t_s, t_w
-
-    def polarization_factor(self, f_hor, f_vert, unpolarized=False):
-        """
-        Calculate the polarization factor for every pixel.
-
-        Parameters
-        ----------
-        f_hor : float
-            the fraction of horizontal polarization. for XFELs
-            this is close to 1.
-        f_vert : TYPE
-            the fraction of vertical polarization, which is ~0 for XFELs.
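For an XFEL-like, fully horizontally polarized beam, a call would look like `panel.polarization_factor(f_hor=1.0, f_vert=0.0)` (a sketch; the two fractions must sum to 1).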
- - Raises - ------ - RuntimeError - DESCRIPTION. - - Returns - ------- - TYPE - DESCRIPTION. - - """ - s = f_hor + f_vert - if np.abs(s - 1) > ct.sqrt_epsf: - msg = ( - "sum of fraction of " - "horizontal and vertical polarizations " - "must be equal to 1." - ) - raise RuntimeError(msg) - - if f_hor < 0 or f_vert < 0: - msg = ( - "fraction of polarization in horizontal " - "or vertical directions can't be negative." - ) - raise RuntimeError(msg) - - tth, eta = self.pixel_angles() - kwargs = { - 'tth': tth, - 'eta': eta, - 'f_hor': f_hor, - 'f_vert': f_vert, - 'unpolarized': unpolarized, - } - - return _polarization_factor(**kwargs) - - def lorentz_factor(self): - """ - calculate the lorentz factor for every pixel - - Parameters - ---------- - None - - Raises - ------ - None - - Returns - ------- - numpy.ndarray - returns an array the same size as the detector panel - with each element containg the lorentz factor of the - corresponding pixel - """ - tth, eta = self.pixel_angles() - return _lorentz_factor(tth) - - def config_dict( - self, - chi=0, - tvec=ct.zeros_3, - beam_energy=beam_energy_DFLT, - beam_vector=ct.beam_vec, - sat_level=None, - panel_buffer=None, - style='yaml', - ): - """ - Return a dictionary of detector parameters. - - Optional instrument level parameters. This is a convenience function - to work with the APIs in several functions in xrdutil. - - Parameters - ---------- - chi : float, optional - DESCRIPTION. The default is 0. - tvec : array_like (3,), optional - DESCRIPTION. The default is ct.zeros_3. - beam_energy : float, optional - DESCRIPTION. The default is beam_energy_DFLT. - beam_vector : aray_like (3,), optional - DESCRIPTION. The default is ct.beam_vec. - sat_level : scalar, optional - DESCRIPTION. The default is None. - panel_buffer : scalar, array_like (2,), optional - DESCRIPTION. The default is None. - - Returns - ------- - config_dict : dict - DESCRIPTION. 
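A usage sketch with hypothetical values:

    cfg = panel.config_dict(chi=0.0, style='hdf5')
    nrows = cfg['detector']['pixels']['rows']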
- - """ - assert style.lower() in ['yaml', 'hdf5'], ( - "style must be either 'yaml', or 'hdf5'; you gave '%s'" % style - ) - - config_dict = {} - - # ===================================================================== - # DETECTOR PARAMETERS - # ===================================================================== - # transform and pixels - # - # assign local vars; listify if necessary - tilt = self.tilt - translation = self.tvec - roi = ( - None - if self.roi is None - else np.array([self.roi[0][0], self.roi[1][0]]).flatten() - ) - if style.lower() == 'yaml': - tilt = tilt.tolist() - translation = translation.tolist() - tvec = tvec.tolist() - roi = None if roi is None else roi.tolist() - - det_dict = dict( - detector_type=self.detector_type, - transform=dict( - tilt=tilt, - translation=translation, - ), - pixels=dict( - rows=int(self.rows), - columns=int(self.cols), - size=[float(self.pixel_size_row), float(self.pixel_size_col)], - ), - ) - - if roi is not None: - # Only add roi if it is not None - det_dict['pixels']['roi'] = roi - - if self.group is not None: - # Only add group if it is not None - det_dict['group'] = self.group - - # distortion - if self.distortion is not None: - dparams = self.distortion.params - if style.lower() == 'yaml': - dparams = dparams.tolist() - dist_d = dict( - function_name=self.distortion.maptype, parameters=dparams - ) - det_dict['distortion'] = dist_d - - # saturation level - if sat_level is None: - sat_level = self.saturation_level - det_dict['saturation_level'] = float(sat_level) - - # panel buffer - if panel_buffer is None: - # could be none, a 2-element list, or a 2-d array (rows, cols) - panel_buffer = copy.deepcopy(self.panel_buffer) - # !!! now we have to do some style-dependent munging of panel_buffer - if isinstance(panel_buffer, np.ndarray): - if panel_buffer.ndim == 1: - assert len(panel_buffer) == 2, "length of 1-d buffer must be 2" - # if here is a 2-element array - if style.lower() == 'yaml': - panel_buffer = panel_buffer.tolist() - elif panel_buffer.ndim == 2: - if style.lower() == 'yaml': - # !!! can't practically write array-like buffers to YAML - # so forced to clobber - print("clobbering panel buffer array in yaml-ready output") - panel_buffer = [0.0, 0.0] - else: - raise RuntimeError( - "panel buffer ndim must be 1 or 2; you specified %d" - % panel_buffer.ndmin - ) - elif panel_buffer is None: - # still None on self - # !!! this gets handled by unwrap_dict_to_h5 now - - # if style.lower() == 'hdf5': - # # !!! can't write None to hdf5; substitute with zeros - # panel_buffer = np.r_[0., 0.] - pass - det_dict['buffer'] = panel_buffer - - det_dict.update(self.extra_config_kwargs) - - # ===================================================================== - # SAMPLE STAGE PARAMETERS - # ===================================================================== - stage_dict = dict(chi=chi, translation=tvec) - - # ===================================================================== - # BEAM PARAMETERS - # ===================================================================== - # !!! 
make_reflection_patches is still using the vector - # azim, pola = calc_angles_from_beam_vec(beam_vector) - # beam_dict = dict( - # energy=beam_energy, - # vector=dict( - # azimuth=azim, - # polar_angle=pola - # ) - # ) - beam_dict = dict(energy=beam_energy, vector=beam_vector) - - config_dict['detector'] = det_dict - config_dict['oscillation_stage'] = stage_dict - config_dict['beam'] = beam_dict - - return config_dict - - def cartToPixel(self, xy_det, pixels=False, apply_distortion=False): - """ - Coverts cartesian coordinates to pixel coordinates - - Parameters - ---------- - xy_det : array_like - The (n, 2) vstacked array of (x, y) pairs in the reference - cartesian frame (possibly subject to distortion). - pixels : bool, optional - If True, return discrete pixel indices; otherwise fractional pixel - coordinates are returned. The default is False. - apply_distortion : bool, optional - If True, apply self.distortion to the input (if applicable). - The default is False. - - Returns - ------- - ij_det : array_like - The (n, 2) array of vstacked (i, j) coordinates in the pixel - reference frame where i is the (slow) row dimension and j is the - (fast) column dimension. - - """ - xy_det = np.atleast_2d(xy_det) - if apply_distortion and self.distortion is not None: - xy_det = self.distortion.apply(xy_det) - - npts = len(xy_det) - - tmp_ji = xy_det - np.tile(self.corner_ul, (npts, 1)) - i_pix = -tmp_ji[:, 1] / self.pixel_size_row - 0.5 - j_pix = tmp_ji[:, 0] / self.pixel_size_col - 0.5 - - ij_det = np.vstack([i_pix, j_pix]).T - if pixels: - # Hide any runtime warnings in this conversion. Their output values - # will certainly be off the detector, which is fine. - with np.errstate(invalid='ignore'): - ij_det = np.array(np.round(ij_det), dtype=int) - - return ij_det - - def pixelToCart(self, ij_det): - """ - Convert vstacked array or list of [i,j] pixel indices - (or UL corner-based points) and convert to (x,y) in the - cartesian frame {Xd, Yd, Zd} - """ - ij_det = np.atleast_2d(ij_det) - - x = (ij_det[:, 1] + 0.5) * self.pixel_size_col + self.corner_ll[0] - y = ( - self.rows - ij_det[:, 0] - 0.5 - ) * self.pixel_size_row + self.corner_ll[1] - return np.vstack([x, y]).T - - def angularPixelSize(self, xy, rMat_s=None, tVec_s=None, tVec_c=None): - """ - Notes - ----- - !!! assumes xy are in raw (distorted) frame, if applicable - """ - # munge kwargs - if rMat_s is None: - rMat_s = ct.identity_3x3 - if tVec_s is None: - tVec_s = ct.zeros_3x1 - if tVec_c is None: - tVec_c = ct.zeros_3x1 - - # FIXME: perhaps not necessary, but safe... - xy = np.atleast_2d(xy) - - ''' - # --------------------------------------------------------------------- - # TODO: needs testing and memoized gradient arrays! 
- # --------------------------------------------------------------------- - # need origin arg - origin = np.dot(rMat_s, tVec_c).flatten() + tVec_s.flatten() - - # get pixel indices - i_crds = cellIndices(self.row_edge_vec, xy[:, 1]) - j_crds = cellIndices(self.col_edge_vec, xy[:, 0]) - - ptth_grad = self.pixel_tth_gradient(origin=origin)[i_crds, j_crds] - peta_grad = self.pixel_eta_gradient(origin=origin)[i_crds, j_crds] - - return np.vstack([ptth_grad, peta_grad]).T - ''' - # call xrdutil function - ang_ps = xrdutil.angularPixelSize( - xy, - (self.pixel_size_row, self.pixel_size_col), - self.rmat, - rMat_s, - self.tvec, - tVec_s, - tVec_c, - distortion=self.distortion, - beamVec=self.bvec, - etaVec=self.evec, - ) - return ang_ps - - def clip_to_panel(self, xy, buffer_edges=True): - """ - if self.roi is not None, uses it by default - - TODO: check if need shape kwarg - TODO: optimize ROI search better than list comprehension below - TODO: panel_buffer can be a 2-d boolean mask, but needs testing - - """ - xy = np.atleast_2d(xy) - - ''' - # !!! THIS LOGIC IS OBSOLETE - if self.roi is not None: - ij_crds = self.cartToPixel(xy, pixels=True) - ii, jj = polygon(self.roi[:, 0], self.roi[:, 1], - shape=(self.rows, self.cols)) - on_panel_rows = [i in ii for i in ij_crds[:, 0]] - on_panel_cols = [j in jj for j in ij_crds[:, 1]] - on_panel = np.logical_and(on_panel_rows, on_panel_cols) - else: - ''' - xlim = 0.5 * self.col_dim - ylim = 0.5 * self.row_dim - if buffer_edges and self.panel_buffer is not None: - if self.panel_buffer.ndim == 2: - pix = self.cartToPixel(xy, pixels=True) - - roff = np.logical_or(pix[:, 0] < 0, pix[:, 0] >= self.rows) - coff = np.logical_or(pix[:, 1] < 0, pix[:, 1] >= self.cols) - - idx = np.logical_or(roff, coff) - - on_panel = np.full(pix.shape[0], False) - valid_pix = pix[~idx, :] - on_panel[~idx] = self.panel_buffer[ - valid_pix[:, 0], valid_pix[:, 1] - ] - else: - xlim -= self.panel_buffer[0] - ylim -= self.panel_buffer[1] - on_panel_x = np.logical_and( - xy[:, 0] >= -xlim, xy[:, 0] <= xlim - ) - on_panel_y = np.logical_and( - xy[:, 1] >= -ylim, xy[:, 1] <= ylim - ) - on_panel = np.logical_and(on_panel_x, on_panel_y) - elif not buffer_edges or self.panel_buffer is None: - on_panel_x = np.logical_and(xy[:, 0] >= -xlim, xy[:, 0] <= xlim) - on_panel_y = np.logical_and(xy[:, 1] >= -ylim, xy[:, 1] <= ylim) - on_panel = np.logical_and(on_panel_x, on_panel_y) - return xy[on_panel, :], on_panel - - def interpolate_nearest(self, xy, img, pad_with_nans=True): - """ - TODO: revisit normalization in here? - - """ - is_2d = img.ndim == 2 - right_shape = img.shape[0] == self.rows and img.shape[1] == self.cols - assert ( - is_2d and right_shape - ), "input image must be 2-d with shape (%d, %d)" % ( - self.rows, - self.cols, - ) - - # initialize output with nans - if pad_with_nans: - int_xy = np.nan * np.ones(len(xy)) - else: - int_xy = np.zeros(len(xy)) - - # clip away points too close to or off the edges of the detector - xy_clip, on_panel = self.clip_to_panel(xy, buffer_edges=True) - - # get pixel indices of clipped points - i_src = cellIndices(self.row_pixel_vec, xy_clip[:, 1]) - j_src = cellIndices(self.col_pixel_vec, xy_clip[:, 0]) - - # next interpolate across cols - int_vals = img[i_src, j_src] - int_xy[on_panel] = int_vals - return int_xy - - def interpolate_bilinear( - self, - xy, - img, - pad_with_nans=True, - clip_to_panel=True, - on_panel: Optional[np.ndarray] = None, - ): - """ - Interpolate an image array at the specified cartesian points. 
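Each point is interpolated from the four surrounding pixel centers, weighted by its fractional row and column offsets.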
- - Parameters - ---------- - xy : array_like, (n, 2) - Array of cartesian coordinates in the image plane at which - to evaluate intensity. - img : array_like - 2-dimensional image array. - pad_with_nans : bool, optional - Toggle for assigning NaN to points that fall off the detector. - The default is True. - on_panel : np.ndarray, optional - If you want to skip clip_to_panel() for performance reasons, - just provide an array of which pixels are on the panel. - - Returns - ------- - int_xy : array_like, (n,) - The array of interpolated intensities at each of the n input - coordinates. - - Notes - ----- - TODO: revisit normalization in here? - """ - - is_2d = img.ndim == 2 - right_shape = img.shape[0] == self.rows and img.shape[1] == self.cols - assert ( - is_2d and right_shape - ), "input image must be 2-d with shape (%d, %d)" % ( - self.rows, - self.cols, - ) - - # initialize output with nans - if pad_with_nans: - int_xy = np.nan * np.ones(len(xy)) - else: - int_xy = np.zeros(len(xy)) - - if on_panel is None: - # clip away points too close to or off the edges of the detector - xy_clip, on_panel = self.clip_to_panel(xy, buffer_edges=True) - else: - xy_clip = xy[on_panel] - - # grab fractional pixel indices of clipped points - ij_frac = self.cartToPixel(xy_clip) - - # get floors/ceils from array of pixel _centers_ - # and fix indices running off the pixel centers - # !!! notice we already clipped points to the panel! - i_floor = cellIndices(self.row_pixel_vec, xy_clip[:, 1]) - i_floor_img = _fix_indices(i_floor, 0, self.rows - 1) - - j_floor = cellIndices(self.col_pixel_vec, xy_clip[:, 0]) - j_floor_img = _fix_indices(j_floor, 0, self.cols - 1) - - # ceilings from floors - i_ceil = i_floor + 1 - i_ceil_img = _fix_indices(i_ceil, 0, self.rows - 1) - - j_ceil = j_floor + 1 - j_ceil_img = _fix_indices(j_ceil, 0, self.cols - 1) - - # first interpolate at top/bottom rows - row_floor_int = (j_ceil - ij_frac[:, 1]) * img[ - i_floor_img, j_floor_img - ] + (ij_frac[:, 1] - j_floor) * img[i_floor_img, j_ceil_img] - row_ceil_int = (j_ceil - ij_frac[:, 1]) * img[ - i_ceil_img, j_floor_img - ] + (ij_frac[:, 1] - j_floor) * img[i_ceil_img, j_ceil_img] - - # next interpolate across cols - int_vals = (i_ceil - ij_frac[:, 0]) * row_floor_int + ( - ij_frac[:, 0] - i_floor - ) * row_ceil_int - int_xy[on_panel] = int_vals - return int_xy - - def make_powder_rings( - self, - pd, - merge_hkls=False, - delta_tth=None, - delta_eta=10.0, - eta_period=None, - eta_list=None, - rmat_s=ct.identity_3x3, - tvec_s=ct.zeros_3, - tvec_c=ct.zeros_3, - full_output=False, - tth_distortion=None, - ): - """ - Generate points on Debye_Scherrer rings over the detector. - - !!! it is assuming that rmat_s is built from (chi, ome) as it the case - for HEDM! - - Parameters - ---------- - pd : TYPE - DESCRIPTION. - merge_hkls : TYPE, optional - DESCRIPTION. The default is False. - delta_tth : TYPE, optional - DESCRIPTION. The default is None. - delta_eta : TYPE, optional - DESCRIPTION. The default is 10.. - eta_period : TYPE, optional - DESCRIPTION. The default is None. - eta_list : TYPE, optional - DESCRIPTION. The default is None. - rmat_s : TYPE, optional - DESCRIPTION. The default is ct.identity_3x3. - tvec_s : TYPE, optional - DESCRIPTION. The default is ct.zeros_3. - tvec_c : TYPE, optional - DESCRIPTION. The default is ct.zeros_3. - full_output : TYPE, optional - DESCRIPTION. The default is False. - tth_distortion : special class, optional - Special distortion class. The default is None. 
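A usage sketch (assuming `pd` is a PlaneData instance with its tThWidth set and `panel` is a concrete detector):

    valid_ang, valid_xy, tth_ranges = panel.make_powder_rings(
        pd, delta_eta=5.0
    )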
-
-        Raises
-        ------
-        RuntimeError
-            DESCRIPTION.
-
-        Returns
-        -------
-        TYPE
-            DESCRIPTION.
-
-        """
-        if tth_distortion is not None:
-            tnorms = mutil.rowNorm(np.vstack([tvec_s, tvec_c]))
-            assert np.all(
-                tnorms < ct.sqrt_epsf
-            ), "If using distortion function, translations must be zero"
-
-        # in case you want to give it tth angles directly
-        if isinstance(pd, PlaneData):
-            pd = PlaneData(None, pd)
-            if delta_tth is not None:
-                pd.tThWidth = np.radians(delta_tth)
-            else:
-                delta_tth = np.degrees(pd.tThWidth)
-
-            # !!! conversions, meh...
-            del_eta = np.radians(delta_eta)
-
-            # do merging if asked
-            if merge_hkls:
-                _, tth_ranges = pd.getMergedRanges(cullDupl=True)
-                tth = np.average(tth_ranges, axis=1)
-            else:
-                tth_ranges = pd.getTThRanges()
-                tth = pd.getTTh()
-            tth_pm = tth_ranges - np.tile(tth, (2, 1)).T
-            sector_vertices = np.vstack(
-                [
-                    [
-                        i[0],
-                        -del_eta,
-                        i[0],
-                        del_eta,
-                        i[1],
-                        del_eta,
-                        i[1],
-                        -del_eta,
-                        0.0,
-                        0.0,
-                    ]
-                    for i in tth_pm
-                ]
-            )
-        else:
-            # Okay, we have an array-like tth specification
-            tth = np.array(pd).flatten()
-            if delta_tth is None:
-                raise RuntimeError(
-                    "If supplying a 2theta list as first arg, "
-                    + "must supply a delta_tth"
-                )
-            tth_pm = 0.5 * delta_tth * np.r_[-1.0, 1.0]
-            tth_ranges = np.radians([i + tth_pm for i in tth])  # !!! units
-            sector_vertices = np.tile(
-                0.5
-                * np.radians(
-                    [
-                        -delta_tth,
-                        -delta_eta,
-                        -delta_tth,
-                        delta_eta,
-                        delta_tth,
-                        delta_eta,
-                        delta_tth,
-                        -delta_eta,
-                        0.0,
-                        0.0,
-                    ]
-                ),
-                (len(tth), 1),
-            )
-            # !! conversions, meh...
-            tth = np.radians(tth)
-            del_eta = np.radians(delta_eta)
-
-        # for generating rings, make eta vector in correct period
-        if eta_period is None:
-            eta_period = (-np.pi, np.pi)
-
-        if eta_list is None:
-            neta = int(360.0 / float(delta_eta))
-            # this is the vector of ETA EDGES
-            eta_edges = mapAngle(
-                np.radians(delta_eta * np.linspace(0.0, neta, num=neta + 1))
-                + eta_period[0],
-                eta_period,
-            )
-
-            # get eta bin centers from edges
-            """
-            # !!! this way is probably overkill, since we have delta eta
-            eta_centers = np.average(
-                np.vstack([eta[:-1], eta[1:]]),
-                axis=0)
-            """
-            # !!! should be safe as eta_edges are monotonic
-            eta_centers = eta_edges[:-1] + 0.5 * del_eta
-        else:
-            eta_centers = np.radians(eta_list).flatten()
-            neta = len(eta_centers)
-            eta_edges = (
-                np.tile(eta_centers, (2, 1))
-                + np.tile(0.5 * del_eta * np.r_[-1, 1], (neta, 1)).T
-            ).T.flatten()
-
-        # get chi and ome from rmat_s
-        # !!! API ambiguity
-        # !!! this assumes rmat_s was made from the composition
-        # !!! rmat_s = R(Xl, chi) * R(Yl, ome)
-        ome = np.arctan2(rmat_s[0, 2], rmat_s[0, 0])
-
-        # make list of angle tuples
-        angs = [
-            np.vstack([i * np.ones(neta), eta_centers, ome * np.ones(neta)])
-            for i in tth
-        ]
-
-        # need xy coords and pixel sizes
-        valid_ang = []
-        valid_xy = []
-        map_indices = []
-        npp = 5  # [ll, ul, ur, lr, center]
-        for i_ring in range(len(angs)):
-            # expand angles to patch vertices
-            these_angs = angs[i_ring].T
-
-            # push to vertices to see who falls off
-            # FIXME: clipping is not checking if masked regions are on the
-            # patch interior
-            patch_vertices = (
-                np.tile(these_angs[:, :2], (1, npp))
-                + np.tile(sector_vertices[i_ring], (neta, 1))
-            ).reshape(npp * neta, 2)
-
-            # find vertices that all fall on the panel
-            # !!! 
note: API ambiguity regarding rmat_s above
-            all_xy = self.angles_to_cart(
-                patch_vertices,
-                rmat_s=rmat_s,
-                tvec_s=tvec_s,
-                rmat_c=None,
-                tvec_c=tvec_c,
-                apply_distortion=True,
-            )
-
-            _, on_panel = self.clip_to_panel(all_xy)
-
-            # all vertices must be on...
-
-            patch_is_on = np.all(on_panel.reshape(neta, npp), axis=1)
-            patch_xys = all_xy.reshape(neta, 5, 2)[patch_is_on]
-
-            # !!! Have to apply after clipping, distortion can get wonky near
-            # the edge of the panel, and it is assumed to be <~1 deg
-            # !!! The tth_ranges are NOT correct!
-            if tth_distortion is not None:
-                patch_valid_angs = tth_distortion.apply(
-                    self.angles_to_cart(these_angs[patch_is_on, :2]),
-                    return_nominal=True,
-                )
-                patch_valid_xys = self.angles_to_cart(
-                    patch_valid_angs, apply_distortion=True
-                )
-            else:
-                patch_valid_angs = these_angs[patch_is_on, :2]
-                patch_valid_xys = patch_xys[:, -1, :].squeeze()
-
-            # form output arrays
-            valid_ang.append(patch_valid_angs)
-            valid_xy.append(patch_valid_xys)
-            map_indices.append(patch_is_on)
-        # ??? is this option necessary?
-        if full_output:
-            return valid_ang, valid_xy, tth_ranges, map_indices, eta_edges
-        else:
-            return valid_ang, valid_xy, tth_ranges
-
-    def map_to_plane(self, pts, rmat, tvec):
-        """
-        Map detector points to specified plane.
-
-        Parameters
-        ----------
-        pts : TYPE
-            DESCRIPTION.
-        rmat : TYPE
-            DESCRIPTION.
-        tvec : TYPE
-            DESCRIPTION.
-
-        Returns
-        -------
-        TYPE
-            DESCRIPTION.
-
-        Notes
-        -----
-        by convention:
-
-        n * (u*pts_l - tvec) = 0
-
-        [pts]_l = rmat*[pts]_m + tvec
-
-        """
-        # arg munging
-        pts = np.atleast_2d(pts)
-        npts = len(pts)
-
-        # map plane normal & translation vector, LAB FRAME
-        nvec_map_lab = rmat[:, 2].reshape(3, 1)
-        tvec_map_lab = np.atleast_2d(tvec).reshape(3, 1)
-        tvec_d_lab = np.atleast_2d(self.tvec).reshape(3, 1)
-
-        # put pts as 3-d in panel CS and transform to 3-d lab coords
-        pts_det = np.hstack([pts, np.zeros((npts, 1))])
-        pts_lab = np.dot(self.rmat, pts_det.T) + tvec_d_lab
-
-        # scaling along pts vectors to hit map plane
-        u = np.dot(nvec_map_lab.T, tvec_map_lab) / np.dot(
-            nvec_map_lab.T, pts_lab
-        )
-
-        # pts on map plane, in LAB FRAME
-        pts_map_lab = np.tile(u, (3, 1)) * pts_lab
-
-        return np.dot(rmat.T, pts_map_lab - tvec_map_lab)[:2, :].T
-
-    def simulate_rotation_series(
-        self,
-        plane_data,
-        grain_param_list,
-        eta_ranges=[
-            (-np.pi, np.pi),
-        ],
-        ome_ranges=[
-            (-np.pi, np.pi),
-        ],
-        ome_period=(-np.pi, np.pi),
-        chi=0.0,
-        tVec_s=ct.zeros_3,
-        wavelength=None,
-    ):
-        """
-        Simulate a monochromatic rotation series for a list of grains.
-
-        Parameters
-        ----------
-        plane_data : TYPE
-            DESCRIPTION.
-        grain_param_list : TYPE
-            DESCRIPTION.
-        eta_ranges : TYPE, optional
-            DESCRIPTION. The default is [(-np.pi, np.pi), ].
-        ome_ranges : TYPE, optional
-            DESCRIPTION. The default is [(-np.pi, np.pi), ].
-        ome_period : TYPE, optional
-            DESCRIPTION. The default is (-np.pi, np.pi).
-        chi : TYPE, optional
-            DESCRIPTION. The default is 0..
-        tVec_s : TYPE, optional
-            DESCRIPTION. The default is ct.zeros_3.
-        wavelength : TYPE, optional
-            DESCRIPTION. The default is None.
-
-        Returns
-        -------
-        valid_ids : TYPE
-            DESCRIPTION.
-        valid_hkls : TYPE
-            DESCRIPTION.
-        valid_angs : TYPE
-            DESCRIPTION.
-        valid_xys : TYPE
-            DESCRIPTION.
-        ang_pixel_size : TYPE
-            DESCRIPTION. 
- - """ - # grab B-matrix from plane data - bMat = plane_data.latVecOps['B'] - - # reconcile wavelength - # * added sanity check on exclusions here; possible to - # * make some reflections invalid (NaN) - if wavelength is None: - wavelength = plane_data.wavelength - else: - if plane_data.wavelength != wavelength: - plane_data.wavelength = ct.keVToAngstrom(wavelength) - assert not np.any( - np.isnan(plane_data.getTTh()) - ), "plane data exclusions incompatible with wavelength" - - # vstacked G-vector id, h, k, l - full_hkls = xrdutil._fetch_hkls_from_planedata(plane_data) - - """ LOOP OVER GRAINS """ - valid_ids = [] - valid_hkls = [] - valid_angs = [] - valid_xys = [] - ang_pixel_size = [] - for gparm in grain_param_list: - - # make useful parameters - rMat_c = make_rmat_of_expmap(gparm[:3]) - tVec_c = gparm[3:6] - vInv_s = gparm[6:] - - # All possible bragg conditions as vstacked [tth, eta, ome] - # for each omega solution - angList = np.vstack( - oscill_angles_of_hkls( - full_hkls[:, 1:], - chi, - rMat_c, - bMat, - wavelength, - v_inv=vInv_s, - beam_vec=self.bvec, - ) - ) - - # filter by eta and omega ranges - # ??? get eta range from detector? - allAngs, allHKLs = xrdutil._filter_hkls_eta_ome( - full_hkls, angList, eta_ranges, ome_ranges - ) - allAngs[:, 2] = mapAngle(allAngs[:, 2], ome_period) - - # find points that fall on the panel - det_xy, rMat_s, on_plane = xrdutil._project_on_detector_plane( - allAngs, - self.rmat, - rMat_c, - chi, - self.tvec, - tVec_c, - tVec_s, - self.distortion, - self.bvec, - ) - xys_p, on_panel = self.clip_to_panel(det_xy) - valid_xys.append(xys_p) - - # filter angs and hkls that are on the detector plane - # !!! check this -- seems unnecessary but the results of - # _project_on_detector_plane() can have len < the input? - # the output of _project_on_detector_plane has been modified to - # hand back the index array to remedy this JVB 2020-05-27 - if np.any(~on_plane): - allAngs = np.atleast_2d(allAngs[on_plane, :]) - allHKLs = np.atleast_2d(allHKLs[on_plane, :]) - - # grab hkls and gvec ids for this panel - valid_hkls.append(allHKLs[on_panel, 1:]) - valid_ids.append(allHKLs[on_panel, 0]) - - # reflection angles (voxel centers) and pixel size in (tth, eta) - valid_angs.append(allAngs[on_panel, :]) - ang_pixel_size.append(self.angularPixelSize(xys_p)) - return valid_ids, valid_hkls, valid_angs, valid_xys, ang_pixel_size - - def simulate_laue_pattern( - self, - crystal_data, - minEnergy=5.0, - maxEnergy=35.0, - rmat_s=None, - tvec_s=None, - grain_params=None, - beam_vec=None, - ): - """ """ - if isinstance(crystal_data, PlaneData): - - plane_data = crystal_data - - # grab the expanded list of hkls from plane_data - hkls = np.hstack(plane_data.getSymHKLs()) - - # and the unit plane normals (G-vectors) in CRYSTAL FRAME - gvec_c = np.dot(plane_data.latVecOps['B'], hkls) - - # Filter out g-vectors going in the wrong direction. `gvec_to_xy()` used - # to do this, but not anymore. - to_keep = np.dot(gvec_c.T, self.bvec) <= 0 - - hkls = hkls[:, to_keep] - gvec_c = gvec_c[:, to_keep] - elif len(crystal_data) == 2: - # !!! 
should clean this up - hkls = np.array(crystal_data[0]) - bmat = crystal_data[1] - gvec_c = np.dot(bmat, hkls) - else: - raise RuntimeError( - f'argument list not understood: {crystal_data=}' - ) - nhkls_tot = hkls.shape[1] - - # parse energy ranges - # TODO: allow for spectrum parsing - multipleEnergyRanges = False - if hasattr(maxEnergy, '__len__'): - assert len(maxEnergy) == len( - minEnergy - ), 'energy cutoff ranges must have the same length' - multipleEnergyRanges = True - lmin = [] - lmax = [] - for i in range(len(maxEnergy)): - lmin.append(ct.keVToAngstrom(maxEnergy[i])) - lmax.append(ct.keVToAngstrom(minEnergy[i])) - else: - lmin = ct.keVToAngstrom(maxEnergy) - lmax = ct.keVToAngstrom(minEnergy) - - # parse grain parameters kwarg - if grain_params is None: - grain_params = np.atleast_2d( - np.hstack([np.zeros(6), ct.identity_6x1]) - ) - n_grains = len(grain_params) - - # sample rotation - if rmat_s is None: - rmat_s = ct.identity_3x3 - - # dummy translation vector... make input - if tvec_s is None: - tvec_s = ct.zeros_3 - - # beam vector - if beam_vec is None: - beam_vec = ct.beam_vec - - # ========================================================================= - # LOOP OVER GRAINS - # ========================================================================= - - # pre-allocate output arrays - xy_det = np.nan * np.ones((n_grains, nhkls_tot, 2)) - hkls_in = np.nan * np.ones((n_grains, 3, nhkls_tot)) - angles = np.nan * np.ones((n_grains, nhkls_tot, 2)) - dspacing = np.nan * np.ones((n_grains, nhkls_tot)) - energy = np.nan * np.ones((n_grains, nhkls_tot)) - for iG, gp in enumerate(grain_params): - rmat_c = make_rmat_of_expmap(gp[:3]) - tvec_c = gp[3:6].reshape(3, 1) - vInv_s = mutil.vecMVToSymm(gp[6:].reshape(6, 1)) - - # stretch them: V^(-1) * R * Gc - gvec_s_str = np.dot(vInv_s, np.dot(rmat_c, gvec_c)) - ghat_c_str = mutil.unitVector(np.dot(rmat_c.T, gvec_s_str)) - - # project - dpts = gvec_to_xy( - ghat_c_str.T, - self.rmat, - rmat_s, - rmat_c, - self.tvec, - tvec_s, - tvec_c, - beam_vec=beam_vec, - ) - - # check intersections with detector plane - canIntersect = ~np.isnan(dpts[:, 0]) - npts_in = sum(canIntersect) - - if np.any(canIntersect): - dpts = dpts[canIntersect, :].reshape(npts_in, 2) - dhkl = hkls[:, canIntersect].reshape(3, npts_in) - - rmat_b = make_beam_rmat(beam_vec, ct.eta_vec) - # back to angles - tth_eta, gvec_l = xy_to_gvec( - dpts, - self.rmat, - rmat_s, - self.tvec, - tvec_s, - tvec_c, - rmat_b=rmat_b, - ) - tth_eta = np.vstack(tth_eta).T - - # warp measured points - if self.distortion is not None: - dpts = self.distortion.apply_inverse(dpts) - - # plane spacings and energies - dsp = 1.0 / mutil.rowNorm(gvec_s_str[:, canIntersect].T) - wlen = 2 * dsp * np.sin(0.5 * tth_eta[:, 0]) - - # clip to detector panel - _, on_panel = self.clip_to_panel(dpts, buffer_edges=True) - - if multipleEnergyRanges: - validEnergy = np.zeros(len(wlen), dtype=bool) - for i in range(len(lmin)): - in_energy_range = np.logical_and( - wlen >= lmin[i], wlen <= lmax[i] - ) - validEnergy = validEnergy | in_energy_range - else: - validEnergy = np.logical_and(wlen >= lmin, wlen <= lmax) - - # index for valid reflections - keepers = np.where(np.logical_and(on_panel, validEnergy))[0] - - # assign output arrays - xy_det[iG][keepers, :] = dpts[keepers, :] - hkls_in[iG][:, keepers] = dhkl[:, keepers] - angles[iG][keepers, :] = tth_eta[keepers, :] - dspacing[iG, keepers] = dsp[keepers] - energy[iG, keepers] = ct.keVToAngstrom(wlen[keepers]) - return xy_det, hkls_in, angles, dspacing, energy - - 
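A brief usage sketch for simulate_laue_pattern above (`panel` and `plane_data` are assumed to exist; with grain_params=None a single unstrained grain at the origin is simulated, and NaN entries mark reflections that miss the panel or fall outside the energy band):

    import numpy as np

    xy, hkls, angs, dsp, energy = panel.simulate_laue_pattern(
        plane_data, minEnergy=5.0, maxEnergy=35.0
    )
    hits = ~np.isnan(xy[0][:, 0])  # grain 0: spots that land on the panel
    print(f"{hits.sum()} reflections hit the panel")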
@staticmethod - def update_memoization_sizes(all_panels): - funcs = [ - _polarization_factor, - _lorentz_factor, - ] - - min_size = len(all_panels) - return Detector.increase_memoization_sizes(funcs, min_size) - - @staticmethod - def increase_memoization_sizes(funcs, min_size): - for f in funcs: - cache_info = f.cache_info() - if cache_info['maxsize'] < min_size: - f.set_cache_maxsize(min_size) - - def calc_physics_package_transmission( - self, - energy: np.floating, - rMat_s: np.array, - physics_package: AbstractPhysicsPackage, - ) -> np.float64: - """get the transmission from the physics package - need to consider HED and HEDM samples separately - """ - bvec = self.bvec - sample_normal = np.dot(rMat_s, [0.0, 0.0, np.sign(bvec[2])]) - seca = 1.0 / np.dot(bvec, sample_normal) - - tth, eta = self.pixel_angles() - angs = np.vstack( - (tth.flatten(), eta.flatten(), np.zeros(tth.flatten().shape)) - ).T - - dvecs = angles_to_dvec(angs, beam_vec=bvec) - - cosb = np.dot(dvecs, sample_normal) - '''angles for which secb <= 0 or close are diffracted beams - almost parallel to the sample surface or backscattered, we - can mask out these values by setting secb to nan - ''' - mask = np.logical_or( - cosb < 0, - np.isclose( - cosb, - 0.0, - atol=5e-2, - ), - ) - cosb[mask] = np.nan - secb = 1.0 / cosb.reshape(self.shape) - - T_sample = self.calc_transmission_sample( - seca, secb, energy, physics_package - ) - T_window = self.calc_transmission_window(secb, energy, physics_package) - - transmission_physics_package = T_sample * T_window - return transmission_physics_package - - def calc_compton_physics_package_transmission( - self, - energy: np.floating, - rMat_s: np.array, - physics_package: AbstractPhysicsPackage, - ) -> np.ndarray: - '''calculate the attenuation of inelastically - scattered photons. since these photons lose energy, - the attenuation length is angle dependent ergo a separate - routine than elastically scattered absorption. - ''' - bvec = self.bvec - sample_normal = np.dot(rMat_s, [0.0, 0.0, np.sign(bvec[2])]) - seca = 1.0 / np.dot(bvec, sample_normal) - - tth, eta = self.pixel_angles() - angs = np.vstack( - (tth.flatten(), eta.flatten(), np.zeros(tth.flatten().shape)) - ).T - - dvecs = angles_to_dvec(angs, beam_vec=bvec) - - cosb = np.dot(dvecs, sample_normal) - '''angles for which secb <= 0 or close are diffracted beams - almost parallel to the sample surface or backscattered, we - can mask out these values by setting secb to nan - ''' - mask = np.logical_or( - cosb < 0, - np.isclose( - cosb, - 0.0, - atol=5e-2, - ), - ) - cosb[mask] = np.nan - secb = 1.0 / cosb.reshape(self.shape) - - T_sample = self.calc_compton_transmission( - seca, secb, energy, physics_package, 'sample' - ) - T_window = self.calc_compton_transmission_window( - secb, energy, physics_package - ) - - return T_sample * T_window - - def calc_compton_window_transmission( - self, - energy: np.floating, - rMat_s: np.ndarray, - physics_package: AbstractPhysicsPackage, - ) -> np.ndarray: - '''calculate the attenuation of inelastically - scattered photons just fropm the window. - since these photons lose energy, the attenuation length - is angle dependent ergo a separate routine than - elastically scattered absorption. 
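The sample term evaluated by calc_transmission_sample below is the depth-averaged Beer-Lambert attenuation over the slab; a scalar sketch of the same expression (values illustrative only):

    import numpy as np

    def slab_transmission(mu, t, sec_a, sec_b):
        # mean transmission for a slab of thickness t and attenuation
        # coefficient mu; sec_a, sec_b are the incoming/outgoing obliquities
        x = mu * t
        return (np.exp(-x * sec_a) - np.exp(-x * sec_b)) / (x * (sec_b - sec_a))

    # e.g. a 50 micron sample with a 100 micron absorption length
    print(slab_transmission(1 / 100.0, 50.0, 1.0, 1.5))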
- ''' - bvec = self.bvec - sample_normal = np.dot(rMat_s, [0.0, 0.0, np.sign(bvec[2])]) - seca = 1.0 / np.dot(bvec, sample_normal) - - tth, eta = self.pixel_angles() - angs = np.vstack( - (tth.flatten(), eta.flatten(), np.zeros(tth.flatten().shape)) - ).T - - dvecs = angles_to_dvec(angs, beam_vec=bvec) - - cosb = np.dot(dvecs, sample_normal) - '''angles for which secb <= 0 or close are diffracted beams - almost parallel to the sample surface or backscattered, we - can mask out these values by setting secb to nan - ''' - mask = np.logical_or( - cosb < 0, - np.isclose( - cosb, - 0.0, - atol=5e-2, - ), - ) - cosb[mask] = np.nan - secb = 1.0 / cosb.reshape(self.shape) - - T_window = self.calc_compton_transmission( - seca, secb, energy, physics_package, 'window' - ) - T_sample = self.calc_compton_transmission_sample( - seca, energy, physics_package - ) - - return T_sample * T_window - - def calc_transmission_sample( - self, - seca: np.array, - secb: np.array, - energy: np.floating, - physics_package: AbstractPhysicsPackage, - ) -> np.array: - thickness_s = physics_package.sample_thickness # in microns - if np.isclose(thickness_s, 0): - return np.ones(self.shape) - - # in microns^-1 - mu_s = 1.0 / physics_package.sample_absorption_length(energy) - x = mu_s * thickness_s - pre = 1.0 / x / (secb - seca) - num = np.exp(-x * seca) - np.exp(-x * secb) - return pre * num - - def calc_transmission_window( - self, - secb: np.array, - energy: np.floating, - physics_package: AbstractPhysicsPackage, - ) -> np.array: - material_w = physics_package.window_material - thickness_w = physics_package.window_thickness # in microns - if material_w is None or np.isclose(thickness_w, 0): - return np.ones(self.shape) - - # in microns^-1 - mu_w = 1.0 / physics_package.window_absorption_length(energy) - return np.exp(-thickness_w * mu_w * secb) - - def calc_compton_transmission( - self, - seca: np.ndarray, - secb: np.ndarray, - energy: np.floating, - physics_package: AbstractPhysicsPackage, - pp_layer: str, - ) -> np.ndarray: - - if pp_layer == 'sample': - formula = physics_package.sample_material - density = physics_package.sample_density - thickness = physics_package.sample_thickness - mu = 1.0 / physics_package.sample_absorption_length(energy) - mu_prime = 1.0 / self.pixel_compton_attenuation_length( - energy, - density, - formula, - ) - elif pp_layer == 'window': - formula = physics_package.window_material - if formula is None: - return np.ones(self.shape) - - density = physics_package.window_density - thickness = physics_package.window_thickness - mu = 1.0 / physics_package.sample_absorption_length(energy) - mu_prime = 1.0 / self.pixel_compton_attenuation_length( - energy, density, formula - ) - - if thickness <= 0: - return np.ones(self.shape) - - x1 = mu * thickness * seca - x2 = mu_prime * thickness * secb - num = np.exp(-x1) - np.exp(-x2) - return -num / (x1 - x2) - - def calc_compton_transmission_sample( - self, - seca: np.ndarray, - energy: np.floating, - physics_package: AbstractPhysicsPackage, - ) -> np.ndarray: - thickness_s = physics_package.sample_thickness # in microns - - mu_s = 1.0 / physics_package.sample_absorption_length(energy) - return np.exp(-mu_s * thickness_s * seca) - - def calc_compton_transmission_window( - self, - secb: np.ndarray, - energy: np.floating, - physics_package: AbstractPhysicsPackage, - ) -> np.ndarray: - formula = physics_package.window_material - if formula is None: - return np.ones(self.shape) - - density = physics_package.window_density # in g/cc - thickness_w = 
physics_package.window_thickness # in microns - - mu_w_prime = 1.0 / self.pixel_compton_attenuation_length( - energy, density, formula - ) - return np.exp(-mu_w_prime * thickness_w * secb) - - def calc_effective_pinhole_area( - self, physics_package: AbstractPhysicsPackage - ) -> np.array: - """get the effective pinhole area correction""" - if np.isclose(physics_package.pinhole_diameter, 0) or np.isclose( - physics_package.pinhole_thickness, 0 - ): - return np.ones(self.shape) - - hod = ( - physics_package.pinhole_thickness - / physics_package.pinhole_diameter - ) - bvec = self.bvec - - tth, eta = self.pixel_angles() - angs = np.vstack( - (tth.flatten(), eta.flatten(), np.zeros(tth.flatten().shape)) - ).T - dvecs = angles_to_dvec(angs, beam_vec=bvec) - - cth = -dvecs[:, 2].reshape(self.shape) - tanth = np.tan(np.arccos(cth)) - f = hod * tanth - f[np.abs(f) > 1.0] = np.nan - asinf = np.arcsin(f) - return 2 / np.pi * cth * (np.pi / 2 - asinf - f * np.cos(asinf)) - - def calc_transmission_generic( - self, - secb: np.array, - thickness: np.floating, - absorption_length: np.floating, - ) -> np.array: - if np.isclose(thickness, 0): - return np.ones(self.shape) - - mu = 1.0 / absorption_length # in microns^-1 - return np.exp(-thickness * mu * secb) - - def calc_transmission_phosphor( - self, - secb: np.array, - thickness: np.floating, - readout_length: np.floating, - absorption_length: np.floating, - energy: np.floating, - pre_U0: np.floating, - ) -> np.array: - if np.isclose(thickness, 0): - return np.ones(self.shape) - - f1 = absorption_length * thickness - f2 = absorption_length * readout_length - arg = secb + 1 / f2 - return pre_U0 * energy * ((1.0 - np.exp(-f1 * arg)) / arg) - - -# ============================================================================= -# UTILITY METHODS -# ============================================================================= - - -def _fix_indices(idx, lo, hi): - nidx = np.array(idx) - off_lo = nidx < lo - off_hi = nidx > hi - nidx[off_lo] = lo - nidx[off_hi] = hi - return nidx - - -def _row_edge_vec(rows, pixel_size_row): - return pixel_size_row * (0.5 * rows - np.arange(rows + 1)) - - -def _col_edge_vec(cols, pixel_size_col): - return pixel_size_col * (np.arange(cols + 1) - 0.5 * cols) - - -# FIXME find a better place for this, and maybe include loop over pixels -@numba.njit(nogil=True, cache=True) -def _solid_angle_of_triangle(vtx_list): - norms = np.sqrt(np.sum(vtx_list * vtx_list, axis=1)) - norms_prod = norms[0] * norms[1] * norms[2] - scalar_triple_product = np.dot( - vtx_list[0], np.cross(vtx_list[2], vtx_list[1]) - ) - denominator = ( - norms_prod - + norms[0] * np.dot(vtx_list[1], vtx_list[2]) - + norms[1] * np.dot(vtx_list[2], vtx_list[0]) - + norms[2] * np.dot(vtx_list[0], vtx_list[1]) - ) - - return 2.0 * np.arctan2(scalar_triple_product, denominator) diff --git a/hexrd/hedm/instrument/hedm_instrument.py b/hexrd/hedm/instrument/hedm_instrument.py deleted file mode 100644 index 1eaacbe91..000000000 --- a/hexrd/hedm/instrument/hedm_instrument.py +++ /dev/null @@ -1,3012 +0,0 @@ -# -*- coding: utf-8 -*- -# ============================================================================= -# Copyright (c) 2012, Lawrence Livermore National Security, LLC. -# Produced at the Lawrence Livermore National Laboratory. -# Written by Joel Bernier and others. -# LLNL-CODE-529294. -# All rights reserved. -# -# This file is part of HEXRD. For details on dowloading the source, -# see the file COPYING. -# -# Please also see the file LICENSE. 
-# -# This program is free software; you can redistribute it and/or modify it under -# the terms of the GNU Lesser General Public License (as published by the Free -# Software Foundation) version 2.1 dated February 1999. -# -# This program is distributed in the hope that it will be useful, but -# WITHOUT ANY WARRANTY; without even the IMPLIED WARRANTY OF MERCHANTABILITY -# or FITNESS FOR A PARTICULAR PURPOSE. See the terms and conditions of the -# GNU General Public License for more details. -# -# You should have received a copy of the GNU Lesser General Public -# License along with this program (see file LICENSE); if not, write to -# the Free Software Foundation, Inc., 59 Temple Place, Suite 330, -# Boston, MA 02111-1307 USA or visit . -# ============================================================================= -""" -Created on Fri Dec 9 13:05:27 2016 - -@author: bernier2 -""" -from contextlib import contextmanager -import copy -import logging -import os -from concurrent.futures import ProcessPoolExecutor, ThreadPoolExecutor -from functools import partial -from typing import Optional - -from tqdm import tqdm - -import yaml - -import h5py - -import numpy as np - -from io import IOBase - -from scipy import ndimage -from scipy.linalg import logm -from skimage.measure import regionprops - -from hexrd.core import constants -from hexrd.core.imageseries import ImageSeries -from hexrd.core.imageseries.process import ProcessedImageSeries -from hexrd.core.imageseries.omega import OmegaImageSeries -from hexrd.core.fitting.utils import fit_ring -from hexrd.core.gridutil import make_tolerance_grid -from hexrd.core import matrixutil as mutil -from hexrd.core.transforms.xfcapi import ( - angles_to_gvec, - gvec_to_xy, - make_sample_rmat, - make_rmat_of_expmap, - unit_vector, -) -from hexrd.hedm import xrdutil -from hexrd.hedm.material.crystallography import PlaneData -from hexrd.core import constants as ct -from hexrd.core.rotations import angleAxisOfRotMat, RotMatEuler, mapAngle -from hexrd.core import distortion as distortion_pkg -from hexrd.core.utils.concurrent import distribute_tasks -from hexrd.core.utils.hdf5 import unwrap_dict_to_h5, unwrap_h5_to_dict -from hexrd.core.utils.yaml import NumpyToNativeDumper -from hexrd.core.valunits import valWUnit - -# TODO: Resolve extra-workflow-dependency -from hexrd.powder.wppf import LeBail - -from hexrd.core.instrument.cylindrical_detector import CylindricalDetector -from hexrd.core.instrument.detector import ( - beam_energy_DFLT, - max_workers_DFLT, - Detector, -) -from hexrd.core.instrument.planar_detector import PlanarDetector - -from skimage.draw import polygon -from skimage.util import random_noise - -# TODO: Resolve extra-workflow-dependency -from hexrd.powder.wppf import wppfsupport - -try: - from fast_histogram import histogram1d - - fast_histogram = True -except ImportError: - from numpy import histogram as histogram1d - - fast_histogram = False - -logger = logging.getLogger() -logger.setLevel('INFO') - -# ============================================================================= -# PARAMETERS -# ============================================================================= - -instrument_name_DFLT = 'instrument' - -beam_vec_DFLT = ct.beam_vec -source_distance_DFLT = np.inf - -eta_vec_DFLT = ct.eta_vec - -panel_id_DFLT = 'generic' -nrows_DFLT = 2048 -ncols_DFLT = 2048 -pixel_size_DFLT = (0.2, 0.2) - -tilt_params_DFLT = np.zeros(3) -t_vec_d_DFLT = np.r_[0.0, 0.0, -1000.0] - -chi_DFLT = 0.0 -t_vec_s_DFLT = np.zeros(3) - -multi_ims_key = 
ct.shared_ims_key -ims_classes = (ImageSeries, ProcessedImageSeries, OmegaImageSeries) - -buffer_key = 'buffer' -distortion_key = 'distortion' - -# ============================================================================= -# UTILITY METHODS -# ============================================================================= - - -def generate_chunks( - nrows, ncols, base_nrows, base_ncols, row_gap=0, col_gap=0 -): - """ - Generate chunking data for regularly tiled composite detectors. - - Parameters - ---------- - nrows : int - DESCRIPTION. - ncols : int - DESCRIPTION. - base_nrows : int - DESCRIPTION. - base_ncols : int - DESCRIPTION. - row_gap : int, optional - DESCRIPTION. The default is 0. - col_gap : int, optional - DESCRIPTION. The default is 0. - - Returns - ------- - rects : array_like - The (nrows*ncols, ) list of ROI specs (see Notes). - labels : array_like - The (nrows*ncols, ) list of ROI (i, j) matrix indexing labels 'i_j'. - - Notes - ----- - ProcessedImageSeries needs a (2, 2) array for the 'rect' kwarg: - [[row_start, row_stop], - [col_start, col_stop]] - """ - row_starts = np.array([i * (base_nrows + row_gap) for i in range(nrows)]) - col_starts = np.array([i * (base_ncols + col_gap) for i in range(ncols)]) - rr = np.vstack([row_starts, row_starts + base_nrows]) - cc = np.vstack([col_starts, col_starts + base_ncols]) - rects = [] - labels = [] - for i in range(nrows): - for j in range(ncols): - this_rect = np.array([[rr[0, i], rr[1, i]], [cc[0, j], cc[1, j]]]) - rects.append(this_rect) - labels.append('%d_%d' % (i, j)) - return rects, labels - - -def chunk_instrument(instr, rects, labels, use_roi=False): - """ - Generate chunked config fro regularly tiled composite detectors. - - Parameters - ---------- - instr : TYPE - DESCRIPTION. - rects : TYPE - DESCRIPTION. - labels : TYPE - DESCRIPTION. - - Returns - ------- - new_icfg_dict : TYPE - DESCRIPTION. - - """ - icfg_dict = instr.write_config() - new_icfg_dict = dict( - beam=icfg_dict['beam'], - oscillation_stage=icfg_dict['oscillation_stage'], - detectors={}, - ) - for panel_id, panel in instr.detectors.items(): - pcfg_dict = panel.config_dict(instr.chi, instr.tvec)['detector'] - - for pnum, pdata in enumerate(zip(rects, labels)): - rect, label = pdata - panel_name = f'{panel_id}_{label}' - - row_col_dim = np.diff(rect) # (2, 1) - shape = tuple(row_col_dim.flatten()) - center = rect[:, 0].reshape(2, 1) + 0.5 * row_col_dim - - sp_tvec = np.concatenate( - [panel.pixelToCart(center.T).flatten(), np.zeros(1)] - ) - - tvec = np.dot(panel.rmat, sp_tvec) + panel.tvec - - # new config dict - tmp_cfg = copy.deepcopy(pcfg_dict) - - # fix sizes - tmp_cfg['pixels']['rows'] = shape[0] - tmp_cfg['pixels']['columns'] = shape[1] - if use_roi: - tmp_cfg['pixels']['roi'] = (rect[0][0], rect[1][0]) - - # update tvec - tmp_cfg['transform']['translation'] = tvec.tolist() - - new_icfg_dict['detectors'][panel_name] = copy.deepcopy(tmp_cfg) - - if panel.panel_buffer is not None: - if panel.panel_buffer.ndim == 2: # have a mask array! - submask = panel.panel_buffer[ - rect[0, 0] : rect[0, 1], rect[1, 0] : rect[1, 1] - ] - new_icfg_dict['detectors'][panel_name]['buffer'] = submask - return new_icfg_dict - - -def _parse_imgser_dict(imgser_dict, det_key, roi=None): - """ - Associates a dict of imageseries to the target panel(s). - - Parameters - ---------- - imgser_dict : dict - The input dict of imageseries. Either `det_key` is in imgser_dict, or - the shared key is. Entries can be an ImageSeries object or a 2- or 3-d - ndarray of images. 
-    det_key : str
-        The target detector key.
-    roi : tuple or None, optional
-        The roi of the target images. Format is
-        ((row_start, row_stop), (col_start, col_stop))
-        The stops are used in the normal sense of a slice. The default is None.
-
-    Raises
-    ------
-    RuntimeError
-        If neither `det_key` nor the shared key is in the input imgser_dict;
-        Also, if the shared key is specified but the roi is None.
-
-    Returns
-    -------
-    ims : hexrd.core.imageseries
-        The desired imageseries object.
-
-    """
-    # grab imageseries for this detector
-    try:
-        ims = imgser_dict[det_key]
-    except KeyError:
-        matched_det_keys = [det_key in k for k in imgser_dict]
-        if multi_ims_key in imgser_dict:
-            images_in = imgser_dict[multi_ims_key]
-        elif np.any(matched_det_keys):
-            if sum(matched_det_keys) != 1:
-                raise RuntimeError(f"multiple entries found for '{det_key}'")
-            # use boolean array to index the proper key
-            # !!! these should be in the same order
-            img_keys = np.asarray(list(imgser_dict.keys()))
-            matched_det_key = img_keys[matched_det_keys][0]  # !!! only one
-            images_in = imgser_dict[matched_det_key]
-        else:
-            raise RuntimeError(
-                f"neither '{det_key}' nor '{multi_ims_key}' found"
-                + ' in imageseries input'
-            )
-
-        # have images now
-        if roi is None:
-            raise RuntimeError(
-                "roi must be specified to use shared imageseries"
-            )
-
-        if isinstance(images_in, ims_classes):
-            # input is an imageseries of some kind
-            ims = ProcessedImageSeries(
-                images_in,
-                [
-                    ('rectangle', roi),
-                ],
-            )
-            if isinstance(images_in, OmegaImageSeries):
-                # if it was an OmegaImageSeries, must re-cast
-                ims = OmegaImageSeries(ims)
-        elif isinstance(images_in, np.ndarray):
-            # 2- or 3-d array of images
-            ndim = images_in.ndim
-            if ndim == 2:
-                ims = images_in[roi[0][0] : roi[0][1], roi[1][0] : roi[1][1]]
-            elif ndim == 3:
-                nrows = roi[0][1] - roi[0][0]
-                ncols = roi[1][1] - roi[1][0]
-                n_images = len(images_in)
-                ims = np.empty((n_images, nrows, ncols), dtype=images_in.dtype)
-                for i, image in enumerate(images_in):
-                    ims[i, :, :] = image[
-                        roi[0][0] : roi[0][1], roi[1][0] : roi[1][1]
-                    ]
-            else:
-                raise RuntimeError(
-                    f"image input dim must be 2 or 3; you gave {ndim}"
-                )
-    return ims
-
-
-def calc_beam_vec(azim, pola):
-    """
-    Calculate unit beam propagation vector from
-    spherical coordinate spec in DEGREES.
-
-    ...MAY CHANGE; THIS IS ALSO LOCATED IN XRDUTIL! 
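A quick self-consistency check of the convention implemented here (a sketch; both angles in degrees):

    import numpy as np

    # azimuth = 90, polar angle = 90 recovers the default downstream
    # beam direction [0, 0, -1]
    assert np.allclose(calc_beam_vec(90.0, 90.0), [0.0, 0.0, -1.0])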
- """ - tht = np.radians(azim) - phi = np.radians(pola) - bv = np.r_[ - np.sin(phi) * np.cos(tht), np.cos(phi), np.sin(phi) * np.sin(tht) - ] - return -bv - - -def calc_angles_from_beam_vec(bvec): - """ - Return the azimuth and polar angle from a beam - vector - """ - bvec = np.atleast_1d(bvec).flatten() - nvec = unit_vector(-bvec) - azim = float(np.degrees(np.arctan2(nvec[2], nvec[0]))) - pola = float(np.degrees(np.arccos(nvec[1]))) - return azim, pola - - -def migrate_instrument_config(instrument_config): - """utility function to generate old instrument config dictionary""" - cfg_list = [] - for detector_id in instrument_config['detectors']: - cfg_list.append( - dict( - detector=instrument_config['detectors'][detector_id], - oscillation_stage=instrument_config['oscillation_stage'], - ) - ) - return cfg_list - - -def angle_in_range(angle, ranges, ccw=True, units='degrees'): - """ - Return the index of the first wedge the angle is found in - - WARNING: always clockwise; assumes wedges are not overlapping - """ - tau = 360.0 - if units.lower() == 'radians': - tau = 2 * np.pi - w = np.nan - for i, wedge in enumerate(ranges): - amin = wedge[0] - amax = wedge[1] - check = amin + np.mod(angle - amin, tau) - if check < amax: - w = i - break - return w - - -# ???: move to gridutil? -def centers_of_edge_vec(edges): - assert np.asarray(edges).ndim == 1, "edges must be 1-d" - return np.average(np.vstack([edges[:-1], edges[1:]]), axis=0) - - -def max_tth(instr): - """ - Return the maximum Bragg angle (in radians) subtended by the instrument. - - Parameters - ---------- - instr : hexrd.hedm.instrument.HEDMInstrument instance - the instrument class to evalutate. - - Returns - ------- - tth_max : float - The maximum observable Bragg angle by the instrument in radians. - """ - tth_max = 0.0 - for det in instr.detectors.values(): - ptth, peta = det.pixel_angles() - tth_max = max(np.max(ptth), tth_max) - return tth_max - - -def pixel_resolution(instr): - """ - Return the minimum, median, and maximum angular - resolution of the instrument. - - Parameters - ---------- - instr : HEDMInstrument instance - An instrument. - - Returns - ------- - tth_stats : float - min/median/max tth resolution in radians. - eta_stats : TYPE - min/median/max eta resolution in radians. - - """ - max_tth = np.inf - max_eta = np.inf - min_tth = -np.inf - min_eta = -np.inf - ang_ps_full = [] - for panel in instr.detectors.values(): - angps = panel.angularPixelSize( - np.stack(panel.pixel_coords, axis=0) - .reshape(2, np.cumprod(panel.shape)[-1]) - .T - ) - ang_ps_full.append(angps) - max_tth = min(max_tth, np.min(angps[:, 0])) - max_eta = min(max_eta, np.min(angps[:, 1])) - min_tth = max(min_tth, np.max(angps[:, 0])) - min_eta = max(min_eta, np.max(angps[:, 1])) - med_tth, med_eta = np.median(np.vstack(ang_ps_full), axis=0).flatten() - return (min_tth, med_tth, max_tth), (min_eta, med_eta, max_eta) - - -def max_resolution(instr): - """ - Return the maximum angular resolution of the instrument. - - Parameters - ---------- - instr : HEDMInstrument instance - An instrument. - - Returns - ------- - max_tth : float - Maximum tth resolution in radians. - max_eta : TYPE - maximum eta resolution in radians. 
- - """ - max_tth = np.inf - max_eta = np.inf - for panel in instr.detectors.values(): - angps = panel.angularPixelSize( - np.stack(panel.pixel_coords, axis=0) - .reshape(2, np.cumprod(panel.shape)[-1]) - .T - ) - max_tth = min(max_tth, np.min(angps[:, 0])) - max_eta = min(max_eta, np.min(angps[:, 1])) - return max_tth, max_eta - - -def _gaussian_dist(x, cen, fwhm): - sigm = fwhm / (2 * np.sqrt(2 * np.log(2))) - return np.exp(-0.5 * (x - cen) ** 2 / sigm**2) - - -def _sigma_to_fwhm(sigm): - return sigm * ct.sigma_to_fwhm - - -def _fwhm_to_sigma(fwhm): - return fwhm / ct.sigma_to_fwhm - - -# ============================================================================= -# CLASSES -# ============================================================================= - - -class HEDMInstrument(object): - """ - Abstraction of XRD instrument. - - * Distortion needs to be moved to a class with registry; tuple unworkable - * where should reference eta be defined? currently set to default config - """ - - def __init__( - self, - instrument_config=None, - image_series=None, - eta_vector=None, - instrument_name=None, - tilt_calibration_mapping=None, - max_workers=max_workers_DFLT, - physics_package=None, - active_beam_name: Optional[str] = None, - ): - self._id = instrument_name_DFLT - - self._active_beam_name = active_beam_name - self._beam_dict = {} - - if eta_vector is None: - self._eta_vector = eta_vec_DFLT - else: - self._eta_vector = eta_vector - - self.max_workers = max_workers - - self.physics_package = physics_package - - if instrument_config is None: - # Default instrument - if instrument_name is not None: - self._id = instrument_name - self._num_panels = 1 - self._create_default_beam() - - # FIXME: must add cylindrical - self._detectors = dict( - panel_id_DFLT=PlanarDetector( - rows=nrows_DFLT, - cols=ncols_DFLT, - pixel_size=pixel_size_DFLT, - tvec=t_vec_d_DFLT, - tilt=tilt_params_DFLT, - bvec=self.beam_vector, - xrs_dist=self.source_distance, - evec=self._eta_vector, - distortion=None, - roi=None, - group=None, - max_workers=self.max_workers, - ), - ) - - self._tvec = t_vec_s_DFLT - self._chi = chi_DFLT - else: - if isinstance(instrument_config, h5py.File): - tmp = {} - unwrap_h5_to_dict(instrument_config, tmp) - instrument_config = tmp['instrument'] - elif not isinstance(instrument_config, dict): - raise RuntimeError( - "instrument_config must be either an HDF5 file object" - + "or a dictionary. You gave a %s" - % type(instrument_config) - ) - if instrument_name is None: - if 'id' in instrument_config: - self._id = instrument_config['id'] - else: - self._id = instrument_name - - self._num_panels = len(instrument_config['detectors']) - - if instrument_config.get('physics_package', None) is not None: - self.physics_package = instrument_config['physics_package'] - - xrs_config = instrument_config['beam'] - is_single_beam = 'energy' in xrs_config and 'vector' in xrs_config - if is_single_beam: - # Assume single beam. 
Load the same way as multibeam - self._create_default_beam() - xrs_config = {self.active_beam_name: xrs_config} - - # Multi beam load - for beam_name, beam in xrs_config.items(): - self._beam_dict[beam_name] = { - 'energy': beam['energy'], - 'vector': calc_beam_vec( - beam['vector']['azimuth'], - beam['vector']['polar_angle'], - ), - 'distance': beam.get('source_distance', np.inf), - } - - # Set the active beam name if not set already - if self._active_beam_name is None: - self._active_beam_name = next(iter(self._beam_dict)) - - # now build detector dict - detectors_config = instrument_config['detectors'] - det_dict = dict.fromkeys(detectors_config) - for det_id, det_info in detectors_config.items(): - det_group = det_info.get('group') # optional detector group - pixel_info = det_info['pixels'] - affine_info = det_info['transform'] - detector_type = det_info.get('detector_type', 'planar') - filter = det_info.get('filter', None) - coating = det_info.get('coating', None) - phosphor = det_info.get('phosphor', None) - try: - saturation_level = det_info['saturation_level'] - except KeyError: - saturation_level = 2**16 - shape = (pixel_info['rows'], pixel_info['columns']) - - panel_buffer = None - if buffer_key in det_info: - det_buffer = det_info[buffer_key] - if det_buffer is not None: - if isinstance(det_buffer, np.ndarray): - if det_buffer.ndim == 2: - if det_buffer.shape != shape: - msg = ( - f'Buffer shape for {det_id} ' - f'({det_buffer.shape}) does not match ' - f'detector shape ({shape})' - ) - raise BufferShapeMismatchError(msg) - else: - assert len(det_buffer) == 2 - panel_buffer = det_buffer - elif isinstance(det_buffer, list): - panel_buffer = np.asarray(det_buffer) - elif np.isscalar(det_buffer): - panel_buffer = det_buffer * np.ones(2) - else: - raise RuntimeError( - "panel buffer spec invalid for %s" % det_id - ) - - # optional roi - roi = pixel_info.get('roi') - - # handle distortion - distortion = None - if distortion_key in det_info: - distortion_cfg = det_info[distortion_key] - if distortion_cfg is not None: - try: - func_name = distortion_cfg['function_name'] - dparams = distortion_cfg['parameters'] - distortion = distortion_pkg.get_mapping( - func_name, dparams - ) - except KeyError: - raise RuntimeError( - "problem with distortion specification" - ) - if detector_type.lower() not in DETECTOR_TYPES: - msg = f'Unknown detector type: {detector_type}' - raise NotImplementedError(msg) - - DetectorClass = DETECTOR_TYPES[detector_type.lower()] - kwargs = dict( - name=det_id, - rows=pixel_info['rows'], - cols=pixel_info['columns'], - pixel_size=pixel_info['size'], - panel_buffer=panel_buffer, - saturation_level=saturation_level, - tvec=affine_info['translation'], - tilt=affine_info['tilt'], - bvec=self.beam_vector, - xrs_dist=self.source_distance, - evec=self._eta_vector, - distortion=distortion, - roi=roi, - group=det_group, - max_workers=self.max_workers, - detector_filter=filter, - detector_coating=coating, - phosphor=phosphor, - ) - - if DetectorClass is CylindricalDetector: - # Add cylindrical detector kwargs - kwargs['radius'] = det_info.get('radius', 49.51) - - det_dict[det_id] = DetectorClass(**kwargs) - - self._detectors = det_dict - - self._tvec = np.r_[ - instrument_config['oscillation_stage']['translation'] - ] - self._chi = instrument_config['oscillation_stage']['chi'] - - # grab angles from beam vec - # !!! these are in DEGREES! 
-        azim, pola = calc_angles_from_beam_vec(self.beam_vector)
-
-        self.update_memoization_sizes()
-
-    @property
-    def mean_detector_center(self) -> np.ndarray:
-        """Return the mean center for all detectors"""
-        centers = np.array([panel.tvec for panel in self.detectors.values()])
-        return centers.sum(axis=0) / len(centers)
-
-    def mean_group_center(self, group: str) -> np.ndarray:
-        """Return the mean center for detectors belonging to a group"""
-        centers = np.array(
-            [x.tvec for x in self.detectors_in_group(group).values()]
-        )
-        return centers.sum(axis=0) / len(centers)
-
-    @property
-    def detector_groups(self) -> list[str]:
-        groups = []
-        for panel in self.detectors.values():
-            group = panel.group
-            if group is not None and group not in groups:
-                groups.append(group)
-
-        return groups
-
-    def detectors_in_group(self, group: str) -> dict[str, Detector]:
-        return {k: v for k, v in self.detectors.items() if v.group == group}
-
-    # properties for physical size of rectangular detector
-    @property
-    def id(self):
-        return self._id
-
-    @property
-    def num_panels(self):
-        return self._num_panels
-
-    @property
-    def detectors(self):
-        return self._detectors
-
-    @property
-    def detector_parameters(self):
-        pdict = {}
-        for key, panel in self.detectors.items():
-            pdict[key] = panel.config_dict(
-                self.chi,
-                self.tvec,
-                beam_energy=self.beam_energy,
-                beam_vector=self.beam_vector,
-                style='hdf5',
-            )
-        return pdict
-
-    @property
-    def tvec(self):
-        return self._tvec
-
-    @tvec.setter
-    def tvec(self, x):
-        x = np.array(x).flatten()
-        assert len(x) == 3, 'input must have length = 3'
-        self._tvec = x
-
-    @property
-    def chi(self):
-        return self._chi
-
-    @chi.setter
-    def chi(self, x):
-        self._chi = float(x)
-
-    @property
-    def beam_energy(self) -> float:
-        return self.active_beam['energy']
-
-    @beam_energy.setter
-    def beam_energy(self, x: float):
-        self.active_beam['energy'] = float(x)
-        self.beam_dict_modified()
-
-    @property
-    def beam_wavelength(self):
-        return ct.keVToAngstrom(self.beam_energy)
-
-    @property
-    def has_multi_beam(self) -> bool:
-        return len(self.beam_dict) > 1
-
-    @property
-    def beam_dict(self) -> dict:
-        return self._beam_dict
-
-    def _create_default_beam(self):
-        name = 'XRS1'
-        self._beam_dict[name] = {
-            'energy': beam_energy_DFLT,
-            'vector': beam_vec_DFLT.copy(),
-            'distance': np.inf,
-        }
-
-        if self._active_beam_name is None:
-            self._active_beam_name = name
-
-    @property
-    def beam_names(self) -> list[str]:
-        return list(self.beam_dict)
-
-    def xrs_beam_energy(self, beam_name: Optional[str]) -> float:
-        if beam_name is None:
-            beam_name = self.active_beam_name
-
-        return self.beam_dict[beam_name]['energy']
-
-    @property
-    def active_beam_name(self) -> str:
-        return self._active_beam_name
-
-    @active_beam_name.setter
-    def active_beam_name(self, name: str):
-        if name not in self.beam_dict:
-            raise RuntimeError(
-                f'"{name}" is not present in "{self.beam_names}"'
-            )
-
-        self._active_beam_name = name
-
-        # Update anything beam related where we need to
-        self._update_panel_beams()
-
-    def beam_dict_modified(self):
-        # A function to call to indicate that the beam dict was modified.
-        # Update anything beam related where we need to
-        self._update_panel_beams()
-
-    @property
-    def active_beam(self) -> dict:
-        return self.beam_dict[self.active_beam_name]
-
-    def _update_panel_beams(self):
-        # FIXME: maybe we shouldn't store these on the panels?
-        # Might be hard to fix, though... 
- for panel in self.detectors.values(): - panel.bvec = self.beam_vector - panel.xrs_dist = self.source_distance - - @property - def beam_vector(self) -> np.ndarray: - return self.active_beam['vector'] - - @beam_vector.setter - def beam_vector(self, x: np.ndarray): - x = np.array(x).flatten() - if len(x) == 3: - assert ( - sum(x * x) > 1 - ct.sqrt_epsf - ), 'input must have length = 3 and have unit magnitude' - bvec = x - elif len(x) == 2: - bvec = calc_beam_vec(*x) - else: - raise RuntimeError("input must be a unit vector or angle pair") - - # Modify the beam vector for the active beam dict - self.active_beam['vector'] = bvec - self.beam_dict_modified() - - @property - def source_distance(self): - return self.active_beam['distance'] - - @source_distance.setter - def source_distance(self, x): - assert np.isscalar( - x - ), f"'source_distance' must be a scalar; you input '{x}'" - self.active_beam['distance'] = x - self.beam_dict_modified() - - @property - def eta_vector(self): - return self._eta_vector - - @eta_vector.setter - def eta_vector(self, x): - x = np.array(x).flatten() - assert ( - len(x) == 3 and sum(x * x) > 1 - ct.sqrt_epsf - ), 'input must have length = 3 and have unit magnitude' - self._eta_vector = x - # ...maybe change dictionary item behavior for 3.x compatibility? - for detector_id in self.detectors: - panel = self.detectors[detector_id] - panel.evec = self._eta_vector - - # ========================================================================= - # METHODS - # ========================================================================= - - def write_config(self, file=None, style='yaml', calibration_dict={}): - """WRITE OUT YAML FILE""" - # initialize output dictionary - assert style.lower() in ['yaml', 'hdf5'], ( - "style must be either 'yaml', or 'hdf5'; you gave '%s'" % style - ) - - par_dict = {} - - par_dict['id'] = self.id - - # Multi beam writer - beam_dict = {} - for beam_name, beam in self.beam_dict.items(): - azim, polar = calc_angles_from_beam_vec(beam['vector']) - beam_dict[beam_name] = { - 'energy': beam['energy'], - 'vector': { - 'azimuth': azim, - 'polar_angle': polar, - }, - } - if beam['distance'] != np.inf: - beam_dict[beam_name]['source_distance'] = beam['distance'] - - if len(beam_dict) == 1: - # Just write it out a single beam (classical way) - beam_dict = next(iter(beam_dict.values())) - - par_dict['beam'] = beam_dict - - if calibration_dict: - par_dict['calibration_crystal'] = calibration_dict - - ostage = dict(chi=self.chi, translation=self.tvec.tolist()) - par_dict['oscillation_stage'] = ostage - - det_dict = dict.fromkeys(self.detectors) - for det_name, detector in self.detectors.items(): - # grab panel config - # !!! don't need beam or tvec - # !!! 
have vetted style - pdict = detector.config_dict( - chi=self.chi, - tvec=self.tvec, - beam_energy=self.beam_energy, - beam_vector=self.beam_vector, - style=style, - ) - det_dict[det_name] = pdict['detector'] - par_dict['detectors'] = det_dict - - # handle output file if requested - if file is not None: - if style.lower() == 'yaml': - with open(file, 'w') as f: - yaml.dump(par_dict, stream=f, Dumper=NumpyToNativeDumper) - else: - - def _write_group(file): - instr_grp = file.create_group('instrument') - unwrap_dict_to_h5(instr_grp, par_dict, asattr=False) - - # hdf5 - if isinstance(file, str): - with h5py.File(file, 'w') as f: - _write_group(f) - elif isinstance(file, h5py.File): - _write_group(file) - else: - raise TypeError("Unexpected file type.") - - return par_dict - - def extract_polar_maps( - self, - plane_data, - imgser_dict, - active_hkls=None, - threshold=None, - tth_tol=None, - eta_tol=0.25, - ): - """ - Extract eta-omega maps from an imageseries. - - Quick and dirty way to histogram angular patch data for make - pole figures suitable for fiber generation - - TODO: streamline projection code - TODO: normalization - !!!: images must be non-negative! - !!!: plane_data is NOT a copy! - """ - if tth_tol is not None: - plane_data.tThWidth = np.radians(tth_tol) - else: - tth_tol = np.degrees(plane_data.tThWidth) - - # make rings clipped to panel - # !!! eta_idx has the same length as plane_data.exclusions - # each entry are the integer indices into the bins - # !!! eta_edges is the list of eta bin EDGES; same for all - # detectors, so calculate it once - # !!! grab first panel - panel = next(iter(self.detectors.values())) - pow_angs, pow_xys, tth_ranges, eta_idx, eta_edges = ( - panel.make_powder_rings( - plane_data, - merge_hkls=False, - delta_eta=eta_tol, - full_output=True, - ) - ) - - if active_hkls is not None: - assert hasattr( - active_hkls, '__len__' - ), "active_hkls must be an iterable with __len__" - - # need to re-cast for element-wise operations - active_hkls = np.array(active_hkls) - - # these are all active reflection unique hklIDs - active_hklIDs = plane_data.getHKLID(plane_data.hkls, master=True) - - # find indices - idx = np.zeros_like(active_hkls, dtype=int) - for i, input_hklID in enumerate(active_hkls): - try: - idx[i] = np.where(active_hklIDs == input_hklID)[0] - except ValueError: - raise RuntimeError(f"hklID '{input_hklID}' is invalid") - tth_ranges = tth_ranges[idx] - - delta_eta = eta_edges[1] - eta_edges[0] - ncols_eta = len(eta_edges) - 1 - - ring_maps_panel = dict.fromkeys(self.detectors) - for i_d, det_key in enumerate(self.detectors): - print("working on detector '%s'..." 
% det_key)
-
-            # grab panel
-            panel = self.detectors[det_key]
-            # native_area = panel.pixel_area  # pixel ref area
-
-            # pixel angular coords for the detector panel
-            ptth, peta = panel.pixel_angles()
-
-            # grab imageseries for this detector
-            ims = _parse_imgser_dict(imgser_dict, det_key, roi=panel.roi)
-
-            # grab omegas from imageseries and squawk if missing
-            try:
-                omegas = ims.metadata['omega']
-            except KeyError:
-                raise RuntimeError(
-                    f"imageseries for '{det_key}' has no omega info"
-                )
-
-            # initialize maps and assign by row (omega/frame)
-            nrows_ome = len(omegas)
-
-            # init map with NaNs
-            shape = (len(tth_ranges), nrows_ome, ncols_eta)
-            ring_maps = np.full(shape, np.nan)
-
-            # Generate ring parameters once, and re-use them for each image
-            ring_params = []
-            for tthr in tth_ranges:
-                kwargs = {
-                    'tthr': tthr,
-                    'ptth': ptth,
-                    'peta': peta,
-                    'eta_edges': eta_edges,
-                    'delta_eta': delta_eta,
-                }
-                ring_params.append(_generate_ring_params(**kwargs))
-
-            # Divide up the images among processes
-            tasks = distribute_tasks(len(ims), self.max_workers)
-            func = partial(
-                _run_histograms,
-                ims=ims,
-                tth_ranges=tth_ranges,
-                ring_maps=ring_maps,
-                ring_params=ring_params,
-                threshold=threshold,
-            )
-
-            max_workers = self.max_workers
-            if max_workers == 1 or len(tasks) == 1:
-                # Just execute it serially.
-                for task in tasks:
-                    func(task)
-            else:
-                with ThreadPoolExecutor(max_workers=max_workers) as executor:
-                    # Evaluate the results via `list()`, so that if an
-                    # exception is raised in a thread, it will be re-raised
-                    # and visible to the user.
-                    list(executor.map(func, tasks))
-
-            ring_maps_panel[det_key] = ring_maps
-
-        return ring_maps_panel, eta_edges
-
-    def extract_line_positions(
-        self,
-        plane_data,
-        imgser_dict,
-        tth_tol=None,
-        eta_tol=1.0,
-        npdiv=2,
-        eta_centers=None,
-        collapse_eta=True,
-        collapse_tth=False,
-        do_interpolation=True,
-        do_fitting=False,
-        tth_distortion=None,
-        fitting_kwargs=None,
-    ):
-        """
-        Perform annular interpolation on diffraction images.
-
-        Provides data for extracting the line positions from powder diffraction
-        images, pole figure patches from imageseries, or Bragg peaks from
-        Laue diffraction images.
-
-        Parameters
-        ----------
-        plane_data : hexrd.crystallography.PlaneData object or array_like
-            Object determining the 2theta positions for the integration
-            sectors. If PlaneData, this will be all non-excluded reflections,
-            subject to merging within PlaneData.tThWidth. If array_like,
-            interpreted as a list of 2theta angles IN DEGREES.
-        imgser_dict : dict
-            Dictionary of powder diffraction images, one for each detector.
-        tth_tol : scalar, optional
-            The radial (i.e. 2theta) width of the integration sectors
-            IN DEGREES. This arg is required if plane_data is array_like.
-            The default is None.
-        eta_tol : scalar, optional
-            The azimuthal (i.e. eta) width of the integration sectors
-            IN DEGREES. The default is 1.
-        npdiv : int, optional
-            The number of oversampling pixel subdivisions (see notes).
-            The default is 2.
-        eta_centers : array_like, optional
-            The desired azimuthal sector centers. The default is None. If
-            None, then bins are distributed sequentially from (-180, 180).
-        collapse_eta : bool, optional
-            Flag for summing sectors in eta. The default is True.
-        collapse_tth : bool, optional
-            Flag for summing sectors in 2theta. The default is False.
-        do_interpolation : bool, optional
-            If True, perform bilinear interpolation. The default is True. 
-        do_fitting : bool, optional
-            If True, then perform spectrum fitting, and append the results
-            to the returned data. collapse_eta must also be True for this
-            to have any effect. The default is False.
-        tth_distortion : special class, optional
-            for special case of pinhole camera distortions. See
-            hexrd.hedm.xrdutil.phutil.SampleLayerDistortion (only type supported)
-        fitting_kwargs : dict, optional
-            kwargs passed to hexrd.core.fitting.utils.fit_ring if do_fitting is True
-
-        Raises
-        ------
-        RuntimeError
-            DESCRIPTION.
-
-        Returns
-        -------
-        panel_data : dict
-            Dictionary over the detectors with the following structure:
-                [list over (merged) 2theta ranges]
-                    [list over valid eta sectors]
-                        [angle data <input dependent>,
-                         bin intensities <input dependent>,
-                         fitting results <input dependent>]
-
-        Notes
-        -----
-        TODO: May change the array_like input units to degrees.
-        TODO: rename function.
-
-        """
-
-        if fitting_kwargs is None:
-            fitting_kwargs = {}
-
-        # =====================================================================
-        # LOOP OVER DETECTORS
-        # =====================================================================
-        logger.info("Interpolating ring data")
-        pbar_dets = partial(
-            tqdm,
-            total=self.num_panels,
-            desc="Detector",
-            position=self.num_panels,
-        )
-
-        # Split up the workers among the detectors
-        max_workers_per_detector = max(1, self.max_workers // self.num_panels)
-
-        kwargs = {
-            'plane_data': plane_data,
-            'tth_tol': tth_tol,
-            'eta_tol': eta_tol,
-            'eta_centers': eta_centers,
-            'npdiv': npdiv,
-            'collapse_tth': collapse_tth,
-            'collapse_eta': collapse_eta,
-            'do_interpolation': do_interpolation,
-            'do_fitting': do_fitting,
-            'fitting_kwargs': fitting_kwargs,
-            'tth_distortion': tth_distortion,
-            'max_workers': max_workers_per_detector,
-        }
-        func = partial(_extract_detector_line_positions, **kwargs)
-
-        def make_instr_cfg(panel):
-            return panel.config_dict(
-                chi=self.chi,
-                tvec=self.tvec,
-                beam_energy=self.beam_energy,
-                beam_vector=self.beam_vector,
-                style='hdf5',
-            )
-
-        images = []
-        for detector_id, panel in self.detectors.items():
-            images.append(
-                _parse_imgser_dict(imgser_dict, detector_id, roi=panel.roi)
-            )
-
-        panels = [self.detectors[k] for k in self.detectors]
-        instr_cfgs = [make_instr_cfg(x) for x in panels]
-        pbp_array = np.arange(self.num_panels)
-        iter_args = zip(panels, instr_cfgs, images, pbp_array)
-        with ProcessPoolExecutor(
-            mp_context=constants.mp_context, max_workers=self.num_panels
-        ) as executor:
-            results = list(pbar_dets(executor.map(func, iter_args)))
-
-        panel_data = {}
-        for det, res in zip(self.detectors, results):
-            panel_data[det] = res
-
-        return panel_data
-
-    def simulate_powder_pattern(
-        self, mat_list, params=None, bkgmethod=None, origin=None, noise=None
-    ):
-        """
-        Generate powder diffraction images from specified materials.
-
-        Parameters
-        ----------
-        mat_list : array_like (n, )
-            List of Material classes.
-        params : dict, optional
-            Dictionary of LeBail parameters (see Notes). The default is None.
-        bkgmethod : dict, optional
-            Background function specification. The default is None.
-        origin : array_like (3,), optional
-            Vector describing the origin of the diffraction volume.
-            The default is None, which is equivalent to [0, 0, 0].
-        noise : str, optional
-            Flag describing type of noise to be applied. The default is None.
-
-        Returns
-        -------
-        img_dict : dict
-            Dictionary of diffraction images over the detectors.
-
-        Notes
-        -----
-        TODO: add more controls for noise function.
-        TODO: modify hooks to LeBail parameters. 
-        TODO: add optional volume fraction weights for phases in mat_list
-        """
-        """
-        >> @AUTHOR: Saransh Singh, Lawrence Livermore National Lab,
-           saransh1@llnl.gov
-        >> @DATE: 01/22/2021 SS 1.0 original
-        >> @DETAILS: adding hook to WPPF class. this changes the input list
-           significantly
-        """
-        if origin is None:
-            origin = self.tvec
-        origin = np.asarray(origin).squeeze()
-        assert len(origin) == 3, "origin must be a 3-element sequence"
-
-        if bkgmethod is None:
-            bkgmethod = {'chebyshev': 3}
-
-        '''
-        if params is None, fill in some sane default values
-        only the first value is used. the rest of the values are
-        the upper, lower bounds and vary flag for refinement which
-        are not used but required for interfacing with WPPF
-
-        zero_error : zero shift error
-        U, V, W : Caglioti parameters
-        P, X, Y : Lorentzian parameters
-        eta1, eta2, eta3 : Mixing parameters
-        '''
-        if params is None:
-            # params = {'zero_error': [0.0, -1., 1., True],
-            #           'U': [2e-1, -1., 1., True],
-            #           'V': [2e-2, -1., 1., True],
-            #           'W': [2e-2, -1., 1., True],
-            #           'X': [2e-1, -1., 1., True],
-            #           'Y': [2e-1, -1., 1., True]
-            #           }
-            params = wppfsupport._generate_default_parameters_LeBail(
-                mat_list,
-                1,
-                bkgmethod,
-            )
-        '''
-        use the material list to obtain the dictionary of initial intensities
-        we need to make sure that the intensities are properly scaled by the
-        Lorentz polarization factor. since the calculation is done in the
-        LeBail class, all that means is the initial intensity needs that
-        factor in there
-        '''
-        img_dict = dict.fromkeys(self.detectors)
-
-        # find min and max tth over all panels
-        tth_mi = np.inf
-        tth_ma = 0.0
-        ptth_dict = dict.fromkeys(self.detectors)
-        for det_key, panel in self.detectors.items():
-            ptth, peta = panel.pixel_angles(origin=origin)
-            tth_mi = min(tth_mi, ptth.min())
-            tth_ma = max(tth_ma, ptth.max())
-            ptth_dict[det_key] = ptth
-
-        '''
-        now make a list of two theta and dummy ones for the experimental
-        spectrum. this is never really used so any values should be okay. We
-        could also pass the integrated detector image if we would like to
-        simulate some realistic background. But that's for another day.
-        '''
-        # convert angles to degrees because that's what the WPPF expects
-        tth_mi = np.degrees(tth_mi)
-        tth_ma = np.degrees(tth_ma)
-
-        # get tth angular resolution for instrument
-        ang_res = max_resolution(self)
-
-        # !!!
calc nsteps by oversampling - nsteps = int(np.ceil(2 * (tth_ma - tth_mi) / np.degrees(ang_res[0]))) - - # evaulation vector for LeBail - tth = np.linspace(tth_mi, tth_ma, nsteps) - - expt = np.vstack([tth, np.ones_like(tth)]).T - - wavelength = [ - valWUnit('lp', 'length', self.beam_wavelength, 'angstrom'), - 1.0, - ] - - ''' - now go through the material list and get the intensity dictionary - ''' - intensity = {} - for mat in mat_list: - - multiplicity = mat.planeData.getMultiplicity() - - tth = mat.planeData.getTTh() - - LP = ( - (1 + np.cos(tth) ** 2) - / np.cos(0.5 * tth) - / np.sin(0.5 * tth) ** 2 - ) - - intensity[mat.name] = {} - intensity[mat.name]['synchrotron'] = ( - mat.planeData.structFact * LP * multiplicity - ) - - kwargs = { - 'expt_spectrum': expt, - 'params': params, - 'phases': mat_list, - 'wavelength': {'synchrotron': wavelength}, - 'bkgmethod': bkgmethod, - 'intensity_init': intensity, - 'peakshape': 'pvtch', - } - - self.WPPFclass = LeBail(**kwargs) - - self.simulated_spectrum = self.WPPFclass.spectrum_sim - self.background = self.WPPFclass.background - - ''' - now that we have the simulated intensities, its time to get the - two theta for the detector pixels and interpolate what the intensity - for each pixel should be - ''' - - img_dict = dict.fromkeys(self.detectors) - for det_key, panel in self.detectors.items(): - ptth = ptth_dict[det_key] - - img = np.interp( - np.degrees(ptth), - self.simulated_spectrum.x, - self.simulated_spectrum.y + self.background.y, - ) - - if noise is None: - img_dict[det_key] = img - - else: - # Rescale to be between 0 and 1 so random_noise() will work - prev_max = img.max() - img /= prev_max - - if noise.lower() == 'poisson': - im_noise = random_noise(img, mode='poisson', clip=True) - mi = im_noise.min() - ma = im_noise.max() - if ma > mi: - im_noise = (im_noise - mi) / (ma - mi) - - elif noise.lower() == 'gaussian': - im_noise = random_noise(img, mode='gaussian', clip=True) - - elif noise.lower() == 'salt': - im_noise = random_noise(img, mode='salt') - - elif noise.lower() == 'pepper': - im_noise = random_noise(img, mode='pepper') - - elif noise.lower() == 's&p': - im_noise = random_noise(img, mode='s&p') - - elif noise.lower() == 'speckle': - im_noise = random_noise(img, mode='speckle', clip=True) - - # Now scale back up - img_dict[det_key] = im_noise * prev_max - - return img_dict - - def simulate_laue_pattern( - self, - crystal_data, - minEnergy=5.0, - maxEnergy=35.0, - rmat_s=None, - grain_params=None, - ): - """ - Simulate Laue diffraction over the instrument. - - Parameters - ---------- - crystal_data : TYPE - DESCRIPTION. - minEnergy : TYPE, optional - DESCRIPTION. The default is 5.. - maxEnergy : TYPE, optional - DESCRIPTION. The default is 35.. - rmat_s : TYPE, optional - DESCRIPTION. The default is None. - grain_params : TYPE, optional - DESCRIPTION. The default is None. - - Returns - ------- - results : TYPE - DESCRIPTION. - - xy_det, hkls_in, angles, dspacing, energy - - TODO: revisit output; dict, or concatenated list? 
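
        Example (editorial sketch; ``instr`` is assumed to be a configured
        HEDMInstrument, and ``xtal_data`` any input accepted by the
        panel-level simulate_laue_pattern):

            results = instr.simulate_laue_pattern(
                xtal_data, minEnergy=5.0, maxEnergy=35.0
            )
            for det_key, res in results.items():
                # per the output note above; exact packing follows the
                # panel-level method
                xy_det, hkls_in, angles, dspacing, energy = res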
- """ - results = dict.fromkeys(self.detectors) - for det_key, panel in self.detectors.items(): - results[det_key] = panel.simulate_laue_pattern( - crystal_data, - minEnergy=minEnergy, - maxEnergy=maxEnergy, - rmat_s=rmat_s, - tvec_s=self.tvec, - grain_params=grain_params, - beam_vec=self.beam_vector, - ) - return results - - def simulate_rotation_series( - self, - plane_data, - grain_param_list, - eta_ranges=[ - (-np.pi, np.pi), - ], - ome_ranges=[ - (-np.pi, np.pi), - ], - ome_period=(-np.pi, np.pi), - wavelength=None, - ): - """ - Simulate a monochromatic rotation series over the instrument. - - Parameters - ---------- - plane_data : TYPE - DESCRIPTION. - grain_param_list : TYPE - DESCRIPTION. - eta_ranges : TYPE, optional - DESCRIPTION. The default is [(-np.pi, np.pi), ]. - ome_ranges : TYPE, optional - DESCRIPTION. The default is [(-np.pi, np.pi), ]. - ome_period : TYPE, optional - DESCRIPTION. The default is (-np.pi, np.pi). - wavelength : TYPE, optional - DESCRIPTION. The default is None. - - Returns - ------- - results : TYPE - DESCRIPTION. - - TODO: revisit output; dict, or concatenated list? - """ - results = dict.fromkeys(self.detectors) - for det_key, panel in self.detectors.items(): - results[det_key] = panel.simulate_rotation_series( - plane_data, - grain_param_list, - eta_ranges=eta_ranges, - ome_ranges=ome_ranges, - ome_period=ome_period, - chi=self.chi, - tVec_s=self.tvec, - wavelength=wavelength, - ) - return results - - def pull_spots( - self, - plane_data, - grain_params, - imgser_dict, - tth_tol=0.25, - eta_tol=1.0, - ome_tol=1.0, - npdiv=2, - threshold=10, - eta_ranges=[ - (-np.pi, np.pi), - ], - ome_period=None, - dirname='results', - filename=None, - output_format='text', - return_spot_list=False, - quiet=True, - check_only=False, - interp='nearest', - ): - """ - Exctract reflection info from a rotation series. - - Input must be encoded as an OmegaImageseries object. - - Parameters - ---------- - plane_data : TYPE - DESCRIPTION. - grain_params : TYPE - DESCRIPTION. - imgser_dict : TYPE - DESCRIPTION. - tth_tol : TYPE, optional - DESCRIPTION. The default is 0.25. - eta_tol : TYPE, optional - DESCRIPTION. The default is 1.. - ome_tol : TYPE, optional - DESCRIPTION. The default is 1.. - npdiv : TYPE, optional - DESCRIPTION. The default is 2. - threshold : TYPE, optional - DESCRIPTION. The default is 10. - eta_ranges : TYPE, optional - DESCRIPTION. The default is [(-np.pi, np.pi), ]. - ome_period : TYPE, optional - DESCRIPTION. The default is (-np.pi, np.pi). - dirname : TYPE, optional - DESCRIPTION. The default is 'results'. - filename : TYPE, optional - DESCRIPTION. The default is None. - output_format : TYPE, optional - DESCRIPTION. The default is 'text'. - return_spot_list : TYPE, optional - DESCRIPTION. The default is False. - quiet : TYPE, optional - DESCRIPTION. The default is True. - check_only : TYPE, optional - DESCRIPTION. The default is False. - interp : TYPE, optional - DESCRIPTION. The default is 'nearest'. - - Returns - ------- - compl : TYPE - DESCRIPTION. - output : TYPE - DESCRIPTION. - - """ - # grain parameters - rMat_c = make_rmat_of_expmap(grain_params[:3]) - tVec_c = grain_params[3:6] - - # grab omega ranges from first imageseries - # - # WARNING: all imageseries AND all wedges within are assumed to have - # the same omega values; put in a check that they are all the same??? 
- oims0 = next(iter(imgser_dict.values())) - ome_ranges = [ - np.radians([i['ostart'], i['ostop']]) - for i in oims0.omegawedges.wedges - ] - if ome_period is None: - ims = next(iter(imgser_dict.values())) - ostart = ims.omega[0, 0] - ome_period = np.radians(ostart + np.r_[0.0, 360.0]) - - # delta omega in DEGREES grabbed from first imageseries in the dict - delta_ome = oims0.omega[0, 1] - oims0.omega[0, 0] - - # make omega grid for frame expansion around reference frame - # in DEGREES - ndiv_ome, ome_del = make_tolerance_grid( - delta_ome, - ome_tol, - 1, - adjust_window=True, - ) - - # generate structuring element for connected component labeling - if ndiv_ome == 1: - label_struct = ndimage.generate_binary_structure(2, 2) - else: - label_struct = ndimage.generate_binary_structure(3, 3) - - # simulate rotation series - sim_results = self.simulate_rotation_series( - plane_data, - [ - grain_params, - ], - eta_ranges=eta_ranges, - ome_ranges=ome_ranges, - ome_period=ome_period, - ) - - # patch vertex generator (global for instrument) - tol_vec = 0.5 * np.radians( - [ - -tth_tol, - -eta_tol, - -tth_tol, - eta_tol, - tth_tol, - eta_tol, - tth_tol, - -eta_tol, - ] - ) - - # prepare output if requested - if filename is not None and output_format.lower() == 'hdf5': - this_filename = os.path.join(dirname, filename) - writer = GrainDataWriter_h5( - os.path.join(dirname, filename), - self.write_config(), - grain_params, - ) - - # ===================================================================== - # LOOP OVER PANELS - # ===================================================================== - iRefl = 0 - next_invalid_peak_id = -100 - compl = [] - output = dict.fromkeys(self.detectors) - for detector_id, panel in self.detectors.items(): - # initialize text-based output writer - if filename is not None and output_format.lower() == 'text': - output_dir = os.path.join(dirname, detector_id) - os.makedirs(output_dir, exist_ok=True) - this_filename = os.path.join(output_dir, filename) - writer = PatchDataWriter(this_filename) - - # grab panel - instr_cfg = panel.config_dict( - self.chi, - self.tvec, - beam_energy=self.beam_energy, - beam_vector=self.beam_vector, - style='hdf5', - ) - native_area = panel.pixel_area # pixel ref area - - # pull out the OmegaImageSeries for this panel from input dict - ome_imgser = _parse_imgser_dict( - imgser_dict, detector_id, roi=panel.roi - ) - - # extract simulation results - sim_results_p = sim_results[detector_id] - hkl_ids = sim_results_p[0][0] - hkls_p = sim_results_p[1][0] - ang_centers = sim_results_p[2][0] - xy_centers = sim_results_p[3][0] - ang_pixel_size = sim_results_p[4][0] - - # now verify that full patch falls on detector... - # ???: strictly necessary? - # - # patch vertex array from sim - nangs = len(ang_centers) - patch_vertices = ( - np.tile(ang_centers[:, :2], (1, 4)) - + np.tile(tol_vec, (nangs, 1)) - ).reshape(4 * nangs, 2) - ome_dupl = np.tile(ang_centers[:, 2], (4, 1)).T.reshape( - len(patch_vertices), 1 - ) - - # find vertices that all fall on the panel - det_xy, rmats_s, on_plane = xrdutil._project_on_detector_plane( - np.hstack([patch_vertices, ome_dupl]), - panel.rmat, - rMat_c, - self.chi, - panel.tvec, - tVec_c, - self.tvec, - panel.distortion, - ) - _, on_panel = panel.clip_to_panel(det_xy, buffer_edges=True) - - # all vertices must be on... - patch_is_on = np.all(on_panel.reshape(nangs, 4), axis=1) - patch_xys = det_xy.reshape(nangs, 4, 2)[patch_is_on] - - # re-filter... 
- hkl_ids = hkl_ids[patch_is_on] - hkls_p = hkls_p[patch_is_on, :] - ang_centers = ang_centers[patch_is_on, :] - xy_centers = xy_centers[patch_is_on, :] - ang_pixel_size = ang_pixel_size[patch_is_on, :] - - # TODO: add polygon testing right here! - # done - if check_only: - patch_output = [] - for i_pt, angs in enumerate(ang_centers): - # the evaluation omegas; - # expand about the central value using tol vector - ome_eval = np.degrees(angs[2]) + ome_del - - # ...vectorize the omega_to_frame function to avoid loop? - frame_indices = [ - ome_imgser.omega_to_frame(ome)[0] for ome in ome_eval - ] - if -1 in frame_indices: - if not quiet: - msg = """ - window for (%d %d %d) falls outside omega range - """ % tuple( - hkls_p[i_pt, :] - ) - print(msg) - continue - else: - these_vertices = patch_xys[i_pt] - ijs = panel.cartToPixel(these_vertices) - ii, jj = polygon(ijs[:, 0], ijs[:, 1]) - contains_signal = False - for i_frame in frame_indices: - contains_signal = contains_signal or np.any( - ome_imgser[i_frame][ii, jj] > threshold - ) - compl.append(contains_signal) - patch_output.append((ii, jj, frame_indices)) - else: - # make the tth,eta patches for interpolation - patches = xrdutil.make_reflection_patches( - instr_cfg, - ang_centers[:, :2], - ang_pixel_size, - omega=ang_centers[:, 2], - tth_tol=tth_tol, - eta_tol=eta_tol, - rmat_c=rMat_c, - tvec_c=tVec_c, - npdiv=npdiv, - quiet=True, - ) - - # GRAND LOOP over reflections for this panel - patch_output = [] - for i_pt, patch in enumerate(patches): - - # strip relevant objects out of current patch - vtx_angs, vtx_xy, conn, areas, xy_eval, ijs = patch - - prows, pcols = areas.shape - nrm_fac = areas / float(native_area) - nrm_fac = nrm_fac / np.min(nrm_fac) - - # grab hkl info - hkl = hkls_p[i_pt, :] - hkl_id = hkl_ids[i_pt] - - # edge arrays - tth_edges = vtx_angs[0][0, :] - delta_tth = tth_edges[1] - tth_edges[0] - eta_edges = vtx_angs[1][:, 0] - delta_eta = eta_edges[1] - eta_edges[0] - - # need to reshape eval pts for interpolation - xy_eval = np.vstack( - [xy_eval[0].flatten(), xy_eval[1].flatten()] - ).T - - # the evaluation omegas; - # expand about the central value using tol vector - ome_eval = np.degrees(ang_centers[i_pt, 2]) + ome_del - - # ???: vectorize the omega_to_frame function to avoid loop? - frame_indices = [ - ome_imgser.omega_to_frame(ome)[0] for ome in ome_eval - ] - - if -1 in frame_indices: - if not quiet: - msg = """ - window for (%d%d%d) falls outside omega range - """ % tuple( - hkl - ) - print(msg) - continue - else: - # initialize spot data parameters - # !!! 
maybe change these to nan to not fuck up writer - peak_id = next_invalid_peak_id - sum_int = np.nan - max_int = np.nan - meas_angs = np.nan * np.ones(3) - meas_xy = np.nan * np.ones(2) - - # quick check for intensity - contains_signal = False - patch_data_raw = [] - for i_frame in frame_indices: - tmp = ome_imgser[i_frame][ijs[0], ijs[1]] - contains_signal = contains_signal or np.any( - tmp > threshold - ) - patch_data_raw.append(tmp) - patch_data_raw = np.stack(patch_data_raw, axis=0) - compl.append(contains_signal) - - if contains_signal: - # initialize patch data array for intensities - if interp.lower() == 'bilinear': - patch_data = np.zeros( - (len(frame_indices), prows, pcols) - ) - for i, i_frame in enumerate(frame_indices): - patch_data[i] = panel.interpolate_bilinear( - xy_eval, - ome_imgser[i_frame], - pad_with_nans=False, - ).reshape( - prows, pcols - ) # * nrm_fac - elif interp.lower() == 'nearest': - patch_data = patch_data_raw # * nrm_fac - else: - msg = ( - "interpolation option " - + "'%s' not understood" - ) - raise RuntimeError(msg % interp) - - # now have interpolated patch data... - labels, num_peaks = ndimage.label( - patch_data > threshold, structure=label_struct - ) - slabels = np.arange(1, num_peaks + 1) - - if num_peaks > 0: - peak_id = iRefl - props = regionprops(labels, patch_data) - coms = np.vstack( - [x.weighted_centroid for x in props] - ) - if num_peaks > 1: - center = np.r_[patch_data.shape] * 0.5 - center_t = np.tile(center, (num_peaks, 1)) - com_diff = coms - center_t - closest_peak_idx = np.argmin( - np.sum(com_diff**2, axis=1) - ) - else: - closest_peak_idx = 0 - coms = coms[closest_peak_idx] - # meas_omes = \ - # ome_edges[0] + (0.5 + coms[0])*delta_ome - meas_omes = ome_eval[0] + coms[0] * delta_ome - meas_angs = np.hstack( - [ - tth_edges[0] - + (0.5 + coms[2]) * delta_tth, - eta_edges[0] - + (0.5 + coms[1]) * delta_eta, - mapAngle( - np.radians(meas_omes), ome_period - ), - ] - ) - - # intensities - # - summed is 'integrated' over interpolated - # data - # - max is max of raw input data - sum_int = np.sum( - patch_data[ - labels == slabels[closest_peak_idx] - ] - ) - max_int = np.max( - patch_data_raw[ - labels == slabels[closest_peak_idx] - ] - ) - # ???: Should this only use labeled pixels? - # Those are segmented from interpolated data, - # not raw; likely ok in most cases. - - # need MEASURED xy coords - # FIXME: overload angles_to_cart? - gvec_c = angles_to_gvec( - meas_angs, - chi=self.chi, - rmat_c=rMat_c, - beam_vec=self.beam_vector, - ) - rMat_s = make_sample_rmat( - self.chi, meas_angs[2] - ) - meas_xy = gvec_to_xy( - gvec_c, - panel.rmat, - rMat_s, - rMat_c, - panel.tvec, - self.tvec, - tVec_c, - beam_vec=self.beam_vector, - ) - if panel.distortion is not None: - meas_xy = panel.distortion.apply_inverse( - np.atleast_2d(meas_xy) - ).flatten() - # FIXME: why is this suddenly necessary??? - meas_xy = meas_xy.squeeze() - else: - patch_data = patch_data_raw - - if peak_id < 0: - # The peak is invalid. - # Decrement the next invalid peak ID. 
- next_invalid_peak_id -= 1 - - # write output - if filename is not None: - if output_format.lower() == 'text': - writer.dump_patch( - peak_id, - hkl_id, - hkl, - sum_int, - max_int, - ang_centers[i_pt], - meas_angs, - xy_centers[i_pt], - meas_xy, - ) - elif output_format.lower() == 'hdf5': - xyc_arr = xy_eval.reshape( - prows, pcols, 2 - ).transpose(2, 0, 1) - writer.dump_patch( - detector_id, - iRefl, - peak_id, - hkl_id, - hkl, - tth_edges, - eta_edges, - np.radians(ome_eval), - xyc_arr, - ijs, - frame_indices, - patch_data, - ang_centers[i_pt], - xy_centers[i_pt], - meas_angs, - meas_xy, - ) - - if return_spot_list: - # Full output - xyc_arr = xy_eval.reshape( - prows, pcols, 2 - ).transpose(2, 0, 1) - _patch_output = [ - detector_id, - iRefl, - peak_id, - hkl_id, - hkl, - tth_edges, - eta_edges, - np.radians(ome_eval), - xyc_arr, - ijs, - frame_indices, - patch_data, - ang_centers[i_pt], - xy_centers[i_pt], - meas_angs, - meas_xy, - ] - else: - # Trimmed output - _patch_output = [ - peak_id, - hkl_id, - hkl, - sum_int, - max_int, - ang_centers[i_pt], - meas_angs, - meas_xy, - ] - patch_output.append(_patch_output) - iRefl += 1 - output[detector_id] = patch_output - if filename is not None and output_format.lower() == 'text': - writer.close() - if filename is not None and output_format.lower() == 'hdf5': - writer.close() - return compl, output - - def update_memoization_sizes(self): - # Resize all known memoization functions to have a cache at least - # the size of the number of detectors. - all_panels = list(self.detectors.values()) - PlanarDetector.update_memoization_sizes(all_panels) - CylindricalDetector.update_memoization_sizes(all_panels) - - def calc_transmission( - self, rMat_s: np.ndarray = None - ) -> dict[str, np.ndarray]: - """calculate the transmission from the - filter and polymer coating. the inverse of this - number is the intensity correction that needs - to be applied. 
actual computation is done inside - the detector class - """ - if rMat_s is None: - rMat_s = ct.identity_3x3 - - energy = self.beam_energy - transmissions = {} - for det_name, det in self.detectors.items(): - transmission_filter, transmission_phosphor = ( - det.calc_filter_coating_transmission(energy) - ) - - transmission = transmission_filter * transmission_phosphor - - if self.physics_package is not None: - transmission_physics_package = ( - det.calc_physics_package_transmission( - energy, rMat_s, self.physics_package - ) - ) - effective_pinhole_area = det.calc_effective_pinhole_area( - self.physics_package - ) - - transmission = ( - transmission - * transmission_physics_package - * effective_pinhole_area - ) - - transmissions[det_name] = transmission - return transmissions - - -# ============================================================================= -# UTILITIES -# ============================================================================= - - -class PatchDataWriter(object): - """Class for dumping Bragg reflection data.""" - - def __init__(self, filename): - self._delim = ' ' - # fmt: off - header_items = ( - '# ID', 'PID', - 'H', 'K', 'L', - 'sum(int)', 'max(int)', - 'pred tth', 'pred eta', 'pred ome', - 'meas tth', 'meas eta', 'meas ome', - 'pred X', 'pred Y', - 'meas X', 'meas Y' - ) - self._header = self._delim.join([ - self._delim.join(np.tile('{:<6}', 5)).format(*header_items[:5]), - self._delim.join(np.tile('{:<12}', 2)).format(*header_items[5:7]), - self._delim.join(np.tile('{:<23}', 10)).format(*header_items[7:17]) - ]) - # fmt: on - if isinstance(filename, IOBase): - self.fid = filename - else: - self.fid = open(filename, 'w') - print(self._header, file=self.fid) - - def __del__(self): - self.close() - - def close(self): - self.fid.close() - - def dump_patch( - self, peak_id, hkl_id, hkl, spot_int, max_int, pangs, mangs, pxy, mxy - ): - """ - !!! maybe need to check that last four inputs are arrays - """ - if mangs is None: - spot_int = np.nan - max_int = np.nan - mangs = np.nan * np.ones(3) - mxy = np.nan * np.ones(2) - - res = ( - [int(peak_id), int(hkl_id)] - + np.array(hkl, dtype=int).tolist() - + [spot_int, max_int] - + pangs.tolist() - + mangs.tolist() - + pxy.tolist() - + mxy.tolist() - ) - - output_str = self._delim.join( - [ - self._delim.join(np.tile('{:<6d}', 5)).format(*res[:5]), - self._delim.join(np.tile('{:<12e}', 2)).format(*res[5:7]), - self._delim.join(np.tile('{:<23.16e}', 10)).format(*res[7:]), - ] - ) - print(output_str, file=self.fid) - return output_str - - -class GrainDataWriter(object): - """Class for dumping grain data.""" - - def __init__(self, filename=None, array=None): - """Writes to either file or np array - - Array must be initialized with number of rows to be written. 
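
        Example (editorial sketch; values are hypothetical):

            table = np.empty((2, 21))    # one row per grain; 21 columns
            writer = GrainDataWriter(array=table)
            # grain_params has length 12; identity stretch => zero strain
            gp = np.hstack([np.zeros(6), [1.0, 1.0, 1.0, 0.0, 0.0, 0.0]])
            writer.dump_grain(0, 1.0, 0.0, gp)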
- """ - if filename is None and array is None: - raise RuntimeError( - 'GrainDataWriter must be specified with filename or array' - ) - - self.array = None - self.fid = None - - # array supersedes filename - if array is not None: - assert ( - array.shape[1] == 21 - ), f'grain data table must have 21 columns not {array.shape[21]}' - self.array = array - self._array_row = 0 - return - - self._delim = ' ' - # fmt: off - header_items = ( - '# grain ID', 'completeness', 'chi^2', - 'exp_map_c[0]', 'exp_map_c[1]', 'exp_map_c[2]', - 't_vec_c[0]', 't_vec_c[1]', 't_vec_c[2]', - 'inv(V_s)[0,0]', 'inv(V_s)[1,1]', 'inv(V_s)[2,2]', - 'inv(V_s)[1,2]*sqrt(2)', - 'inv(V_s)[0,2]*sqrt(2)', - 'inv(V_s)[0,1]*sqrt(2)', - 'ln(V_s)[0,0]', 'ln(V_s)[1,1]', 'ln(V_s)[2,2]', - 'ln(V_s)[1,2]', 'ln(V_s)[0,2]', 'ln(V_s)[0,1]' - ) - # fmt: on - self._header = self._delim.join( - [ - self._delim.join(np.tile('{:<12}', 3)).format( - *header_items[:3] - ), - self._delim.join( - np.tile('{:<23}', len(header_items) - 3) - ).format(*header_items[3:]), - ] - ) - # fmt: on - if isinstance(filename, IOBase): - self.fid = filename - else: - self.fid = open(filename, 'w') - print(self._header, file=self.fid) - - def __del__(self): - self.close() - - def close(self): - if self.fid is not None: - self.fid.close() - - def dump_grain(self, grain_id, completeness, chisq, grain_params): - assert ( - len(grain_params) == 12 - ), "len(grain_params) must be 12, not %d" % len(grain_params) - - # extract strain - emat = logm(np.linalg.inv(mutil.vecMVToSymm(grain_params[6:]))) - evec = mutil.symmToVecMV(emat, scale=False) - - res = ( - [int(grain_id), completeness, chisq] - + grain_params.tolist() - + evec.tolist() - ) - - if self.array is not None: - row = self._array_row - assert ( - row < self.array.shape[0] - ), f'invalid row {row} in array table' - self.array[row] = res - self._array_row += 1 - return res - - # (else) format and write to file - output_str = self._delim.join( - [ - self._delim.join(['{:<12d}', '{:<12f}', '{:<12e}']).format( - *res[:3] - ), - self._delim.join(np.tile('{:<23.16e}', len(res) - 3)).format( - *res[3:] - ), - ] - ) - print(output_str, file=self.fid) - return output_str - - -class GrainDataWriter_h5(object): - """Class for dumping grain results to an HDF5 archive. 
- - TODO: add material spec - """ - - def __init__(self, filename, instr_cfg, grain_params, use_attr=False): - if isinstance(filename, h5py.File): - self.fid = filename - else: - self.fid = h5py.File(filename + ".hdf5", "w") - icfg = dict(instr_cfg) - - # add instrument groups and attributes - self.instr_grp = self.fid.create_group('instrument') - unwrap_dict_to_h5(self.instr_grp, icfg, asattr=use_attr) - - # add grain group - self.grain_grp = self.fid.create_group('grain') - rmat_c = make_rmat_of_expmap(grain_params[:3]) - tvec_c = np.array(grain_params[3:6]).flatten() - vinv_s = np.array(grain_params[6:]).flatten() - vmat_s = np.linalg.inv(mutil.vecMVToSymm(vinv_s)) - - if use_attr: # attribute version - self.grain_grp.attrs.create('rmat_c', rmat_c) - self.grain_grp.attrs.create('tvec_c', tvec_c) - self.grain_grp.attrs.create('inv(V)_s', vinv_s) - self.grain_grp.attrs.create('vmat_s', vmat_s) - else: # dataset version - self.grain_grp.create_dataset('rmat_c', data=rmat_c) - self.grain_grp.create_dataset('tvec_c', data=tvec_c) - self.grain_grp.create_dataset('inv(V)_s', data=vinv_s) - self.grain_grp.create_dataset('vmat_s', data=vmat_s) - - data_key = 'reflection_data' - self.data_grp = self.fid.create_group(data_key) - - for det_key in self.instr_grp['detectors'].keys(): - self.data_grp.create_group(det_key) - - # FIXME: throws exception when called after close method - # def __del__(self): - # self.close() - - def close(self): - self.fid.close() - - def dump_patch( - self, - panel_id, - i_refl, - peak_id, - hkl_id, - hkl, - tth_edges, - eta_edges, - ome_centers, - xy_centers, - ijs, - frame_indices, - spot_data, - pangs, - pxy, - mangs, - mxy, - gzip=1, - ): - """ - to be called inside loop over patches - - default GZIP level for data arrays is 1 - """ - fi = np.array(frame_indices, dtype=int) - - panel_grp = self.data_grp[panel_id] - spot_grp = panel_grp.create_group("spot_%05d" % i_refl) - spot_grp.attrs.create('peak_id', int(peak_id)) - spot_grp.attrs.create('hkl_id', int(hkl_id)) - spot_grp.attrs.create('hkl', np.array(hkl, dtype=int)) - spot_grp.attrs.create('predicted_angles', pangs) - spot_grp.attrs.create('predicted_xy', pxy) - if mangs is None: - mangs = np.nan * np.ones(3) - spot_grp.attrs.create('measured_angles', mangs) - if mxy is None: - mxy = np.nan * np.ones(3) - spot_grp.attrs.create('measured_xy', mxy) - - # get centers crds from edge arrays - # FIXME: export full coordinate arrays, or just center vectors??? - # - # ome_crd, eta_crd, tth_crd = np.meshgrid( - # ome_centers, - # centers_of_edge_vec(eta_edges), - # centers_of_edge_vec(tth_edges), - # indexing='ij') - # - # ome_dim, eta_dim, tth_dim = spot_data.shape - - # !!! 
for now just exporting center vectors for spot_data - tth_crd = centers_of_edge_vec(tth_edges) - eta_crd = centers_of_edge_vec(eta_edges) - - shuffle_data = True # reduces size by 20% - spot_grp.create_dataset( - 'tth_crd', - data=tth_crd, - compression="gzip", - compression_opts=gzip, - shuffle=shuffle_data, - ) - spot_grp.create_dataset( - 'eta_crd', - data=eta_crd, - compression="gzip", - compression_opts=gzip, - shuffle=shuffle_data, - ) - spot_grp.create_dataset( - 'ome_crd', - data=ome_centers, - compression="gzip", - compression_opts=gzip, - shuffle=shuffle_data, - ) - spot_grp.create_dataset( - 'xy_centers', - data=xy_centers, - compression="gzip", - compression_opts=gzip, - shuffle=shuffle_data, - ) - spot_grp.create_dataset( - 'ij_centers', - data=ijs, - compression="gzip", - compression_opts=gzip, - shuffle=shuffle_data, - ) - spot_grp.create_dataset( - 'frame_indices', - data=fi, - compression="gzip", - compression_opts=gzip, - shuffle=shuffle_data, - ) - spot_grp.create_dataset( - 'intensities', - data=spot_data, - compression="gzip", - compression_opts=gzip, - shuffle=shuffle_data, - ) - return - - -class GenerateEtaOmeMaps(object): - """ - eta-ome map class derived from new image_series and YAML config - - ...for now... - - must provide: - - self.dataStore - self.planeData - self.iHKLList - self.etaEdges # IN RADIANS - self.omeEdges # IN RADIANS - self.etas # IN RADIANS - self.omegas # IN RADIANS - - """ - - def __init__( - self, - image_series_dict, - instrument, - plane_data, - active_hkls=None, - eta_step=0.25, - threshold=None, - ome_period=(0, 360), - ): - """ - image_series must be OmegaImageSeries class - instrument_params must be a dict (loaded from yaml spec) - active_hkls must be a list (required for now) - - FIXME: get rid of omega period; should get it from imageseries - """ - - self._planeData = plane_data - - # ???: change name of iHKLList? - # ???: can we change the behavior of iHKLList? - if active_hkls is None: - self._iHKLList = plane_data.getHKLID(plane_data.hkls, master=True) - n_rings = len(self._iHKLList) - else: - assert hasattr( - active_hkls, '__len__' - ), "active_hkls must be an iterable with __len__" - self._iHKLList = active_hkls - n_rings = len(active_hkls) - - # grab a det key and corresponding imageseries (first will do) - # !!! assuming that the imageseries for all panels - # have the same length and omegas - det_key, this_det_ims = next(iter(image_series_dict.items())) - - # handle omegas - # !!! for multi wedge, enforncing monotonicity - # !!! wedges also cannot overlap or span more than 360 - omegas_array = this_det_ims.metadata['omega'] # !!! DEGREES - delta_ome = omegas_array[0][-1] - omegas_array[0][0] - frame_mask = None - ome_period = omegas_array[0, 0] + np.r_[0.0, 360.0] # !!! be careful - if this_det_ims.omegawedges.nwedges > 1: - delta_omes = [ - (i['ostop'] - i['ostart']) / i['nsteps'] - for i in this_det_ims.omegawedges.wedges - ] - check_wedges = mutil.uniqueVectors( - np.atleast_2d(delta_omes), tol=1e-6 - ).squeeze() - assert ( - check_wedges.size == 1 - ), "all wedges must have the same delta omega to 1e-6" - # grab representative delta ome - # !!! assuming positive delta consistent with OmegaImageSeries - delta_ome = delta_omes[0] - - # grab full-range start/stop - # !!! be sure to map to the same period to enable arithmatic - # ??? safer to do this way rather than just pulling from - # the omegas attribute? - owedges = this_det_ims.omegawedges.wedges - ostart = owedges[0]['ostart'] # !!! 
DEGREES - ostop = float( - mapAngle(owedges[-1]['ostop'], ome_period, units='degrees') - ) - # compute total nsteps - # FIXME: need check for roundoff badness - nsteps = int((ostop - ostart) / delta_ome) - ome_edges_full = np.linspace( - ostart, ostop, num=nsteps + 1, endpoint=True - ) - omegas_array = np.vstack( - [ome_edges_full[:-1], ome_edges_full[1:]] - ).T - ome_centers = np.average(omegas_array, axis=1) - - # use OmegaImageSeries method to determine which bins have data - # !!! this array has -1 outside a wedge - # !!! again assuming the valid frame order increases monotonically - frame_mask = np.array( - [ - this_det_ims.omega_to_frame(ome)[0] != -1 - for ome in ome_centers - ] - ) - - # ???: need to pass a threshold? - eta_mapping, etas = instrument.extract_polar_maps( - plane_data, - image_series_dict, - active_hkls=active_hkls, - threshold=threshold, - tth_tol=None, - eta_tol=eta_step, - ) - - # for convenience grab map shape from first - map_shape = next(iter(eta_mapping.values())).shape[1:] - - # pack all detectors with masking - # FIXME: add omega masking - data_store = [] - for i_ring in range(n_rings): - # first handle etas - full_map = np.zeros(map_shape, dtype=float) - nan_mask_full = np.zeros( - (len(eta_mapping), map_shape[0], map_shape[1]) - ) - i_p = 0 - for det_key, eta_map in eta_mapping.items(): - nan_mask = ~np.isnan(eta_map[i_ring]) - nan_mask_full[i_p] = nan_mask - full_map[nan_mask] += eta_map[i_ring][nan_mask] - i_p += 1 - re_nan_these = np.sum(nan_mask_full, axis=0) == 0 - full_map[re_nan_these] = np.nan - - # now omegas - if frame_mask is not None: - # !!! must expand row dimension to include - # skipped omegas - tmp = np.ones((len(frame_mask), map_shape[1])) * np.nan - tmp[frame_mask, :] = full_map - full_map = tmp - data_store.append(full_map) - self._dataStore = data_store - - # set required attributes - self._omegas = mapAngle( - np.radians(np.average(omegas_array, axis=1)), - np.radians(ome_period), - ) - self._omeEdges = mapAngle( - np.radians(np.r_[omegas_array[:, 0], omegas_array[-1, 1]]), - np.radians(ome_period), - ) - - # !!! must avoid the case where omeEdges[0] = omeEdges[-1] for the - # indexer to work properly - if abs(self._omeEdges[0] - self._omeEdges[-1]) <= ct.sqrt_epsf: - # !!! 
SIGNED delta ome
-            del_ome = np.radians(omegas_array[0, 1] - omegas_array[0, 0])
-            self._omeEdges[-1] = self._omeEdges[-2] + del_ome
-
-        # handle etas
-        # WARNING: unlike the omegas in imageseries metadata,
-        # these are in RADIANS and represent bin edges
-        self._etaEdges = etas
-        self._etas = self._etaEdges[:-1] + 0.5 * np.radians(eta_step)
-
-    @property
-    def dataStore(self):
-        return self._dataStore
-
-    @property
-    def planeData(self):
-        return self._planeData
-
-    @property
-    def iHKLList(self):
-        return np.atleast_1d(self._iHKLList).flatten()
-
-    @property
-    def etaEdges(self):
-        return self._etaEdges
-
-    @property
-    def omeEdges(self):
-        return self._omeEdges
-
-    @property
-    def etas(self):
-        return self._etas
-
-    @property
-    def omegas(self):
-        return self._omegas
-
-    def save(self, filename):
-        xrdutil.EtaOmeMaps.save_eta_ome_maps(self, filename)
-
-
-def _generate_ring_params(tthr, ptth, peta, eta_edges, delta_eta):
-    # mark pixels in the spec'd tth range
-    pixels_in_tthr = np.logical_and(ptth >= tthr[0], ptth <= tthr[1])
-
-    # catch case where ring isn't on detector
-    if not np.any(pixels_in_tthr):
-        return None
-
-    pixel_ids = np.where(pixels_in_tthr)
-
-    # grab relevant eta coords using histogram
-    pixel_etas = peta[pixel_ids]
-    reta_hist = histogram(pixel_etas, eta_edges)
-    bins_on_detector = np.where(reta_hist)[0]
-
-    return pixel_etas, eta_edges, pixel_ids, bins_on_detector
-
-
-def run_fast_histogram(x, bins, weights=None):
-    return histogram1d(x, len(bins) - 1, (bins[0], bins[-1]), weights=weights)
-
-
-def run_numpy_histogram(x, bins, weights=None):
-    # numpy fallback; np.histogram returns (counts, edges)
-    return np.histogram(x, bins=bins, weights=weights)[0]
-
-
-histogram = run_fast_histogram if fast_histogram else run_numpy_histogram
-
-
-def _run_histograms(rows, ims, tth_ranges, ring_maps, ring_params, threshold):
-    for i_row in range(*rows):
-        image = ims[i_row]
-
-        # handle threshold if specified
-        if threshold is not None:
-            # !!! NaNs get preserved
-            image = np.array(image)
-            image[image < threshold] = 0.0
-
-        for i_r, tthr in enumerate(tth_ranges):
-            this_map = ring_maps[i_r]
-            params = ring_params[i_r]
-            if not params:
-                # We are supposed to skip this ring...
-                continue
-
-            # Unpack the params
-            pixel_etas, eta_edges, pixel_ids, bins_on_detector = params
-            result = histogram(pixel_etas, eta_edges, weights=image[pixel_ids])
-
-            # Note that this preserves nan values for bins not on the detector.
-            this_map[i_row, bins_on_detector] = result[bins_on_detector]
-
-
-def _extract_detector_line_positions(
-    iter_args,
-    plane_data,
-    tth_tol,
-    eta_tol,
-    eta_centers,
-    npdiv,
-    collapse_tth,
-    collapse_eta,
-    do_interpolation,
-    do_fitting,
-    fitting_kwargs,
-    tth_distortion,
-    max_workers,
-):
-    panel, instr_cfg, images, pbp = iter_args
-
-    if images.ndim == 2:
-        images = np.tile(images, (1, 1, 1))
-    elif images.ndim != 3:
-        raise RuntimeError("images must be 2- or 3-d")
-
-    # make rings
-    # !!! adding tth_distortion pass-through; comes in as dict over panels
-    tth_distr_cls = None
-    if tth_distortion is not None:
-        tth_distr_cls = tth_distortion[panel.name]
-
-    pow_angs, pow_xys, tth_ranges = panel.make_powder_rings(
-        plane_data,
-        merge_hkls=True,
-        delta_tth=tth_tol,
-        delta_eta=eta_tol,
-        eta_list=eta_centers,
-        tth_distortion=tth_distr_cls,
-    )
-
-    tth_tols = np.degrees(np.hstack([i[1] - i[0] for i in tth_ranges]))
-
-    # !!!
this is only needed if doing fitting - if isinstance(plane_data, PlaneData): - tth_idx, tth_ranges = plane_data.getMergedRanges(cullDupl=True) - tth_ref = plane_data.getTTh() - tth0 = [np.degrees(tth_ref[i]) for i in tth_idx] - else: - tth0 = plane_data - - # ================================================================= - # LOOP OVER RING SETS - # ================================================================= - pbar_rings = partial( - tqdm, total=len(pow_angs), desc="Ringset", position=pbp - ) - - kwargs = { - 'instr_cfg': instr_cfg, - 'panel': panel, - 'eta_tol': eta_tol, - 'npdiv': npdiv, - 'collapse_tth': collapse_tth, - 'collapse_eta': collapse_eta, - 'images': images, - 'do_interpolation': do_interpolation, - 'do_fitting': do_fitting, - 'fitting_kwargs': fitting_kwargs, - 'tth_distortion': tth_distr_cls, - } - func = partial(_extract_ring_line_positions, **kwargs) - iter_arg = zip(pow_angs, pow_xys, tth_tols, tth0) - with ProcessPoolExecutor( - mp_context=constants.mp_context, max_workers=max_workers - ) as executor: - return list(pbar_rings(executor.map(func, iter_arg))) - - -def _extract_ring_line_positions( - iter_args, - instr_cfg, - panel, - eta_tol, - npdiv, - collapse_tth, - collapse_eta, - images, - do_interpolation, - do_fitting, - fitting_kwargs, - tth_distortion, -): - """ - Extracts data for a single Debye-Scherrer ring . - - Parameters - ---------- - iter_args : tuple - (angs [radians], - xys [mm], - tth_tol [deg], - this_tth0 [deg]) - instr_cfg : TYPE - DESCRIPTION. - panel : TYPE - DESCRIPTION. - eta_tol : TYPE - DESCRIPTION. - npdiv : TYPE - DESCRIPTION. - collapse_tth : TYPE - DESCRIPTION. - collapse_eta : TYPE - DESCRIPTION. - images : TYPE - DESCRIPTION. - do_interpolation : TYPE - DESCRIPTION. - do_fitting : TYPE - DESCRIPTION. - fitting_kwargs : TYPE - DESCRIPTION. - tth_distortion : TYPE - DESCRIPTION. - - Yields - ------ - patch_data : TYPE - DESCRIPTION. - - """ - # points are already checked to fall on detector - angs, xys, tth_tol, this_tth0 = iter_args - - # SS 01/31/25 noticed some nans in xys even after clipping - # going to do another round of masking to get rid of those - nan_mask = ~np.logical_or(np.isnan(xys), np.isnan(angs)) - nan_mask = np.logical_or.reduce(nan_mask, 1) - if angs.ndim > 1 and xys.ndim > 1: - angs = angs[nan_mask, :] - xys = xys[nan_mask, :] - - n_images = len(images) - native_area = panel.pixel_area - - # make the tth,eta patches for interpolation - patches = xrdutil.make_reflection_patches( - instr_cfg, - angs, - panel.angularPixelSize(xys), - tth_tol=tth_tol, - eta_tol=eta_tol, - npdiv=npdiv, - quiet=True, - ) - - # loop over patches - # FIXME: fix initialization - if collapse_tth: - patch_data = np.zeros((len(angs), n_images)) - else: - patch_data = [] - for i_p, patch in enumerate(patches): - # strip relevant objects out of current patch - vtx_angs, vtx_xys, conn, areas, xys_eval, ijs = patch - - # need to reshape eval pts for interpolation - xy_eval = np.vstack([xys_eval[0].flatten(), xys_eval[1].flatten()]).T - - _, on_panel = panel.clip_to_panel(xy_eval) - - if np.any(~on_panel): - continue - - if collapse_tth: - ang_data = (vtx_angs[0][0, [0, -1]], vtx_angs[1][[0, -1], 0]) - elif collapse_eta: - # !!! 
yield the tth bin centers - tth_centers = np.average( - np.vstack([vtx_angs[0][0, :-1], vtx_angs[0][0, 1:]]), axis=0 - ) - ang_data = (tth_centers, angs[i_p][-1]) - if do_fitting: - fit_data = [] - else: - ang_data = vtx_angs - - prows, pcols = areas.shape - area_fac = areas / float(native_area) - - # interpolate - if not collapse_tth: - ims_data = [] - for j_p in np.arange(len(images)): - # catch interpolation type - image = images[j_p] - if do_interpolation: - p_img = ( - panel.interpolate_bilinear( - xy_eval, - image, - ).reshape(prows, pcols) - * area_fac - ) - else: - p_img = image[ijs[0], ijs[1]] * area_fac - - # catch flat spectrum data, which will cause - # fitting to fail. - # ???: best here, or make fitting handle it? - mxval = np.max(p_img) - mnval = np.min(p_img) - if mxval == 0 or (1.0 - mnval / mxval) < 0.01: - continue - - # catch collapsing options - if collapse_tth: - patch_data[i_p, j_p] = np.average(p_img) - # ims_data.append(np.sum(p_img)) - else: - if collapse_eta: - lineout = np.average(p_img, axis=0) - ims_data.append(lineout) - if do_fitting: - if tth_distortion is not None: - # must correct tth0 - tmp = tth_distortion.apply( - panel.angles_to_cart( - np.vstack( - [ - np.radians(this_tth0), - np.tile( - ang_data[-1], len(this_tth0) - ), - ] - ).T - ), - return_nominal=True, - ) - pk_centers = np.degrees(tmp[:, 0]) - else: - pk_centers = this_tth0 - kwargs = { - 'tth_centers': np.degrees(tth_centers), - 'lineout': lineout, - 'tth_pred': pk_centers, - **fitting_kwargs, - } - result = fit_ring(**kwargs) - fit_data.append(result) - else: - ims_data.append(p_img) - if not collapse_tth: - output = [ang_data, ims_data] - if do_fitting: - output.append(fit_data) - patch_data.append(output) - - return patch_data - - -DETECTOR_TYPES = { - 'planar': PlanarDetector, - 'cylindrical': CylindricalDetector, -} - - -class BufferShapeMismatchError(RuntimeError): - # This is raised when the buffer shape does not match the detector shape - pass - - -@contextmanager -def switch_xray_source(instr: HEDMInstrument, xray_source: Optional[str]): - if xray_source is None: - # If the x-ray source is None, leave it as the current active one - yield - return - - prev_beam_name = instr.active_beam_name - instr.active_beam_name = xray_source - try: - yield - finally: - instr.active_beam_name = prev_beam_name diff --git a/hexrd/hedm/instrument/physics_package.py b/hexrd/hedm/instrument/physics_package.py deleted file mode 100644 index e0af72b8f..000000000 --- a/hexrd/hedm/instrument/physics_package.py +++ /dev/null @@ -1,302 +0,0 @@ -from abc import abstractmethod -import numpy as np -from hexrd.core.material.utils import calculate_linear_absorption_length - - -class AbstractPhysicsPackage: - """abstract class for the physics package. - there will be two separate physics package class - types -- one for HED samples and the other for - HEDM samples. 
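
    A hedged construction sketch (keyword names mirror the parameters
    documented below; the material/density/thickness values are purely
    illustrative):

        pp = HEDPhysicsPackage(
            sample_material='CH', sample_density=1.05,
            sample_thickness=50.0,
            pinhole_material='Ta', pinhole_density=16.65,
            pinhole_thickness=100.0, pinhole_diameter=400.0,
            window_material='LiF', window_density=2.64,
            window_thickness=150.0,
        )
        abs_len = pp.sample_absorption_length(10.0)  # energy in keV (assumed)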
- - Parameters - ---------- - sample_material : str or hexrd.core.material.Material - either the formula or a hexrd material instance - sample_density : float - density of sample material in g/cc - sample_thickness : float - sample thickness in microns - sample_geometry : FIXME - FIXME - pinhole_material : str or hexrd.core.material.Material, optional - either the formula or a hexrd material instance - pinhole_density : float - density of pinhole material in g/cc - pinhole_thickness : float - pinhole thickness in microns - pinhole_diameter : float - pinhole diameter in microns - window_material : str or hexrd.core.material.Material - either the formula or a hexrd material instance - window_density : float - density of window material in g/cc - window_thickness : float - window thickness in microns - - - Notes - ----- - [1] Rygg et al., X-ray diffraction at the National - Ignition Facility, Rev. Sci. Instrum. 91, 043902 (2020) - [2] M. Stoeckl, A. A. Solodov - Readout models for BaFBr0.85I0.15:Eu image plates - Rev. Sci. Instrum. 89, 063101 (2018 - """ - - # Abstract methods that must be redefined in derived classes - @property - @abstractmethod - def type(self): - pass - - def __init__( - self, - sample_material=None, - sample_density=None, - sample_thickness=None, - pinhole_material=None, - pinhole_density=None, - pinhole_thickness=None, - pinhole_diameter=None, - **kwargs, - ): - self._sample_material = sample_material - self._sample_density = sample_density - self._sample_thickness = sample_thickness - self._pinhole_material = pinhole_material - self._pinhole_density = pinhole_density - self._pinhole_thickness = pinhole_thickness - self._pinhole_diameter = pinhole_diameter - - @property - def attributes_to_serialize(self): - return [ - 'sample_material', - 'sample_density', - 'sample_thickness', - 'pinhole_material', - 'pinhole_density', - 'pinhole_thickness', - 'pinhole_diameter', - ] - - @property - def sample_material(self): - return self._sample_material - - @sample_material.setter - def sample_material(self, material): - self._sample_material = material - - @property - def sample_density(self): - if self._sample_density is None: - return 0.0 - return self._sample_density - - @sample_density.setter - def sample_density(self, density): - self._sample_density = density - - @property - def sample_thickness(self): - if self._sample_thickness is None: - return 0.0 - return self._sample_thickness - - @sample_thickness.setter - def sample_thickness(self, value): - self._sample_thickness = value - - @property - def pinhole_material(self): - return self._pinhole_material - - @pinhole_material.setter - def pinhole_material(self, material): - self._pinhole_material = material - - @property - def pinhole_density(self): - if self._pinhole_density is None: - return 0.0 - return self._pinhole_density - - @pinhole_density.setter - def pinhole_density(self, density): - self._pinhole_density = density - - @property - def pinhole_thickness(self): - if self._pinhole_thickness is None: - return 0.0 - return self._pinhole_thickness - - @pinhole_thickness.setter - def pinhole_thickness(self, value): - self._pinhole_thickness = value - - @property - def pinhole_radius(self): - if self.pinhole_diameter is None: - return 0.0 - return 0.5 * self.pinhole_diameter - - @pinhole_radius.setter - def pinhole_radius(self, value): - self._pinhole_diameter = 2.0 * value - - @property - def pinhole_diameter(self): - if self._pinhole_diameter is None: - return 0.0 - return self._pinhole_diameter - - 
@pinhole_diameter.setter - def pinhole_diameter(self, value): - self._pinhole_diameter = value - - def absorption_length(self, energy, flag): - if isinstance(energy, float): - energy_inp = np.array([energy]) - elif isinstance(energy, list): - energy_inp = np.array(energy) - elif isinstance(energy, np.ndarray): - energy_inp = energy - - if flag.lower() == 'sample': - args = ( - self.sample_density, - self.sample_material, - energy_inp, - ) - elif flag.lower() == 'window': - args = ( - self.window_density, - self.window_material, - energy_inp, - ) - elif flag.lower() == 'pinhole': - args = ( - self.pinhole_density, - self.pinhole_material, - energy_inp, - ) - abs_length = calculate_linear_absorption_length(*args) - if abs_length.shape[0] == 1: - return abs_length[0] - else: - return abs_length - - def sample_absorption_length(self, energy): - return self.absorption_length(energy, 'sample') - - def pinhole_absorption_length(self, energy): - return self.absorption_length(energy, 'pinhole') - - def serialize(self): - return {a: getattr(self, a) for a in self.attributes_to_serialize} - - def deserialize(self, **kwargs): - for key, value in kwargs.items(): - setattr(self, key, value) - - -class HEDPhysicsPackage(AbstractPhysicsPackage): - - def __init__(self, **pp_kwargs): - super().__init__(**pp_kwargs) - self._window_material = pp_kwargs.get('window_material', None) - self._window_density = pp_kwargs.get('window_density', None) - self._window_thickness = pp_kwargs.get('window_thickness', None) - - @property - def attributes_to_serialize(self): - return [ - 'sample_material', - 'sample_density', - 'sample_thickness', - 'pinhole_material', - 'pinhole_density', - 'pinhole_thickness', - 'pinhole_diameter', - 'window_material', - 'window_density', - 'window_thickness', - ] - - @property - def type(self): - return 'HED' - - @property - def window_material(self): - return self._window_material - - @window_material.setter - def window_material(self, material): - self._window_material = material - - @property - def window_density(self): - if self._window_density is None: - return 0.0 - return self._window_density - - @window_density.setter - def window_density(self, density): - self._window_density = density - - @property - def window_thickness(self): - if self._window_thickness is None: - return 0.0 - return self._window_thickness - - @window_thickness.setter - def window_thickness(self, thickness): - self._window_thickness = thickness - - def window_absorption_length(self, energy): - return self.absorption_length(energy, 'window') - - -class HEDMPhysicsPackage(AbstractPhysicsPackage): - - def __init__(self, **pp_kwargs): - super().__init__(**pp_kwargs) - self._sample_geometry = pp_kwargs.get('sample_geometry', None) - - @property - def attributes_to_serialize(self): - return [ - 'sample_material', - 'sample_density', - 'sample_thickness', - 'sample_geometry', - 'pinhole_material', - 'pinhole_density', - 'pinhole_thickness', - 'pinhole_diameter', - ] - - @property - def sample_geometry(self): - return self._sample_geometry - - @property - def sample_diameter(self): - if self.sample_geometry == 'cylinder': - return self._sample_thickness - else: - msg = ( - f'sample geometry does not have diameter ' - f'associated with it.' 
- ) - print(msg) - return - - @property - def type(self): - return 'HEDM' diff --git a/hexrd/hedm/material/crystallography.py b/hexrd/hedm/material/crystallography.py deleted file mode 100644 index 2d2dce52f..000000000 --- a/hexrd/hedm/material/crystallography.py +++ /dev/null @@ -1,2261 +0,0 @@ -# -*- coding: utf-8 -*- -# ============================================================================= -# Copyright (c) 2012, Lawrence Livermore National Security, LLC. -# Produced at the Lawrence Livermore National Laboratory. -# Written by Joel Bernier and others. -# LLNL-CODE-529294. -# All rights reserved. -# -# This file is part of HEXRD. For details on dowloading the source, -# see the file COPYING. -# -# Please also see the file LICENSE. -# -# This program is free software; you can redistribute it and/or modify it under -# the terms of the GNU Lesser General Public License (as published by the Free -# Software Foundation) version 2.1 dated February 1999. -# -# This program is distributed in the hope that it will be useful, but -# WITHOUT ANY WARRANTY; without even the IMPLIED WARRANTY OF MERCHANTABILITY -# or FITNESS FOR A PARTICULAR PURPOSE. See the terms and conditions of the -# GNU General Public License for more details. -# -# You should have received a copy of the GNU Lesser General Public -# License along with this program (see file LICENSE); if not, write to -# the Free Software Foundation, Inc., 59 Temple Place, Suite 330, -# Boston, MA 02111-1307 USA or visit . -# ============================================================================= -import re -import copy -import csv -import os -from math import pi -from typing import Optional, Union, Dict, List, Tuple - -import numpy as np - -from hexrd.hedm.material.unitcell import unitcell -from hexrd.core.material.unitcell import unitcell -from hexrd.core.deprecation import deprecated -from hexrd.core import constants -from hexrd.core.matrixutil import unitVector -from hexrd.core.rotations import ( - rotMatOfExpMap, - mapAngle, - applySym, - ltypeOfLaueGroup, - quatOfLaueGroup, -) -from hexrd.core.transforms import xfcapi -from hexrd.core import valunits -from hexrd.core.valunits import toFloat -from hexrd.core.constants import d2r, r2d, sqrt3by2, epsf, sqrt_epsf - -"""module vars""" - -# units -dUnit = 'angstrom' -outputDegrees = False -outputDegrees_bak = outputDegrees - - -def hklToStr(hkl: np.ndarray) -> str: - """ - Converts hkl representation to a string. - - Parameters - ---------- - hkl : np.ndarray - 3 element list of h, k, and l values (Miller indices). - - Returns - ------- - str - Space-separated string representation of h, k, and l values. - - """ - return re.sub(r'[\[\]\(\)\{\},]', '', str(hkl)) - - -def tempSetOutputDegrees(val: bool) -> None: - """ - Set the global outputDegrees flag temporarily. Can be reverted with - revertOutputDegrees(). - - Parameters - ---------- - val : bool - True to output angles in degrees, False to output angles in radians. - - Returns - ------- - None - - """ - global outputDegrees, outputDegrees_bak - outputDegrees_bak = outputDegrees - outputDegrees = val - - -def revertOutputDegrees() -> None: - """ - Revert the effect of tempSetOutputDegrees(), resetting the outputDegrees - flag to its previous value (True to output in degrees, False for radians). 
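
    Example (editorial sketch):

        tempSetOutputDegrees(True)   # downstream angle output now in degrees
        # ... call angle-producing functions ...
        revertOutputDegrees()        # restore the previous setting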
- - Returns - ------- - None - """ - global outputDegrees, outputDegrees_bak - outputDegrees = outputDegrees_bak - - -def cosineXform( - a: np.ndarray, b: np.ndarray, c: np.ndarray -) -> tuple[np.ndarray, np.ndarray]: - """ - Spherical trig transform to take alpha, beta, gamma to expressions - for cos(alpha*). See ref below. - - [1] R. J. Neustadt, F. W. Cagle, Jr., and J. Waser, ``Vector algebra and - the relations between direct and reciprocal lattice quantities''. Acta - Cryst. (1968), A24, 247--248 - - Parameters - ---------- - a : np.ndarray - List of alpha angle values (radians). - b : np.ndarray - List of beta angle values (radians). - c : np.ndarray - List of gamma angle values (radians). - - Returns - ------- - np.ndarray - List of cos(alpha*) values. - np.ndarray - List of sin(alpha*) values. - - """ - cosar = (np.cos(b) * np.cos(c) - np.cos(a)) / (np.sin(b) * np.sin(c)) - sinar = np.sqrt(1 - cosar**2) - return cosar, sinar - - -def processWavelength(arg: Union[valunits.valWUnit, float]) -> float: - """ - Convert an energy value to a wavelength. If argument has units of length - or energy, will convert to globally specified unit type for wavelength - (dUnit). If argument is a scalar, assumed input units are keV. - """ - if isinstance(arg, valunits.valWUnit): - # arg is a valunits.valWUnit object - if arg.isLength(): - return arg.getVal(dUnit) - elif arg.isEnergy(): - e = arg.getVal('keV') - return valunits.valWUnit( - 'wavelength', 'length', constants.keVToAngstrom(e), 'angstrom' - ).getVal(dUnit) - else: - raise RuntimeError('do not know what to do with ' + str(arg)) - else: - # !!! assuming arg is in keV - return valunits.valWUnit( - 'wavelength', 'length', constants.keVToAngstrom(arg), 'angstrom' - ).getVal(dUnit) - - -def latticeParameters(lvec): - """ - Generates direct and reciprocal lattice vector components in a - crystal-relative RHON basis, X. The convention for fixing X to the - lattice is such that a || x1 and c* || x3, where a and c* are - direct and reciprocal lattice vectors, respectively. - """ - lnorm = np.sqrt(np.sum(lvec**2, 0)) - - a = lnorm[0] - b = lnorm[1] - c = lnorm[2] - - ahat = lvec[:, 0] / a - bhat = lvec[:, 1] / b - chat = lvec[:, 2] / c - - gama = np.arccos(np.dot(ahat, bhat)) - beta = np.arccos(np.dot(ahat, chat)) - alfa = np.arccos(np.dot(bhat, chat)) - if outputDegrees: - gama = r2d * gama - beta = r2d * beta - alfa = r2d * alfa - - return [a, b, c, alfa, beta, gama] - - -def latticePlanes( - hkls: np.ndarray, - lparms: np.ndarray, - ltype: Optional[str] = 'cubic', - wavelength: Optional[float] = 1.54059292, - strainMag: Optional[float] = None, -) -> Dict[str, np.ndarray]: - """ - Generates lattice plane data in the direct lattice for a given set - of Miller indices. Vector components are written in the - crystal-relative RHON basis, X. The convention for fixing X to the - lattice is such that a || x1 and c* || x3, where a and c* are - direct and reciprocal lattice vectors, respectively. - - USAGE: - - planeInfo = latticePlanes(hkls, lparms, **kwargs) - - INPUTS: - - 1) hkls (3 x n float ndarray) is the array of Miller indices for - the planes of interest. The vectors are assumed to be - concatenated along the 1-axis (horizontal). - - 2) lparms (1 x m float list) is the array of lattice parameters, - where m depends on the symmetry group (see below). - - The following optional arguments are recognized: - - 3) ltype=(string) is a string representing the symmetry type of - the implied Laue group. The 11 available choices are shown - below. 
The default value is 'cubic'. Note that each group - expects a lattice parameter array of the indicated length - and order. - - latticeType lparms - ----------- ------------ - 'cubic' a - 'hexagonal' a, c - 'trigonal' a, c - 'rhombohedral' a, alpha (in degrees) - 'tetragonal' a, c - 'orthorhombic' a, b, c - 'monoclinic' a, b, c, beta (in degrees) - 'triclinic' a, b, c, alpha, beta, gamma (in degrees) - - 4) wavelength= is a value represented the wavelength in - Angstroms to calculate bragg angles for. The default value - is for Cu K-alpha radiation (1.54059292 Angstrom) - - 5) strainMag=None - - OUTPUTS: - - 1) planeInfo is a dictionary containing the following keys/items: - - normals (3, n) double array array of the components to the - unit normals for each {hkl} in - X (horizontally concatenated) - - dspacings (n, ) double array array of the d-spacings for - each {hkl} - - tThetas (n, ) double array array of the Bragg angles for - each {hkl} relative to the - specified wavelength - - NOTES: - - *) This function is effectively a wrapper to 'latticeVectors'. - See 'help(latticeVectors)' for additional info. - - *) Lattice plane d-spacings are calculated from the reciprocal - lattice vectors specified by {hkl} as shown in Appendix 1 of - [1]. - - REFERENCES: - - [1] B. D. Cullity, ``Elements of X-Ray Diffraction, 2 - ed.''. Addison-Wesley Publishing Company, Inc., 1978. ISBN - 0-201-01174-3 - - """ - location = 'latticePlanes' - - assert ( - hkls.shape[0] == 3 - ), f"hkls aren't column vectors in call to '{location}'!" - - tag = ltype - wlen = wavelength - - # get B - L = latticeVectors(lparms, tag) - - # get G-vectors -- reciprocal vectors in crystal frame - G = np.dot(L['B'], hkls) - - # magnitudes - d = 1 / np.sqrt(np.sum(G**2, 0)) - - aconv = 1.0 - if outputDegrees: - aconv = r2d - - # two thetas - sth = wlen / 2.0 / d - mask = np.abs(sth) < 1.0 - tth = np.zeros(sth.shape) - - tth[~mask] = np.nan - tth[mask] = aconv * 2.0 * np.arcsin(sth[mask]) - - p = dict(normals=unitVector(G), dspacings=d, tThetas=tth) - - if strainMag is not None: - p['tThetasLo'] = np.zeros(sth.shape) - p['tThetasHi'] = np.zeros(sth.shape) - - mask = (np.abs(wlen / 2.0 / (d * (1.0 + strainMag))) < 1.0) & ( - np.abs(wlen / 2.0 / (d * (1.0 - strainMag))) < 1.0 - ) - - p['tThetasLo'][~mask] = np.nan - p['tThetasHi'][~mask] = np.nan - - p['tThetasLo'][mask] = ( - aconv * 2 * np.arcsin(wlen / 2.0 / (d[mask] * (1.0 + strainMag))) - ) - p['tThetasHi'][mask] = ( - aconv * 2 * np.arcsin(wlen / 2.0 / (d[mask] * (1.0 - strainMag))) - ) - - return p - - -def latticeVectors( - lparms: np.ndarray, - tag: Optional[str] = 'cubic', - radians: Optional[bool] = False, -) -> Dict[str, Union[np.ndarray, float]]: - """ - Generates direct and reciprocal lattice vector components in a - crystal-relative RHON basis, X. The convention for fixing X to the - lattice is such that a || x1 and c* || x3, where a and c* are - direct and reciprocal lattice vectors, respectively. - - USAGE: - - lattice = LatticeVectors(lparms, ) - - INPUTS: - - 1) lparms (1 x n float list) is the array of lattice parameters, - where n depends on the symmetry group (see below). - - 2) tag (string) is a case-insensitive string representing the - symmetry type of the implied Laue group. The 11 available choices - are shown below. The default value is 'cubic'. Note that each - group expects a lattice parameter array of the indicated length - and order. 
- - latticeType lparms - ----------- ------------ - 'cubic' a - 'hexagonal' a, c - 'trigonal' a, c - 'rhombohedral' a, alpha (in degrees) - 'tetragonal' a, c - 'orthorhombic' a, b, c - 'monoclinic' a, b, c, beta (in degrees) - 'triclinic' a, b, c, alpha, beta, gamma (in degrees) - - The following optional arguments are recognized: - - 3) radians= is a boolean flag indicating usage of radians rather - than degrees, defaults to false. - - OUTPUTS: - - 1) lattice is a dictionary containing the following keys/items: - - F (3, 3) double array transformation matrix taking - componenents in the direct - lattice (i.e. {uvw}) to the - reference, X - - B (3, 3) double array transformation matrix taking - componenents in the reciprocal - lattice (i.e. {hkl}) to X - - BR (3, 3) double array transformation matrix taking - componenents in the reciprocal - lattice to the Fable reference - frame (see notes) - - U0 (3, 3) double array transformation matrix - (orthogonal) taking - componenents in the - Fable reference frame to X - - vol double the unit cell volume - - - dparms (6, ) double list the direct lattice parameters: - [a b c alpha beta gamma] - - rparms (6, ) double list the reciprocal lattice - parameters: - [a* b* c* alpha* beta* gamma*] - - NOTES: - - *) The conventions used for assigning a RHON basis, - X -> {x1, x2, x3}, to each point group are consistent with - those published in Appendix B of [1]. Namely: a || x1 and - c* || x3. This differs from the convention chosen by the Fable - group, where a* || x1 and c || x3 [2]. - - *) The unit cell angles are defined as follows: - alpha=acos(b'*c/|b||c|), beta=acos(c'*a/|c||a|), and - gamma=acos(a'*b/|a||b|). - - *) The reciprocal lattice vectors are calculated using the - crystallographic convention, where the prefactor of 2*pi is - omitted. In this convention, the reciprocal lattice volume is - 1/V. - - *) Several relations from [3] were employed in the component - calculations. - - REFERENCES: - - [1] J. F. Nye, ``Physical Properties of Crystals: Their - Representation by Tensors and Matrices''. Oxford University - Press, 1985. ISBN 0198511655 - - [2] E. M. Lauridsen, S. Schmidt, R. M. Suter, and H. F. Poulsen, - ``Tracking: a method for structural characterization of grains - in powders or polycrystals''. J. Appl. Cryst. (2001). 34, - 744--750 - - [3] R. J. Neustadt, F. W. Cagle, Jr., and J. Waser, ``Vector - algebra and the relations between direct and reciprocal - lattice quantities''. Acta Cryst. 
(1968), A24, 247--248 - - - """ - - # build index for sorting out lattice parameters - lattStrings = [ - 'cubic', - 'hexagonal', - 'trigonal', - 'rhombohedral', - 'tetragonal', - 'orthorhombic', - 'monoclinic', - 'triclinic', - ] - - if radians: - aconv = 1.0 - else: - aconv = pi / 180.0 # degToRad - deg90 = pi / 2.0 - deg120 = 2.0 * pi / 3.0 - # - if tag == lattStrings[0]: - # cubic - cellparms = np.r_[np.tile(lparms[0], (3,)), deg90 * np.ones((3,))] - elif tag == lattStrings[1] or tag == lattStrings[2]: - # hexagonal | trigonal (hex indices) - cellparms = np.r_[ - lparms[0], lparms[0], lparms[1], deg90, deg90, deg120 - ] - elif tag == lattStrings[3]: - # rhombohedral - cellparms = np.r_[ - np.tile(lparms[0], (3,)), np.tile(aconv * lparms[1], (3,)) - ] - elif tag == lattStrings[4]: - # tetragonal - cellparms = np.r_[lparms[0], lparms[0], lparms[1], deg90, deg90, deg90] - elif tag == lattStrings[5]: - # orthorhombic - cellparms = np.r_[lparms[0], lparms[1], lparms[2], deg90, deg90, deg90] - elif tag == lattStrings[6]: - # monoclinic - cellparms = np.r_[ - lparms[0], lparms[1], lparms[2], deg90, aconv * lparms[3], deg90 - ] - elif tag == lattStrings[7]: - # triclinic - cellparms = np.r_[ - lparms[0], - lparms[1], - lparms[2], - aconv * lparms[3], - aconv * lparms[4], - aconv * lparms[5], - ] - else: - raise RuntimeError(f'lattice tag "{tag}" is not recognized') - - alpha, beta, gamma = cellparms[3:6] - cosalfar, sinalfar = cosineXform(alpha, beta, gamma) - - a = cellparms[0] * np.r_[1, 0, 0] - b = cellparms[1] * np.r_[np.cos(gamma), np.sin(gamma), 0] - c = ( - cellparms[2] - * np.r_[ - np.cos(beta), -cosalfar * np.sin(beta), sinalfar * np.sin(beta) - ] - ) - - ad = np.sqrt(np.sum(a**2)) - bd = np.sqrt(np.sum(b**2)) - cd = np.sqrt(np.sum(c**2)) - - # Cell volume - V = np.dot(a, np.cross(b, c)) - - # F takes components in the direct lattice to X - F = np.c_[a, b, c] - - # Reciprocal lattice vectors - astar = np.cross(b, c) / V - bstar = np.cross(c, a) / V - cstar = np.cross(a, b) / V - - # and parameters - ar = np.sqrt(np.sum(astar**2)) - br = np.sqrt(np.sum(bstar**2)) - cr = np.sqrt(np.sum(cstar**2)) - - alfar = np.arccos(np.dot(bstar, cstar) / br / cr) - betar = np.arccos(np.dot(cstar, astar) / cr / ar) - gamar = np.arccos(np.dot(astar, bstar) / ar / br) - - # B takes components in the reciprocal lattice to X - B = np.c_[astar, bstar, cstar] - - cosalfar2, sinalfar2 = cosineXform(alfar, betar, gamar) - - afable = ar * np.r_[1, 0, 0] - bfable = br * np.r_[np.cos(gamar), np.sin(gamar), 0] - cfable = ( - cr - * np.r_[ - np.cos(betar), - -cosalfar2 * np.sin(betar), - sinalfar2 * np.sin(betar), - ] - ) - - BR = np.c_[afable, bfable, cfable] - U0 = np.dot(B, np.linalg.inv(BR)) - if outputDegrees: - dparms = np.r_[ad, bd, cd, r2d * np.r_[alpha, beta, gamma]] - rparms = np.r_[ar, br, cr, r2d * np.r_[alfar, betar, gamar]] - else: - dparms = np.r_[ad, bd, cd, np.r_[alpha, beta, gamma]] - rparms = np.r_[ar, br, cr, np.r_[alfar, betar, gamar]] - - return { - 'F': F, - 'B': B, - 'BR': BR, - 'U0': U0, - 'vol': V, - 'dparms': dparms, - 'rparms': rparms, - } - - -def hexagonalIndicesFromRhombohedral(hkl): - """ - converts rhombohedral hkl to hexagonal indices - """ - HKL = np.zeros((3, hkl.shape[1]), dtype='int') - - HKL[0, :] = hkl[0, :] - hkl[1, :] - HKL[1, :] = hkl[1, :] - hkl[2, :] - HKL[2, :] = hkl[0, :] + hkl[1, :] + hkl[2, :] - - return HKL - - -def rhombohedralIndicesFromHexagonal(HKL): - """ - converts hexagonal hkl to rhombohedral indices - """ - hkl = np.zeros((3, HKL.shape[1]), dtype='int') - - 
hkl[0, :] = 2 * HKL[0, :] + HKL[1, :] + HKL[2, :] - hkl[1, :] = -HKL[0, :] + HKL[1, :] + HKL[2, :] - hkl[2, :] = -HKL[0, :] - 2 * HKL[1, :] + HKL[2, :] - - hkl = hkl / 3.0 - return hkl - - -def rhombohedralParametersFromHexagonal(a_h, c_h): - """ - converts hexagonal lattice parameters (a, c) to rhombohedral - lattice parameters (a, alpha) - """ - a_r = np.sqrt(3 * a_h**2 + c_h**2) / 3.0 - alfa_r = 2 * np.arcsin(3.0 / (2 * np.sqrt(3 + (c_h / a_h) ** 2))) - if outputDegrees: - alfa_r = r2d * alfa_r - return a_r, alfa_r - - -def convert_Miller_direction_to_cartesian(uvw, a=1.0, c=1.0, normalize=False): - """ - Converts 3-index hexagonal Miller direction indices to components in the - crystal reference frame. - Parameters - ---------- - uvw : array_like - The (n, 3) array of 3-index hexagonal indices to convert. - a : scalar, optional - The `a` lattice parameter. The default value is 1. - c : scalar, optional - The `c` lattice parameter. The default value is 1. - normalize : bool, optional - Flag for whether or not to normalize output vectors - Returns - ------- - numpy.ndarray - The (n, 3) array of cartesian components associated with the input - direction indices. - Notes - ----- - 1) The [uv.w] the Miller-Bravais convention is in the hexagonal basis - {a1, a2, a3, c}. The basis for the output, {o1, o2, o3}, is - chosen such that - o1 || a1 - o3 || c - o2 = o3 ^ o1 - """ - u, v, w = np.atleast_2d(uvw).T - retval = np.vstack([1.5 * u * a, sqrt3by2 * a * (2 * v + u), w * c]) - if normalize: - return unitVector(retval).T - else: - return retval.T - - -def convert_Miller_direction_to_MillerBravias(uvw, suppress_redundant=True): - """ - Converts 3-index hexagonal Miller direction indices to 4-index - Miller-Bravais direction indices. - Parameters - ---------- - uvw : array_like - The (n, 3) array of 3-index hexagonal Miller indices to convert. - suppress_redundant : bool, optional - Flag to suppress the redundant 3rd index. The default is True. - Returns - ------- - numpy.ndarray - The (n, 3) or (n, 4) array -- depending on kwarg -- of Miller-Bravis - components associated with the input Miller direction indices. - Notes - ----- - * NOT for plane normals!!! - """ - u, v, w = np.atleast_2d(uvw).T - retval = np.vstack([(2 * u - v) / 3, (2 * v - u) / 3, w]).T - rem = np.vstack([np.mod(np.tile(i[0], 2), i[1:]) for i in retval]) - rem[abs(rem) < epsf] = np.nan - lcm = np.nanmin(rem, axis=1) - lcm[np.isnan(lcm)] = 1 - retval = retval / np.tile(lcm, (3, 1)).T - if suppress_redundant: - return retval - else: - t = np.atleast_2d(1 - np.sum(retval[:2], axis=1)).T - return np.hstack([retval[:, :2], t, np.atleast_2d(retval[:, 2]).T]) - - -def convert_MillerBravias_direction_to_Miller(UVW): - """ - Converts 4-index hexagonal Miller-Bravais direction indices to - 3-index Miller direction indices. - Parameters - ---------- - UVW : array_like - The (n, 3) array of **non-redundant** Miller-Bravais direction indices - to convert. - Returns - ------- - numpy.ndarray - The (n, 3) array of Miller direction indices associated with the - input Miller-Bravais indices. - Notes - ----- - * NOT for plane normals!!! 
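The two direction-index conversions above invert each other up to an overall scale; a self-contained round-trip check using the same relations (helper names here are illustrative, not the module's API):

    import numpy as np

    def miller_to_miller_bravais(uvw):
        # U = (2u - v)/3, V = (2v - u)/3, W = w, as in the function above
        u, v, w = uvw
        return np.array([(2 * u - v) / 3.0, (2 * v - u) / 3.0, w])

    def miller_bravais_to_miller(UVW):
        # u = 2U + V, v = 2V + U, w = W, as in the function above
        U, V, W = UVW
        return np.array([2 * U + V, 2 * V + U, W])

    uvw = np.array([1.0, 0.0, 0.0])         # hexagonal [100]
    UVW = miller_to_miller_bravais(uvw)     # -> [2/3, -1/3, 0]
    assert np.allclose(miller_bravais_to_miller(UVW), uvw)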
- """ - U, V, W = np.atleast_2d(UVW).T - return np.vstack([2 * U + V, 2 * V + U, W]) - - -class PlaneData(object): - """ - Careful with ordering: Outputs are ordered by the 2-theta for the - hkl unless you get self._hkls directly, and this order can change - with changes in lattice parameters (lparms); setting and getting - exclusions works on the current hkl ordering, not the original - ordering (in self._hkls), but exclusions are stored in the - original ordering in case the hkl ordering does change with - lattice parameters - - if not None, tThWidth takes priority over strainMag in setting - two-theta ranges; changing strainMag automatically turns off - tThWidth - """ - - def __init__(self, hkls: Optional[np.ndarray], *args, **kwargs) -> None: - """ - Constructor for PlaneData - - Parameters - ---------- - hkls : np.ndarray - Miller indices to be used in the plane data. Can be None if - args is another PlaneData object - - *args - Unnamed arguments. Could be in the format of `lparms, laueGroup, - wavelength, strainMag`, or just a `PlaneData` object. - - **kwargs - Valid keyword arguments include: - - doTThSort - - exclusions - - tThMax - - tThWidth - """ - self._doTThSort = True - self._exclusions = None - self._tThMax = None - - if len(args) == 4: - lparms, laueGroup, wavelength, strainMag = args - tThWidth = None - self._wavelength = processWavelength(wavelength) - self._lparms = self._parseLParms(lparms) - elif len(args) == 1 and isinstance(args[0], PlaneData): - other = args[0] - lparms, laueGroup, wavelength, strainMag, tThWidth = ( - other.getParams() - ) - self._wavelength = wavelength - self._lparms = lparms - self._doTThSort = other._doTThSort - self._exclusions = other._exclusions - self._tThMax = other._tThMax - if hkls is None: - hkls = other._hkls - else: - raise NotImplementedError(f'args : {args}') - - self._laueGroup = laueGroup - self._hkls = copy.deepcopy(hkls) - self._strainMag = strainMag - self._structFact = np.ones(self._hkls.shape[1]) - self.tThWidth = tThWidth - - # ... 
need to implement tThMin too - if 'doTThSort' in kwargs: - self._doTThSort = kwargs.pop('doTThSort') - if 'exclusions' in kwargs: - self._exclusions = kwargs.pop('exclusions') - if 'tThMax' in kwargs: - self._tThMax = toFloat(kwargs.pop('tThMax'), 'radians') - if 'tThWidth' in kwargs: - self.tThWidth = kwargs.pop('tThWidth') - if len(kwargs) > 0: - raise RuntimeError( - f'have unparsed keyword arguments with keys: {kwargs.keys()}' - ) - - # This is only used to calculate the structure factor if invalidated - self._unitcell: unitcell = None - - self._calc() - - def _calc(self): - symmGroup = ltypeOfLaueGroup(self._laueGroup) - self._q_sym = quatOfLaueGroup(self._laueGroup) - _, latVecOps, hklDataList = PlaneData.makePlaneData( - self._hkls, - self._lparms, - self._q_sym, - symmGroup, - self._strainMag, - self.wavelength, - ) - 'sort by tTheta' - tThs = np.array( - [hklDataList[iHKL]['tTheta'] for iHKL in range(len(hklDataList))] - ) - if self._doTThSort: - # sorted hkl -> _hkl - # _hkl -> sorted hkl - self.tThSort = np.argsort(tThs) - self.tThSortInv = np.empty(len(hklDataList), dtype=int) - self.tThSortInv[self.tThSort] = np.arange(len(hklDataList)) - self.hklDataList = [hklDataList[iHKL] for iHKL in self.tThSort] - else: - self.tThSort = np.arange(len(hklDataList)) - self.tThSortInv = np.arange(len(hklDataList)) - self.hklDataList = hklDataList - self._latVecOps = latVecOps - self.nHKLs = len(self.getHKLs()) - - def __str__(self): - s = '========== plane data ==========\n' - s += 'lattice parameters:\n ' + str(self.lparms) + '\n' - s += f'two theta width: ({str(self.tThWidth)})\n' - s += f'strain magnitude: ({str(self.strainMag)})\n' - s += f'beam energy ({str(self.wavelength)})\n' - s += 'hkls: (%d)\n' % self.nHKLs - s += str(self.getHKLs()) - return s - - def getParams(self): - """ - Getter for the parameters of the plane data. - - Returns - ------- - tuple - The parameters of the plane data. In the order of - _lparams, _laueGroup, _wavelength, _strainMag, tThWidth - - """ - return ( - self._lparms, - self._laueGroup, - self._wavelength, - self._strainMag, - self.tThWidth, - ) - - def getNhklRef(self) -> int: - """ - Get the total number of hkl's in the plane data, not ignoring - ones that are excluded in exclusions. - - Returns - ------- - int - The total number of hkl's in the plane data. - """ - return len(self.hklDataList) - - @property - def hkls(self) -> np.ndarray: - """ - hStacked Hkls of the plane data (Miller indices). - """ - return self.getHKLs().T - - @hkls.setter - def hkls(self, hkls): - raise NotImplementedError('for now, not allowing hkls to be reset') - - @property - def tThMax(self) -> Optional[float]: - """ - Maximum 2-theta value of the plane data. - - float or None - """ - return self._tThMax - - @tThMax.setter - def tThMax(self, t_th_max: Union[float, valunits.valWUnit]) -> None: - self._tThMax = toFloat(t_th_max, 'radians') - - @property - def exclusions(self) -> np.ndarray: - """ - Excluded HKL's the plane data. - - Set as type np.ndarray, as a mask of length getNhklRef(), a list of - indices to be excluded, or a list of ranges of indices. - - Read as a mask of length getNhklRef(). 
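The tThSort/tThSortInv bookkeeping that _calc() sets up above is just an argsort and its inverse permutation; a small sketch with made-up two-theta values:

    import numpy as np

    tths = np.array([0.8, 0.3, 0.5])             # illustrative two-theta values
    tThSort = np.argsort(tths)                   # sorted position -> original index
    tThSortInv = np.empty_like(tThSort)
    tThSortInv[tThSort] = np.arange(len(tths))   # original index -> sorted position

    stored = np.array(['a', 'b', 'c'])           # per-hkl data in original order
    assert list(stored[tThSort]) == ['b', 'c', 'a']      # data in two-theta order
    assert (tThSort[tThSortInv] == np.arange(3)).all()   # inverse permutation check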
- """ - retval = np.zeros(self.getNhklRef(), dtype=bool) - if self._exclusions is not None: - # report in current hkl ordering - retval[:] = self._exclusions[self.tThSortInv] - if self._tThMax is not None: - for iHKLr, hklData in enumerate(self.hklDataList): - if hklData['tTheta'] > self._tThMax: - retval[iHKLr] = True - return retval - - @exclusions.setter - def exclusions(self, new_exclusions: Optional[np.ndarray]) -> None: - excl = np.zeros(len(self.hklDataList), dtype=bool) - if new_exclusions is not None: - exclusions = np.atleast_1d(new_exclusions) - if len(exclusions) == len(self.hklDataList): - assert ( - exclusions.dtype == 'bool' - ), 'Exclusions should be bool if full length' - # convert from current hkl ordering to _hkl ordering - excl[:] = exclusions[self.tThSort] - else: - if len(exclusions.shape) == 1: - # treat exclusions as indices - excl[self.tThSort[exclusions]] = True - elif len(exclusions.shape) == 2: - # treat exclusions as ranges of indices - for r in exclusions: - excl[self.tThSort[r[0] : r[1]]] = True - else: - raise RuntimeError( - f'Unclear behavior for shape {exclusions.shape}' - ) - self._exclusions = excl - self.nHKLs = np.sum(np.logical_not(self._exclusions)) - - def exclude( - self, - dmin: Optional[float] = None, - dmax: Optional[float] = None, - tthmin: Optional[float] = None, - tthmax: Optional[float] = None, - sfacmin: Optional[float] = None, - sfacmax: Optional[float] = None, - pintmin: Optional[float] = None, - pintmax: Optional[float] = None, - ) -> None: - """ - Set exclusions according to various parameters - - Any hkl with a value below any min or above any max will be excluded. So - to be included, an hkl needs to have values between the min and max - for all of the conditions given. - - Note that method resets the tThMax attribute to None. 
- - PARAMETERS - ---------- - dmin: float > 0 - minimum lattice spacing (angstroms) - dmax: float > 0 - maximum lattice spacing (angstroms) - tthmin: float > 0 - minimum two theta (radians) - tthmax: float > 0 - maximum two theta (radians) - sfacmin: float > 0 - minimum structure factor as a proportion of maximum - sfacmax: float > 0 - maximum structure factor as a proportion of maximum - pintmin: float > 0 - minimum powder intensity as a proportion of maximum - pintmax: float > 0 - maximum powder intensity as a proportion of maximum - """ - excl = np.zeros(self.getNhklRef(), dtype=bool) - self.exclusions = None - self.tThMax = None - - if (dmin is not None) or (dmax is not None): - d = np.array(self.getPlaneSpacings()) - if dmin is not None: - excl[d < dmin] = True - if dmax is not None: - excl[d > dmax] = True - - if (tthmin is not None) or (tthmax is not None): - tth = self.getTTh() - if tthmin is not None: - excl[tth < tthmin] = True - if tthmax is not None: - excl[tth > tthmax] = True - - if (sfacmin is not None) or (sfacmax is not None): - sfac = self.structFact - sfac = sfac / sfac.max() - if sfacmin is not None: - excl[sfac < sfacmin] = True - if sfacmax is not None: - excl[sfac > sfacmax] = True - - if (pintmin is not None) or (pintmax is not None): - pint = self.powder_intensity - pint = pint / pint.max() - if pintmin is not None: - excl[pint < pintmin] = True - if pintmax is not None: - excl[pint > pintmax] = True - - self.exclusions = excl - - def _parseLParms( - self, lparms: List[Union[valunits.valWUnit, float]] - ) -> List[float]: - lparmsDUnit = [] - for lparmThis in lparms: - if isinstance(lparmThis, valunits.valWUnit): - if lparmThis.isLength(): - lparmsDUnit.append(lparmThis.getVal(dUnit)) - elif lparmThis.isAngle(): - # plumbing set up to default to degrees - # for lattice parameters - lparmsDUnit.append(lparmThis.getVal('degrees')) - else: - raise RuntimeError( - f'Do not know what to do with {lparmThis}' - ) - else: - lparmsDUnit.append(lparmThis) - return lparmsDUnit - - @property - def lparms(self) -> List[float]: - """ - Lattice parameters of the plane data. - - Can be set as a List[float | valWUnit], but will be converted to - List[float]. - """ - return self._lparms - - @lparms.setter - def lparms(self, lparms: List[Union[valunits.valWUnit, float]]) -> None: - self._lparms = self._parseLParms(lparms) - self._calc() - - @property - def strainMag(self) -> Optional[float]: - """ - Strain magnitude of the plane data. - - float or None - """ - return self._strainMag - - @strainMag.setter - def strainMag(self, strain_mag: float) -> None: - self._strainMag = strain_mag - self.tThWidth = None - self._calc() - - @property - def wavelength(self) -> float: - """ - Wavelength of the plane data. - - Set as float or valWUnit. - - Read as float - """ - return self._wavelength - - @wavelength.setter - def wavelength(self, wavelength: Union[float, valunits.valWUnit]) -> None: - wavelength = processWavelength(wavelength) - # Do not re-compute if it is almost the same - if np.isclose(self._wavelength, wavelength): - return - - self._wavelength = wavelength - self._calc() - - def invalidate_structure_factor(self, ucell: unitcell) -> None: - """ - It can be expensive to compute the structure factor - This method just invalidates it, providing a unit cell, - so that it can be lazily computed from the unit cell. 
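The invalidate-then-lazily-recompute pattern introduced here is worth seeing in isolation; a minimal sketch (class and helper names are illustrative, not hexrd API):

    def expensive_compute(source):
        # stand-in for unitcell.CalcXRSF; illustrative only
        return sum(source)

    class LazyFactor:
        def __init__(self):
            self._value = 1.0
            self._source = None

        def invalidate(self, source):
            self._value = None       # drop the cached result...
            self._source = source    # ...but keep what is needed to rebuild it

        @property
        def value(self):
            if self._value is None and self._source is not None:
                self._value = expensive_compute(self._source)
            return self._value

    lf = LazyFactor()
    lf.invalidate([1, 2, 3])
    assert lf.value == 6             # computed on first read, then cached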
- - Parameters: - ----------- - unitcell : unitcell - The unit cell to be used to compute the structure factor - """ - self._structFact = None - self._hedm_intensity = None - self._powder_intensity = None - self._unitcell = ucell - - def _compute_sf_if_needed(self): - any_invalid = ( - self._structFact is None - or self._hedm_intensity is None - or self._powder_intensity is None - ) - if any_invalid and self._unitcell is not None: - # Compute the structure factor first. - # This can be expensive to do, so we lazily compute it when needed. - hkls = self.getHKLs(allHKLs=True) - self.structFact = self._unitcell.CalcXRSF(hkls) - - @property - def structFact(self) -> np.ndarray: - """ - Structure factors for each hkl. - - np.ndarray - """ - self._compute_sf_if_needed() - return self._structFact[~self.exclusions] - - @structFact.setter - def structFact(self, structFact: np.ndarray) -> None: - self._structFact = structFact - multiplicity = self.getMultiplicity(allHKLs=True) - tth = self.getTTh(allHKLs=True) - - hedm_intensity = ( - structFact * lorentz_factor(tth) * polarization_factor(tth) - ) - - powderI = hedm_intensity * multiplicity - - # Now scale them - hedm_intensity = 100.0 * hedm_intensity / np.nanmax(hedm_intensity) - powderI = 100.0 * powderI / np.nanmax(powderI) - - self._hedm_intensity = hedm_intensity - self._powder_intensity = powderI - - @property - def powder_intensity(self) -> np.ndarray: - """ - Powder intensity for each hkl. - """ - self._compute_sf_if_needed() - return self._powder_intensity[~self.exclusions] - - @property - def hedm_intensity(self) -> np.ndarray: - """ - HEDM (high energy x-ray diffraction microscopy) intensity for each hkl. - """ - self._compute_sf_if_needed() - return self._hedm_intensity[~self.exclusions] - - @staticmethod - def makePlaneData( - hkls: np.ndarray, - lparms: np.ndarray, - qsym: np.ndarray, - symmGroup, - strainMag, - wavelength, - ) -> Tuple[ - Dict[str, np.ndarray], Dict[str, Union[np.ndarray, float]], List[Dict] - ]: - """ - Generate lattice plane data from inputs. - - Parameters: - ----------- - hkls: np.ndarray - Miller indices, as in crystallography.latticePlanes - lparms: np.ndarray - Lattice parameters, as in crystallography.latticePlanes - qsym: np.ndarray - (4, n) containing quaternions of symmetry - symmGroup: str - Tag for the symmetry (Laue) group of the lattice. Can generate from - ltypeOfLaueGroup - strainMag: float - Swag of strain magnitudes - wavelength: float - Wavelength - - Returns: - ------- - dict: - Dictionary containing lattice plane data - dict: - Dictionary containing lattice vector operators - list: - List of dictionaries, each containing the data for one hkl - """ - - tempSetOutputDegrees(False) - latPlaneData = latticePlanes( - hkls, - lparms, - ltype=symmGroup, - strainMag=strainMag, - wavelength=wavelength, - ) - - latVecOps = latticeVectors(lparms, symmGroup) - - hklDataList = [] - for iHKL in range(len(hkls.T)): - # need transpose because of convention for hkls ordering - - """ - latVec = latPlaneData['normals'][:,iHKL] - # ... 
if not spots, may be able to work with a subset of these - latPlnNrmlList = applySym( - np.c_[latVec], qsym, csFlag=True, cullPM=False - ) - """ - # returns UN-NORMALIZED lattice plane normals - latPlnNrmls = applySym( - np.dot(latVecOps['B'], hkls[:, iHKL].reshape(3, 1)), - qsym, - csFlag=True, - cullPM=False, - ) - - # check for +/- in symmetry group - latPlnNrmlsM = applySym( - np.dot(latVecOps['B'], hkls[:, iHKL].reshape(3, 1)), - qsym, - csFlag=False, - cullPM=False, - ) - - csRefl = latPlnNrmls.shape[1] == latPlnNrmlsM.shape[1] - - # added this so that I retain the actual symmetric - # integer hkls as well - symHKLs = np.array( - np.round(np.dot(latVecOps['F'].T, latPlnNrmls)), dtype='int' - ) - - hklDataList.append( - dict( - hklID=iHKL, - hkl=hkls[:, iHKL], - tTheta=latPlaneData['tThetas'][iHKL], - dSpacings=latPlaneData['dspacings'][iHKL], - tThetaLo=latPlaneData['tThetasLo'][iHKL], - tThetaHi=latPlaneData['tThetasHi'][iHKL], - latPlnNrmls=unitVector(latPlnNrmls), - symHKLs=symHKLs, - centrosym=csRefl, - ) - ) - - revertOutputDegrees() - return latPlaneData, latVecOps, hklDataList - - @property - def laueGroup(self) -> str: - """ - This is the Schoenflies tag, describing symmetry group of the lattice. - Note that setting this with incompatible lattice parameters will - cause an error. If changing both, use set_laue_and_lparms. - - str - """ - return self._laueGroup - - @laueGroup.setter - def laueGroup(self, laueGroup: str) -> None: - self._laueGroup = laueGroup - self._calc() - - def set_laue_and_lparms( - self, laueGroup: str, lparms: List[Union[valunits.valWUnit, float]] - ) -> None: - """ - Set the Laue group and lattice parameters simultaneously - - When the Laue group changes, the lattice parameters may be - incompatible, and cause an error in self._calc(). This function - allows us to update both the Laue group and lattice parameters - simultaneously to avoid this issue. - - Parameters: - ----------- - laueGroup : str - The symmetry (Laue) group to be set - lparms : List[valunits.valWUnit | float] - Lattice parameters to be set - """ - self._laueGroup = laueGroup - self._lparms = self._parseLParms(lparms) - self._calc() - - @property - def q_sym(self) -> np.ndarray: - """ - Quaternions of symmetry for each hkl, generated from the Laue group - - np.ndarray((4, n)) - """ - return self._q_sym # rotations.quatOfLaueGroup(self._laueGroup) - - def getPlaneSpacings(self) -> List[float]: - """ - Plane spacings for each hkl. 
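For intuition about the symHKLs arrays produced by makePlaneData above: for the cubic Laue group, the symmetric equivalents of an hkl are its signed permutations. A brute-force stand-in follows (the module itself does this with quaternion symmetry operators via applySym; this sketch is illustrative only):

    import numpy as np
    from itertools import permutations, product

    def cubic_equivalents(hkl):
        eqv = set()
        for p in permutations(hkl):
            for signs in product((-1, 1), repeat=3):
                eqv.add(tuple(s * x for s, x in zip(signs, p)))
        return np.array(sorted(eqv)).T       # (3, m): columns are equivalents

    assert cubic_equivalents((1, 0, 0)).shape[1] == 6    # {100} multiplicity
    assert cubic_equivalents((1, 1, 1)).shape[1] == 8    # {111} multiplicity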
- - Returns: - ------- - List[float] - List of plane spacings for each hkl - """ - dspacings = [] - for iHKLr, hklData in enumerate(self.hklDataList): - if not self._thisHKL(iHKLr): - continue - dspacings.append(hklData['dSpacings']) - return dspacings - - @property - def latVecOps(self) -> Dict[str, Union[np.ndarray, float]]: - """ - gets lattice vector operators as a new (deepcopy) - - Returns: - ------- - Dict[str, np.ndarray | float] - Dictionary containing lattice vector operators - """ - return copy.deepcopy(self._latVecOps) - - def _thisHKL(self, iHKLr: int) -> bool: - hklData = self.hklDataList[iHKLr] - if self._exclusions is not None: - if self._exclusions[self.tThSortInv[iHKLr]]: - return False - if self._tThMax is not None: - if hklData['tTheta'] > self._tThMax or np.isnan(hklData['tTheta']): - return False - return True - - def _getTThRange(self, iHKLr: int) -> Tuple[float, float]: - hklData = self.hklDataList[iHKLr] - if self.tThWidth is not None: # tThHi-tThLo < self.tThWidth - tTh = hklData['tTheta'] - tThHi = tTh + self.tThWidth * 0.5 - tThLo = tTh - self.tThWidth * 0.5 - else: - tThHi = hklData['tThetaHi'] - tThLo = hklData['tThetaLo'] - return (tThLo, tThHi) - - def getTThRanges(self, strainMag: Optional[float] = None) -> np.ndarray: - """ - Get the 2-theta ranges for included hkls - - Parameters: - ----------- - strainMag : Optional[float] - Optional swag of strain magnitude - - Returns: - ------- - np.ndarray: - hstacked array of hstacked tThLo and tThHi for each hkl (n x 2) - """ - tThRanges = [] - for iHKLr, hklData in enumerate(self.hklDataList): - if not self._thisHKL(iHKLr): - continue - if strainMag is None: - tThRanges.append(self._getTThRange(iHKLr)) - else: - hklData = self.hklDataList[iHKLr] - d = hklData['dSpacings'] - tThLo = 2.0 * np.arcsin( - self._wavelength / 2.0 / (d * (1.0 + strainMag)) - ) - tThHi = 2.0 * np.arcsin( - self._wavelength / 2.0 / (d * (1.0 - strainMag)) - ) - tThRanges.append((tThLo, tThHi)) - return np.array(tThRanges) - - def getMergedRanges( - self, cullDupl: Optional[bool] = False - ) -> Tuple[List[List[int]], List[List[float]]]: - """ - Return indices and ranges for specified planeData, merging where - there is overlap based on the tThWidth and line positions - - Parameters: - ----------- - cullDupl : (optional) bool - If True, cull duplicate 2-theta values (within sqrt_epsf). Defaults - to False. - - Returns: - -------- - List[List[int]] - List of indices for each merged range - - List[List[float]] - List of merged ranges, (n x 2) - """ - tThs = self.getTTh() - tThRanges = self.getTThRanges() - - # if you end exlcusions in a doublet (or multiple close rings) - # then this will 'fail'. May need to revisit... - nonoverlapNexts = np.hstack( - (tThRanges[:-1, 1] < tThRanges[1:, 0], True) - ) - iHKLLists = [] - mergedRanges = [] - hklsCur = [] - tThLoIdx = 0 - tThHiCur = 0.0 - for iHKL, nonoverlapNext in enumerate(nonoverlapNexts): - tThHi = tThRanges[iHKL, -1] - if not nonoverlapNext: - if cullDupl and abs(tThs[iHKL] - tThs[iHKL + 1]) < sqrt_epsf: - continue - else: - hklsCur.append(iHKL) - tThHiCur = tThHi - else: - hklsCur.append(iHKL) - tThHiCur = tThHi - iHKLLists.append(hklsCur) - mergedRanges.append([tThRanges[tThLoIdx, 0], tThHiCur]) - tThLoIdx = iHKL + 1 - hklsCur = [] - return iHKLLists, mergedRanges - - def getTTh(self, allHKLs: Optional[bool] = False) -> np.ndarray: - """ - Get the 2-theta values for each hkl. 
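The merging logic in getMergedRanges() above reduces to the classic sorted-interval merge; a sketch without the hkl-index tracking or duplicate culling (assumes the ranges are already sorted by lower bound, as they are after the two-theta sort):

    import numpy as np

    def merge_ranges(ranges):
        merged = [list(ranges[0])]
        for lo, hi in ranges[1:]:
            if lo < merged[-1][1]:               # overlaps the current window
                merged[-1][1] = max(merged[-1][1], hi)
            else:
                merged.append([lo, hi])
        return merged

    tth_ranges = np.array([[0.10, 0.14], [0.13, 0.18], [0.30, 0.33]])
    assert merge_ranges(tth_ranges) == [[0.10, 0.18], [0.30, 0.33]]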
- - Parameters: - ----------- - allHKLs : (optional) bool - If True, return all 2-theta values, even if they are excluded in - the current planeData. Default is False. - - Returns: - ------- - np.ndarray - Array of 2-theta values for each hkl - """ - tTh = [] - for iHKLr, hklData in enumerate(self.hklDataList): - if not allHKLs and not self._thisHKL(iHKLr): - continue - tTh.append(hklData['tTheta']) - return np.array(tTh) - - def getMultiplicity(self, allHKLs: Optional[bool] = False) -> np.ndarray: - """ - Get the multiplicity for each hkl (number of symHKLs). - - Parameters: - ---------- - allHKLs : (optional) bool - If True, return all multiplicities, even if they are excluded in - the current planeData. Defaults to False. - - Returns - ------- - np.ndarray - Array of multiplicities for each hkl - """ - # ... JVB: is this incorrect? - multip = [] - for iHKLr, hklData in enumerate(self.hklDataList): - if allHKLs or self._thisHKL(iHKLr): - multip.append(hklData['symHKLs'].shape[1]) - return np.array(multip) - - def getHKLID( - self, - hkl: Union[int, Tuple[int, int, int], np.ndarray], - master: Optional[bool] = False, - ) -> Union[List[int], int]: - """ - Return the unique ID of a list of hkls. - - Parameters - ---------- - hkl : int | tuple | list | numpy.ndarray - The input hkl. If an int, or a list of ints, it just passes - through (FIXME). - If a tuple, treated as a single (h, k, l). - If a list of lists/tuples, each is treated as an (h, k, l). - If a numpy.ndarray, it is assumed to have shape (3, N) with the - N (h, k, l) vectors stacked column-wise - - master : bool, optional - If True, return the master hklID, else return the index from the - external (sorted and reduced) list. - - Returns - ------- - hkl_ids : list - The list of requested hklID values associated with the input. - - Notes - ----- - TODO: revisit this weird API??? - - Changes: - ------- - 2020-05-21 (JVB) -- modified to handle all symmetric equivalent reprs. - """ - if hasattr(hkl, '__setitem__'): # tuple does not have __setitem__ - if isinstance(hkl, np.ndarray): - # if is ndarray, assume is 3xN - return [self._getHKLID(x, master=master) for x in hkl.T] - else: - return [self._getHKLID(x, master=master) for x in hkl] - else: - return self._getHKLID(hkl, master=master) - - def _getHKLID( - self, - hkl: Union[int, Tuple[int, int, int], np.ndarray], - master: Optional[bool] = False, - ) -> int: - """ - for hkl that is a tuple, return externally visible hkl index - """ - if isinstance(hkl, int): - return hkl - else: - hklList = self.getSymHKLs() # !!! list, reduced by exclusions - intl_hklIDs = np.asarray([i['hklID'] for i in self.hklDataList]) - intl_hklIDs_sorted = intl_hklIDs[~self.exclusions[self.tThSortInv]] - dHKLInv = {} - for iHKL, symHKLs in enumerate(hklList): - idx = intl_hklIDs_sorted[iHKL] if master else iHKL - for thisHKL in symHKLs.T: - dHKLInv[tuple(thisHKL)] = idx - try: - return dHKLInv[tuple(hkl)] - except KeyError: - raise RuntimeError( - f"hkl '{tuple(hkl)}' is not present in this material!" - ) - - def getHKLs(self, *hkl_ids: int, **kwargs) -> Union[List[str], np.ndarray]: - """ - Returns the powder HKLs subject to specified options. - - Parameters - ---------- - *hkl_ids : int - Optional list of specific master hklIDs. - **kwargs : dict - One or more of the following keyword arguments: - asStr : bool - If True, return a list of strings. The default is False. - thisTTh : scalar | None - If not None, only return hkls overlapping the specified - 2-theta (in radians). The default is None.
- allHKLs : bool - If True, then ignore exclusions. The default is False. - - Raises - ------ - TypeError - If an unknown kwarg is passed. - RuntimeError - If an invalid hklID is passed. - - Returns - ------- - hkls : list | numpy.ndarray - Either a list of hkls as strings (if asStr=True) or a vstacked - array of hkls. - - Notes - ----- - !!! the shape of the return value when asStr=False is the _transpose_ - of the typical return value for self.get_hkls() and self.hkls! - This _may_ change to avoid confusion, but going to leave it for - now so as not to break anything. - - 2022/08/05 JVB: - - Added functionality to handle optional hklID args - - Updated docstring - """ - # kwarg parsing - opts = dict(asStr=False, thisTTh=None, allHKLs=False) - if len(kwargs) > 0: - # check keys - for k, v in kwargs.items(): - if k not in opts: - raise TypeError( - f"getHKLs() got an unexpected keyword argument '{k}'" - ) - opts.update(kwargs) - - hkls = [] - if len(hkl_ids) == 0: - for iHKLr, hklData in enumerate(self.hklDataList): - if not opts['allHKLs']: - if not self._thisHKL(iHKLr): - continue - if opts['thisTTh'] is not None: - tThLo, tThHi = self._getTThRange(iHKLr) - if opts['thisTTh'] < tThHi and opts['thisTTh'] > tThLo: - hkls.append(hklData['hkl']) - else: - hkls.append(hklData['hkl']) - else: - # !!! changing behavior here; if the hkl_id is invalid, raises - # RuntimeError, and if allHKLs=True and the hkl_id is - # excluded, it also raises a RuntimeError - all_hkl_ids = np.asarray([i['hklID'] for i in self.hklDataList]) - sorted_excl = self.exclusions[self.tThSortInv] - idx = np.zeros(len(self.hklDataList), dtype=int) - for i, hkl_id in enumerate(hkl_ids): - # find ordinal index of current hklID - try: - idx[i] = int(np.where(all_hkl_ids == hkl_id)[0]) - except TypeError: - raise RuntimeError( - f"Requested hklID '{hkl_id}' is invalid!" - ) - if sorted_excl[idx[i]] and not opts['allHKLs']: - raise RuntimeError( - f"Requested hklID '{hkl_id}' is excluded!" - ) - hkls.append(self.hklDataList[idx[i]]['hkl']) - - # handle output kwarg - if opts['asStr']: - return list(map(hklToStr, np.array(hkls))) - else: - return np.array(hkls) - - def getSymHKLs( - self, - asStr: Optional[bool] = False, - withID: Optional[bool] = False, - indices: Optional[List[int]] = None, - ) -> Union[List[List[str]], List[np.ndarray]]: - """ - Return all symmetry HKLs. - - Parameters - ---------- - asStr : bool, optional - If True, return the symmetry HKLs as strings. The default is False. - withID : bool, optional - If True, return the symmetry HKLs with the hklID. The default is - False. Does nothing if asStr is True. - indices : list[int], optional - Optional list of indices of hkls to include. - - Returns - ------- - sym_hkls : list of lists of strings, or list of numpy.ndarray - List of symmetry HKLs for each HKL, either as strings or as a - vstacked array.
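The reverse lookup that _getHKLID() builds above maps every symmetric equivalent back to the index of its hkl family; stripped to its essentials (values illustrative):

    import numpy as np

    sym_hkls = [np.array([[1, -1], [0, 0], [0, 0]]),     # family 0: +/-(100)
                np.array([[1, -1], [1, -1], [0, 0]])]    # family 1: +/-(110)

    lookup = {}
    for idx, family in enumerate(sym_hkls):
        for hkl in family.T:                 # columns are the equivalents
            lookup[tuple(hkl)] = idx

    assert lookup[(-1, 0, 0)] == 0
    assert lookup[(1, 1, 0)] == 1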
- """ - sym_hkls = [] - hkl_index = 0 - if indices is not None: - indB = np.zeros(self.nHKLs, dtype=bool) - indB[np.array(indices)] = True - else: - indB = np.ones(self.nHKLs, dtype=bool) - for iHKLr, hklData in enumerate(self.hklDataList): - if not self._thisHKL(iHKLr): - continue - if indB[hkl_index]: - hkls = hklData['symHKLs'] - if asStr: - sym_hkls.append(list(map(hklToStr, np.array(hkls).T))) - elif withID: - sym_hkls.append( - np.vstack( - [ - np.tile(hklData['hklID'], (1, hkls.shape[1])), - hkls, - ] - ) - ) - else: - sym_hkls.append(np.array(hkls)) - hkl_index += 1 - return sym_hkls - - @staticmethod - def makeScatteringVectors( - hkls: np.ndarray, - rMat_c: np.ndarray, - bMat: np.ndarray, - wavelength: float, - chiTilt: Optional[float] = None, - ) -> Tuple[np.ndarray, np.ndarray, np.ndarray]: - """ - Static method for calculating g-vectors and scattering vector angles - for specified hkls, subject to the bragg conditions specified by - lattice vectors, orientation matrix, and wavelength - - Parameters - ---------- - hkls : np.ndarray - (3, n) array of hkls. - rMat_c : np.ndarray - (3, 3) rotation matrix from the crystal to the sample frame. - bMat : np.ndarray, optional - (3, 3) COB from reciprocal lattice frame to the crystal frame. - wavelength : float - xray wavelength in Angstroms. - chiTilt : float, optional - 0 <= chiTilt <= 90 degrees, defaults to 0 - - Returns - ------- - gVec_s : np.ndarray - (3, n) array of g-vectors (reciprocal lattice) in the sample frame. - oangs0 : np.ndarray - (3, n) array containing the feasible (2-theta, eta, ome) triplets - for each input hkl (first solution) - oangs1 : np.ndarray - (3, n) array containing the feasible (2-theta, eta, ome) triplets - for each input hkl (second solution) - - FIXME: must do testing on strained bMat - """ - # arg munging - chi = float(chiTilt) if chiTilt is not None else 0.0 - rMat_c = rMat_c.squeeze() - - # these are the reciprocal lattice vectors in the SAMPLE FRAME - # ** NOTE ** - # if strained, assumes that you handed it a bMat calculated from - # strained [a, b, c] in the CRYSTAL FRAME - gVec_s = np.dot(rMat_c, np.dot(bMat, hkls)) - - dim0 = gVec_s.shape[0] - if dim0 != 3: - raise ValueError(f'Number of lattice plane normal dims is {dim0}') - - # call model from transforms now - oangs0, oangs1 = xfcapi.oscill_angles_of_hkls( - hkls.T, chi, rMat_c, bMat, wavelength - ) - - return gVec_s, oangs0.T, oangs1.T - - def _makeScatteringVectors( - self, - rMat: np.ndarray, - bMat: Optional[np.ndarray] = None, - chiTilt: Optional[float] = None, - ) -> Tuple[List[np.ndarray], List[np.ndarray], List[np.ndarray]]: - """ - modeled after QFromU.m - """ - - if bMat is None: - bMat = self._latVecOps['B'] - - Qs_vec = [] - Qs_ang0 = [] - Qs_ang1 = [] - for iHKLr, hklData in enumerate(self.hklDataList): - if not self._thisHKL(iHKLr): - continue - thisQs, thisAng0, thisAng1 = PlaneData.makeScatteringVectors( - hklData['symHKLs'], - rMat, - bMat, - self._wavelength, - chiTilt=chiTilt, - ) - Qs_vec.append(thisQs) - Qs_ang0.append(thisAng0) - Qs_ang1.append(thisAng1) - - return Qs_vec, Qs_ang0, Qs_ang1 - - def calcStructFactor(self, atominfo): - """ - Calculates unit cell structure factors as a function of hkl - USAGE: - FSquared = calcStructFactor(atominfo,hkls,B) - INPUTS: - 1) atominfo (m x 1 float ndarray) the first threee columns of the - matrix contain fractional atom positions [uvw] of atoms in the unit - cell. 
The last column contains the number of electrons for a given atom - 2) hkls (3 x n float ndarray) is the array of Miller indices for - the planes of interest. The vectors are assumed to be - concatenated along the 1-axis (horizontal) - 3) B (3 x 3 float ndarray) is a matrix of reciprocal lattice basis - vectors,where each column contains a reciprocal lattice basis vector - ({g}=[B]*{hkl}) - OUTPUTS: - 1) FSquared (n x 1 float ndarray) array of structure factors, - one for each hkl passed into the function - """ - r = atominfo[:, 0:3] - elecNum = atominfo[:, 3] - hkls = self.hkls - B = self.latVecOps['B'] - sinThOverLamdaList, ffDataList = LoadFormFactorData() - FSquared = np.zeros(hkls.shape[1]) - - for jj in np.arange(0, hkls.shape[1]): - # ???: probably have other functions for this - # Calculate G for each hkl - # Calculate magnitude of G for each hkl - G = ( - hkls[0, jj] * B[:, 0] - + hkls[1, jj] * B[:, 1] - + hkls[2, jj] * B[:, 2] - ) - magG = np.sqrt(G[0] ** 2 + G[1] ** 2 + G[2] ** 2) - - # Begin calculating form factor - F = 0 - for ii in np.arange(0, r.shape[0]): - ff = RetrieveAtomicFormFactor( - elecNum[ii], magG, sinThOverLamdaList, ffDataList - ) - exparg = complex( - 0.0, - 2.0 - * np.pi - * ( - hkls[0, jj] * r[ii, 0] - + hkls[1, jj] * r[ii, 1] - + hkls[2, jj] * r[ii, 2] - ), - ) - F += ff * np.exp(exparg) - - """ - F = sum_atoms(ff(Q)*e^(2*pi*i(hu+kv+lw))) - """ - FSquared[jj] = np.real(F * np.conj(F)) - - return FSquared - - # OLD DEPRECATED PLANE_DATA STUFF ==================================== - @deprecated(new_func="len(self.hkls.T)", removal_date="2025-08-01") - def getNHKLs(self): - return len(self.getHKLs()) - - @deprecated(new_func="self.exclusions", removal_date="2025-08-01") - def get_exclusions(self): - return self.exclusions - - @deprecated(new_func="self.exclusions=...", removal_date="2025-08-01") - def set_exclusions(self, exclusions): - self.exclusions = exclusions - - @deprecated( - new_func="rotations.ltypeOfLaueGroup(self.laueGroup)", - removal_date="2025-08-01", - ) - def getLatticeType(self): - return ltypeOfLaueGroup(self.laueGroup) - - @deprecated(new_func="self.q_sym", removal_date="2025-08-01") - def getQSym(self): - return self.q_sym - - -@deprecated(removal_date='2025-01-01') -def getFriedelPair(tth0, eta0, *ome0, **kwargs): - """ - Get the diffractometer angular coordinates in degrees for - the Friedel pair of a given reflection (min angular distance). - - AUTHORS: - - J. V. Bernier -- 10 Nov 2009 - - USAGE: - - ome1, eta1 = getFriedelPair(tth0, eta0, *ome0, - display=False, - units='degrees', - convention='hexrd') - - INPUTS: - - 1) tth0 is a list (or ndarray) of 1 or n the bragg angles (2theta) for - the n reflections (tiled to match eta0 if only 1 is given). - - 2) eta0 is a list (or ndarray) of 1 or n azimuthal coordinates for the n - reflections (tiled to match tth0 if only 1 is given). - - 3) ome0 is a list (or ndarray) of 1 or n reference oscillation - angles for the n reflections (denoted omega in [1]). This argument - is optional. 
- - 4) Keyword arguments may be one of the following: - - Keyword Values|{default} Action - -------------- -------------- -------------- - 'display' True|{False} toggles display to cmd line - 'units' 'radians'|{'degrees'} sets units for input angles - 'convention' 'fable'|{'hexrd'} sets conventions defining - the angles (see below) - 'chiTilt' None the inclination (about Xlab) of - the oscillation axis - - OUTPUTS: - - 1) ome1 contains the oscillation angle coordinates of the - Friedel pairs associated with the n input reflections, relative to ome0 - (i.e. ome1 is returned as an offset added to ome0). Output is in DEGREES! - - 2) eta1 contains the azimuthal coordinates of the Friedel - pairs associated with the n input reflections. Output units are - controlled via the module variable 'outputDegrees' - - NOTES: - - !!!: The outputs ome1, eta1 are written using the selected convention, but - the units are always degrees. May change this to work with Nathan's - global... - - !!!: In the 'fable' convention [1], {XYZ} form a RHON basis where X is - downstream, Z is vertical, and eta is CCW with +Z defining eta = 0. - - !!!: In the 'hexrd' convention [2], {XYZ} form a RHON basis where Z is - upstream, Y is vertical, and eta is CCW with +X defining eta = 0. - - REFERENCES: - - [1] E. M. Lauridsen, S. Schmidt, R. M. Suter, and H. F. Poulsen, - ``Tracking: a method for structural characterization of grains in - powders or polycrystals''. J. Appl. Cryst. (2001). 34, 744--750 - - [2] J. V. Bernier, M. P. Miller, J. -S. Park, and U. Lienert, - ``Quantitative Stress Analysis of Recrystallized OFHC Cu Subject - to Deformed In Situ'', J. Eng. Mater. Technol. (2008). 130. - DOI:10.1115/1.2870234 - """ - - dispFlag = False - fableFlag = False - chi = None - c1 = 1.0 - c2 = pi / 180.0 - - eta0 = np.atleast_1d(eta0) - tth0 = np.atleast_1d(tth0) - ome0 = np.atleast_1d(ome0) - - if eta0.ndim != 1: - raise RuntimeError('azimuthal input must be 1-D') - - npts = len(eta0) - - if tth0.ndim != 1: - raise RuntimeError('Bragg angle input must be 1-D') - else: - if len(tth0) != npts: - if len(tth0) == 1: - tth0 *= np.ones(npts) - elif npts == 1: - npts = len(tth0) - eta0 *= np.ones(npts) - else: - raise RuntimeError( - 'the azimuthal and Bragg angle inputs are inconsistent' - ) - - if len(ome0) == 0: - ome0 = np.zeros(npts) # dummy ome0 - elif len(ome0) == 1 and npts > 1: - ome0 *= np.ones(npts) - else: - if len(ome0) != npts: - raise RuntimeError( - 'your oscillation angle input is inconsistent; ' - + f'it has length {len(ome0)} while it should be {npts}' - ) - - # keyword args processing - kwarglen = len(kwargs) - if kwarglen > 0: - argkeys = list(kwargs.keys()) - for i in range(kwarglen): - if argkeys[i] == 'display': - dispFlag = kwargs[argkeys[i]] - elif argkeys[i] == 'convention': - if kwargs[argkeys[i]].lower() == 'fable': - fableFlag = True - elif argkeys[i] == 'units': - if kwargs[argkeys[i]] == 'radians': - c1 = 180.0 / pi - c2 = 1.0 - elif argkeys[i] == 'chiTilt': - if kwargs[argkeys[i]] is not None: - chi = kwargs[argkeys[i]] - - # a little talkback...
- if dispFlag: - if fableFlag: - print('\nUsing Fable angle convention\n') - else: - print('\nUsing image-based angle convention\n') - - # mapped eta input - # - in DEGREES, thanks to c1 - eta0 = mapAngle(c1 * eta0, [-180, 180], units='degrees') - if fableFlag: - eta0 = 90 - eta0 - - # must put args into RADIANS - # - eta0 is in DEGREES, - # - the others are in whatever was entered, hence c2 - eta0 = d2r * eta0 - tht0 = c2 * tth0 / 2 - if chi is not None: - chi = c2 * chi - else: - chi = 0 - - """ - SYSTEM SOLVE - - - cos(chi)cos(eta)cos(theta)sin(x) - cos(chi)sin(theta)cos(x) \ - = sin(theta) - sin(chi)sin(eta)cos(theta) - - - Identity: a sin x + b cos x = sqrt(a**2 + b**2) sin (x + alpha) - - / - | atan(b/a) for a > 0 - alpha < - | pi + atan(b/a) for a < 0 - \ - - => sin (x + alpha) = c / sqrt(a**2 + b**2) - - must use both branches for sin(x) = n: - x = u (+ 2k*pi) | x = pi - u (+ 2k*pi) - """ - cchi = np.cos(chi) - schi = np.sin(chi) - ceta = np.cos(eta0) - seta = np.sin(eta0) - ctht = np.cos(tht0) - stht = np.sin(tht0) - - nchi = np.c_[0.0, cchi, schi].T - - gHat0_l = -np.vstack([ceta * ctht, seta * ctht, stht]) - - a = cchi * ceta * ctht - b = -cchi * stht - c = stht + schi * seta * ctht - - # form solution - abMag = np.sqrt(a * a + b * b) - assert np.all(abMag > 0), "Beam vector specification is infeasible!" - phaseAng = np.arctan2(b, a) - rhs = c / abMag - rhs[abs(rhs) > 1.0] = np.nan - rhsAng = np.arcsin(rhs) - - # write ome angle output arrays (NaNs persist here) - ome1 = rhsAng - phaseAng - ome2 = np.pi - rhsAng - phaseAng - - ome1 = mapAngle(ome1, [-np.pi, np.pi], units='radians') - ome2 = mapAngle(ome2, [-np.pi, np.pi], units='radians') - - ome_stack = np.vstack([ome1, ome2]) - - min_idx = np.argmin(abs(ome_stack), axis=0) - - ome_min = ome_stack[min_idx, list(range(len(ome1)))] - eta_min = np.nan * np.ones_like(ome_min) - - # mark feasible reflections - goodOnes = ~np.isnan(ome_min) - - numGood = np.sum(goodOnes) - tmp_eta = np.empty(numGood) - tmp_gvec = gHat0_l[:, goodOnes] - for i in range(numGood): - rchi = rotMatOfExpMap(np.tile(ome_min[goodOnes][i], (3, 1)) * nchi) - gHat_l = np.dot(rchi, tmp_gvec[:, i].reshape(3, 1)) - tmp_eta[i] = np.arctan2(gHat_l[1], gHat_l[0]) - eta_min[goodOnes] = tmp_eta - - # everybody back to DEGREES! - # - ome1 is in RADIANS here - # - convert and put into [-180, 180] - ome1 = mapAngle( - mapAngle(r2d * ome_min, [-180, 180], units='degrees') + c1 * ome0, - [-180, 180], - units='degrees', - ) - - # put eta1 in [-180, 180] - eta1 = mapAngle(r2d * eta_min, [-180, 180], units='degrees') - - if not outputDegrees: - ome1 *= d2r - eta1 *= d2r - - return ome1, eta1 - - -def getDparms( - lp: np.ndarray, lpTag: str, radians: Optional[bool] = True -) -> np.ndarray: - """ - Utility routine for getting dparms, that is the lattice parameters - without symmetry -- 'triclinic' - - Parameters - ---------- - lp : np.ndarray - Parsed lattice parameters - lpTag : str - Tag for the symmetry group of the lattice (from Laue group) - radians : bool, optional - Whether or not to use radians for angles, default is True - - Returns - ------- - np.ndarray - The lattice parameters without symmetry. - """ - latVecOps = latticeVectors(lp, tag=lpTag, radians=radians) - return latVecOps['dparms'] - - -def LoadFormFactorData(): - """ - Script to read in a csv file containing information relating the - magnitude of Q (sin(th)/lambda) to atomic form factor - Notes: - Atomic form factor data gathered from the International Tables of - Crystallography: - P. J. Brown, A. G. Fox, E. 
- N. Maslen, M. A. O'Keefe and B. T. M. Willis, - "Chapter 6.1. Intensity of diffracted intensities", International Tables - for Crystallography (2006). Vol. C, ch. 6.1, pp. 554-595 - """ - - dir1 = os.path.split(valunits.__file__) - dataloc = os.path.join(dir1[0], 'data', 'FormFactorVsQ.csv') - - data = np.zeros((62, 99), float) - - # FIXME: marked broken by DP - jj = 0 - with open(dataloc, 'r', newline='') as csvfile: - datareader = csv.reader(csvfile, dialect=csv.excel) - for row in datareader: - ii = 0 - for val in row: - data[jj, ii] = float(val) - ii += 1 - jj += 1 - - sinThOverLamdaList = data[:, 0] - ffDataList = data[:, 1:] - - return sinThOverLamdaList, ffDataList - - -def RetrieveAtomicFormFactor(elecNum, magG, sinThOverLamdaList, ffDataList): - """Interpolates between tabulated data to find the atomic form factor - for an atom with elecNum electrons for a given magnitude of Q - USAGE: - ff = RetrieveAtomicFormFactor(elecNum,magG,sinThOverLamdaList,ffDataList) - INPUTS: - 1) elecNum (1 x 1 float) number of electrons for atom of interest - 2) magG (1 x 1 float) magnitude of G - 3) sinThOverLamdaList (n x 1 float ndarray) form factor data is tabulated - in terms of sin(theta)/lambda (A^-1). - 4) ffDataList (n x m float ndarray) form factor data is tabulated in terms - of sin(theta)/lambda (A^-1). Each column corresponds to a different - number of electrons - OUTPUTS: - 1) ff (n x 1 float) atomic form factor for atom and hkl of interest - NOTES: - Data should be calculated in terms of G at some point - """ - sinThOverLambda = 0.5 * magG - # lambda=2*d*sin(th) - # lambda=2*sin(th)/G - # 1/2*G=sin(th)/lambda - - ff = np.interp( - sinThOverLambda, sinThOverLamdaList, ffDataList[:, (elecNum - 1)] - ) - - return ff - - -def lorentz_factor(tth: np.ndarray) -> np.ndarray: - """ - 05/26/2022 SS adding lorentz factor computation - to the detector so that it can be compensated for in the - intensity correction - - Parameters - ---------- - tth: np.ndarray - 2-theta of every pixel in radians - - Returns - ------- - np.ndarray - Lorentz factor for each pixel - """ - - theta = 0.5 * tth - - cth = np.cos(theta) - sth2 = np.sin(theta) ** 2 - - return 1.0 / (4.0 * cth * sth2) - - -def polarization_factor( - tth: np.ndarray, - unpolarized: Optional[bool] = True, - eta: Optional[np.ndarray] = None, - f_hor: Optional[float] = None, - f_vert: Optional[float] = None, -) -> np.ndarray: - """ - 06/14/2021 SS adding lorentz polarization factor computation - to the detector so that it can be compensated for in the - intensity correction - - 05/26/2022 decoupling lorentz factor from polarization factor - - parameters: tth two theta of every pixel in radians - if unpolarized is True, all subsequent arguments are optional - eta azimuthal angle of every pixel - f_hor fraction of horizontal polarization - (~1 for XFELs) - f_vert fraction of vertical polarization - (~0 for XFELs) - notice f_hor + f_vert = 1 - - FIXME, called without parameters like eta, f_hor, f_vert, but they default - to None in the current implementation, which will throw an error.
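Taken together, the two corrections defined above multiply into the familiar Lorentz-polarization (LP) factor; a standalone restatement for the unpolarized case, matching the formulas in lorentz_factor and polarization_factor (function name and sample angles are illustrative):

    import numpy as np

    def lp_factor(tth):
        theta = 0.5 * tth
        lorentz = 1.0 / (4.0 * np.cos(theta) * np.sin(theta) ** 2)
        polarization = (1.0 + np.cos(tth) ** 2) / 2.0
        return lorentz * polarization

    tth = np.radians([20.0, 40.0, 90.0])
    print(lp_factor(tth))    # ~[6.7, 1.17, 0.35]: falls steeply with angle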
- """ - - ctth2 = np.cos(tth) ** 2 - - if unpolarized: - return (1 + ctth2) / 2 - - seta2 = np.sin(eta) ** 2 - ceta2 = np.cos(eta) ** 2 - return f_hor * (seta2 + ceta2 * ctth2) + f_vert * (ceta2 + seta2 * ctth2) diff --git a/hexrd/hedm/material/unitcell.py b/hexrd/hedm/material/unitcell.py deleted file mode 100644 index 117f5b0cf..000000000 --- a/hexrd/hedm/material/unitcell.py +++ /dev/null @@ -1,2056 +0,0 @@ -import importlib.resources -import numpy as np -from numba import njit -from hexrd.core import constants -from hexrd.core.material import spacegroup, symbols, symmetry -from hexrd.hedm.ipfcolor import sphere_sector, colorspace -from hexrd.core.valunits import valWUnit -import hexrd.core.resources -import warnings -import h5py -from pathlib import Path -from scipy.interpolate import interp1d -import time - -eps = constants.sqrt_epsf -ENERGY_ID = 0 -REAL_F1_ID = 1 -IMAG_F2_ID = 2 -MU_ID = 3 -COH_INCOH_ID = 4 -MU_K_ID = 6 -WAV_ID = 7 - -''' calculate dot product of two vectors in any space 'd' 'r' or 'c' ''' - - -@njit(cache=True, nogil=True) -def _calclength(u, mat): - return np.sqrt(np.dot(u, np.dot(mat, u))) - - -@njit(cache=True, nogil=True) -def _calcstar(v, sym, mat): - vsym = np.atleast_2d(v) - for s in sym: - vp = np.dot(np.ascontiguousarray(s), v) - # check if this is new - isnew = True - for vec in vsym: - vv = vp - vec - dist = _calclength(vv, mat) - if dist < 1e-3: - isnew = False - break - if isnew: - vp = np.atleast_2d(vp) - vsym = np.vstack((vsym, vp)) - - return vsym - - -class unitcell: - ''' - >> @AUTHOR: Saransh Singh, Lawrence Livermore National Lab, saransh1@llnl.gov - >> @DATE: 10/09/2018 SS 1.0 original - @DATE: 10/15/2018 SS 1.1 added space group handling - >> @DETAILS: this is the unitcell class - - ''' - - # initialize the unitcell class - # need lattice parameters and space group data from HDF5 file - def __init__( - self, - lp, - sgnum, - atomtypes, - charge, - atominfo, - U, - dmin, - beamenergy, - sgsetting=0, - ): - - self._tstart = time.time() - self.pref = 0.4178214 - - self.atom_type = atomtypes - self.chargestates = charge - self.atom_pos = atominfo - - self._dmin = dmin - - self.lparms = lp - - self.U = U - ''' - initialize interpolation from table for anomalous scattering - ''' - self.InitializeInterpTable() - - ''' - sets x-ray energy - calculate wavelength - also calculates anomalous form factors for xray scattering - ''' - self.voltage = beamenergy * 1000.0 - ''' - calculate symmetry - ''' - self.sgsetting = sgsetting - self.sgnum = sgnum - - self._tstop = time.time() - self.tinit = self._tstop - self._tstart - - def GetPgLg(self): - ''' - simple subroutine to get point and laue groups - to maintain consistency for planedata initialization - in the materials class - ''' - for k in list(_pgDict.keys()): - if self.sgnum in k: - pglg = _pgDict[k] - self._pointGroup = pglg[0] - self._laueGroup = pglg[1] - self._supergroup = pglg[2] - self._supergroup_laue = pglg[3] - - def CalcWavelength(self): - # wavelength in nm - self.wavelength = ( - constants.cPlanck - * constants.cLight - / constants.cCharge - / self.voltage - ) - self.wavelength *= 1e9 - - def calcBetaij(self): - - self.betaij = np.zeros([3, 3, self.atom_ntype]) - for i in range(self.U.shape[0]): - U = self.U[i, :] - self.betaij[:, :, i] = np.array( - [[U[0], U[3], U[4]], [U[3], U[1], U[5]], [U[4], U[5], U[2]]] - ) - - self.betaij[:, :, i] *= 2.0 * np.pi**2 * self._aij - - def calcmatrices(self): - - a = self.a - b = self.b - c = self.c - - alpha = np.radians(self.alpha) - beta = 
np.radians(self.beta) - gamma = np.radians(self.gamma) - - ca = np.cos(alpha) - cb = np.cos(beta) - cg = np.cos(gamma) - sa = np.sin(alpha) - sb = np.sin(beta) - sg = np.sin(gamma) - tg = np.tan(gamma) - - ''' - direct metric tensor - ''' - self._dmt = np.array( - [ - [a**2, a * b * cg, a * c * cb], - [a * b * cg, b**2, b * c * ca], - [a * c * cb, b * c * ca, c**2], - ] - ) - self._vol = np.sqrt(np.linalg.det(self.dmt)) - - if self.vol < 1e-5: - warnings.warn('unitcell volume is suspiciously small') - - ''' - reciprocal metric tensor - ''' - self._rmt = np.linalg.inv(self.dmt) - - ''' - direct structure matrix - ''' - self._dsm = np.array( - [ - [a, b * cg, c * cb], - [0.0, b * sg, -c * (cb * cg - ca) / sg], - [0.0, 0.0, self.vol / (a * b * sg)], - ] - ) - - self._dsm[np.abs(self._dsm) < eps] = 0.0 - - ''' - reciprocal structure matrix - ''' - self._rsm = np.array( - [ - [1.0 / a, 0.0, 0.0], - [-1.0 / (a * tg), 1.0 / (b * sg), 0.0], - [ - b * c * (cg * ca - cb) / (self.vol * sg), - a * c * (cb * cg - ca) / (self.vol * sg), - a * b * sg / self.vol, - ], - ] - ) - - self._rsm[np.abs(self._rsm) < eps] = 0.0 - - ast = self.CalcLength([1, 0, 0], 'r') - bst = self.CalcLength([0, 1, 0], 'r') - cst = self.CalcLength([0, 0, 1], 'r') - - self._aij = np.array( - [ - [ast**2, ast * bst, ast * cst], - [bst * ast, bst**2, bst * cst], - [cst * ast, cst * bst, cst**2], - ] - ) - - ''' transform between any crystal space to any other space. - choices are 'd' (direct), 'r' (reciprocal) and 'c' (cartesian)''' - - def TransSpace(self, v_in, inspace, outspace): - if inspace == outspace: - return v_in - if inspace == 'd': - if outspace == 'r': - v_out = np.dot(v_in, self.dmt) - elif outspace == 'c': - v_out = np.dot(self.dsm, v_in) - else: - raise ValueError( - 'inspace in "d" but outspace can\'t be identified' - ) - - elif inspace == 'r': - if outspace == 'd': - v_out = np.dot(v_in, self.rmt) - elif outspace == 'c': - v_out = np.dot(self.rsm, v_in) - else: - raise ValueError( - 'inspace in "r" but outspace can\'t be identified' - ) - - elif inspace == 'c': - if outspace == 'r': - v_out = np.dot(v_in, self.dsm) - elif outspace == 'd': - v_out = np.dot(v_in, self.rsm) - else: - raise ValueError( - 'inspace in "c" but outspace can\'t be identified' - ) - - else: - raise ValueError('incorrect inspace argument') - - return v_out - - ''' calculate dot product of two vectors in any space 'd' 'r' or 'c' ''' - - def CalcDot(self, u, v, space): - - if space == 'd': - dot = np.dot(u, np.dot(self.dmt, v)) - elif space == 'r': - dot = np.dot(u, np.dot(self.rmt, v)) - elif space == 'c': - dot = np.dot(u, v) - else: - raise ValueError('space is unidentified') - - return dot - - def CalcLength(self, u, space): - - if space == 'd': - mat = self.dmt - # vlen = np.sqrt(np.dot(u, np.dot(self.dmt, u))) - elif space == 'r': - mat = self.rmt - # vlen = np.sqrt(np.dot(u, np.dot(self.rmt, u))) - elif space == 'c': - mat = np.eye(3) - # vlen = np.linalg.norm(u) - else: - raise ValueError('incorrect space argument') - - uu = np.array(u).astype(np.float64) - return _calclength(uu, mat) - - ''' normalize vector in any space 'd' 'r' or 'c' ''' - - def NormVec(self, u, space): - ulen = self.CalcLength(u, space) - return u / ulen - - ''' calculate angle between two vectors in any space''' - - def CalcAngle(self, u, v, space): - - ulen = self.CalcLength(u, space) - vlen = self.CalcLength(v, space) - - dot = self.CalcDot(u, v, space) / ulen / vlen - if np.isclose(np.abs(dot), 1.0): - dot = np.sign(dot) - angle = np.arccos(dot) - - return angle 
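CalcLength and CalcAngle above are thin wrappers around the metric-tensor quadratic form; the same arithmetic done by hand for a hexagonal cell (a = 3, c = 5 are illustrative values):

    import numpy as np

    a, c = 3.0, 5.0
    g = np.array([[a * a, -a * a / 2.0, 0.0],    # direct metric tensor for
                  [-a * a / 2.0, a * a, 0.0],    # alpha = beta = 90, gamma = 120
                  [0.0, 0.0, c * c]])

    u = np.array([1.0, 0.0, 0.0])                # [100]
    v = np.array([0.0, 1.0, 0.0])                # [010]
    length = np.sqrt(u @ g @ u)                  # as from CalcLength(u, 'd')
    cosang = (u @ g @ v) / np.sqrt((u @ g @ u) * (v @ g @ v))
    assert np.isclose(length, a)
    assert np.isclose(np.degrees(np.arccos(cosang)), 120.0)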
-
-    ''' calculate the cross product of two vectors in any space.
-
-    the cross product of two vectors in direct space is a vector in
-    reciprocal space
-
-    the cross product of two vectors in reciprocal space is a vector in
-    direct space
-
-    the outspace argument specifies if a conversion needs to be made
-
-    @NOTE: vol_divide is the switch which turns division by the
-    volume of the unit cell on or off.'''
-
-    def CalcCross(self, p, q, inspace, outspace, vol_divide=False):
-        if vol_divide:
-            vol = self.vol
-        else:
-            vol = 1.0
-
-        pxq = np.array(
-            [
-                p[1] * q[2] - p[2] * q[1],
-                p[2] * q[0] - p[0] * q[2],
-                p[0] * q[1] - p[1] * q[0],
-            ]
-        )
-
-        if inspace == 'd':
-            '''
-            cross product vector is in reciprocal space
-            and can be converted to direct or cartesian space
-            '''
-            pxq *= vol
-
-            if outspace == 'r':
-                pass
-            elif outspace == 'd':
-                pxq = self.TransSpace(pxq, 'r', 'd')
-            elif outspace == 'c':
-                pxq = self.TransSpace(pxq, 'r', 'c')
-            else:
-                raise ValueError(
-                    'inspace is "d" but outspace is unidentified'
-                )
-
-        elif inspace == 'r':
-            '''
-            cross product vector is in direct space and
-            can be converted to any other space
-            '''
-            pxq /= vol
-            if outspace == 'r':
-                pxq = self.TransSpace(pxq, 'd', 'r')
-            elif outspace == 'd':
-                pass
-            elif outspace == 'c':
-                pxq = self.TransSpace(pxq, 'd', 'c')
-            else:
-                raise ValueError(
-                    'inspace is "r" but outspace is unidentified'
-                )
-
-        elif inspace == 'c':
-            '''
-            cross product is already in cartesian space, so no
-            volume factor is involved. it can be converted to any
-            other space too
-            '''
-            if outspace == 'r':
-                pxq = self.TransSpace(pxq, 'c', 'r')
-            elif outspace == 'd':
-                pxq = self.TransSpace(pxq, 'c', 'd')
-            elif outspace == 'c':
-                pass
-            else:
-                raise ValueError(
-                    'inspace is "c" but outspace is unidentified'
-                )
-
-        else:
-            raise ValueError('inspace is unidentified')
-
-        return pxq
-
-    def GenerateRecipPGSym(self):
-
-        self.SYM_PG_r = self.SYM_PG_d[0, :, :]
-        self.SYM_PG_r = np.broadcast_to(self.SYM_PG_r, [1, 3, 3])
-
-        self.SYM_PG_r_laue = self.SYM_PG_d[0, :, :]
-        self.SYM_PG_r_laue = np.broadcast_to(self.SYM_PG_r_laue, [1, 3, 3])
-
-        for i in range(1, self.npgsym):
-            g = self.SYM_PG_d[i, :, :]
-            g = np.dot(self.dmt, np.dot(g, self.rmt))
-            g = np.round(np.broadcast_to(g, [1, 3, 3]))
-            self.SYM_PG_r = np.concatenate((self.SYM_PG_r, g))
-
-        for i in range(1, self.SYM_PG_d_laue.shape[0]):
-            g = self.SYM_PG_d_laue[i, :, :]
-            g = np.dot(self.dmt, np.dot(g, self.rmt))
-            g = np.round(np.broadcast_to(g, [1, 3, 3]))
-            self.SYM_PG_r_laue = np.concatenate((self.SYM_PG_r_laue, g))
-
-        self.SYM_PG_r = self.SYM_PG_r.astype(np.int32)
-        self.SYM_PG_r_laue = self.SYM_PG_r_laue.astype(np.int32)
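
GenerateRecipPGSym above re-expresses each direct-space point-group operator in the reciprocal basis via the similarity transform g_r = dmt . g_d . rmt. A standalone check of that identity with an illustrative orthorhombic cell (the numbers are not part of this patch):

import numpy as np

a, b, c = 0.2, 0.3, 0.5                  # illustrative cell, nm
dmt = np.diag([a**2, b**2, c**2])        # direct metric tensor
rmt = np.linalg.inv(dmt)                 # reciprocal metric tensor

g_d = np.diag([-1.0, -1.0, 1.0])         # two-fold about c, direct basis
g_r = np.dot(dmt, np.dot(g_d, rmt))      # same operator, reciprocal basis

# a valid point-group operator is again an integer matrix
assert np.allclose(g_r, np.round(g_r))
print(g_r.astype(np.int32))
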
-
-    def GenerateCartesianPGSym(self):
-        '''
-        use the direct point group symmetries to generate the
-        symmetry operations in the cartesian frame. this is used
-        to reduce directions to the standard stereographic triangle
-        '''
-        self.SYM_PG_c = []
-        self.SYM_PG_c_laue = []
-
-        for sop in self.SYM_PG_d:
-            self.SYM_PG_c.append(np.dot(self.dsm, np.dot(sop, self.rsm.T)))
-
-        self.SYM_PG_c = np.array(self.SYM_PG_c)
-        self.SYM_PG_c[np.abs(self.SYM_PG_c) < eps] = 0.0
-
-        if self._pointGroup == self._laueGroup:
-            self.SYM_PG_c_laue = self.SYM_PG_c
-        else:
-            for sop in self.SYM_PG_d_laue:
-                self.SYM_PG_c_laue.append(
-                    np.dot(self.dsm, np.dot(sop, self.rsm.T))
-                )
-            self.SYM_PG_c_laue = np.array(self.SYM_PG_c_laue)
-            self.SYM_PG_c_laue[np.abs(self.SYM_PG_c_laue) < eps] = 0.0
-
-        '''
-        use the point group symmetry of the supergroup
-        to generate the equivalent operations in the
-        cartesian reference frame
-
-        SS 11/23/2020 added supergroup symmetry operations
-        SS 11/24/2020 fix monoclinic groups separately since
-        the supergroup for monoclinic is orthorhombic
-        '''
-        supergroup = self._supergroup
-        sym_supergroup = symmetry.GeneratePGSYM(supergroup)
-
-        supergroup_laue = self._supergroup_laue
-        sym_supergroup_laue = symmetry.GeneratePGSYM(supergroup_laue)
-
-        if self.latticeType in ('monoclinic', 'triclinic'):
-            '''
-            for the monoclinic groups c2 and c2h, the supergroups are
-            orthorhombic, so there is no need to convert from direct to
-            cartesian as the two frames are identical
-            '''
-            self.SYM_PG_supergroup = sym_supergroup
-            self.SYM_PG_supergroup_laue = sym_supergroup_laue
-
-        else:
-
-            self.SYM_PG_supergroup = []
-            self.SYM_PG_supergroup_laue = []
-
-            for sop in sym_supergroup:
-                self.SYM_PG_supergroup.append(
-                    np.dot(self.dsm, np.dot(sop, self.rsm.T))
-                )
-
-            self.SYM_PG_supergroup = np.array(self.SYM_PG_supergroup)
-            self.SYM_PG_supergroup[np.abs(self.SYM_PG_supergroup) < eps] = 0.0
-
-            for sop in sym_supergroup_laue:
-                self.SYM_PG_supergroup_laue.append(
-                    np.dot(self.dsm, np.dot(sop, self.rsm.T))
-                )
-
-            self.SYM_PG_supergroup_laue = np.array(self.SYM_PG_supergroup_laue)
-            self.SYM_PG_supergroup_laue[
-                np.abs(self.SYM_PG_supergroup_laue) < eps
-            ] = 0.0
-
-        '''
-        the standard setting for the monoclinic system has the b-axis aligned
-        with the 2-fold axis. this needs to be accounted for when the
-        reduction to the standard stereographic triangle is performed. the
-        simplest way is to rotate all symmetry elements by 90 degrees about
-        the x-axis
-
-        the supergroups for the monoclinic groups are orthorhombic, so they
-        need not be rotated as they already have the c* axis aligned with the
-        z-axis
-        SS 12/10/2020
-        '''
-        if self.latticeType == 'monoclinic':
-
-            om = np.array([[1.0, 0.0, 0.0], [0.0, 0.0, 1.0], [0.0, -1.0, 0.0]])
-
-            for i, s in enumerate(self.SYM_PG_c):
-                ss = np.dot(om, np.dot(s, om.T))
-                self.SYM_PG_c[i, :, :] = ss
-
-            for i, s in enumerate(self.SYM_PG_c_laue):
-                ss = np.dot(om, np.dot(s, om.T))
-                self.SYM_PG_c_laue[i, :, :] = ss
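
The om matrix used above is a 90 degree rotation about x; conjugating by it takes a two-fold axis along b (the monoclinic standard setting) to one along z, up to sign. A small self-contained check (not part of the patch):

import numpy as np

om = np.array([[1.0, 0.0, 0.0], [0.0, 0.0, 1.0], [0.0, -1.0, 0.0]])

# two-fold rotation about y, i.e. about the monoclinic b axis
two_fold_b = np.diag([-1.0, 1.0, -1.0])

# conjugation moves the rotation axis from y to z
two_fold_z = np.dot(om, np.dot(two_fold_b, om.T))
print(two_fold_z)  # diag(-1, -1, 1), a two-fold about z
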
-
-        '''
-        for the triclinic group c1, the supergroup is the monoclinic group m;
-        we therefore need to rotate the mirror to be perpendicular to the
-        z-axis. the same shouldn't be done for the group ci, since its
-        supergroup is just the triclinic group c1!
-        SS 12/10/2020
-        '''
-        if self._pointGroup == 'c1':
-            om = np.array([[1.0, 0.0, 0.0], [0.0, 0.0, 1.0], [0.0, -1.0, 0.0]])
-
-            for i, s in enumerate(self.SYM_PG_supergroup):
-                ss = np.dot(om, np.dot(s, om.T))
-                self.SYM_PG_supergroup[i, :, :] = ss
-
-            for i, s in enumerate(self.SYM_PG_supergroup_laue):
-                ss = np.dot(om, np.dot(s, om.T))
-                self.SYM_PG_supergroup_laue[i, :, :] = ss
-
-    def CalcOrbit(self, v, reduceToUC=True):
-        """
-        @date 03/04/2021 SS 1.0 original
-
-        @details calculate the equivalent positions for the
-        space group symmetry. this function will replace the
-        code in the CalcPositions subroutine.
-
-        @params v is the fractional coordinate in direct space
-                reduceToUC reduces the position to the
-                fundamental fractional unit cell (0-1)
-        """
-
-        asym_pos = []
-        n = 1
-        if v.shape[0] != 3:
-            raise RuntimeError("fractional coordinate is not 3-d")
-        r = v
-        # augment to a 4-vector, following the Seitz notation
-        r = np.hstack((r, 1.0))
-
-        asym_pos = np.broadcast_to(r[0:3], [1, 3])
-
-        for symmat in self.SYM_SG:
-            # get the new position
-            rnew = np.dot(symmat, r)
-            rr = rnew[0:3]
-
-            if reduceToUC:
-                # reduce to the fundamental unitcell with fractional
-                # coordinates between 0-1
-                rr = np.modf(rr)[0]
-                rr[rr < 0.0] += 1.0
-                rr[np.abs(rr) < 1.0e-6] = 0.0
-
-            # check if this position is new
-            isnew = True
-            for j in range(n):
-                dv = rr - asym_pos[j]
-                dist = self.CalcLength(dv, 'd')
-                if dist < 1e-3:
-                    isnew = False
-                    break
-
-            # if it's new, add it to the list
-            if isnew:
-                asym_pos = np.vstack((asym_pos, rr))
-                n += 1
-
-        numat = n
-
-        return asym_pos, numat
-
-    def CalcStar(self, v, space, applyLaue=False):
-        '''
-        this function calculates the symmetrically equivalent hkls (or uvws)
-        for the reciprocal (or direct) point group symmetry.
-        '''
-        if space == 'd':
-            mat = self.dmt.astype(np.float64)
-            if applyLaue:
-                sym = self.SYM_PG_d_laue.astype(np.float64)
-            else:
-                sym = self.SYM_PG_d.astype(np.float64)
-        elif space == 'r':
-            mat = self.rmt.astype(np.float64)
-            if applyLaue:
-                sym = self.SYM_PG_r_laue.astype(np.float64)
-            else:
-                sym = self.SYM_PG_r.astype(np.float64)
-        elif space == 'c':
-            mat = np.eye(3)
-            if applyLaue:
-                sym = self.SYM_PG_c_laue.astype(np.float64)
-            else:
-                sym = self.SYM_PG_c.astype(np.float64)
-        else:
-            raise ValueError('CalcStar: unrecognized space.')
-
-        vv = np.array(v).astype(np.float64)
-        return _calcstar(vv, sym, mat)
-
-    def CalcPositions(self):
-        '''
-        calculate the asymmetric positions in the fundamental unitcell
-        used for structure factor calculations
-        '''
-        numat = []
-        asym_pos = []
-
-        for i in range(self.atom_ntype):
-
-            v = self.atom_pos[i, 0:3]
-            apos, n = self.CalcOrbit(v)
-
-            asym_pos.append(apos)
-            numat.append(n)
-
-        self.numat = np.array(numat)
-        self.asym_pos = asym_pos
-
-    def remove_duplicate_atoms(self, atom_pos=None, tol=1e-3):
-        """
-        @date 03/04/2021 SS 1.0 original
-
-        @details it was requested that a functionality be
-        added which can remove duplicate atoms from the
-        atom_pos field such that no two atoms are closer than
-        the distance specified by "tol" (let's assume it's in Å).
-        the steps involved are as follows:
-        1. get the star (or orbit) of each point in atom_pos
-        2. if any points in the orbits are within tol, then
-        remove the second point (the first point will be
-        preserved by convention)
-        3. update the densities, interpolation tables for structure
-        factors etc.
- - @params tol tolerance of distance between points specified - in A - """ - - if atom_pos is None: - atom_pos = self.atom_pos - - atom_pos_fixed = [] - idx = [] - """ - go through the atom_pos and remove the atoms that are duplicate - """ - for i in range(atom_pos.shape[0]): - pos = atom_pos[i, 0:3] - occ = atom_pos[i, 3] - v1, n1 = self.CalcOrbit(pos) - if i == 0: - atom_pos_fixed.append(np.hstack([pos, occ])) - idx.append(i) - else: - isclose = False - for j, uniqpos in enumerate(atom_pos_fixed): - pos2 = uniqpos[0:3] - occ2 = uniqpos[3] - # cases with fractional occupancy on same site - if np.all(np.isclose(pos, pos2)) and (occ + occ2 <= 1.0): - atom_pos_fixed.append(np.hstack([pos, occ])) - idx.append(i) - isclose = True - break - else: - v2, n2 = self.CalcOrbit(pos2) - for v in v2: - vv = np.tile(v, [v1.shape[0], 1]) - vv = vv - v1 - - for vvv in vv: - # check if distance less than tol - # the factor of 10 is for A --> nm - if self.CalcLength(vvv, 'd') < tol / 10.0: - # if true then its a repeated atom - isclose = True - break - - if isclose: - break - if isclose: - break - if not isclose: - atom_pos_fixed.append(np.hstack([pos, occ])) - idx.append(i) - - idx = np.array(idx) - atom_pos_fixed = np.array(atom_pos_fixed) - atom_type = self.atom_type[idx] - chargestates = [self.chargestates[i] for i in idx] - - if self.aniU: - U = self.U[idx, :] - else: - U = self.U[idx] - - self.atom_type = atom_type - self.chargestates = chargestates - self.atom_pos = atom_pos_fixed - - self.U = U - ''' - initialize interpolation from table for anomalous scattering - ''' - self.InitializeInterpTable() - self.CalcPositions() - self.CalcDensity() - self.calc_absorption_length() - - def CalcDensity(self): - ''' - calculate density, average atomic weight (avA) - and average atomic number(avZ) - ''' - self.avA = 0.0 - self.avZ = 0.0 - - for i in range(self.atom_ntype): - ''' - atype is atom type i.e. 
atomic number - numat is the number of atoms of atype - atom_pos(i,3) has the occupation factor - ''' - atype = self.atom_type[i] - numat = self.numat[i] - occ = self.atom_pos[i, 3] - - # -1 due to 0 indexing in python - self.avA += numat * constants.atom_weights[atype - 1] * occ - - self.avZ += numat * atype - - self.density = self.avA / (self.vol * 1.0e-21 * constants.cAvogadro) - - av_natom = np.dot(self.numat, self.atom_pos[:, 3]) - - self.avA /= av_natom - self.avZ /= np.sum(self.numat) - - ''' calculate the maximum index of diffraction vector along - each of the three reciprocal - basis vectors ''' - - def init_max_g_index(self): - """ - added 03/17/2021 SS - """ - self.ih = 1 - self.ik = 1 - self.il = 1 - - def CalcMaxGIndex(self): - self.init_max_g_index() - - while ( - 1.0 - / self.CalcLength(np.array([self.ih, 0, 0], dtype=np.float64), 'r') - > self.dmin - ): - self.ih = self.ih + 1 - - while ( - 1.0 - / self.CalcLength(np.array([0, self.ik, 0], dtype=np.float64), 'r') - > self.dmin - ): - self.ik = self.ik + 1 - - while ( - 1.0 - / self.CalcLength(np.array([0, 0, self.il], dtype=np.float64), 'r') - > self.dmin - ): - self.il = self.il + 1 - - def InitializeInterpTable(self): - - f_anomalous_data = [] - self.pe_cs = {} - data = ( - importlib.resources.files(hexrd.core.resources) - .joinpath('Anomalous.h5') - .open('rb') - ) - with h5py.File(data, 'r') as fid: - for i in range(0, self.atom_ntype): - - Z = self.atom_type[i] - elem = constants.ptableinverse[Z] - - if Z <= 92: - gid = fid.get('/' + elem) - data = np.array(gid.get('data')) - self.pe_cs[elem] = interp1d( - data[:, WAV_ID], data[:, MU_ID] + data[:, COH_INCOH_ID] - ) - data = data[:, [WAV_ID, REAL_F1_ID, IMAG_F2_ID]] - f_anomalous_data.append(data) - else: - wav = np.linspace(1.16e2, 2.86399992e-03, 189) - zs = np.ones_like(wav) * Z - zrs = np.zeros_like(wav) - data_zs = np.vstack((wav, zs, zrs)).T - self.pe_cs[elem] = interp1d(wav, zrs) - f_anomalous_data.append(data_zs) - - n = max([x.shape[0] for x in f_anomalous_data]) - self.f_anomalous_data = np.zeros([self.atom_ntype, n, 3]) - self.f_anomalous_data_sizes = np.zeros( - [ - self.atom_ntype, - ], - dtype=np.int32, - ) - - for i in range(self.atom_ntype): - nd = f_anomalous_data[i].shape[0] - self.f_anomalous_data_sizes[i] = nd - self.f_anomalous_data[i, :nd, :] = f_anomalous_data[i] - - def CalcXRSF(self, hkl): - from hexrd.powder.wppf.xtal import _calcxrsf - - ''' - the 1E-2 is to convert to A^-2 - since the fitting is done in those units - ''' - fNT = np.zeros( - [ - self.atom_ntype, - ] - ) - frel = np.zeros( - [ - self.atom_ntype, - ] - ) - scatfac = np.zeros([self.atom_ntype, 11]) - f_anomalous_data = self.f_anomalous_data - - hkl2d = np.atleast_2d(hkl).astype(np.float64) - nref = hkl2d.shape[0] - - multiplicity = np.ones( - [ - nref, - ] - ) - w_int = 1.0 - - occ = self.atom_pos[:, 3] - aniU = self.aniU - if aniU: - betaij = self.betaij - else: - betaij = self.U - - self.asym_pos_arr = np.zeros([self.numat.max(), self.atom_ntype, 3]) - for i in range(0, self.atom_ntype): - nn = self.numat[i] - self.asym_pos_arr[:nn, i, :] = self.asym_pos[i] - - self.numat = np.zeros(self.atom_ntype, dtype=np.int32) - for i in range(0, self.atom_ntype): - self.numat[i] = self.asym_pos[i].shape[0] - Z = self.atom_type[i] - elem = constants.ptableinverse[Z] - scatfac[i, :] = constants.scatfac[elem] - if Z <= 92: - frel[i] = constants.frel[elem] - fNT[i] = constants.fNT[elem] - - sf, sf_raw = _calcxrsf( - hkl2d, - nref, - multiplicity, - w_int, - self.wavelength, - 
self.rmt.astype(np.float64),
-            self.atom_type,
-            self.atom_ntype,
-            betaij,
-            occ,
-            self.asym_pos_arr,
-            self.numat,
-            scatfac,
-            fNT,
-            frel,
-            f_anomalous_data,
-            self.f_anomalous_data_sizes,
-        )
-
-        return sf_raw
-
-    """
-    molecular mass: calculates the molar weight of the unit cell.
-    since the unitcell can have multiple formula units, this
-    might be greater than the molecular weight
-    """
-
-    def calc_unitcell_mass(self):
-        a_mass = constants.atom_weights[self.atom_type - 1]
-        return np.sum(a_mass * self.numat)
-
-    """
-    calculate the number density in 1/micron^3
-    number density = density * Avogadro / unitcell mass
-    the 1e-12 factor converts from 1/cm^3 to 1/micron^3
-    """
-
-    def calc_number_density(self):
-        M = self.calc_unitcell_mass()
-        Na = constants.cAvogadro
-
-        return 1e-12 * self.density * Na / M
-
-    def calc_absorption_cross_sec(self):
-
-        abs_cs_total = 0.0
-        for i in range(self.atom_ntype):
-            Z = self.atom_type[i]
-            elem = constants.ptableinverse[Z]
-            abs_cs_total += (
-                self.pe_cs[elem](self.wavelength)
-                * self.numat[i]
-                / np.sum(self.numat)
-            )
-        return abs_cs_total
-
-    """
-    calculate the absorption coefficient, which is
-    computed using the sum of the photoeffect, compton and
-    rayleigh cross sections. the pair and triplet production
-    cross sections etc. are not applicable in the energy range
-    of interest and are therefore neglected.
-
-    attenuation coeff = sigma_total * density
-
-    attenuation_length = 1/attenuation_coeff
-
-    NOTE: units will be microns!!
-    """
-
-    def calc_absorption_length(self):
-        abs_cs_total = self.calc_absorption_cross_sec()
-
-        # the 1e4 factor converts the attenuation length from cm -> micron
-        self.absorption_length = 1e4 / (abs_cs_total * self.density)
-
-    """
-    calculate the Bragg angle for a reflection. returns NaN if
-    the reflection is not possible for the voltage/wavelength
-    """
-
-    def CalcBraggAngle(self, hkl):
-        glen = self.CalcLength(hkl, 'r')
-        sth = self.wavelength * glen * 0.5
-        return np.arcsin(sth)
-
-    def ChooseSymmetric(self, hkllist, InversionSymmetry=True):
-        '''
-        this function takes a list of hkl vectors and
-        picks out a subset of the list, keeping only one
-        of the symmetrically equivalent ones. The convention
-        is to choose the hkl with the most positive components.
-        '''
-        mask = np.ones(hkllist.shape[0], dtype=bool)
-        laue = InversionSymmetry
-
-        for i, g in enumerate(hkllist):
-            if mask[i]:
-
-                geqv = self.CalcStar(g, 'r', applyLaue=laue)
-
-                for r in geqv[1:]:
-                    rid = np.where(np.all(r == hkllist, axis=1))
-                    mask[rid] = False
-
-        hkl = hkllist[mask, :].astype(np.int32)
-
-        hkl_max = []
-
-        for g in hkl:
-            geqv = self.CalcStar(g, 'r', applyLaue=laue)
-            loc = np.argmax(np.sum(geqv, axis=1))
-            gmax = geqv[loc, :]
-            hkl_max.append(gmax)
-
-        return np.array(hkl_max).astype(np.int32)
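
CalcBraggAngle above is Bragg's law with |g| = 1/d, so it can be sanity-checked without a unitcell instance. A minimal sketch with illustrative numbers (Cu K-alpha wavelength and a 0.2 nm d-spacing; not part of the patch):

import numpy as np

wavelength = 0.15406            # nm, Cu K-alpha (illustrative)
d_spacing = 0.2                 # nm

glen = 1.0 / d_spacing          # |g|, as CalcLength(hkl, 'r') would return
sth = wavelength * glen * 0.5   # sin(theta) = lambda / (2 d)

# ~22.66 degrees; np.arcsin returns NaN when lambda > 2 d,
# i.e. when the reflection is not reachable
print(np.degrees(np.arcsin(sth)))
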
-
-    def SortHKL(self, hkllist):
-        '''
-        this function sorts the hkllist by increasing |g|,
-        i.e. decreasing d-spacing. If two vectors have the same
-        length, they are ordered with increasing priority
-        given to l, k and h
-        '''
-        glen = []
-        for g in hkllist:
-            glen.append(np.round(self.CalcLength(g, 'r'), 8))
-
-        dtype = [
-            ('glen', float),
-            ('max', int),
-            ('sum', int),
-            ('h', int),
-            ('k', int),
-            ('l', int),
-        ]
-
-        a = []
-        for i, gl in enumerate(glen):
-            g = hkllist[i, :]
-            a.append((gl, np.max(g), np.sum(g), g[0], g[1], g[2]))
-        a = np.array(a, dtype=dtype)
-
-        isort = np.argsort(a, order=['glen', 'max', 'sum', 'l', 'k', 'h'])
-        return hkllist[isort, :]
-
-    def getHKLs(self, dmin):
-        '''
-        this function generates the symmetrically unique set of
-        hkls up to a given dmin.
-        dmin is in nm
-        '''
-        '''
-        always apply the centrosymmetric condition because of
-        Friedel's law for x-rays, so only 4 of the 8 octants
-        are sampled for unique hkls. By convention we will
-        ignore all l < 0
-        '''
-
-        hmin = -self.ih - 1
-        hmax = self.ih
-        kmin = -self.ik - 1
-        kmax = self.ik
-        lmin = -1
-        lmax = self.il
-
-        hkllist = np.array(
-            [
-                [ih, ik, il]
-                for ih in np.arange(hmax, hmin, -1)
-                for ik in np.arange(kmax, kmin, -1)
-                for il in np.arange(lmax, lmin, -1)
-            ]
-        )
-
-        hkl_allowed = spacegroup.Allowed_HKLs(self.sgnum, hkllist)
-
-        hkl_dsp = []
-
-        for g in hkl_allowed:
-
-            # ignore [0 0 0] as it is the direct beam
-            if np.sum(np.abs(g)) != 0:
-
-                dspace = 1.0 / self.CalcLength(g, 'r')
-
-                if dspace >= dmin:
-                    hkl_dsp.append(g)
-
-        '''
-        we now have a list of g vectors which are all within the dmin
-        range; the systematic absences due to lattice centering and glide
-        planes/screw axes have been taken care of
-
-        the next order of business is to go through the list and only pick
-        out one of the symmetrically equivalent hkls from the list.
-        '''
-        hkl_dsp = np.array(hkl_dsp).astype(np.int32)
-        '''
-        the InversionSymmetry switch enforces the application of the
-        inversion symmetry regardless of whether the crystal has the
-        symmetry or not. this is necessary in the case of x-rays due to
-        Friedel's law
-        '''
-        hkl = self.ChooseSymmetric(hkl_dsp, InversionSymmetry=True)
-
-        '''
-        finally sort in order of decreasing d-spacing
-        '''
-        self.hkls = self.SortHKL(hkl)
-
-        return self.hkls
-
-    '''
-    set some properties for the unitcell class. only the lattice
-    parameters, space group and asymmetric positions can change,
-    but all the dependent parameters will be automatically updated
-    '''
-
-    def Required_lp(self, p):
-        return _rqpDict[self.latticeType][1](p)
-
-    def Required_C(self, C):
-        return np.array([C[x] for x in _StiffnessDict[self._laueGroup][0]])
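
For the cubic Laue groups only C11, C12 and C44 survive as independent constants, so the fill / equalize / symmetrize pattern used by MakeStiffnessMatrix (which follows) can be spelled out directly. A self-contained sketch with roughly copper-like constants in GPa (illustrative values; not part of the patch):

import numpy as np

c11, c12, c44 = 168.0, 121.0, 75.0   # GPa, illustrative

C = np.zeros((6, 6))
C[0, 0], C[0, 1], C[3, 3] = c11, c12, c44

# equality constraints for the cubic Laue group (cf. C_cubic_eq below)
C[0, 2] = C[0, 1]
C[1, 1] = C[2, 2] = C[0, 0]
C[1, 2] = C[0, 2]
C[4, 4] = C[5, 5] = C[3, 3]

# fill the lower triangle by symmetry, then invert for the compliance
C = np.triu(C) + np.triu(C, 1).T
S = np.linalg.inv(C)   # GPa^-1; the class stores TPa^-1 via a 1e3 scaling
print(np.allclose(C, C.T), S[0, 0])
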
-
-    def MakeStiffnessMatrix(self, inp_Cvals):
-        if len(inp_Cvals) != len(_StiffnessDict[self._laueGroup][0]):
-            x = len(_StiffnessDict[self._laueGroup][0])
-            msg = (
-                "number of constants entered is not correct."
-                f" need a total of {x} independent constants."
-            )
-            raise IOError(msg)
-
-        # initialize all zeros and fill the supplied values
-        C = np.zeros([6, 6])
-        for i, x in enumerate(_StiffnessDict[self._laueGroup][0]):
-            C[x] = inp_Cvals[i]
-
-        # enforce the equality constraints
-        C = _StiffnessDict[self._laueGroup][1](C)
-
-        # finally fill the lower triangular matrix
-        for i in range(6):
-            for j in range(i):
-                C[i, j] = C[j, i]
-
-        self.stiffness = C
-        self.compliance = np.linalg.inv(C)
-
-    def inside_spheretriangle(self, conn, dir3, hemisphere, switch):
-        '''
-        check if a direction is inside a spherical triangle.
-        the logic used is as follows:
-        if the determinants of [A B x], [A x C] and [x B C] all
-        have the same sign, then x is inside the triangle
-        formed by A, B and C
-
-        returns a mask with inside as True and outside as False
-
-        11/23/2020 SS switch is now a string specifying which
-        symmetry group to use for reducing directions
-        11/23/2020 SS catching cases when vertices are empty
-        '''
-
-        '''
-        first get the vertices of the triangles for this switch
-        '''
-        vertex = self.sphere_sector.vertices[switch]
-
-        A = np.atleast_2d(vertex[:, conn[0]]).T
-        B = np.atleast_2d(vertex[:, conn[1]]).T
-        C = np.atleast_2d(vertex[:, conn[2]]).T
-
-        mask = []
-        for x in dir3:
-
-            x2 = np.atleast_2d(x).T
-            d1 = np.linalg.det(np.hstack((A, B, x2)))
-            d2 = np.linalg.det(np.hstack((A, x2, C)))
-            d3 = np.linalg.det(np.hstack((x2, B, C)))
-            '''
-            catch cases very close to the FZ boundary, where the
-            determinant can be a very small positive or negative
-            number
-            '''
-            if np.abs(d1) < eps:
-                d1 = 0.0
-            if np.abs(d2) < eps:
-                d2 = 0.0
-            if np.abs(d3) < eps:
-                d3 = 0.0
-
-            ss = np.unique(np.sign([d1, d2, d3]))
-            if hemisphere == 'upper':
-                if np.all(ss >= 0.0):
-                    mask.append(True)
-                else:
-                    mask.append(False)
-
-            elif hemisphere == 'both':
-                if len(ss) == 1:
-                    mask.append(True)
-                elif len(ss) == 2:
-                    if 0 in ss:
-                        mask.append(True)
-                    else:
-                        mask.append(False)
-                elif len(ss) == 3:
-                    mask.append(False)
-
-        mask = np.array(mask)
-        return mask
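
The determinant sign test used by inside_spheretriangle above is easy to check in isolation: for a spherical triangle with vertices A, B, C, a unit vector x lies inside when det[A B x], det[A x C] and det[x B C] share a sign. A toy octant example (illustrative vertices; not part of the patch):

import numpy as np

# vertices of the +x/+y/+z octant triangle
A = np.array([1.0, 0.0, 0.0])
B = np.array([0.0, 1.0, 0.0])
C = np.array([0.0, 0.0, 1.0])

def inside(x):
    d = np.array([
        np.linalg.det(np.column_stack((A, B, x))),
        np.linalg.det(np.column_stack((A, x, C))),
        np.linalg.det(np.column_stack((x, B, C))),
    ])
    # treat near-zero determinants (boundary hits) as inside
    return np.all(np.sign(d)[np.abs(d) > 1e-12] >= 0)

x_in = np.array([1.0, 1.0, 1.0]) / np.sqrt(3.0)
x_out = np.array([-1.0, 1.0, 1.0]) / np.sqrt(3.0)
print(inside(x_in), inside(x_out))   # True False
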
-
-    '''
-    @AUTHOR Saransh Singh, Lawrence Livermore National Lab, saransh1@llnl.gov
-
-    @date   10/28/2020 SS 1.0 original
-            11/23/2020 SS 1.1 the laueswitch has been changed from a boolean
-            variable to a string input with four possible values
-    @params dir3 : n x 3 array of directions to reduce
-            switch : switch to decide which symmetry group to use; one of:
-            (a) 'pg' use the cartesian point group symmetry
-            (b) 'laue' use the laue symmetry
-            (c) 'super' use the supergroup symmetry used in coloring
-            (d) 'superlaue' use the supergroup of the laue group
-
-    @detail this subroutine takes a direction vector and uses the point group
-    symmetry of the unitcell to reduce it to the fundamental stereographic
-    triangle for that point group. this function is used in generating the
-    IPF color legend for orientations. for now we are assuming dir3 is an
-    nx3 array of directions.
-    '''
-
-    def reduce_dirvector(self, dir3, switch='pg'):
-        '''
-        check that the dimensions of the dir3 array are to spec
-        '''
-        idx = np.arange(dir3.shape[0], dtype=np.int32)
-        dir3 = np.ascontiguousarray(np.atleast_2d(dir3))
-        if dir3.ndim != 2:
-            raise RuntimeError("reduce_dirvector: invalid shape of dir3 array")
-
-        '''
-        check if the direction vectors are unit vectors or not.
-        if they are not, normalize them to get unit vectors. the dir vector
-        is in the sample frame, so by default it is assumed to be in an
-        orthonormal cartesian frame. this defines the normalization as
-        just division by the L2 norm
-        '''
-        eps = constants.sqrt_epsf
-
-        if np.all(np.abs(np.linalg.norm(dir3, axis=1) - 1.0) < eps):
-            dir3n = dir3
-        else:
-            if np.all(np.linalg.norm(dir3) > eps):
-                dir3n = dir3 / np.tile(np.linalg.norm(dir3, axis=1), [3, 1]).T
-            else:
-                raise RuntimeError(
-                    "at least one of the input directions seems \
-                    to be a null vector"
-                )
-
-        '''
-        we need both the symmetry reductions for the point group and the
-        laue group. this will be used later on in the coloring routines to
-        determine if the points need to be moved to the southern hemisphere
-        or not
-        '''
-        dir3_copy = np.copy(dir3n)
-        dir3_reduced = np.array([])
-        idx_red = np.array([], dtype=np.int32)
-        '''
-        the switch determines which set of symmetry operations to loop over
-        '''
-        hemisphere = self.sphere_sector.hemisphere[switch]
-        ntriangle = self.sphere_sector.ntriangle[switch]
-        connectivity = self.sphere_sector.connectivity[switch]
-
-        if switch == 'pg':
-            sym = self.SYM_PG_c
-
-        elif switch == 'super':
-            sym = self.SYM_PG_supergroup
-
-        elif switch == 'laue':
-            sym = self.SYM_PG_c_laue
-
-        elif switch == 'superlaue':
-            sym = self.SYM_PG_supergroup_laue
-
-        for sop in sym:
-
-            if dir3_copy.size != 0:
-
-                dir3_sym = np.dot(sop, dir3_copy.T).T
-
-                mask = np.zeros(dir3_sym.shape[0]).astype(bool)
-
-                if ntriangle == 0:
-                    if hemisphere == 'both':
-                        mask = np.ones(dir3_sym.shape[0], dtype=bool)
-                    elif hemisphere == 'upper':
-                        mask = dir3_sym[:, 2] >= 0.0
-                else:
-                    for ii in range(ntriangle):
-                        tmpmask = self.inside_spheretriangle(
-                            connectivity[:, ii], dir3_sym, hemisphere, switch
-                        )
-                        mask = np.logical_or(mask, tmpmask)
-
-                if np.sum(mask) > 0:
-                    if dir3_reduced.size != 0:
-                        dir3_reduced = np.vstack(
-                            (dir3_reduced, dir3_sym[mask, :])
-                        )
-                        idx_red = np.hstack((idx_red, idx[mask]))
-                    else:
-                        dir3_reduced = np.copy(dir3_sym[mask, :])
-                        idx_red = np.copy(idx[mask])
-
-                dir3_copy = dir3_copy[np.logical_not(mask), :]
-                idx = idx[np.logical_not(mask)]
-            else:
-                break
-        dir3_r = np.zeros(dir3_reduced.shape)
-        dir3_r[idx_red, :] = dir3_reduced
-
-        return dir3_r
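
The accept-and-remove bookkeeping in reduce_dirvector can be seen in miniature with a toy symmetry set: apply each operator in turn, keep the images that land in the chosen fundamental region, and drop them from the work list. A standalone sketch using the upper hemisphere as the "fundamental zone" (illustrative only; not part of the patch):

import numpy as np

# toy two-element group: identity and a two-fold rotation about x
sym = [np.eye(3), np.diag([1.0, -1.0, -1.0])]

dirs = np.array([[0.0, 0.0, 1.0], [0.0, 0.6, -0.8]])
reduced = np.full_like(dirs, np.nan)
todo = np.arange(len(dirs))

for sop in sym:
    if todo.size == 0:
        break
    imgs = dirs[todo] @ sop.T
    mask = imgs[:, 2] >= 0.0          # "inside" test: upper hemisphere
    reduced[todo[mask]] = imgs[mask]
    todo = todo[~mask]

print(reduced)   # both rows now have z >= 0
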
-
-    def color_directions(self, dir3, laueswitch):
-        '''
-        @AUTHOR Saransh Singh, Lawrence Livermore National Lab,
-                saransh1@llnl.gov
-        @DATE   11/12/2020 SS 1.0 original
-        @PARAM  dir3 is the crystal direction obtained by multiplying the
-                inverse of the crystal orientation with the reference
-                direction
-                laueswitch: perform the reduction based on the laue group or
-                the point group
-
-        @DETAIL this is the routine which makes the calls to the
-        sphere_sector class, which correctly colors the orientations for
-        this crystal class. the logic is as follows:
-
-        1. reduce the direction to the fundamental zone of the point group
-        2. reduce to the fundamental zone of the supergroup
-        3. if both are the same, then the color (hsl) is assigned by the
-           polar and azimuthal angles
-        4. if different, then the barycenter lightness is replaced by 1-L
-           (equivalent to moving the barycenter to pi-theta)
-        '''
-
-        if laueswitch:
-            '''
-            this is the case where we color orientations based on the laue
-            group of the crystal. this is always going to be the case with
-            x-rays, which introduce inversion symmetry. For other probes,
-            this is not the case.
-            '''
-            dir3_red = self.reduce_dirvector(dir3, switch='laue')
-            dir3_red_supergroup = self.reduce_dirvector(
-                dir3, switch='superlaue'
-            )
-            switch = 'superlaue'
-
-        else:
-            '''
-            follow the logic in the function description
-            '''
-            dir3_red = self.reduce_dirvector(dir3, switch='pg')
-            dir3_red_supergroup = self.reduce_dirvector(dir3, switch='super')
-            switch = 'super'
-
-        mask = np.linalg.norm(dir3_red - dir3_red_supergroup, axis=1) < eps
-        hsl = self.sphere_sector.get_color(dir3_red_supergroup, mask, switch)
-
-        rgb = colorspace.hsl2rgb(hsl)
-        return rgb
-
-    def color_orientations(
-        self, rmats, ref_dir=np.array([0.0, 0.0, 1.0]), laueswitch=True
-    ):
-        '''
-        @AUTHOR Saransh Singh, Lawrence Livermore National Lab,
-                saransh1@llnl.gov
-        @DATE   11/12/2020 SS 1.0 original
-        @PARAM  rmats rotation matrices of size nx3x3
-                ref_dir reference direction of the sample frame along which
-                all crystal directions are colored
-                laueswitch should we use the laue group for coloring or not
-        @DETAIL this is a simple routine which takes orientations as
-        rotation matrices and a reference sample direction ([0 0 1] by
-        default) and returns the directions in the crystal reference frame.
-        Note that the crystal orientation is defined as the orientation
-        which takes the """SAMPLE""" reference frame TO the """CRYSTAL"""
-        frame. Since we are computing the conversion from crystal to
-        sample, we will need to INVERT these matrices. Thankfully, this is
-        just a transpose.
-        '''
-
-        '''
-        first make sure that the rotation matrices are of size nx3x3
-        '''
-        if rmats.ndim == 2:
-            rmats = np.atleast_3d(rmats).T
-        else:
-            assert (
-                rmats.ndim == 3
-            ), "rotation matrices need to be nx3x3. Please check the size."
-
-        '''
-        obtain the direction vectors by simple matrix multiplication of the
-        transpose of the rotation matrix with the reference direction
-        '''
-        dir3 = []
-        for r in rmats:
-            dir3.append(np.dot(r.T, ref_dir))
-
-        dir3 = np.array(dir3)
-        '''
-        finally get the rgb colors
-        '''
-        rgb = self.color_directions(dir3, laueswitch)
-        return rgb
-
-    def is_editable(self, lp_name):
-        """
-        @author Saransh Singh, Lawrence Livermore National Lab
-        @date 03/17/2021 SS 1.0 original
-        @details check if a certain field in the lattice parameters
-        is editable. this depends on the space group number or the
-        lattice class
-        """
-
-        _lpnamelist = list(_lpname)
-        index = _lpnamelist.index(lp_name)
-        editable_fields = _rqpDict[self.latticeType][0]
-        return index in editable_fields
-
-    def convert_lp_to_valunits(self, lp):
-        """
-        added 03/17/2021 SS
-        """
-        lp_valunit = []
-        for i in range(6):
-            if i < 3:
-                lp_valunit.append(valWUnit('lp', 'length', lp[i], 'nm'))
-            else:
-                lp_valunit.append(valWUnit('lp', 'angle', lp[i], 'degrees'))
-
-        return lp_valunit
-
-    def fill_correct_lp_vals(self, lp, val, lp_name):
-        """
-        added 03/17/2021 SS
-        """
-        index = list(_lpname).index(lp_name)
-        lp[index] = val
-        lp_red = [lp[i] for i in _rqpDict[self.latticeType][0]]
-        lp = _rqpDict[self.latticeType][1](lp_red)
-        lp_valunit = self.convert_lp_to_valunits(lp)
-        return lp_valunit
-
-    @property
-    def compliance(self):
-        # Compliance in TPa⁻¹. Stiffness is in GPa.
- if not hasattr(self, 'stiffness'): - raise AttributeError('Stiffness not set on unit cell') - - return np.linalg.inv(self.stiffness / 1.0e3) - - @compliance.setter - def compliance(self, v): - # Compliance in TPa⁻¹. Stiffness is in GPa. - self.stiffness = np.linalg.inv(v) * 1.0e3 - - # lattice constants as properties - - @property - def lparms(self): - return [self.a, self.b, self.c, self.alpha, self.beta, self.gamma] - - @lparms.setter - def lparms(self, lp): - """ - set the lattice parameters here - """ - self._a = lp[0].getVal("nm") - self._b = lp[1].getVal("nm") - self._c = lp[2].getVal("nm") - self._alpha = lp[3].getVal("degrees") - self._beta = lp[4].getVal("degrees") - self._gamma = lp[5].getVal("degrees") - self.calcmatrices() - self.init_max_g_index() - self.CalcMaxGIndex() - if hasattr(self, 'numat'): - self.CalcDensity() - - @property - def lparms_reduced(self): - lp = self.lparms - lp_red = [lp[i] for i in _rqpDict[self.latticeType][0]] - return lp_red - - @property - def a(self): - return self._a - - @a.setter - def a(self, val): - if self.is_editable("a"): - lp = self.lparms - lp_valunit = self.fill_correct_lp_vals(lp, val, "a") - self.lparms = lp_valunit - else: - msg = f"not an editable field" f" for this space group" - raise RuntimeError(msg) - - @property - def b(self): - return self._b - - @b.setter - def b(self, val): - if self.is_editable("b"): - lp = self.lparms - lp_valunit = self.fill_correct_lp_vals(lp, val, "b") - self.lparms = lp_valunit - else: - msg = f"not an editable field" f" for this space group" - raise RuntimeError(msg) - - @property - def c(self): - return self._c - - @c.setter - def c(self, val): - if self.is_editable("c"): - lp = self.lparms - lp_valunit = self.fill_correct_lp_vals(lp, val, "c") - self.lparms = lp_valunit - else: - msg = f"not an editable field" f" for this space group" - raise RuntimeError(msg) - - @property - def alpha(self): - return self._alpha - - @alpha.setter - def alpha(self, val): - if self.is_editable("alpha"): - lp = self.lparms - lp_valunit = self.fill_correct_lp_vals(lp, val, "alpha") - self.lparms = lp_valunit - else: - msg = f"not an editable field" f" for this space group" - raise RuntimeError(msg) - - @property - def beta(self): - return self._beta - - @beta.setter - def beta(self, val): - if self.is_editable("beta"): - lp = self.lparms - lp_valunit = self.fill_correct_lp_vals(lp, val, "beta") - self.lparms = lp_valunit - else: - msg = f"not an editable field" f" for this space group" - raise RuntimeError(msg) - - @property - def gamma(self): - return self._gamma - - @gamma.setter - def gamma(self, val): - if self.is_editable("gamma"): - lp = self.lparms - lp_valunit = self.fill_correct_lp_vals(lp, val, "gamma") - self.lparms = lp_valunit - else: - msg = f"not an editable field" f" for this space group" - raise RuntimeError(msg) - - @property - def dmin(self): - return self._dmin - - @dmin.setter - def dmin(self, v): - if self._dmin == v: - return - self._dmin = v - # Update the Max G Index - self.CalcMaxGIndex() - - @property - def U(self): - return self._U - - @U.setter - def U(self, Uarr): - self._U = Uarr - self.aniU = False - if Uarr.ndim > 1: - self.aniU = True - self.calcBetaij() - - @property - def voltage(self): - return self._voltage - - @voltage.setter - def voltage(self, v): - self._voltage = v - self.CalcWavelength() - - @property - def wavelength(self): - return self._mlambda - - @wavelength.setter - def wavelength(self, mlambda): - self._mlambda = mlambda - - # space group number - @property - def 
sgnum(self): - return self._sym_sgnum - - @sgnum.setter - def sgnum(self, val): - if not (isinstance(val, int)): - raise ValueError('space group should be integer') - if not ((val >= 1) and (val <= 230)): - raise ValueError('space group number should be between 1 and 230.') - - self._sym_sgnum = val - self.sg_hmsymbol = symbols.pstr_spacegroup[val - 1].strip() - - ( - self.SYM_SG, - self.SYM_PG_d, - self.SYM_PG_d_laue, - self.centrosymmetric, - self.symmorphic, - ) = symmetry.GenerateSGSym(self.sgnum, self.sgsetting) - - self.latticeType = symmetry.latticeType(self.sgnum) - - self.nsgsym = self.SYM_SG.shape[0] - self.npgsym = self.SYM_PG_d.shape[0] - - self.GenerateRecipPGSym() - - ''' - asymmetric positions due to space group symmetry - used for structure factor calculations - ''' - self.CalcPositions() - self.GetPgLg() - - ''' - SS 11/10/2020 added cartesian PG sym for reducing directions - to standard stereographic triangle - ''' - self.GenerateCartesianPGSym() - - ''' - SS 11/11/2020 adding the sphere_sector class initialization here - ''' - self.sphere_sector = sphere_sector.sector( - self._pointGroup, - self._laueGroup, - self._supergroup, - self._supergroup_laue, - ) - self.CalcDensity() - self.calc_absorption_length() - - @property - def pgnum(self): - return constants.SYM_PG_to_PGNUM[self.point_group] - - @property - def point_group(self): - return self._pointGroup - - @property - def atom_pos(self): - return self._atom_pos - - @atom_pos.setter - def atom_pos(self, val): - """ - SS 03/08/2021 fixing some issues with - updating asymmetric positions after - updating atominfo - fixing - """ - if hasattr(self, 'atom_type'): - if self.atom_ntype != val.shape[0]: - msg = ( - f"incorrect number of atom positions." - f" number of atom type = {self.atom_ntype} " - f" and number of" - f" atom positions = {val.shape[0]}." 
- ) - raise ValueError(msg) - - self._atom_pos = val - """ - update only if its not the first time - """ - if hasattr(self, 'asym_pos'): - self.CalcPositions() - - if hasattr(self, 'density'): - self.CalcDensity() - self.calc_absorption_length() - - @property - def atom_ntype(self): - return self.atom_type.shape[0] - - # asymmetric positions in unit cell - @property - def asym_pos(self): - return self._asym_pos - - @asym_pos.setter - def asym_pos(self, val): - assert ( - type(val) == list - ), 'input type to asymmetric positions should be list' - self._asym_pos = val - - @property - def numat(self): - return self._numat - - @numat.setter - def numat(self, val): - assert ( - val.shape[0] == self.atom_ntype - ), 'shape of numat is not consistent' - self._numat = val - - # direct metric tensor is read only - @property - def dmt(self): - return self._dmt - - # reciprocal metric tensor is read only - @property - def rmt(self): - return self._rmt - - # direct structure matrix is read only - @property - def dsm(self): - return self._dsm - - # reciprocal structure matrix is read only - @property - def rsm(self): - return self._rsm - - @property - def num_atom(self): - return np.sum(self.numat) - - @property - def vol(self): - return self._vol - - @property - def vol_per_atom(self): - # vol per atom in A^3 - return 1e3 * self.vol / self.num_atom - - -_rqpDict = { - 'triclinic': (tuple(range(6)), lambda p: p), # all 6 - # note beta - 'monoclinic': ((0, 1, 2, 4), lambda p: (p[0], p[1], p[2], 90, p[3], 90)), - 'orthorhombic': ((0, 1, 2), lambda p: (p[0], p[1], p[2], 90, 90, 90)), - 'tetragonal': ((0, 2), lambda p: (p[0], p[0], p[1], 90, 90, 90)), - 'trigonal': ((0, 2), lambda p: (p[0], p[0], p[1], 90, 90, 120)), - 'hexagonal': ((0, 2), lambda p: (p[0], p[0], p[1], 90, 90, 120)), - 'cubic': ((0,), lambda p: (p[0], p[0], p[0], 90, 90, 90)), -} - -_lpname = np.array(['a', 'b', 'c', 'alpha', 'beta', 'gamma']) - -laue_1 = 'ci' -laue_2 = 'c2h' -laue_3 = 'd2h' -laue_4 = 'c4h' -laue_5 = 'd4h' -laue_6 = 's6' -laue_7 = 'd3d' -laue_8 = 'c6h' -laue_9 = 'd6h' -laue_10 = 'th' -laue_11 = 'oh' - - -''' -these supergroups are the three exceptions to the coloring scheme -the point groups are not topological and can't have no discontinuities -in the IPF coloring scheme. they are -1, -3 and -4 point groups. 
-''' -supergroup_00 = 'c1' -supergroup_01 = 'c4' -supergroup_02 = 'c3' - -supergroup_1 = 'cs' -supergroup_2 = 'c2v' -supergroup_3 = 'd2h' -supergroup_4 = 'c4v' -supergroup_5 = 'd4h' -supergroup_6 = 'c3v' -supergroup_7 = 'c6v' -supergroup_8 = 'd3h' -supergroup_9 = 'd6h' -supergroup_10 = 'td' -supergroup_11 = 'oh' - - -def _sgrange(min, max): - return tuple(range(min, max + 1)) # inclusive range - - -''' -11/20/2020 SS added supergroup to the list which is used -for coloring the fundamental zone IPF -''' -_pgDict = { - _sgrange(1, 1): ('c1', laue_1, supergroup_1, supergroup_00), # Triclinic - _sgrange(2, 2): ('ci', laue_1, supergroup_00, supergroup_00), # laue 1 - _sgrange(3, 5): ('c2', laue_2, supergroup_2, supergroup_3), # Monoclinic - _sgrange(6, 9): ('cs', laue_2, supergroup_1, supergroup_3), - _sgrange(10, 15): ('c2h', laue_2, supergroup_3, supergroup_3), # laue 2 - _sgrange(16, 24): ( - 'd2', - laue_3, - supergroup_3, - supergroup_3, - ), # Orthorhombic - _sgrange(25, 46): ('c2v', laue_3, supergroup_2, supergroup_3), - _sgrange(47, 74): ('d2h', laue_3, supergroup_3, supergroup_3), # laue 3 - _sgrange(75, 80): ('c4', laue_4, supergroup_4, supergroup_5), # Tetragonal - _sgrange(81, 82): ('s4', laue_4, supergroup_01, supergroup_5), - _sgrange(83, 88): ('c4h', laue_4, supergroup_5, supergroup_5), # laue 4 - _sgrange(89, 98): ('d4', laue_5, supergroup_5, supergroup_5), - _sgrange(99, 110): ('c4v', laue_5, supergroup_4, supergroup_5), - _sgrange(111, 122): ('d2d', laue_5, supergroup_5, supergroup_5), - _sgrange(123, 142): ('d4h', laue_5, supergroup_5, supergroup_5), # laue 5 - # Trigonal # laue 6 [also c3i] - _sgrange(143, 146): ('c3', laue_6, supergroup_6, supergroup_02), - _sgrange(147, 148): ('s6', laue_6, supergroup_02, supergroup_02), - _sgrange(149, 155): ('d3', laue_7, supergroup_7, supergroup_9), - _sgrange(156, 161): ('c3v', laue_7, supergroup_6, supergroup_9), - _sgrange(162, 167): ('d3d', laue_7, supergroup_9, supergroup_9), # laue 7 - _sgrange(168, 173): ( - 'c6', - laue_8, - supergroup_7, - supergroup_9, - ), # Hexagonal - _sgrange(174, 174): ('c3h', laue_8, supergroup_7, supergroup_9), - _sgrange(175, 176): ('c6h', laue_8, supergroup_9, supergroup_9), # laue 8 - _sgrange(177, 182): ('d6', laue_9, supergroup_9, supergroup_9), - _sgrange(183, 186): ('c6v', laue_9, supergroup_7, supergroup_9), - _sgrange(187, 190): ('d3h', laue_9, supergroup_9, supergroup_9), - _sgrange(191, 194): ('d6h', laue_9, supergroup_9, supergroup_9), # laue 9 - _sgrange(195, 199): ('t', laue_10, supergroup_10, supergroup_11), # Cubic - _sgrange(200, 206): ( - 'th', - laue_10, - supergroup_11, - supergroup_11, - ), # laue 10 - _sgrange(207, 214): ('o', laue_11, supergroup_11, supergroup_11), - _sgrange(215, 220): ('td', laue_11, supergroup_10, supergroup_11), - _sgrange(221, 230): ( - 'oh', - laue_11, - supergroup_11, - supergroup_11, - ), # laue 11 -} - -''' -this dictionary has the mapping from laue group to number of elastic -constants needed in the voight 6x6 stiffness matrix. the compliance -matrix is just the inverse of the stiffness matrix -taken from International Tables for Crystallography Volume H -Powder diffraction -Edited by C. J. Gilmore, J. A. Kaduk and H. 
Schenk -''' -# independent components for the triclinic laue group -type1 = [] -for i in range(6): - for j in range(i, 6): - type1.append((i, j)) -type1 = tuple(type1) - -# independent components for the monoclinic laue group -# C14 = C15 = C24 = C25 = C34 = C35 = C46 = C56 = 0 -type2 = list(type1) -type2.remove((0, 3)) -type2.remove((0, 4)) -type2.remove((1, 3)) -type2.remove((1, 4)) -type2.remove((2, 3)) -type2.remove((2, 4)) -type2.remove((3, 5)) -type2.remove((4, 5)) -type2 = tuple(type2) - -# independent components for the orthorhombic laue group -# Above, plus C16 = C26 = C36 = C45 = 0 -type3 = list(type2) -type3.remove((0, 5)) -type3.remove((1, 5)) -type3.remove((2, 5)) -type3.remove((3, 4)) -type3 = tuple(type3) - -# independent components for the cyclic tetragonal laue group -# monoclinic, plus C36 = C45 = 0, C22 = C11, C23 = C13, C26 = −C16, C55 = C44 -type4 = list(type2) -type4.remove((2, 5)) -type4.remove((3, 4)) -type4.remove((1, 1)) -type4.remove((1, 2)) -type4.remove((1, 5)) -type4.remove((4, 4)) -type4 = tuple(type4) - -# independent components for the dihedral tetragonal laue group -# Above, plus C16 = 0 -type5 = list(type4) -type5.remove((0, 5)) -type5 = tuple(type5) - -# independent components for the trigonal laue group -# C16 = C26 = C34 = C35 = C36 = C45 = 0, C22 = C11, C23 = C13, C24 = −C14, -# C25 = −C15, C46 = −C15, C55 = C44, C56 = C14, C66 = (C11 − C12)/2 -type6 = list(type1) -type6.remove((0, 5)) -type6.remove((1, 5)) -type6.remove((2, 3)) -type6.remove((2, 4)) -type6.remove((2, 5)) -type6.remove((3, 4)) -type6.remove((1, 1)) -type6.remove((1, 2)) -type6.remove((1, 3)) -type6.remove((1, 4)) -type6.remove((3, 5)) -type6.remove((4, 4)) -type6.remove((4, 5)) -type6.remove((5, 5)) -type6 = tuple(type6) - -# independent components for the rhombohedral laue group -# Above, plus C15 = 0 -type7 = list(type6) -type7.remove((0, 4)) -type7 = tuple(type7) - -# independent components for the hexagonal laue group -# Above, plus C14 = 0 -type8 = list(type7) -type8.remove((0, 3)) -type8 = tuple(type8) - -# independent components for the cubic laue group -# As for dihedral tetragonal, plus C13 = C12, C33 = C11, C66 = C44 -type9 = list(type5) -type9.remove((0, 2)) -type9.remove((2, 2)) -type9.remove((5, 5)) - -''' -these lambda functions take care of the equality constrains in the -matrices. 
if there are no equality constraints, then the identity
-function is used.
-'''
-
-
-def identity(x):
    return x
-
-
-def C_cyclictet_eq(x):
-    x[1, 1] = x[0, 0]
-    x[1, 2] = x[0, 2]
-    x[1, 5] = -x[0, 5]
-    x[4, 4] = x[3, 3]
-    return x
-
-
-def C_trigonal_eq(x):
-    x[1, 1] = x[0, 0]
-    x[1, 2] = x[0, 2]
-    x[1, 3] = -x[0, 3]
-    x[1, 4] = -x[0, 4]
-    x[3, 5] = -x[0, 4]
-    x[4, 4] = x[3, 3]
-    x[4, 5] = x[0, 3]
-    x[5, 5] = 0.5 * (x[0, 0] - x[0, 1])
-    return x
-
-
-def C_cubic_eq(x):
-    x[0, 2] = x[0, 1]
-    x[2, 2] = x[0, 0]
-    x[5, 5] = x[3, 3]
-    x[1, 1] = x[0, 0]
-    x[1, 2] = x[0, 2]
-    x[1, 5] = -x[0, 5]
-    x[4, 4] = x[3, 3]
-    return x
-
-
-_StiffnessDict = {
-    # triclinic, all 21 components in upper triangular matrix needed
-    laue_1: [type1, identity],
-    laue_2: [type2, identity],  # monoclinic, 13 components needed
-    laue_3: [type3, identity],  # orthorhombic, 9 components needed
-    laue_4: [type4, C_cyclictet_eq],  # cyclic tetragonal, 7 components needed
-    # dihedral tetragonal, 6 components needed
-    laue_5: [type5, C_cyclictet_eq],
-    laue_6: [type6, C_trigonal_eq],  # trigonal I, 7 components
-    laue_7: [type7, C_trigonal_eq],  # rhombohedral, 6 components
-    laue_8: [type8, C_trigonal_eq],  # cyclic hexagonal, 5 components needed
-    laue_9: [type8, C_trigonal_eq],  # dihedral hexagonal, 5 components
-    laue_10: [type9, C_cubic_eq],  # cubic, 3 components
-    laue_11: [type9, C_cubic_eq],  # cubic, 3 components
-}
diff --git a/hexrd/hedm/xrdutil/__init__.py b/hexrd/hedm/xrdutil/__init__.py
index da7dfa681..d42777b8d 100644
--- a/hexrd/hedm/xrdutil/__init__.py
+++ b/hexrd/hedm/xrdutil/__init__.py
@@ -1,7 +1,7 @@
 from .utils import *
-
-# For now import these private members!
-from .utils import _project_on_detector_plane -from .utils import _project_on_detector_cylinder from .utils import _fetch_hkls_from_planedata from .utils import _filter_hkls_eta_ome + +#TODO: Fully separate out the utils.py scripts +from hexrd.hed.xrdutil.utils import * +from hexrd.laue.xrdutil.utils import * \ No newline at end of file diff --git a/hexrd/hedm/xrdutil/utils.py b/hexrd/hedm/xrdutil/utils.py index d8150ded1..a07bcd860 100644 --- a/hexrd/hedm/xrdutil/utils.py +++ b/hexrd/hedm/xrdutil/utils.py @@ -28,7 +28,7 @@ from typing import Optional, Union, Any, Generator -from hexrd.hedm.material.crystallography import PlaneData +from hexrd.core.material.crystallography import PlaneData from hexrd.core.distortion.distortionabc import DistortionABC import numba @@ -40,7 +40,8 @@ from hexrd.core import rotations as rot from hexrd.core import gridutil as gutil -from hexrd.hedm.material.crystallography import processWavelength, PlaneData +from hexrd.hed.xrdutil.utils import _project_on_detector_plane +from hexrd.core.material.crystallography import processWavelength, PlaneData from hexrd.core.transforms import xfcapi from hexrd.core.valunits import valWUnit @@ -535,369 +536,6 @@ def _filter_hkls_eta_ome( return allAngs, allHKLs -def _project_on_detector_plane( - allAngs: np.ndarray, - rMat_d: np.ndarray, - rMat_c: np.ndarray, - chi: float, - tVec_d: np.ndarray, - tVec_c: np.ndarray, - tVec_s: np.ndarray, - distortion: DistortionABC, - beamVec: np.ndarray = constants.beam_vec, -) -> tuple[np.ndarray, np.ndarray, np.ndarray]: - """ - utility routine for projecting a list of (tth, eta, ome) onto the - detector plane parameterized by the args - """ - gVec_cs = xfcapi.angles_to_gvec( - allAngs, chi=chi, rmat_c=rMat_c, beam_vec=beamVec - ) - - rMat_ss = xfcapi.make_sample_rmat(chi, allAngs[:, 2]) - - tmp_xys = xfcapi.gvec_to_xy( - gVec_cs, - rMat_d, - rMat_ss, - rMat_c, - tVec_d, - tVec_s, - tVec_c, - beam_vec=beamVec, - ) - - valid_mask = ~(np.isnan(tmp_xys[:, 0]) | np.isnan(tmp_xys[:, 1])) - - det_xy = np.atleast_2d(tmp_xys[valid_mask, :]) - - # apply distortion if specified - if distortion is not None: - det_xy = distortion.apply_inverse(det_xy) - - return det_xy, rMat_ss, valid_mask - - -def _project_on_detector_cylinder( - allAngs: np.ndarray, - chi: float, - tVec_d: np.ndarray, - caxis: np.ndarray, - paxis: np.ndarray, - radius: float, - physical_size: np.ndarray, - angle_extent: float, - distortion: DistortionABC = None, - beamVec: np.ndarray = constants.beam_vec, - etaVec: np.ndarray = constants.eta_vec, - tVec_s: np.ndarray = constants.zeros_3x1, - rmat_s: np.ndarray = constants.identity_3x3, - tVec_c: np.ndarray = constants.zeros_3x1, -) -> tuple[np.ndarray, np.ndarray, np.ndarray]: - """ - utility routine for projecting a list of (tth, eta, ome) onto the - detector plane parameterized by the args. 
this function does the - computation for a cylindrical detector - """ - dVec_cs = xfcapi.angles_to_dvec( - allAngs, chi=chi, rmat_c=np.eye(3), beam_vec=beamVec, eta_vec=etaVec - ) - - rMat_ss = np.tile(rmat_s, [allAngs.shape[0], 1, 1]) - - tmp_xys, valid_mask = _dvecToDetectorXYcylinder( - dVec_cs, - tVec_d, - caxis, - paxis, - radius, - physical_size, - angle_extent, - tVec_s=tVec_s, - rmat_s=rmat_s, - tVec_c=tVec_c, - ) - - det_xy = np.atleast_2d(tmp_xys[valid_mask, :]) - - # apply distortion if specified - if distortion is not None: - det_xy = distortion.apply_inverse(det_xy) - - return det_xy, rMat_ss, valid_mask - - -def _dvecToDetectorXYcylinder( - dVec_cs: np.ndarray, - tVec_d: np.ndarray, - caxis: np.ndarray, - paxis: np.ndarray, - radius: float, - physical_size: np.ndarray, - angle_extent: float, - tVec_s: np.ndarray = constants.zeros_3x1, - tVec_c: np.ndarray = constants.zeros_3x1, - rmat_s: np.ndarray = constants.identity_3x3, -) -> tuple[np.ndarray, np.ndarray]: - - cvec = _unitvec_to_cylinder( - dVec_cs, - caxis, - paxis, - radius, - tVec_d, - tVec_s=tVec_s, - tVec_c=tVec_c, - rmat_s=rmat_s, - ) - - cvec_det, valid_mask = _clip_to_cylindrical_detector( - cvec, - tVec_d, - caxis, - paxis, - radius, - physical_size, - angle_extent, - tVec_s=tVec_s, - tVec_c=tVec_c, - rmat_s=rmat_s, - ) - - xy_det = _dewarp_from_cylinder( - cvec_det, - tVec_d, - caxis, - paxis, - radius, - tVec_s=tVec_s, - tVec_c=tVec_c, - rmat_s=rmat_s, - ) - - return xy_det, valid_mask - - -def _unitvec_to_cylinder( - uvw: np.ndarray, - caxis: np.ndarray, - paxis: np.ndarray, - radius: float, - tvec: np.ndarray, - tVec_s: np.ndarray = constants.zeros_3x1, - tVec_c: np.ndarray = constants.zeros_3x1, - rmat_s: np.ndarray = constants.identity_3x3, -) -> np.ndarray: - """ - get point where unitvector uvw - intersect the cylindrical detector. - this will give points which are - outside the actual panel. 
the points - will be clipped to the panel later - - Parameters - ---------- - uvw : numpy.ndarray - unit vectors stacked row wise (nx3) shape - - Returns - ------- - numpy.ndarray - (x,y,z) vectors point which intersect with - the cylinder with (nx3) shape - """ - naxis = np.cross(caxis, paxis) - naxis = naxis / np.linalg.norm(naxis) - - tvec_c_l = np.dot(rmat_s, tVec_c) - - delta = tvec - (radius * naxis + np.squeeze(tVec_s) + np.squeeze(tvec_c_l)) - num = uvw.shape[0] - cx = np.atleast_2d(caxis).T - - delta_t = np.tile(delta, [num, 1]) - - t1 = np.dot(uvw, delta.T) - t2 = np.squeeze(np.dot(uvw, cx)) - t3 = np.squeeze(np.dot(delta, cx)) - t4 = np.dot(uvw, cx) - - A = np.squeeze(1 - t4**2) - B = t1 - t2 * t3 - C = radius**2 - np.linalg.norm(delta) ** 2 + t3**2 - - mask = np.abs(A) < 1e-10 - beta = np.zeros( - [ - num, - ] - ) - - beta[~mask] = (B[~mask] + np.sqrt(B[~mask] ** 2 + A[~mask] * C)) / A[~mask] - - beta[mask] = np.nan - return np.tile(beta, [3, 1]).T * uvw - - -def _clip_to_cylindrical_detector( - uvw: np.ndarray, - tVec_d: np.ndarray, - caxis: np.ndarray, - paxis: np.ndarray, - radius: float, - physical_size: np.ndarray, - angle_extent: float, - tVec_s: np.ndarray = constants.zeros_3x1, - tVec_c: np.ndarray = constants.zeros_3x1, - rmat_s: np.ndarray = constants.identity_3x3, -) -> tuple[np.ndarray, np.ndarray]: - """ - takes in the intersection points uvw - with the cylindrical detector and - prunes out points which don't actually - hit the actual panel - - Parameters - ---------- - uvw : numpy.ndarray - unit vectors stacked row wise (nx3) shape - - Returns - ------- - numpy.ndarray - (x,y,z) vectors point which fall on panel - with (mx3) shape - """ - # first get rid of points which are above - # or below the detector - naxis = np.cross(caxis, paxis) - num = uvw.shape[0] - - cx = np.atleast_2d(caxis).T - nx = np.atleast_2d(naxis).T - - tvec_c_l = np.dot(rmat_s, tVec_c) - - delta = tVec_d - ( - radius * naxis + np.squeeze(tVec_s) + np.squeeze(tvec_c_l) - ) - - delta_t = np.tile(delta, [num, 1]) - - uvwp = uvw - delta_t - dp = np.dot(uvwp, cx) - - uvwpxy = uvwp - np.tile(dp, [1, 3]) * np.tile(cx, [1, num]).T - - size = physical_size - tvec = np.atleast_2d(tVec_d).T - - # ycomp = uvwp - np.tile(tVec_d,[num, 1]) - mask1 = np.squeeze(np.abs(dp) > size[0] * 0.5) - uvwp[mask1, :] = np.nan - - # next get rid of points that fall outside - # the polar angle range - - ang = np.dot(uvwpxy, nx) / radius - ang[np.abs(ang) > 1.0] = np.sign(ang[np.abs(ang) > 1.0]) - - ang = np.arccos(ang) - mask2 = np.squeeze(ang >= angle_extent) - mask = np.logical_or(mask1, mask2) - res = uvw.copy() - res[mask, :] = np.nan - - return res, ~mask - - -def _dewarp_from_cylinder( - uvw: np.ndarray, - tVec_d: np.ndarray, - caxis: np.ndarray, - paxis: np.ndarray, - radius: float, - tVec_s: np.ndarray = constants.zeros_3x1, - tVec_c: np.ndarray = constants.zeros_3x1, - rmat_s: np.ndarray = constants.identity_3x3, -): - """ - routine to convert cylindrical coordinates - to cartesian coordinates in image frame - """ - naxis = np.cross(caxis, paxis) - naxis = naxis / np.linalg.norm(naxis) - - cx = np.atleast_2d(caxis).T - px = np.atleast_2d(paxis).T - nx = np.atleast_2d(naxis).T - num = uvw.shape[0] - - tvec_c_l = np.dot(rmat_s, tVec_c) - - delta = tVec_d - ( - radius * naxis + np.squeeze(tVec_s) + np.squeeze(tvec_c_l) - ) - - delta_t = np.tile(delta, [num, 1]) - - uvwp = uvw - delta_t - - uvwpxy = uvwp - np.tile(np.dot(uvwp, cx), [1, 3]) * np.tile(cx, [1, num]).T - - sgn = np.sign(np.dot(uvwpxy, px)) - sgn[sgn == 
0.0] = 1.0 - ang = np.dot(uvwpxy, nx) / radius - ang[np.abs(ang) > 1.0] = np.sign(ang[np.abs(ang) > 1.0]) - ang = np.arccos(ang) - xcrd = np.squeeze(radius * ang * sgn) - ycrd = np.squeeze(np.dot(uvwp, cx)) - return np.vstack((xcrd, ycrd)).T - - -def _warp_to_cylinder( - cart: np.ndarray, - tVec_d: np.ndarray, - radius: float, - caxis: np.ndarray, - paxis: np.ndarray, - tVec_s: np.ndarray = constants.zeros_3x1, - rmat_s: np.ndarray = constants.identity_3x3, - tVec_c: np.ndarray = constants.zeros_3x1, - normalize: bool = True, -) -> np.ndarray: - """ - routine to convert cartesian coordinates - in image frame to cylindrical coordinates - """ - tvec = np.atleast_2d(tVec_d).T - if tVec_s.ndim == 1: - tVec_s = np.atleast_2d(tVec_s).T - if tVec_c.ndim == 1: - tVec_c = np.atleast_2d(tVec_c).T - num = cart.shape[0] - naxis = np.cross(paxis, caxis) - x = cart[:, 0] - y = cart[:, 1] - th = x / radius - xp = radius * np.sin(th) - xn = radius * (1 - np.cos(th)) - - ccomp = np.tile(y, [3, 1]).T * np.tile(caxis, [num, 1]) - pcomp = np.tile(xp, [3, 1]).T * np.tile(paxis, [num, 1]) - ncomp = np.tile(xn, [3, 1]).T * np.tile(naxis, [num, 1]) - cart3d = pcomp + ccomp + ncomp - - tVec_c_l = np.dot(rmat_s, tVec_c) - - res = cart3d + np.tile(tvec - tVec_s - tVec_c_l, [1, num]).T - - if normalize: - return res / np.tile(np.linalg.norm(res, axis=1), [3, 1]).T - else: - return res - - def _dvec_to_angs( dvecs: np.ndarray, bvec: np.ndarray, evec: np.ndarray ) -> tuple[np.ndarray, np.ndarray]: @@ -1058,143 +696,6 @@ def simulateGVecs( return valid_ids, valid_hkl, valid_ang, valid_xy, ang_ps -@deprecated(new_func=simlp, removal_date='2025-01-01') -def simulateLauePattern( - hkls, - bMat, - rmat_d, - tvec_d, - panel_dims, - panel_buffer=5, - minEnergy=8, - maxEnergy=24, - rmat_s=np.eye(3), - grain_params=None, - distortion=None, - beamVec=None, -): - - if beamVec is None: - beamVec = constants.beam_vec - - # parse energy ranges - multipleEnergyRanges = False - if hasattr(maxEnergy, '__len__'): - assert len(maxEnergy) == len( - minEnergy - ), 'energy cutoff ranges must have the same length' - multipleEnergyRanges = True - lmin = [processWavelength(e) for e in maxEnergy] - lmax = [processWavelength(e) for e in minEnergy] - else: - lmin = processWavelength(maxEnergy) - lmax = processWavelength(minEnergy) - - # process crystal rmats and inverse stretches - if grain_params is None: - grain_params = np.atleast_2d( - [0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 1.0, 1.0, 1.0, 0.0, 0.0, 0.0] - ) - - n_grains = len(grain_params) - - # dummy translation vector... 
make input - tvec_s = np.zeros((3, 1)) - - # number of hkls - nhkls_tot = hkls.shape[1] - - # unit G-vectors in crystal frame - ghat_c = mutil.unitVector(np.dot(bMat, hkls)) - - # pre-allocate output arrays - xy_det = np.nan * np.ones((n_grains, nhkls_tot, 2)) - hkls_in = np.nan * np.ones((n_grains, 3, nhkls_tot)) - angles = np.nan * np.ones((n_grains, nhkls_tot, 2)) - dspacing = np.nan * np.ones((n_grains, nhkls_tot)) - energy = np.nan * np.ones((n_grains, nhkls_tot)) - - """ - LOOP OVER GRAINS - """ - - for iG, gp in enumerate(grain_params): - rmat_c = xfcapi.make_rmat_of_expmap(gp[:3]) - tvec_c = gp[3:6].reshape(3, 1) - vInv_s = mutil.vecMVToSymm(gp[6:].reshape(6, 1)) - - # stretch them: V^(-1) * R * Gc - ghat_s_str = mutil.unitVector(np.dot(vInv_s, np.dot(rmat_c, ghat_c))) - ghat_c_str = np.dot(rmat_c.T, ghat_s_str) - - # project - dpts = xfcapi.gvec_to_xy( - ghat_c_str.T, - rmat_d, - rmat_s, - rmat_c, - tvec_d, - tvec_s, - tvec_c, - beam_vec=beamVec, - ).T - - # check intersections with detector plane - canIntersect = ~np.isnan(dpts[0, :]) - npts_in = sum(canIntersect) - - if np.any(canIntersect): - dpts = dpts[:, canIntersect].reshape(2, npts_in) - dhkl = hkls[:, canIntersect].reshape(3, npts_in) - - rmat_b = xfcapi.make_beam_rmat(beamVec, constants.eta_vec) - - # back to angles - tth_eta, gvec_l = xfcapi.xy_to_gvec( - dpts.T, rmat_d, rmat_s, tvec_d, tvec_s, tvec_c, rmat_b=rmat_b - ) - tth_eta = np.vstack(tth_eta).T - - # warp measured points - if distortion is not None: - dpts = distortion.apply_inverse(dpts) - - # plane spacings and energies - dsp = 1.0 / mutil.columnNorm(np.dot(bMat, dhkl)) - wlen = 2 * dsp * np.sin(0.5 * tth_eta[:, 0]) - - # find on spatial extent of detector - xTest = np.logical_and( - dpts[0, :] >= -0.5 * panel_dims[1] + panel_buffer, - dpts[0, :] <= 0.5 * panel_dims[1] - panel_buffer, - ) - yTest = np.logical_and( - dpts[1, :] >= -0.5 * panel_dims[0] + panel_buffer, - dpts[1, :] <= 0.5 * panel_dims[0] - panel_buffer, - ) - - onDetector = np.logical_and(xTest, yTest) - if multipleEnergyRanges: - validEnergy = np.zeros(len(wlen), dtype=bool) - for i in range(len(lmin)): - validEnergy = validEnergy | np.logical_and( - wlen >= lmin[i], wlen <= lmax[i] - ) - else: - validEnergy = np.logical_and(wlen >= lmin, wlen <= lmax) - - # index for valid reflections - keepers = np.where(np.logical_and(onDetector, validEnergy))[0] - - # assign output arrays - xy_det[iG][keepers, :] = dpts[:, keepers].T - hkls_in[iG][:, keepers] = dhkl[:, keepers] - angles[iG][keepers, :] = tth_eta[keepers, :] - dspacing[iG, keepers] = dsp[keepers] - energy[iG, keepers] = processWavelength(wlen[keepers]) - return xy_det, hkls_in, angles, dspacing, energy - - @numba.njit(nogil=True, cache=True) def _expand_pixels( original: np.ndarray, w: float, h: float, result: np.ndarray diff --git a/hexrd/laue/instrument/__init__.py b/hexrd/laue/instrument/__init__.py index b5414013c..024003040 100644 --- a/hexrd/laue/instrument/__init__.py +++ b/hexrd/laue/instrument/__init__.py @@ -1,4 +1,4 @@ -from .hedm_instrument import ( +from hexrd.core.instrument.hedm_instrument import ( calc_angles_from_beam_vec, calc_beam_vec, centers_of_edge_vec, @@ -10,4 +10,4 @@ unwrap_dict_to_h5, unwrap_h5_to_dict, ) -from .detector import Detector +from hexrd.core.instrument.detector import Detector diff --git a/hexrd/laue/instrument/detector.py b/hexrd/laue/instrument/detector.py deleted file mode 100644 index 858fa21d7..000000000 --- a/hexrd/laue/instrument/detector.py +++ /dev/null @@ -1,2128 +0,0 @@ -from abc import 
abstractmethod -import copy -import os -from typing import Optional - -from hexrd.core.instrument.constants import ( - COATING_DEFAULT, - FILTER_DEFAULTS, - PHOSPHOR_DEFAULT, -) -from hexrd.core.instrument.physics_package import AbstractPhysicsPackage -import numpy as np -import numba - -from hexrd.core import constants as ct -from hexrd.core import distortion as distortion_pkg -from hexrd.core import matrixutil as mutil - -# TODO: Resolve extra-core-dependency -from hexrd.hedm import xrdutil -from hexrd.core.rotations import mapAngle - -from hexrd.core.material import crystallography -from hexrd.core.material.crystallography import PlaneData - -from hexrd.core.transforms.xfcapi import ( - xy_to_gvec, - gvec_to_xy, - make_beam_rmat, - make_rmat_of_expmap, - oscill_angles_of_hkls, - angles_to_dvec, -) - -from hexrd.core.utils.decorators import memoize -from hexrd.core.gridutil import cellIndices -from hexrd.core.instrument import detector_coatings -from hexrd.core.material.utils import ( - calculate_linear_absorption_length, - calculate_incoherent_scattering, -) - -distortion_registry = distortion_pkg.Registry() - -max_workers_DFLT = max(1, os.cpu_count() - 1) - -beam_energy_DFLT = 65.351 - -# Memoize these, so each detector can avoid re-computing if nothing -# has changed. -_lorentz_factor = memoize(crystallography.lorentz_factor) -_polarization_factor = memoize(crystallography.polarization_factor) - - -class Detector: - """ - Base class for 2D detectors with functions and properties - common to planar and cylindrical detectors. This class - will be inherited by both those classes. - """ - - __pixelPitchUnit = 'mm' - - # Abstract methods that must be redefined in derived classes - @property - @abstractmethod - def detector_type(self): - raise NotImplementedError - - @abstractmethod - def cart_to_angles( - self, - xy_data, - rmat_s=None, - tvec_s=None, - tvec_c=None, - apply_distortion=False, - ): - """ - Transform cartesian coordinates to angular. - - Parameters - ---------- - xy_data : TYPE - The (n, 2) array of n (x, y) coordinates to be transformed in - either the raw or ideal cartesian plane (see `apply_distortion` - kwarg below). - rmat_s : array_like, optional - The (3, 3) COB matrix for the sample frame. The default is None. - tvec_s : array_like, optional - The (3, ) translation vector for the sample frame. - The default is None. - tvec_c : array_like, optional - The (3, ) translation vector for the crystal frame. - The default is None. - apply_distortion : bool, optional - If True, apply distortion to the inpout cartesian coordinates. - The default is False. - - Returns - ------- - tth_eta : TYPE - DESCRIPTION. - g_vec : TYPE - DESCRIPTION. - - """ - raise NotImplementedError - - @abstractmethod - def angles_to_cart( - self, - tth_eta, - rmat_s=None, - tvec_s=None, - rmat_c=None, - tvec_c=None, - apply_distortion=False, - ): - """ - Transform angular coordinates to cartesian. - - Parameters - ---------- - tth_eta : array_like - The (n, 2) array of n (tth, eta) coordinates to be transformed. - rmat_s : array_like, optional - The (3, 3) COB matrix for the sample frame. The default is None. - tvec_s : array_like, optional - The (3, ) translation vector for the sample frame. - The default is None. - rmat_c : array_like, optional - (3, 3) COB matrix for the crystal frame. - The default is None. - tvec_c : array_like, optional - The (3, ) translation vector for the crystal frame. - The default is None. 
- apply_distortion : bool, optional - If True, apply distortion to take cartesian coordinates to the - "warped" configuration. The default is False. - - Returns - ------- - xy_det : array_like - The (n, 2) array of the n input coordinates in the cartesian detector frame. - - """ - raise NotImplementedError - - @abstractmethod - def cart_to_dvecs(self, xy_data): - """Convert cartesian coordinates to dvectors""" - raise NotImplementedError - - @abstractmethod - def pixel_angles(self, origin=ct.zeros_3): - raise NotImplementedError - - @abstractmethod - def pixel_tth_gradient(self, origin=ct.zeros_3): - raise NotImplementedError - - @abstractmethod - def pixel_eta_gradient(self, origin=ct.zeros_3): - raise NotImplementedError - - @abstractmethod - def calc_filter_coating_transmission(self, energy): - pass - - @property - @abstractmethod - def beam_position(self): - """ - returns the coordinates of the beam in the cartesian detector - frame {Xd, Yd, Zd}. NaNs if no intersection. - """ - raise NotImplementedError - - @property - def extra_config_kwargs(self): - return {} - - # End of abstract methods - - def __init__( - self, - rows=2048, - cols=2048, - pixel_size=(0.2, 0.2), - tvec=np.r_[0.0, 0.0, -1000.0], - tilt=ct.zeros_3, - name='default', - bvec=ct.beam_vec, - xrs_dist=None, - evec=ct.eta_vec, - saturation_level=None, - panel_buffer=None, - tth_distortion=None, - roi=None, - group=None, - distortion=None, - max_workers=max_workers_DFLT, - detector_filter: Optional[detector_coatings.Filter] = None, - detector_coating: Optional[detector_coatings.Coating] = None, - phosphor: Optional[detector_coatings.Phosphor] = None, - ): - """ - Instantiate a Detector object. - - Parameters - ---------- - rows : TYPE, optional - DESCRIPTION. The default is 2048. - cols : TYPE, optional - DESCRIPTION. The default is 2048. - pixel_size : TYPE, optional - DESCRIPTION. The default is (0.2, 0.2). - tvec : TYPE, optional - DESCRIPTION. The default is np.r_[0., 0., -1000.]. - tilt : TYPE, optional - DESCRIPTION. The default is ct.zeros_3. - name : TYPE, optional - DESCRIPTION. The default is 'default'. - bvec : TYPE, optional - DESCRIPTION. The default is ct.beam_vec. - evec : TYPE, optional - DESCRIPTION. The default is ct.eta_vec. - saturation_level : TYPE, optional - DESCRIPTION. The default is None. - panel_buffer : TYPE, optional - If a scalar or len(2) array_like, the interpretation is a border - in mm. If an array with shape (nrows, ncols), interpretation is a - boolean with True marking valid pixels. The default is None. - roi : TYPE, optional - DESCRIPTION. The default is None. - group : TYPE, optional - DESCRIPTION. The default is None. - distortion : TYPE, optional - DESCRIPTION. The default is None. - detector_filter : detector_coatings.Filter, optional - filter specifications including material type, - density and thickness. Used for absorption correction - calculations. - detector_coating : detector_coatings.Coating, optional - coating specifications including material type, - density and thickness. Used for absorption correction - calculations. - phosphor : detector_coatings.Phosphor, optional - phosphor specifications including material type, - density and thickness. Used for absorption correction - calculations. - - Returns - ------- - None.
- - """ - self._name = name - - self._rows = rows - self._cols = cols - - self._pixel_size_row = pixel_size[0] - self._pixel_size_col = pixel_size[1] - - self._saturation_level = saturation_level - - self._panel_buffer = panel_buffer - - self._tth_distortion = tth_distortion - - if roi is None: - self._roi = roi - else: - assert len(roi) == 2, "roi is set via (start_row, start_col)" - self._roi = ( - (roi[0], roi[0] + self._rows), - (roi[1], roi[1] + self._cols), - ) - - self._tvec = np.array(tvec).flatten() - self._tilt = np.array(tilt).flatten() - - self._bvec = np.array(bvec).flatten() - self._xrs_dist = xrs_dist - - self._evec = np.array(evec).flatten() - - self._distortion = distortion - - self.max_workers = max_workers - - self.group = group - - if detector_filter is None: - detector_filter = detector_coatings.Filter( - **FILTER_DEFAULTS.TARDIS - ) - self.filter = detector_filter - - if detector_coating is None: - detector_coating = detector_coatings.Coating(**COATING_DEFAULT) - self.coating = detector_coating - - if phosphor is None: - phosphor = detector_coatings.Phosphor(**PHOSPHOR_DEFAULT) - self.phosphor = phosphor - - # detector ID - @property - def name(self): - return self._name - - @name.setter - def name(self, s): - assert isinstance(s, str), "requires string input" - self._name = s - - @property - def lmfit_name(self): - # lmfit requires underscores instead of dashes - return self.name.replace('-', '_') - - # properties for physical size of rectangular detector - @property - def rows(self): - return self._rows - - @rows.setter - def rows(self, x): - assert isinstance(x, int) - self._rows = x - - @property - def cols(self): - return self._cols - - @cols.setter - def cols(self, x): - assert isinstance(x, int) - self._cols = x - - @property - def pixel_size_row(self): - return self._pixel_size_row - - @pixel_size_row.setter - def pixel_size_row(self, x): - self._pixel_size_row = float(x) - - @property - def pixel_size_col(self): - return self._pixel_size_col - - @pixel_size_col.setter - def pixel_size_col(self, x): - self._pixel_size_col = float(x) - - @property - def pixel_area(self): - return self.pixel_size_row * self.pixel_size_col - - @property - def saturation_level(self): - return self._saturation_level - - @saturation_level.setter - def saturation_level(self, x): - if x is not None: - assert np.isreal(x) - self._saturation_level = x - - @property - def panel_buffer(self): - return self._panel_buffer - - @panel_buffer.setter - def panel_buffer(self, x): - """if not None, a buffer in mm (x, y)""" - if x is not None: - assert len(x) == 2 or x.ndim == 2 - self._panel_buffer = x - - @property - def tth_distortion(self): - return self._tth_distortion - - @tth_distortion.setter - def tth_distortion(self, x): - """if not None, a buffer in mm (x, y)""" - if x is not None: - assert x.ndim == 2 and x.shape == self.shape - self._tth_distortion = x - - @property - def roi(self): - return self._roi - - @roi.setter - def roi(self, vertex_array): - """ - !!! 
vertex array must be (r0, c0) - """ - if vertex_array is not None: - assert ( - len(vertex_array) == 2 - ), "roi is set via (start_row, start_col)" - self._roi = ( - (vertex_array[0], vertex_array[0] + self.rows), - (vertex_array[1], vertex_array[1] + self.cols), - ) - - @property - def row_dim(self): - return self.rows * self.pixel_size_row - - @property - def col_dim(self): - return self.cols * self.pixel_size_col - - @property - def row_pixel_vec(self): - return self.pixel_size_row * ( - 0.5 * (self.rows - 1) - np.arange(self.rows) - ) - - @property - def row_edge_vec(self): - return _row_edge_vec(self.rows, self.pixel_size_row) - - @property - def col_pixel_vec(self): - return self.pixel_size_col * ( - np.arange(self.cols) - 0.5 * (self.cols - 1) - ) - - @property - def col_edge_vec(self): - return _col_edge_vec(self.cols, self.pixel_size_col) - - @property - def corner_ul(self): - return np.r_[-0.5 * self.col_dim, 0.5 * self.row_dim] - - @property - def corner_ll(self): - return np.r_[-0.5 * self.col_dim, -0.5 * self.row_dim] - - @property - def corner_lr(self): - return np.r_[0.5 * self.col_dim, -0.5 * self.row_dim] - - @property - def corner_ur(self): - return np.r_[0.5 * self.col_dim, 0.5 * self.row_dim] - - @property - def shape(self): - return (self.rows, self.cols) - - @property - def tvec(self): - return self._tvec - - @tvec.setter - def tvec(self, x): - x = np.array(x).flatten() - assert len(x) == 3, 'input must have length = 3' - self._tvec = x - - @property - def tilt(self): - return self._tilt - - @tilt.setter - def tilt(self, x): - assert len(x) == 3, 'input must have length = 3' - self._tilt = np.array(x).squeeze() - - @property - def bvec(self): - return self._bvec - - @bvec.setter - def bvec(self, x): - x = np.array(x).flatten() - assert ( - len(x) == 3 and sum(x * x) > 1 - ct.sqrt_epsf - ), 'input must have length = 3 and have unit magnitude' - self._bvec = x - - @property - def xrs_dist(self): - return self._xrs_dist - - @xrs_dist.setter - def xrs_dist(self, x): - assert x is None or np.isscalar( - x - ), f"'source_distance' must be None or scalar; you input '{x}'" - self._xrs_dist = x - - @property - def evec(self): - return self._evec - - @evec.setter - def evec(self, x): - x = np.array(x).flatten() - assert ( - len(x) == 3 and sum(x * x) > 1 - ct.sqrt_epsf - ), 'input must have length = 3 and have unit magnitude' - self._evec = x - - @property - def distortion(self): - return self._distortion - - @distortion.setter - def distortion(self, x): - if x is not None: - registry = distortion_registry.distortion_registry - check_arg = np.zeros(len(registry), dtype=bool) - for i, dcls in enumerate(registry.values()): - check_arg[i] = isinstance(x, dcls) - assert np.any(check_arg), 'input distortion is not in registry!' - self._distortion = x - - @property - def rmat(self): - return make_rmat_of_expmap(self.tilt) - - @property - def normal(self): - return self.rmat[:, 2] - - # ...memoize??? - @property - def pixel_coords(self): - pix_i, pix_j = np.meshgrid( - self.row_pixel_vec, self.col_pixel_vec, indexing='ij' - ) - return pix_i, pix_j - - # ========================================================================= - # METHODS - # ========================================================================= - - def pixel_Q( - self, energy: np.floating, origin: np.ndarray = ct.zeros_3 - ) -> np.ndarray: - '''get the equivalent momentum transfer - for the angles. 
- - Parameters - ---------- - energy: float - incident photon energy in keV - origin: np.ndarray - origin of diffraction volume - - Returns - ------- - np.ndarray - pixel wise Q in A^-1 - - ''' - lam = ct.keVToAngstrom(energy) - tth, _ = self.pixel_angles(origin=origin) - return 4.0 * np.pi * np.sin(tth * 0.5) / lam - - def pixel_compton_energy_loss( - self, - energy: np.floating, - origin: np.ndarray = ct.zeros_3, - ) -> np.ndarray: - '''inelastic compton scattering leads - to energy loss of the incident photons. - compute the final energy of the photons - for each pixel. - - Parameters - ---------- - energy: float - incident photon energy in keV - origin: np.ndarray - origin of diffraction volume - - Returns - ------- - np.ndarray - pixel wise energy of inelastically - scattered photons in keV - ''' - energy = np.asarray(energy) - tth, _ = self.pixel_angles() - ang_fact = 1 - np.cos(tth) - beta = energy / ct.cRestmasskeV - return energy / (1 + beta * ang_fact) - - def pixel_compton_attenuation_length( - self, - energy: np.floating, - density: np.floating, - formula: str, - origin: np.ndarray = ct.zeros_3, - ) -> np.ndarray: - '''each pixel intercepts inelastically - scattered photons of different energy. - the attenuation length and the transmission - for these photons are different. this function - calculates the attenuation length for each pixel - on the detector. - - Parameters - ---------- - energy: float - incident photon energy in keV - density: float - density of material in g/cc - formula: str - formula of the material scattering - origin: np.ndarray - origin of diffraction volume - - Returns - ------- - np.ndarray - pixel wise attenuation length of compton - scattered photons - ''' - pixel_energy = self.pixel_compton_energy_loss(energy) - - pixel_attenuation_length = calculate_linear_absorption_length( - density, - formula, - pixel_energy.flatten(), - ) - return pixel_attenuation_length.reshape(self.shape) - - def compute_compton_scattering_intensity( - self, - energy: np.floating, - rMat_s: np.array, - physics_package: AbstractPhysicsPackage, - origin: np.array = ct.zeros_3, - ) -> tuple[np.ndarray, np.ndarray, np.ndarray]: - '''compute the theoretical compton scattering - signal on the detector. this value is corrected - for the transmission of compton scattered photons - and normalized before being subtracted from the - raw intensity - - Parameters - ---------- - energy: float - energy of incident photon - rMat_s: np.ndarray - rotation matrix of sample orientation - physics_package: AbstractPhysicsPackage - physics package information - - Returns - ------- - compton_intensity: np.ndarray - transmission corrected compton scattering - intensity - ''' - - q = self.pixel_Q(energy) - inc_s = calculate_incoherent_scattering( - physics_package.sample_material, q.flatten() - ).reshape(self.shape) - - inc_w = calculate_incoherent_scattering( - physics_package.window_material, q.flatten() - ).reshape(self.shape) - - t_s = self.calc_compton_physics_package_transmission( - energy, rMat_s, physics_package - ) - - t_w = self.calc_compton_window_transmission( - energy, rMat_s, physics_package - ) - - return inc_s * t_s + inc_w * t_w, t_s, t_w - - def polarization_factor(self, f_hor, f_vert, unpolarized=False): - """ - Calculate the polarization factor for every pixel. - - Parameters - ---------- - f_hor : float - the fraction of horizontal polarization. for XFELs - this is close to 1. - f_vert : TYPE - the fraction of vertical polarization, which is ~0 for XFELs.
- - Raises - ------ - RuntimeError - DESCRIPTION. - - Returns - ------- - TYPE - DESCRIPTION. - - """ - s = f_hor + f_vert - if np.abs(s - 1) > ct.sqrt_epsf: - msg = ( - "sum of fraction of " - "horizontal and vertical polarizations " - "must be equal to 1." - ) - raise RuntimeError(msg) - - if f_hor < 0 or f_vert < 0: - msg = ( - "fraction of polarization in horizontal " - "or vertical directions can't be negative." - ) - raise RuntimeError(msg) - - tth, eta = self.pixel_angles() - kwargs = { - 'tth': tth, - 'eta': eta, - 'f_hor': f_hor, - 'f_vert': f_vert, - 'unpolarized': unpolarized, - } - - return _polarization_factor(**kwargs) - - def lorentz_factor(self): - """ - calculate the lorentz factor for every pixel - - Parameters - ---------- - None - - Raises - ------ - None - - Returns - ------- - numpy.ndarray - returns an array the same size as the detector panel - with each element containg the lorentz factor of the - corresponding pixel - """ - tth, eta = self.pixel_angles() - return _lorentz_factor(tth) - - def config_dict( - self, - chi=0, - tvec=ct.zeros_3, - beam_energy=beam_energy_DFLT, - beam_vector=ct.beam_vec, - sat_level=None, - panel_buffer=None, - style='yaml', - ): - """ - Return a dictionary of detector parameters. - - Optional instrument level parameters. This is a convenience function - to work with the APIs in several functions in xrdutil. - - Parameters - ---------- - chi : float, optional - DESCRIPTION. The default is 0. - tvec : array_like (3,), optional - DESCRIPTION. The default is ct.zeros_3. - beam_energy : float, optional - DESCRIPTION. The default is beam_energy_DFLT. - beam_vector : aray_like (3,), optional - DESCRIPTION. The default is ct.beam_vec. - sat_level : scalar, optional - DESCRIPTION. The default is None. - panel_buffer : scalar, array_like (2,), optional - DESCRIPTION. The default is None. - - Returns - ------- - config_dict : dict - DESCRIPTION. 
- - """ - assert style.lower() in ['yaml', 'hdf5'], ( - "style must be either 'yaml', or 'hdf5'; you gave '%s'" % style - ) - - config_dict = {} - - # ===================================================================== - # DETECTOR PARAMETERS - # ===================================================================== - # transform and pixels - # - # assign local vars; listify if necessary - tilt = self.tilt - translation = self.tvec - roi = ( - None - if self.roi is None - else np.array([self.roi[0][0], self.roi[1][0]]).flatten() - ) - if style.lower() == 'yaml': - tilt = tilt.tolist() - translation = translation.tolist() - tvec = tvec.tolist() - roi = None if roi is None else roi.tolist() - - det_dict = dict( - detector_type=self.detector_type, - transform=dict( - tilt=tilt, - translation=translation, - ), - pixels=dict( - rows=int(self.rows), - columns=int(self.cols), - size=[float(self.pixel_size_row), float(self.pixel_size_col)], - ), - ) - - if roi is not None: - # Only add roi if it is not None - det_dict['pixels']['roi'] = roi - - if self.group is not None: - # Only add group if it is not None - det_dict['group'] = self.group - - # distortion - if self.distortion is not None: - dparams = self.distortion.params - if style.lower() == 'yaml': - dparams = dparams.tolist() - dist_d = dict( - function_name=self.distortion.maptype, parameters=dparams - ) - det_dict['distortion'] = dist_d - - # saturation level - if sat_level is None: - sat_level = self.saturation_level - det_dict['saturation_level'] = float(sat_level) - - # panel buffer - if panel_buffer is None: - # could be none, a 2-element list, or a 2-d array (rows, cols) - panel_buffer = copy.deepcopy(self.panel_buffer) - # !!! now we have to do some style-dependent munging of panel_buffer - if isinstance(panel_buffer, np.ndarray): - if panel_buffer.ndim == 1: - assert len(panel_buffer) == 2, "length of 1-d buffer must be 2" - # if here is a 2-element array - if style.lower() == 'yaml': - panel_buffer = panel_buffer.tolist() - elif panel_buffer.ndim == 2: - if style.lower() == 'yaml': - # !!! can't practically write array-like buffers to YAML - # so forced to clobber - print("clobbering panel buffer array in yaml-ready output") - panel_buffer = [0.0, 0.0] - else: - raise RuntimeError( - "panel buffer ndim must be 1 or 2; you specified %d" - % panel_buffer.ndmin - ) - elif panel_buffer is None: - # still None on self - # !!! this gets handled by unwrap_dict_to_h5 now - - # if style.lower() == 'hdf5': - # # !!! can't write None to hdf5; substitute with zeros - # panel_buffer = np.r_[0., 0.] - pass - det_dict['buffer'] = panel_buffer - - det_dict.update(self.extra_config_kwargs) - - # ===================================================================== - # SAMPLE STAGE PARAMETERS - # ===================================================================== - stage_dict = dict(chi=chi, translation=tvec) - - # ===================================================================== - # BEAM PARAMETERS - # ===================================================================== - # !!! 
make_reflection_patches is still using the vector - # azim, pola = calc_angles_from_beam_vec(beam_vector) - # beam_dict = dict( - # energy=beam_energy, - # vector=dict( - # azimuth=azim, - # polar_angle=pola - # ) - # ) - beam_dict = dict(energy=beam_energy, vector=beam_vector) - - config_dict['detector'] = det_dict - config_dict['oscillation_stage'] = stage_dict - config_dict['beam'] = beam_dict - - return config_dict - - def cartToPixel(self, xy_det, pixels=False, apply_distortion=False): - """ - Coverts cartesian coordinates to pixel coordinates - - Parameters - ---------- - xy_det : array_like - The (n, 2) vstacked array of (x, y) pairs in the reference - cartesian frame (possibly subject to distortion). - pixels : bool, optional - If True, return discrete pixel indices; otherwise fractional pixel - coordinates are returned. The default is False. - apply_distortion : bool, optional - If True, apply self.distortion to the input (if applicable). - The default is False. - - Returns - ------- - ij_det : array_like - The (n, 2) array of vstacked (i, j) coordinates in the pixel - reference frame where i is the (slow) row dimension and j is the - (fast) column dimension. - - """ - xy_det = np.atleast_2d(xy_det) - if apply_distortion and self.distortion is not None: - xy_det = self.distortion.apply(xy_det) - - npts = len(xy_det) - - tmp_ji = xy_det - np.tile(self.corner_ul, (npts, 1)) - i_pix = -tmp_ji[:, 1] / self.pixel_size_row - 0.5 - j_pix = tmp_ji[:, 0] / self.pixel_size_col - 0.5 - - ij_det = np.vstack([i_pix, j_pix]).T - if pixels: - # Hide any runtime warnings in this conversion. Their output values - # will certainly be off the detector, which is fine. - with np.errstate(invalid='ignore'): - ij_det = np.array(np.round(ij_det), dtype=int) - - return ij_det - - def pixelToCart(self, ij_det): - """ - Convert vstacked array or list of [i,j] pixel indices - (or UL corner-based points) and convert to (x,y) in the - cartesian frame {Xd, Yd, Zd} - """ - ij_det = np.atleast_2d(ij_det) - - x = (ij_det[:, 1] + 0.5) * self.pixel_size_col + self.corner_ll[0] - y = ( - self.rows - ij_det[:, 0] - 0.5 - ) * self.pixel_size_row + self.corner_ll[1] - return np.vstack([x, y]).T - - def angularPixelSize(self, xy, rMat_s=None, tVec_s=None, tVec_c=None): - """ - Notes - ----- - !!! assumes xy are in raw (distorted) frame, if applicable - """ - # munge kwargs - if rMat_s is None: - rMat_s = ct.identity_3x3 - if tVec_s is None: - tVec_s = ct.zeros_3x1 - if tVec_c is None: - tVec_c = ct.zeros_3x1 - - # FIXME: perhaps not necessary, but safe... - xy = np.atleast_2d(xy) - - ''' - # --------------------------------------------------------------------- - # TODO: needs testing and memoized gradient arrays! 
- # --------------------------------------------------------------------- - # need origin arg - origin = np.dot(rMat_s, tVec_c).flatten() + tVec_s.flatten() - - # get pixel indices - i_crds = cellIndices(self.row_edge_vec, xy[:, 1]) - j_crds = cellIndices(self.col_edge_vec, xy[:, 0]) - - ptth_grad = self.pixel_tth_gradient(origin=origin)[i_crds, j_crds] - peta_grad = self.pixel_eta_gradient(origin=origin)[i_crds, j_crds] - - return np.vstack([ptth_grad, peta_grad]).T - ''' - # call xrdutil function - ang_ps = xrdutil.angularPixelSize( - xy, - (self.pixel_size_row, self.pixel_size_col), - self.rmat, - rMat_s, - self.tvec, - tVec_s, - tVec_c, - distortion=self.distortion, - beamVec=self.bvec, - etaVec=self.evec, - ) - return ang_ps - - def clip_to_panel(self, xy, buffer_edges=True): - """ - if self.roi is not None, uses it by default - - TODO: check if need shape kwarg - TODO: optimize ROI search better than list comprehension below - TODO: panel_buffer can be a 2-d boolean mask, but needs testing - - """ - xy = np.atleast_2d(xy) - - ''' - # !!! THIS LOGIC IS OBSOLETE - if self.roi is not None: - ij_crds = self.cartToPixel(xy, pixels=True) - ii, jj = polygon(self.roi[:, 0], self.roi[:, 1], - shape=(self.rows, self.cols)) - on_panel_rows = [i in ii for i in ij_crds[:, 0]] - on_panel_cols = [j in jj for j in ij_crds[:, 1]] - on_panel = np.logical_and(on_panel_rows, on_panel_cols) - else: - ''' - xlim = 0.5 * self.col_dim - ylim = 0.5 * self.row_dim - if buffer_edges and self.panel_buffer is not None: - if self.panel_buffer.ndim == 2: - pix = self.cartToPixel(xy, pixels=True) - - roff = np.logical_or(pix[:, 0] < 0, pix[:, 0] >= self.rows) - coff = np.logical_or(pix[:, 1] < 0, pix[:, 1] >= self.cols) - - idx = np.logical_or(roff, coff) - - on_panel = np.full(pix.shape[0], False) - valid_pix = pix[~idx, :] - on_panel[~idx] = self.panel_buffer[ - valid_pix[:, 0], valid_pix[:, 1] - ] - else: - xlim -= self.panel_buffer[0] - ylim -= self.panel_buffer[1] - on_panel_x = np.logical_and( - xy[:, 0] >= -xlim, xy[:, 0] <= xlim - ) - on_panel_y = np.logical_and( - xy[:, 1] >= -ylim, xy[:, 1] <= ylim - ) - on_panel = np.logical_and(on_panel_x, on_panel_y) - elif not buffer_edges or self.panel_buffer is None: - on_panel_x = np.logical_and(xy[:, 0] >= -xlim, xy[:, 0] <= xlim) - on_panel_y = np.logical_and(xy[:, 1] >= -ylim, xy[:, 1] <= ylim) - on_panel = np.logical_and(on_panel_x, on_panel_y) - return xy[on_panel, :], on_panel - - def interpolate_nearest(self, xy, img, pad_with_nans=True): - """ - TODO: revisit normalization in here? - - """ - is_2d = img.ndim == 2 - right_shape = img.shape[0] == self.rows and img.shape[1] == self.cols - assert ( - is_2d and right_shape - ), "input image must be 2-d with shape (%d, %d)" % ( - self.rows, - self.cols, - ) - - # initialize output with nans - if pad_with_nans: - int_xy = np.nan * np.ones(len(xy)) - else: - int_xy = np.zeros(len(xy)) - - # clip away points too close to or off the edges of the detector - xy_clip, on_panel = self.clip_to_panel(xy, buffer_edges=True) - - # get pixel indices of clipped points - i_src = cellIndices(self.row_pixel_vec, xy_clip[:, 1]) - j_src = cellIndices(self.col_pixel_vec, xy_clip[:, 0]) - - # next interpolate across cols - int_vals = img[i_src, j_src] - int_xy[on_panel] = int_vals - return int_xy - - def interpolate_bilinear( - self, - xy, - img, - pad_with_nans=True, - clip_to_panel=True, - on_panel: Optional[np.ndarray] = None, - ): - """ - Interpolate an image array at the specified cartesian points. 
- - Parameters - ---------- - xy : array_like, (n, 2) - Array of cartesian coordinates in the image plane at which - to evaluate intensity. - img : array_like - 2-dimensional image array. - pad_with_nans : bool, optional - Toggle for assigning NaN to points that fall off the detector. - The default is True. - on_panel : np.ndarray, optional - If you want to skip clip_to_panel() for performance reasons, - just provide an array of which pixels are on the panel. - - Returns - ------- - int_xy : array_like, (n,) - The array of interpolated intensities at each of the n input - coordinates. - - Notes - ----- - TODO: revisit normalization in here? - """ - - is_2d = img.ndim == 2 - right_shape = img.shape[0] == self.rows and img.shape[1] == self.cols - assert ( - is_2d and right_shape - ), "input image must be 2-d with shape (%d, %d)" % ( - self.rows, - self.cols, - ) - - # initialize output with nans - if pad_with_nans: - int_xy = np.nan * np.ones(len(xy)) - else: - int_xy = np.zeros(len(xy)) - - if on_panel is None: - # clip away points too close to or off the edges of the detector - xy_clip, on_panel = self.clip_to_panel(xy, buffer_edges=True) - else: - xy_clip = xy[on_panel] - - # grab fractional pixel indices of clipped points - ij_frac = self.cartToPixel(xy_clip) - - # get floors/ceils from array of pixel _centers_ - # and fix indices running off the pixel centers - # !!! notice we already clipped points to the panel! - i_floor = cellIndices(self.row_pixel_vec, xy_clip[:, 1]) - i_floor_img = _fix_indices(i_floor, 0, self.rows - 1) - - j_floor = cellIndices(self.col_pixel_vec, xy_clip[:, 0]) - j_floor_img = _fix_indices(j_floor, 0, self.cols - 1) - - # ceilings from floors - i_ceil = i_floor + 1 - i_ceil_img = _fix_indices(i_ceil, 0, self.rows - 1) - - j_ceil = j_floor + 1 - j_ceil_img = _fix_indices(j_ceil, 0, self.cols - 1) - - # first interpolate at top/bottom rows - row_floor_int = (j_ceil - ij_frac[:, 1]) * img[ - i_floor_img, j_floor_img - ] + (ij_frac[:, 1] - j_floor) * img[i_floor_img, j_ceil_img] - row_ceil_int = (j_ceil - ij_frac[:, 1]) * img[ - i_ceil_img, j_floor_img - ] + (ij_frac[:, 1] - j_floor) * img[i_ceil_img, j_ceil_img] - - # next interpolate across cols - int_vals = (i_ceil - ij_frac[:, 0]) * row_floor_int + ( - ij_frac[:, 0] - i_floor - ) * row_ceil_int - int_xy[on_panel] = int_vals - return int_xy - - def make_powder_rings( - self, - pd, - merge_hkls=False, - delta_tth=None, - delta_eta=10.0, - eta_period=None, - eta_list=None, - rmat_s=ct.identity_3x3, - tvec_s=ct.zeros_3, - tvec_c=ct.zeros_3, - full_output=False, - tth_distortion=None, - ): - """ - Generate points on Debye_Scherrer rings over the detector. - - !!! it is assuming that rmat_s is built from (chi, ome) as it the case - for HEDM! - - Parameters - ---------- - pd : TYPE - DESCRIPTION. - merge_hkls : TYPE, optional - DESCRIPTION. The default is False. - delta_tth : TYPE, optional - DESCRIPTION. The default is None. - delta_eta : TYPE, optional - DESCRIPTION. The default is 10.. - eta_period : TYPE, optional - DESCRIPTION. The default is None. - eta_list : TYPE, optional - DESCRIPTION. The default is None. - rmat_s : TYPE, optional - DESCRIPTION. The default is ct.identity_3x3. - tvec_s : TYPE, optional - DESCRIPTION. The default is ct.zeros_3. - tvec_c : TYPE, optional - DESCRIPTION. The default is ct.zeros_3. - full_output : TYPE, optional - DESCRIPTION. The default is False. - tth_distortion : special class, optional - Special distortion class. The default is None. 
- - Raises - ------ - RuntimeError - DESCRIPTION. - - Returns - ------- - TYPE - DESCRIPTION. - - """ - if tth_distortion is not None: - tnorms = mutil.rowNorm(np.vstack([tvec_s, tvec_c])) - assert ( - np.all(tnorms < ct.sqrt_epsf) - ), "If using a distortion function, translations must be zero" - - # in case you want to give it tth angles directly - if isinstance(pd, PlaneData): - pd = PlaneData(None, pd) - if delta_tth is not None: - pd.tThWidth = np.radians(delta_tth) - else: - delta_tth = np.degrees(pd.tThWidth) - - # !!! conversions, meh... - del_eta = np.radians(delta_eta) - - # do merging if asked - if merge_hkls: - _, tth_ranges = pd.getMergedRanges(cullDupl=True) - tth = np.average(tth_ranges, axis=1) - else: - tth_ranges = pd.getTThRanges() - tth = pd.getTTh() - tth_pm = tth_ranges - np.tile(tth, (2, 1)).T - sector_vertices = np.vstack( - [ - [ - i[0], - -del_eta, - i[0], - del_eta, - i[1], - del_eta, - i[1], - -del_eta, - 0.0, - 0.0, - ] - for i in tth_pm - ] - ) - else: - # Okay, we have an array-like tth specification - tth = np.array(pd).flatten() - if delta_tth is None: - raise RuntimeError( - "If supplying a 2theta list as first arg, " - + "must supply a delta_tth" - ) - tth_pm = 0.5 * delta_tth * np.r_[-1.0, 1.0] - tth_ranges = np.radians([i + tth_pm for i in tth]) # !!! units - sector_vertices = np.tile( - 0.5 - * np.radians( - [ - -delta_tth, - -delta_eta, - -delta_tth, - delta_eta, - delta_tth, - delta_eta, - delta_tth, - -delta_eta, - 0.0, - 0.0, - ] - ), - (len(tth), 1), - ) - # !! conversions, meh... - tth = np.radians(tth) - del_eta = np.radians(delta_eta) - - # for generating rings, make eta vector in correct period - if eta_period is None: - eta_period = (-np.pi, np.pi) - - if eta_list is None: - neta = int(360.0 / float(delta_eta)) - # this is the vector of ETA EDGES - eta_edges = mapAngle( - np.radians(delta_eta * np.linspace(0.0, neta, num=neta + 1)) - + eta_period[0], - eta_period, - ) - - # get eta bin centers from edges - """ - # !!! this way is probably overkill, since we have delta eta - eta_centers = np.average( - np.vstack([eta_edges[:-1], eta_edges[1:]]), - axis=0) - """ - # !!! should be safe as eta_edges are monotonic - eta_centers = eta_edges[:-1] + 0.5 * del_eta - else: - eta_centers = np.radians(eta_list).flatten() - neta = len(eta_centers) - eta_edges = ( - np.tile(eta_centers, (2, 1)) - + np.tile(0.5 * del_eta * np.r_[-1, 1], (neta, 1)).T - ).T.flatten() - - # get chi and ome from rmat_s - # !!! API ambiguity - # !!! this assumes rmat_s was made from the composition - # !!! rmat_s = R(Xl, chi) * R(Yl, ome) - ome = np.arctan2(rmat_s[0, 2], rmat_s[0, 0]) - - # make list of angle tuples - angs = [ - np.vstack([i * np.ones(neta), eta_centers, ome * np.ones(neta)]) - for i in tth - ] - - # need xy coords and pixel sizes - valid_ang = [] - valid_xy = [] - map_indices = [] - npp = 5 # [ll, ul, ur, lr, center] - for i_ring in range(len(angs)): - # expand angles to patch vertices - these_angs = angs[i_ring].T - - # push to vertices to see who falls off - # FIXME: clipping is not checking if masked regions are on the - # patch interior - patch_vertices = ( - np.tile(these_angs[:, :2], (1, npp)) - + np.tile(sector_vertices[i_ring], (neta, 1)) - ).reshape(npp * neta, 2) - - # find vertices that all fall on the panel - # !!! 
note API ambiguity regarding rmat_s above - all_xy = self.angles_to_cart( - patch_vertices, - rmat_s=rmat_s, - tvec_s=tvec_s, - rmat_c=None, - tvec_c=tvec_c, - apply_distortion=True, - ) - - _, on_panel = self.clip_to_panel(all_xy) - - # all vertices must be on... - - patch_is_on = np.all(on_panel.reshape(neta, npp), axis=1) - patch_xys = all_xy.reshape(neta, 5, 2)[patch_is_on] - - # !!! Have to apply after clipping, distortion can get wonky near - # the edge of the panel, and it is assumed to be <~1 deg - # !!! The tth_ranges are NOT correct! - if tth_distortion is not None: - patch_valid_angs = tth_distortion.apply( - self.angles_to_cart(these_angs[patch_is_on, :2]), - return_nominal=True, - ) - patch_valid_xys = self.angles_to_cart( - patch_valid_angs, apply_distortion=True - ) - else: - patch_valid_angs = these_angs[patch_is_on, :2] - patch_valid_xys = patch_xys[:, -1, :].squeeze() - - # form output arrays - valid_ang.append(patch_valid_angs) - valid_xy.append(patch_valid_xys) - map_indices.append(patch_is_on) - # ??? is this option necessary? - if full_output: - return valid_ang, valid_xy, tth_ranges, map_indices, eta_edges - else: - return valid_ang, valid_xy, tth_ranges - - def map_to_plane(self, pts, rmat, tvec): - """ - Map detector points to the specified plane. - - Parameters - ---------- - pts : TYPE - DESCRIPTION. - rmat : TYPE - DESCRIPTION. - tvec : TYPE - DESCRIPTION. - - Returns - ------- - TYPE - DESCRIPTION. - - Notes - ----- - by convention: - - n * (u*pts_l - tvec) = 0 - - [pts]_l = rmat*[pts]_m + tvec - - """ - # arg munging - pts = np.atleast_2d(pts) - npts = len(pts) - - # map plane normal & translation vector, LAB FRAME - nvec_map_lab = rmat[:, 2].reshape(3, 1) - tvec_map_lab = np.atleast_2d(tvec).reshape(3, 1) - tvec_d_lab = np.atleast_2d(self.tvec).reshape(3, 1) - - # put pts as 3-d in panel CS and transform to 3-d lab coords - pts_det = np.hstack([pts, np.zeros((npts, 1))]) - pts_lab = np.dot(self.rmat, pts_det.T) + tvec_d_lab - - # scaling along pts vectors to hit map plane - u = np.dot(nvec_map_lab.T, tvec_map_lab) / np.dot( - nvec_map_lab.T, pts_lab - ) - - # pts on map plane, in LAB FRAME - pts_map_lab = np.tile(u, (3, 1)) * pts_lab - - return np.dot(rmat.T, pts_map_lab - tvec_map_lab)[:2, :].T - - def simulate_rotation_series( - self, - plane_data, - grain_param_list, - eta_ranges=[ - (-np.pi, np.pi), - ], - ome_ranges=[ - (-np.pi, np.pi), - ], - ome_period=(-np.pi, np.pi), - chi=0.0, - tVec_s=ct.zeros_3, - wavelength=None, - ): - """ - Simulate a monochromatic rotation series for a list of grains. - - Parameters - ---------- - plane_data : TYPE - DESCRIPTION. - grain_param_list : TYPE - DESCRIPTION. - eta_ranges : TYPE, optional - DESCRIPTION. The default is [(-np.pi, np.pi), ]. - ome_ranges : TYPE, optional - DESCRIPTION. The default is [(-np.pi, np.pi), ]. - ome_period : TYPE, optional - DESCRIPTION. The default is (-np.pi, np.pi). - chi : TYPE, optional - DESCRIPTION. The default is 0.. - tVec_s : TYPE, optional - DESCRIPTION. The default is ct.zeros_3. - wavelength : TYPE, optional - DESCRIPTION. The default is None. - - Returns - ------- - valid_ids : TYPE - DESCRIPTION. - valid_hkls : TYPE - DESCRIPTION. - valid_angs : TYPE - DESCRIPTION. - valid_xys : TYPE - DESCRIPTION. - ang_pixel_size : TYPE - DESCRIPTION. 
- - """ - # grab B-matrix from plane data - bMat = plane_data.latVecOps['B'] - - # reconcile wavelength - # * added sanity check on exclusions here; possible to - # * make some reflections invalid (NaN) - if wavelength is None: - wavelength = plane_data.wavelength - else: - if plane_data.wavelength != wavelength: - plane_data.wavelength = ct.keVToAngstrom(wavelength) - assert not np.any( - np.isnan(plane_data.getTTh()) - ), "plane data exclusions incompatible with wavelength" - - # vstacked G-vector id, h, k, l - full_hkls = xrdutil._fetch_hkls_from_planedata(plane_data) - - """ LOOP OVER GRAINS """ - valid_ids = [] - valid_hkls = [] - valid_angs = [] - valid_xys = [] - ang_pixel_size = [] - for gparm in grain_param_list: - - # make useful parameters - rMat_c = make_rmat_of_expmap(gparm[:3]) - tVec_c = gparm[3:6] - vInv_s = gparm[6:] - - # All possible bragg conditions as vstacked [tth, eta, ome] - # for each omega solution - angList = np.vstack( - oscill_angles_of_hkls( - full_hkls[:, 1:], - chi, - rMat_c, - bMat, - wavelength, - v_inv=vInv_s, - beam_vec=self.bvec, - ) - ) - - # filter by eta and omega ranges - # ??? get eta range from detector? - allAngs, allHKLs = xrdutil._filter_hkls_eta_ome( - full_hkls, angList, eta_ranges, ome_ranges - ) - allAngs[:, 2] = mapAngle(allAngs[:, 2], ome_period) - - # find points that fall on the panel - det_xy, rMat_s, on_plane = xrdutil._project_on_detector_plane( - allAngs, - self.rmat, - rMat_c, - chi, - self.tvec, - tVec_c, - tVec_s, - self.distortion, - self.bvec, - ) - xys_p, on_panel = self.clip_to_panel(det_xy) - valid_xys.append(xys_p) - - # filter angs and hkls that are on the detector plane - # !!! check this -- seems unnecessary but the results of - # _project_on_detector_plane() can have len < the input? - # the output of _project_on_detector_plane has been modified to - # hand back the index array to remedy this JVB 2020-05-27 - if np.any(~on_plane): - allAngs = np.atleast_2d(allAngs[on_plane, :]) - allHKLs = np.atleast_2d(allHKLs[on_plane, :]) - - # grab hkls and gvec ids for this panel - valid_hkls.append(allHKLs[on_panel, 1:]) - valid_ids.append(allHKLs[on_panel, 0]) - - # reflection angles (voxel centers) and pixel size in (tth, eta) - valid_angs.append(allAngs[on_panel, :]) - ang_pixel_size.append(self.angularPixelSize(xys_p)) - return valid_ids, valid_hkls, valid_angs, valid_xys, ang_pixel_size - - def simulate_laue_pattern( - self, - crystal_data, - minEnergy=5.0, - maxEnergy=35.0, - rmat_s=None, - tvec_s=None, - grain_params=None, - beam_vec=None, - ): - """ """ - if isinstance(crystal_data, PlaneData): - - plane_data = crystal_data - - # grab the expanded list of hkls from plane_data - hkls = np.hstack(plane_data.getSymHKLs()) - - # and the unit plane normals (G-vectors) in CRYSTAL FRAME - gvec_c = np.dot(plane_data.latVecOps['B'], hkls) - - # Filter out g-vectors going in the wrong direction. `gvec_to_xy()` used - # to do this, but not anymore. - to_keep = np.dot(gvec_c.T, self.bvec) <= 0 - - hkls = hkls[:, to_keep] - gvec_c = gvec_c[:, to_keep] - elif len(crystal_data) == 2: - # !!! 
should clean this up - hkls = np.array(crystal_data[0]) - bmat = crystal_data[1] - gvec_c = np.dot(bmat, hkls) - else: - raise RuntimeError( - f'argument list not understood: {crystal_data=}' - ) - nhkls_tot = hkls.shape[1] - - # parse energy ranges - # TODO: allow for spectrum parsing - multipleEnergyRanges = False - if hasattr(maxEnergy, '__len__'): - assert len(maxEnergy) == len( - minEnergy - ), 'energy cutoff ranges must have the same length' - multipleEnergyRanges = True - lmin = [] - lmax = [] - for i in range(len(maxEnergy)): - lmin.append(ct.keVToAngstrom(maxEnergy[i])) - lmax.append(ct.keVToAngstrom(minEnergy[i])) - else: - lmin = ct.keVToAngstrom(maxEnergy) - lmax = ct.keVToAngstrom(minEnergy) - - # parse grain parameters kwarg - if grain_params is None: - grain_params = np.atleast_2d( - np.hstack([np.zeros(6), ct.identity_6x1]) - ) - n_grains = len(grain_params) - - # sample rotation - if rmat_s is None: - rmat_s = ct.identity_3x3 - - # dummy translation vector... make input - if tvec_s is None: - tvec_s = ct.zeros_3 - - # beam vector - if beam_vec is None: - beam_vec = ct.beam_vec - - # ========================================================================= - # LOOP OVER GRAINS - # ========================================================================= - - # pre-allocate output arrays - xy_det = np.nan * np.ones((n_grains, nhkls_tot, 2)) - hkls_in = np.nan * np.ones((n_grains, 3, nhkls_tot)) - angles = np.nan * np.ones((n_grains, nhkls_tot, 2)) - dspacing = np.nan * np.ones((n_grains, nhkls_tot)) - energy = np.nan * np.ones((n_grains, nhkls_tot)) - for iG, gp in enumerate(grain_params): - rmat_c = make_rmat_of_expmap(gp[:3]) - tvec_c = gp[3:6].reshape(3, 1) - vInv_s = mutil.vecMVToSymm(gp[6:].reshape(6, 1)) - - # stretch them: V^(-1) * R * Gc - gvec_s_str = np.dot(vInv_s, np.dot(rmat_c, gvec_c)) - ghat_c_str = mutil.unitVector(np.dot(rmat_c.T, gvec_s_str)) - - # project - dpts = gvec_to_xy( - ghat_c_str.T, - self.rmat, - rmat_s, - rmat_c, - self.tvec, - tvec_s, - tvec_c, - beam_vec=beam_vec, - ) - - # check intersections with detector plane - canIntersect = ~np.isnan(dpts[:, 0]) - npts_in = sum(canIntersect) - - if np.any(canIntersect): - dpts = dpts[canIntersect, :].reshape(npts_in, 2) - dhkl = hkls[:, canIntersect].reshape(3, npts_in) - - rmat_b = make_beam_rmat(beam_vec, ct.eta_vec) - # back to angles - tth_eta, gvec_l = xy_to_gvec( - dpts, - self.rmat, - rmat_s, - self.tvec, - tvec_s, - tvec_c, - rmat_b=rmat_b, - ) - tth_eta = np.vstack(tth_eta).T - - # warp measured points - if self.distortion is not None: - dpts = self.distortion.apply_inverse(dpts) - - # plane spacings and energies - dsp = 1.0 / mutil.rowNorm(gvec_s_str[:, canIntersect].T) - wlen = 2 * dsp * np.sin(0.5 * tth_eta[:, 0]) - - # clip to detector panel - _, on_panel = self.clip_to_panel(dpts, buffer_edges=True) - - if multipleEnergyRanges: - validEnergy = np.zeros(len(wlen), dtype=bool) - for i in range(len(lmin)): - in_energy_range = np.logical_and( - wlen >= lmin[i], wlen <= lmax[i] - ) - validEnergy = validEnergy | in_energy_range - else: - validEnergy = np.logical_and(wlen >= lmin, wlen <= lmax) - - # index for valid reflections - keepers = np.where(np.logical_and(on_panel, validEnergy))[0] - - # assign output arrays - xy_det[iG][keepers, :] = dpts[keepers, :] - hkls_in[iG][:, keepers] = dhkl[:, keepers] - angles[iG][keepers, :] = tth_eta[keepers, :] - dspacing[iG, keepers] = dsp[keepers] - energy[iG, keepers] = ct.keVToAngstrom(wlen[keepers]) - return xy_det, hkls_in, angles, dspacing, energy - - 
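For orientation, here is a minimal usage sketch of the simulate_laue_pattern method deleted above. It assumes a configured concrete panel (e.g. a PlanarDetector) bound to `panel` and a PlaneData instance bound to `pd`; both names are illustrative, not part of this diff:

    import numpy as np

    # grain_params defaults to a single unstrained grain at the origin,
    # so it can be omitted for a quick check
    xy_det, hkls_in, angles, dspacing, energy = panel.simulate_laue_pattern(
        pd, minEnergy=5.0, maxEnergy=35.0
    )

    # outputs are indexed (grain, reflection, ...); NaN entries mark
    # reflections that missed the panel or fell outside the energy cutoffs
    hits = ~np.isnan(xy_det[0][:, 0])
    print(f"{hits.sum()} of {xy_det.shape[1]} reflections hit the panel")

Note that the final assignment fills `energy` via ct.keVToAngstrom(wlen): the keV-to-angstrom conversion E = 12.398 / lambda is its own inverse, so applying it to a wavelength in angstroms returns an energy in keV.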
@staticmethod - def update_memoization_sizes(all_panels): - funcs = [ - _polarization_factor, - _lorentz_factor, - ] - - min_size = len(all_panels) - return Detector.increase_memoization_sizes(funcs, min_size) - - @staticmethod - def increase_memoization_sizes(funcs, min_size): - for f in funcs: - cache_info = f.cache_info() - if cache_info['maxsize'] < min_size: - f.set_cache_maxsize(min_size) - - def calc_physics_package_transmission( - self, - energy: np.floating, - rMat_s: np.array, - physics_package: AbstractPhysicsPackage, - ) -> np.float64: - """get the transmission from the physics package - need to consider HED and HEDM samples separately - """ - bvec = self.bvec - sample_normal = np.dot(rMat_s, [0.0, 0.0, np.sign(bvec[2])]) - seca = 1.0 / np.dot(bvec, sample_normal) - - tth, eta = self.pixel_angles() - angs = np.vstack( - (tth.flatten(), eta.flatten(), np.zeros(tth.flatten().shape)) - ).T - - dvecs = angles_to_dvec(angs, beam_vec=bvec) - - cosb = np.dot(dvecs, sample_normal) - '''angles for which secb <= 0 or close are diffracted beams - almost parallel to the sample surface or backscattered, we - can mask out these values by setting secb to nan - ''' - mask = np.logical_or( - cosb < 0, - np.isclose( - cosb, - 0.0, - atol=5e-2, - ), - ) - cosb[mask] = np.nan - secb = 1.0 / cosb.reshape(self.shape) - - T_sample = self.calc_transmission_sample( - seca, secb, energy, physics_package - ) - T_window = self.calc_transmission_window(secb, energy, physics_package) - - transmission_physics_package = T_sample * T_window - return transmission_physics_package - - def calc_compton_physics_package_transmission( - self, - energy: np.floating, - rMat_s: np.array, - physics_package: AbstractPhysicsPackage, - ) -> np.ndarray: - '''calculate the attenuation of inelastically - scattered photons. since these photons lose energy, - the attenuation length is angle dependent ergo a separate - routine than elastically scattered absorption. - ''' - bvec = self.bvec - sample_normal = np.dot(rMat_s, [0.0, 0.0, np.sign(bvec[2])]) - seca = 1.0 / np.dot(bvec, sample_normal) - - tth, eta = self.pixel_angles() - angs = np.vstack( - (tth.flatten(), eta.flatten(), np.zeros(tth.flatten().shape)) - ).T - - dvecs = angles_to_dvec(angs, beam_vec=bvec) - - cosb = np.dot(dvecs, sample_normal) - '''angles for which secb <= 0 or close are diffracted beams - almost parallel to the sample surface or backscattered, we - can mask out these values by setting secb to nan - ''' - mask = np.logical_or( - cosb < 0, - np.isclose( - cosb, - 0.0, - atol=5e-2, - ), - ) - cosb[mask] = np.nan - secb = 1.0 / cosb.reshape(self.shape) - - T_sample = self.calc_compton_transmission( - seca, secb, energy, physics_package, 'sample' - ) - T_window = self.calc_compton_transmission_window( - secb, energy, physics_package - ) - - return T_sample * T_window - - def calc_compton_window_transmission( - self, - energy: np.floating, - rMat_s: np.ndarray, - physics_package: AbstractPhysicsPackage, - ) -> np.ndarray: - '''calculate the attenuation of inelastically - scattered photons just fropm the window. - since these photons lose energy, the attenuation length - is angle dependent ergo a separate routine than - elastically scattered absorption. 
- ''' - bvec = self.bvec - sample_normal = np.dot(rMat_s, [0.0, 0.0, np.sign(bvec[2])]) - seca = 1.0 / np.dot(bvec, sample_normal) - - tth, eta = self.pixel_angles() - angs = np.vstack( - (tth.flatten(), eta.flatten(), np.zeros(tth.flatten().shape)) - ).T - - dvecs = angles_to_dvec(angs, beam_vec=bvec) - - cosb = np.dot(dvecs, sample_normal) - '''angles for which secb <= 0 or close are diffracted beams - almost parallel to the sample surface or backscattered, we - can mask out these values by setting secb to nan - ''' - mask = np.logical_or( - cosb < 0, - np.isclose( - cosb, - 0.0, - atol=5e-2, - ), - ) - cosb[mask] = np.nan - secb = 1.0 / cosb.reshape(self.shape) - - T_window = self.calc_compton_transmission( - seca, secb, energy, physics_package, 'window' - ) - T_sample = self.calc_compton_transmission_sample( - seca, energy, physics_package - ) - - return T_sample * T_window - - def calc_transmission_sample( - self, - seca: np.array, - secb: np.array, - energy: np.floating, - physics_package: AbstractPhysicsPackage, - ) -> np.array: - thickness_s = physics_package.sample_thickness # in microns - if np.isclose(thickness_s, 0): - return np.ones(self.shape) - - # in microns^-1 - mu_s = 1.0 / physics_package.sample_absorption_length(energy) - x = mu_s * thickness_s - pre = 1.0 / x / (secb - seca) - num = np.exp(-x * seca) - np.exp(-x * secb) - return pre * num - - def calc_transmission_window( - self, - secb: np.array, - energy: np.floating, - physics_package: AbstractPhysicsPackage, - ) -> np.array: - material_w = physics_package.window_material - thickness_w = physics_package.window_thickness # in microns - if material_w is None or np.isclose(thickness_w, 0): - return np.ones(self.shape) - - # in microns^-1 - mu_w = 1.0 / physics_package.window_absorption_length(energy) - return np.exp(-thickness_w * mu_w * secb) - - def calc_compton_transmission( - self, - seca: np.ndarray, - secb: np.ndarray, - energy: np.floating, - physics_package: AbstractPhysicsPackage, - pp_layer: str, - ) -> np.ndarray: - - if pp_layer == 'sample': - formula = physics_package.sample_material - density = physics_package.sample_density - thickness = physics_package.sample_thickness - mu = 1.0 / physics_package.sample_absorption_length(energy) - mu_prime = 1.0 / self.pixel_compton_attenuation_length( - energy, - density, - formula, - ) - elif pp_layer == 'window': - formula = physics_package.window_material - if formula is None: - return np.ones(self.shape) - - density = physics_package.window_density - thickness = physics_package.window_thickness - mu = 1.0 / physics_package.sample_absorption_length(energy) - mu_prime = 1.0 / self.pixel_compton_attenuation_length( - energy, density, formula - ) - - if thickness <= 0: - return np.ones(self.shape) - - x1 = mu * thickness * seca - x2 = mu_prime * thickness * secb - num = np.exp(-x1) - np.exp(-x2) - return -num / (x1 - x2) - - def calc_compton_transmission_sample( - self, - seca: np.ndarray, - energy: np.floating, - physics_package: AbstractPhysicsPackage, - ) -> np.ndarray: - thickness_s = physics_package.sample_thickness # in microns - - mu_s = 1.0 / physics_package.sample_absorption_length(energy) - return np.exp(-mu_s * thickness_s * seca) - - def calc_compton_transmission_window( - self, - secb: np.ndarray, - energy: np.floating, - physics_package: AbstractPhysicsPackage, - ) -> np.ndarray: - formula = physics_package.window_material - if formula is None: - return np.ones(self.shape) - - density = physics_package.window_density # in g/cc - thickness_w = 
physics_package.window_thickness # in microns - - mu_w_prime = 1.0 / self.pixel_compton_attenuation_length( - energy, density, formula - ) - return np.exp(-mu_w_prime * thickness_w * secb) - - def calc_effective_pinhole_area( - self, physics_package: AbstractPhysicsPackage - ) -> np.array: - """get the effective pinhole area correction""" - if np.isclose(physics_package.pinhole_diameter, 0) or np.isclose( - physics_package.pinhole_thickness, 0 - ): - return np.ones(self.shape) - - hod = ( - physics_package.pinhole_thickness - / physics_package.pinhole_diameter - ) - bvec = self.bvec - - tth, eta = self.pixel_angles() - angs = np.vstack( - (tth.flatten(), eta.flatten(), np.zeros(tth.flatten().shape)) - ).T - dvecs = angles_to_dvec(angs, beam_vec=bvec) - - cth = -dvecs[:, 2].reshape(self.shape) - tanth = np.tan(np.arccos(cth)) - f = hod * tanth - f[np.abs(f) > 1.0] = np.nan - asinf = np.arcsin(f) - return 2 / np.pi * cth * (np.pi / 2 - asinf - f * np.cos(asinf)) - - def calc_transmission_generic( - self, - secb: np.array, - thickness: np.floating, - absorption_length: np.floating, - ) -> np.array: - if np.isclose(thickness, 0): - return np.ones(self.shape) - - mu = 1.0 / absorption_length # in microns^-1 - return np.exp(-thickness * mu * secb) - - def calc_transmission_phosphor( - self, - secb: np.array, - thickness: np.floating, - readout_length: np.floating, - absorption_length: np.floating, - energy: np.floating, - pre_U0: np.floating, - ) -> np.array: - if np.isclose(thickness, 0): - return np.ones(self.shape) - - f1 = absorption_length * thickness - f2 = absorption_length * readout_length - arg = secb + 1 / f2 - return pre_U0 * energy * ((1.0 - np.exp(-f1 * arg)) / arg) - - -# ============================================================================= -# UTILITY METHODS -# ============================================================================= - - -def _fix_indices(idx, lo, hi): - nidx = np.array(idx) - off_lo = nidx < lo - off_hi = nidx > hi - nidx[off_lo] = lo - nidx[off_hi] = hi - return nidx - - -def _row_edge_vec(rows, pixel_size_row): - return pixel_size_row * (0.5 * rows - np.arange(rows + 1)) - - -def _col_edge_vec(cols, pixel_size_col): - return pixel_size_col * (np.arange(cols + 1) - 0.5 * cols) - - -# FIXME find a better place for this, and maybe include loop over pixels -@numba.njit(nogil=True, cache=True) -def _solid_angle_of_triangle(vtx_list): - norms = np.sqrt(np.sum(vtx_list * vtx_list, axis=1)) - norms_prod = norms[0] * norms[1] * norms[2] - scalar_triple_product = np.dot( - vtx_list[0], np.cross(vtx_list[2], vtx_list[1]) - ) - denominator = ( - norms_prod - + norms[0] * np.dot(vtx_list[1], vtx_list[2]) - + norms[1] * np.dot(vtx_list[2], vtx_list[0]) - + norms[2] * np.dot(vtx_list[0], vtx_list[1]) - ) - - return 2.0 * np.arctan2(scalar_triple_product, denominator) diff --git a/hexrd/laue/instrument/hedm_instrument.py b/hexrd/laue/instrument/hedm_instrument.py deleted file mode 100644 index 520586804..000000000 --- a/hexrd/laue/instrument/hedm_instrument.py +++ /dev/null @@ -1,3007 +0,0 @@ -# -*- coding: utf-8 -*- -# ============================================================================= -# Copyright (c) 2012, Lawrence Livermore National Security, LLC. -# Produced at the Lawrence Livermore National Laboratory. -# Written by Joel Bernier and others. -# LLNL-CODE-529294. -# All rights reserved. -# -# This file is part of HEXRD. For details on dowloading the source, -# see the file COPYING. -# -# Please also see the file LICENSE. 
-# -# This program is free software; you can redistribute it and/or modify it under -# the terms of the GNU Lesser General Public License (as published by the Free -# Software Foundation) version 2.1 dated February 1999. -# -# This program is distributed in the hope that it will be useful, but -# WITHOUT ANY WARRANTY; without even the IMPLIED WARRANTY OF MERCHANTABILITY -# or FITNESS FOR A PARTICULAR PURPOSE. See the terms and conditions of the -# GNU General Public License for more details. -# -# You should have received a copy of the GNU Lesser General Public -# License along with this program (see file LICENSE); if not, write to -# the Free Software Foundation, Inc., 59 Temple Place, Suite 330, -# Boston, MA 02111-1307 USA or visit . -# ============================================================================= -""" -Created on Fri Dec 9 13:05:27 2016 - -@author: bernier2 -""" -from contextlib import contextmanager -import copy -import logging -import os -from concurrent.futures import ProcessPoolExecutor, ThreadPoolExecutor -from functools import partial -from typing import Optional - -from tqdm import tqdm - -import yaml - -import h5py - -import numpy as np - -from io import IOBase - -from scipy import ndimage -from scipy.linalg import logm -from skimage.measure import regionprops - -from hexrd.core import constants -from hexrd.core.imageseries import ImageSeries -from hexrd.core.imageseries.process import ProcessedImageSeries -from hexrd.core.imageseries.omega import OmegaImageSeries -from hexrd.core.fitting.utils import fit_ring -from hexrd.core.gridutil import make_tolerance_grid -from hexrd.core import matrixutil as mutil -from hexrd.core.transforms.xfcapi import ( - angles_to_gvec, - gvec_to_xy, - make_sample_rmat, - make_rmat_of_expmap, - unit_vector, -) - -# TODO: Resolve extra-workflow dependency -from hexrd.hedm import xrdutil -from hexrd.powder.material.crystallography import PlaneData -from hexrd.core import constants as ct -from hexrd.core.rotations import angleAxisOfRotMat, RotMatEuler, mapAngle -from hexrd.core import distortion as distortion_pkg -from hexrd.core.utils.concurrent import distribute_tasks -from hexrd.core.utils.hdf5 import unwrap_dict_to_h5, unwrap_h5_to_dict -from hexrd.core.utils.yaml import NumpyToNativeDumper -from hexrd.core.valunits import valWUnit -from hexrd.powder.wppf import LeBail - -from hexrd.core.instrument.cylindrical_detector import CylindricalDetector -from hexrd.core.instrument.detector import ( - beam_energy_DFLT, - max_workers_DFLT, - Detector, -) -from hexrd.core.instrument.planar_detector import PlanarDetector - -from skimage.draw import polygon -from skimage.util import random_noise -from hexrd.powder.wppf import wppfsupport - -try: - from fast_histogram import histogram1d - - fast_histogram = True -except ImportError: - from numpy import histogram as histogram1d - - fast_histogram = False - -logger = logging.getLogger() -logger.setLevel('INFO') - -# ============================================================================= -# PARAMETERS -# ============================================================================= - -instrument_name_DFLT = 'instrument' - -beam_vec_DFLT = ct.beam_vec -source_distance_DFLT = np.inf - -eta_vec_DFLT = ct.eta_vec - -panel_id_DFLT = 'generic' -nrows_DFLT = 2048 -ncols_DFLT = 2048 -pixel_size_DFLT = (0.2, 0.2) - -tilt_params_DFLT = np.zeros(3) -t_vec_d_DFLT = np.r_[0.0, 0.0, -1000.0] - -chi_DFLT = 0.0 -t_vec_s_DFLT = np.zeros(3) - -multi_ims_key = ct.shared_ims_key -ims_classes = (ImageSeries, 
ProcessedImageSeries, OmegaImageSeries) - - buffer_key = 'buffer' - distortion_key = 'distortion' - - # ============================================================================= - # UTILITY METHODS - # ============================================================================= - - - def generate_chunks( - nrows, ncols, base_nrows, base_ncols, row_gap=0, col_gap=0 - ): - """ - Generate chunking data for regularly tiled composite detectors. - - Parameters - ---------- - nrows : int - DESCRIPTION. - ncols : int - DESCRIPTION. - base_nrows : int - DESCRIPTION. - base_ncols : int - DESCRIPTION. - row_gap : int, optional - DESCRIPTION. The default is 0. - col_gap : int, optional - DESCRIPTION. The default is 0. - - Returns - ------- - rects : array_like - The (nrows*ncols, ) list of ROI specs (see Notes). - labels : array_like - The (nrows*ncols, ) list of ROI (i, j) matrix indexing labels 'i_j'. - - Notes - ----- - ProcessedImageSeries needs a (2, 2) array for the 'rect' kwarg: - [[row_start, row_stop], - [col_start, col_stop]] - """ - row_starts = np.array([i * (base_nrows + row_gap) for i in range(nrows)]) - col_starts = np.array([i * (base_ncols + col_gap) for i in range(ncols)]) - rr = np.vstack([row_starts, row_starts + base_nrows]) - cc = np.vstack([col_starts, col_starts + base_ncols]) - rects = [] - labels = [] - for i in range(nrows): - for j in range(ncols): - this_rect = np.array([[rr[0, i], rr[1, i]], [cc[0, j], cc[1, j]]]) - rects.append(this_rect) - labels.append('%d_%d' % (i, j)) - return rects, labels - - - def chunk_instrument(instr, rects, labels, use_roi=False): - """ - Generate chunked config for regularly tiled composite detectors. - - Parameters - ---------- - instr : TYPE - DESCRIPTION. - rects : TYPE - DESCRIPTION. - labels : TYPE - DESCRIPTION. - - Returns - ------- - new_icfg_dict : TYPE - DESCRIPTION. - - """ - icfg_dict = instr.write_config() - new_icfg_dict = dict( - beam=icfg_dict['beam'], - oscillation_stage=icfg_dict['oscillation_stage'], - detectors={}, - ) - for panel_id, panel in instr.detectors.items(): - pcfg_dict = panel.config_dict(instr.chi, instr.tvec)['detector'] - - for pnum, pdata in enumerate(zip(rects, labels)): - rect, label = pdata - panel_name = f'{panel_id}_{label}' - - row_col_dim = np.diff(rect) # (2, 1) - shape = tuple(row_col_dim.flatten()) - center = rect[:, 0].reshape(2, 1) + 0.5 * row_col_dim - - sp_tvec = np.concatenate( - [panel.pixelToCart(center.T).flatten(), np.zeros(1)] - ) - - tvec = np.dot(panel.rmat, sp_tvec) + panel.tvec - - # new config dict - tmp_cfg = copy.deepcopy(pcfg_dict) - - # fix sizes - tmp_cfg['pixels']['rows'] = shape[0] - tmp_cfg['pixels']['columns'] = shape[1] - if use_roi: - tmp_cfg['pixels']['roi'] = (rect[0][0], rect[1][0]) - - # update tvec - tmp_cfg['transform']['translation'] = tvec.tolist() - - new_icfg_dict['detectors'][panel_name] = copy.deepcopy(tmp_cfg) - - if panel.panel_buffer is not None: - if panel.panel_buffer.ndim == 2: # have a mask array! - submask = panel.panel_buffer[ - rect[0, 0] : rect[0, 1], rect[1, 0] : rect[1, 1] - ] - new_icfg_dict['detectors'][panel_name]['buffer'] = submask - return new_icfg_dict - - - def _parse_imgser_dict(imgser_dict, det_key, roi=None): - """ - Associates a dict of imageseries to the target panel(s). - - Parameters - ---------- - imgser_dict : dict - The input dict of imageseries. Either `det_key` is in imgser_dict, or - the shared key is. Entries can be an ImageSeries object or a 2- or 3-d - ndarray of images. - det_key : str - The target detector key.
- roi : tuple or None, optional - The roi of the target images. Format is - ((row_start, row_stop), (col_start, col_stop)) - The stops are used in the normal sense of a slice. The default is None. - - Raises - ------ - RuntimeError - If neither `det_key` nor the shared key is in the input imgser_dict; - Also, if the shared key is specified but the roi is None. - - Returns - ------- - ims : hexrd.core.imageseries - The desired imageseries object. - - """ - # grab imageseries for this detector - try: - ims = imgser_dict[det_key] - except KeyError: - matched_det_keys = [det_key in k for k in imgser_dict] - if multi_ims_key in imgser_dict: - images_in = imgser_dict[multi_ims_key] - elif np.any(matched_det_keys): - if sum(matched_det_keys) != 1: - raise RuntimeError(f"multiple entries found for '{det_key}'") - # use boolean array to index the proper key - # !!! these should be in the same order - img_keys = np.asarray(list(imgser_dict.keys())) - matched_det_key = img_keys[matched_det_keys][0] # !!! only one - images_in = imgser_dict[matched_det_key] - else: - raise RuntimeError( - f"neither '{det_key}' nor '{multi_ims_key}' found" - + ' in imageseries input' - ) - - # have images now - if roi is None: - raise RuntimeError( - "roi must be specified to use shared imageseries" - ) - - if isinstance(images_in, ims_classes): - # input is an imageseries of some kind - ims = ProcessedImageSeries( - images_in, - [ - ('rectangle', roi), - ], - ) - if isinstance(images_in, OmegaImageSeries): - # if it was an OmegaImageSeries, must re-cast - ims = OmegaImageSeries(ims) - elif isinstance(images_in, np.ndarray): - # 2- or 3-d array of images - ndim = images_in.ndim - if ndim == 2: - ims = images_in[roi[0][0] : roi[0][1], roi[1][0] : roi[1][1]] - elif ndim == 3: - nrows = roi[0][1] - roi[0][0] - ncols = roi[1][1] - roi[1][0] - n_images = len(images_in) - ims = np.empty((n_images, nrows, ncols), dtype=images_in.dtype) - for i, image in enumerate(images_in): - ims[i, :, :] = image[ - roi[0][0] : roi[0][1], roi[1][0] : roi[1][1] - ] - else: - raise RuntimeError( - f"image input dim must be 2 or 3; you gave {ndim}" - ) - return ims - - - def calc_beam_vec(azim, pola): - """ - Calculate unit beam propagation vector from - spherical coordinate spec in DEGREES. - - ...MAY CHANGE; THIS IS ALSO LOCATED IN XRDUTIL!
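- - Examples - -------- - Per the formula below, an azimuth and polar angle of 90 degrees each - give a beam propagating along -Z in the lab frame (a minimal sketch; - assumes only numpy, imported in this module as np): - - >>> np.allclose(calc_beam_vec(90.0, 90.0), [0.0, 0.0, -1.0]) - True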
- """ - tht = np.radians(azim) - phi = np.radians(pola) - bv = np.r_[ - np.sin(phi) * np.cos(tht), np.cos(phi), np.sin(phi) * np.sin(tht) - ] - return -bv - - - def calc_angles_from_beam_vec(bvec): - """ - Return the azimuth and polar angle from a beam - vector - """ - bvec = np.atleast_1d(bvec).flatten() - nvec = unit_vector(-bvec) - azim = float(np.degrees(np.arctan2(nvec[2], nvec[0]))) - pola = float(np.degrees(np.arccos(nvec[1]))) - return azim, pola - - - def migrate_instrument_config(instrument_config): - """utility function to generate old instrument config dictionary""" - cfg_list = [] - for detector_id in instrument_config['detectors']: - cfg_list.append( - dict( - detector=instrument_config['detectors'][detector_id], - oscillation_stage=instrument_config['oscillation_stage'], - ) - ) - return cfg_list - - - def angle_in_range(angle, ranges, ccw=True, units='degrees'): - """ - Return the index of the first wedge the angle is found in - - WARNING: always clockwise; assumes wedges are not overlapping - """ - tau = 360.0 - if units.lower() == 'radians': - tau = 2 * np.pi - w = np.nan - for i, wedge in enumerate(ranges): - amin = wedge[0] - amax = wedge[1] - check = amin + np.mod(angle - amin, tau) - if check < amax: - w = i - break - return w - - - # ???: move to gridutil? - def centers_of_edge_vec(edges): - assert np.asarray(edges).ndim == 1, "edges must be 1-d" - return np.average(np.vstack([edges[:-1], edges[1:]]), axis=0) - - - def max_tth(instr): - """ - Return the maximum Bragg angle (in radians) subtended by the instrument. - - Parameters - ---------- - instr : hexrd.hedm.instrument.HEDMInstrument instance - The instrument class to evaluate. - - Returns - ------- - tth_max : float - The maximum observable Bragg angle by the instrument in radians. - """ - tth_max = 0.0 - for det in instr.detectors.values(): - ptth, peta = det.pixel_angles() - tth_max = max(np.max(ptth), tth_max) - return tth_max - - - def pixel_resolution(instr): - """ - Return the minimum, median, and maximum angular - resolution of the instrument. - - Parameters - ---------- - instr : HEDMInstrument instance - An instrument. - - Returns - ------- - tth_stats : float - min/median/max tth resolution in radians. - eta_stats : TYPE - min/median/max eta resolution in radians. - - """ - max_tth = np.inf - max_eta = np.inf - min_tth = -np.inf - min_eta = -np.inf - ang_ps_full = [] - for panel in instr.detectors.values(): - angps = panel.angularPixelSize( - np.stack(panel.pixel_coords, axis=0) - .reshape(2, np.cumprod(panel.shape)[-1]) - .T - ) - ang_ps_full.append(angps) - max_tth = min(max_tth, np.min(angps[:, 0])) - max_eta = min(max_eta, np.min(angps[:, 1])) - min_tth = max(min_tth, np.max(angps[:, 0])) - min_eta = max(min_eta, np.max(angps[:, 1])) - med_tth, med_eta = np.median(np.vstack(ang_ps_full), axis=0).flatten() - return (min_tth, med_tth, max_tth), (min_eta, med_eta, max_eta) - - - def max_resolution(instr): - """ - Return the maximum angular resolution of the instrument. - - Parameters - ---------- - instr : HEDMInstrument instance - An instrument. - - Returns - ------- - max_tth : float - Maximum tth resolution in radians. - max_eta : TYPE - maximum eta resolution in radians.
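- - Notes - ----- - A minimal usage sketch (assumes ``instr`` is an already-constructed - HEDMInstrument; not part of the original docstring), e.g. to size a - two-theta histogram at the finest pixel resolution: - - >>> tth_res, eta_res = max_resolution(instr) # radians - >>> nbins = int(np.ceil(max_tth(instr) / tth_res))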
- - """ - max_tth = np.inf - max_eta = np.inf - for panel in instr.detectors.values(): - angps = panel.angularPixelSize( - np.stack(panel.pixel_coords, axis=0) - .reshape(2, np.cumprod(panel.shape)[-1]) - .T - ) - max_tth = min(max_tth, np.min(angps[:, 0])) - max_eta = min(max_eta, np.min(angps[:, 1])) - return max_tth, max_eta - - - def _gaussian_dist(x, cen, fwhm): - sigm = fwhm / (2 * np.sqrt(2 * np.log(2))) - return np.exp(-0.5 * (x - cen) ** 2 / sigm**2) - - - def _sigma_to_fwhm(sigm): - return sigm * ct.sigma_to_fwhm - - - def _fwhm_to_sigma(fwhm): - return fwhm / ct.sigma_to_fwhm - - - # ============================================================================= - # CLASSES - # ============================================================================= - - - class HEDMInstrument(object): - """ - Abstraction of XRD instrument. - - * Distortion needs to be moved to a class with registry; tuple unworkable - * where should reference eta be defined? currently set to default config - """ - - def __init__( - self, - instrument_config=None, - image_series=None, - eta_vector=None, - instrument_name=None, - tilt_calibration_mapping=None, - max_workers=max_workers_DFLT, - physics_package=None, - active_beam_name: Optional[str] = None, - ): - self._id = instrument_name_DFLT - - self._active_beam_name = active_beam_name - self._beam_dict = {} - - if eta_vector is None: - self._eta_vector = eta_vec_DFLT - else: - self._eta_vector = eta_vector - - self.max_workers = max_workers - - self.physics_package = physics_package - - if instrument_config is None: - # Default instrument - if instrument_name is not None: - self._id = instrument_name - self._num_panels = 1 - self._create_default_beam() - - # FIXME: must add cylindrical - self._detectors = dict( - panel_id_DFLT=PlanarDetector( - rows=nrows_DFLT, - cols=ncols_DFLT, - pixel_size=pixel_size_DFLT, - tvec=t_vec_d_DFLT, - tilt=tilt_params_DFLT, - bvec=self.beam_vector, - xrs_dist=self.source_distance, - evec=self._eta_vector, - distortion=None, - roi=None, - group=None, - max_workers=self.max_workers, - ), - ) - - self._tvec = t_vec_s_DFLT - self._chi = chi_DFLT - else: - if isinstance(instrument_config, h5py.File): - tmp = {} - unwrap_h5_to_dict(instrument_config, tmp) - instrument_config = tmp['instrument'] - elif not isinstance(instrument_config, dict): - raise RuntimeError( - "instrument_config must be either an HDF5 file object" - + " or a dictionary. You gave a %s" - % type(instrument_config) - ) - if instrument_name is None: - if 'id' in instrument_config: - self._id = instrument_config['id'] - else: - self._id = instrument_name - - self._num_panels = len(instrument_config['detectors']) - - if instrument_config.get('physics_package', None) is not None: - self.physics_package = instrument_config['physics_package'] - - xrs_config = instrument_config['beam'] - is_single_beam = 'energy' in xrs_config and 'vector' in xrs_config - if is_single_beam: - # Assume single beam.
Load the same way as multibeam - self._create_default_beam() - xrs_config = {self.active_beam_name: xrs_config} - - # Multi beam load - for beam_name, beam in xrs_config.items(): - self._beam_dict[beam_name] = { - 'energy': beam['energy'], - 'vector': calc_beam_vec( - beam['vector']['azimuth'], - beam['vector']['polar_angle'], - ), - 'distance': beam.get('source_distance', np.inf), - } - - # Set the active beam name if not set already - if self._active_beam_name is None: - self._active_beam_name = next(iter(self._beam_dict)) - - # now build detector dict - detectors_config = instrument_config['detectors'] - det_dict = dict.fromkeys(detectors_config) - for det_id, det_info in detectors_config.items(): - det_group = det_info.get('group') # optional detector group - pixel_info = det_info['pixels'] - affine_info = det_info['transform'] - detector_type = det_info.get('detector_type', 'planar') - filter = det_info.get('filter', None) - coating = det_info.get('coating', None) - phosphor = det_info.get('phosphor', None) - try: - saturation_level = det_info['saturation_level'] - except KeyError: - saturation_level = 2**16 - shape = (pixel_info['rows'], pixel_info['columns']) - - panel_buffer = None - if buffer_key in det_info: - det_buffer = det_info[buffer_key] - if det_buffer is not None: - if isinstance(det_buffer, np.ndarray): - if det_buffer.ndim == 2: - if det_buffer.shape != shape: - msg = ( - f'Buffer shape for {det_id} ' - f'({det_buffer.shape}) does not match ' - f'detector shape ({shape})' - ) - raise BufferShapeMismatchError(msg) - else: - assert len(det_buffer) == 2 - panel_buffer = det_buffer - elif isinstance(det_buffer, list): - panel_buffer = np.asarray(det_buffer) - elif np.isscalar(det_buffer): - panel_buffer = det_buffer * np.ones(2) - else: - raise RuntimeError( - "panel buffer spec invalid for %s" % det_id - ) - - # optional roi - roi = pixel_info.get('roi') - - # handle distortion - distortion = None - if distortion_key in det_info: - distortion_cfg = det_info[distortion_key] - if distortion_cfg is not None: - try: - func_name = distortion_cfg['function_name'] - dparams = distortion_cfg['parameters'] - distortion = distortion_pkg.get_mapping( - func_name, dparams - ) - except KeyError: - raise RuntimeError( - "problem with distortion specification" - ) - if detector_type.lower() not in DETECTOR_TYPES: - msg = f'Unknown detector type: {detector_type}' - raise NotImplementedError(msg) - - DetectorClass = DETECTOR_TYPES[detector_type.lower()] - kwargs = dict( - name=det_id, - rows=pixel_info['rows'], - cols=pixel_info['columns'], - pixel_size=pixel_info['size'], - panel_buffer=panel_buffer, - saturation_level=saturation_level, - tvec=affine_info['translation'], - tilt=affine_info['tilt'], - bvec=self.beam_vector, - xrs_dist=self.source_distance, - evec=self._eta_vector, - distortion=distortion, - roi=roi, - group=det_group, - max_workers=self.max_workers, - detector_filter=filter, - detector_coating=coating, - phosphor=phosphor, - ) - - if DetectorClass is CylindricalDetector: - # Add cylindrical detector kwargs - kwargs['radius'] = det_info.get('radius', 49.51) - - det_dict[det_id] = DetectorClass(**kwargs) - - self._detectors = det_dict - - self._tvec = np.r_[ - instrument_config['oscillation_stage']['translation'] - ] - self._chi = instrument_config['oscillation_stage']['chi'] - - # grab angles from beam vec - # !!! these are in DEGREES! 
- azim, pola = calc_angles_from_beam_vec(self.beam_vector) - - self.update_memoization_sizes() - - @property - def mean_detector_center(self) -> np.ndarray: - """Return the mean center for all detectors""" - centers = np.array([panel.tvec for panel in self.detectors.values()]) - return centers.sum(axis=0) / len(centers) - - def mean_group_center(self, group: str) -> np.ndarray: - """Return the mean center for detectors belonging to a group""" - centers = np.array( - [x.tvec for x in self.detectors_in_group(group).values()] - ) - return centers.sum(axis=0) / len(centers) - - @property - def detector_groups(self) -> list[str]: - groups = [] - for panel in self.detectors.values(): - group = panel.group - if group is not None and group not in groups: - groups.append(group) - - return groups - - def detectors_in_group(self, group: str) -> dict[str, Detector]: - return {k: v for k, v in self.detectors.items() if v.group == group} - - # properties for physical size of rectangular detector - @property - def id(self): - return self._id - - @property - def num_panels(self): - return self._num_panels - - @property - def detectors(self): - return self._detectors - - @property - def detector_parameters(self): - pdict = {} - for key, panel in self.detectors.items(): - pdict[key] = panel.config_dict( - self.chi, - self.tvec, - beam_energy=self.beam_energy, - beam_vector=self.beam_vector, - style='hdf5', - ) - return pdict - - @property - def tvec(self): - return self._tvec - - @tvec.setter - def tvec(self, x): - x = np.array(x).flatten() - assert len(x) == 3, 'input must have length = 3' - self._tvec = x - - @property - def chi(self): - return self._chi - - @chi.setter - def chi(self, x): - self._chi = float(x) - - @property - def beam_energy(self) -> float: - return self.active_beam['energy'] - - @beam_energy.setter - def beam_energy(self, x: float): - self.active_beam['energy'] = float(x) - self.beam_dict_modified() - - @property - def beam_wavelength(self): - return ct.keVToAngstrom(self.beam_energy) - - @property - def has_multi_beam(self) -> bool: - return len(self.beam_dict) > 1 - - @property - def beam_dict(self) -> dict: - return self._beam_dict - - def _create_default_beam(self): - name = 'XRS1' - self._beam_dict[name] = { - 'energy': beam_energy_DFLT, - 'vector': beam_vec_DFLT.copy(), - 'distance': np.inf, - } - - if self._active_beam_name is None: - self._active_beam_name = name - - @property - def beam_names(self) -> list[str]: - return list(self.beam_dict) - - def xrs_beam_energy(self, beam_name: Optional[str]) -> float: - if beam_name is None: - beam_name = self.active_beam_name - - return self.beam_dict[beam_name]['energy'] - - @property - def active_beam_name(self) -> str: - return self._active_beam_name - - @active_beam_name.setter - def active_beam_name(self, name: str): - if name not in self.beam_dict: - raise RuntimeError( - f'"{name}" is not present in "{self.beam_names}"' - ) - - self._active_beam_name = name - - # Update anything beam related where we need to - self._update_panel_beams() - - def beam_dict_modified(self): - # A function to call to indicate that the beam dict was modified. - # Update anything beam related where we need to - self._update_panel_beams() - - @property - def active_beam(self) -> dict: - return self.beam_dict[self.active_beam_name] - - def _update_panel_beams(self): - # FIXME: maybe we shouldn't store these on the panels? - # Might be hard to fix, though...
- for panel in self.detectors.values(): - panel.bvec = self.beam_vector - panel.xrs_dist = self.source_distance - - @property - def beam_vector(self) -> np.ndarray: - return self.active_beam['vector'] - - @beam_vector.setter - def beam_vector(self, x: np.ndarray): - x = np.array(x).flatten() - if len(x) == 3: - assert ( - sum(x * x) > 1 - ct.sqrt_epsf - ), 'input must have length = 3 and have unit magnitude' - bvec = x - elif len(x) == 2: - bvec = calc_beam_vec(*x) - else: - raise RuntimeError("input must be a unit vector or angle pair") - - # Modify the beam vector for the active beam dict - self.active_beam['vector'] = bvec - self.beam_dict_modified() - - @property - def source_distance(self): - return self.active_beam['distance'] - - @source_distance.setter - def source_distance(self, x): - assert np.isscalar( - x - ), f"'source_distance' must be a scalar; you input '{x}'" - self.active_beam['distance'] = x - self.beam_dict_modified() - - @property - def eta_vector(self): - return self._eta_vector - - @eta_vector.setter - def eta_vector(self, x): - x = np.array(x).flatten() - assert ( - len(x) == 3 and sum(x * x) > 1 - ct.sqrt_epsf - ), 'input must have length = 3 and have unit magnitude' - self._eta_vector = x - # ...maybe change dictionary item behavior for 3.x compatibility? - for detector_id in self.detectors: - panel = self.detectors[detector_id] - panel.evec = self._eta_vector - - # ========================================================================= - # METHODS - # ========================================================================= - - def write_config(self, file=None, style='yaml', calibration_dict={}): - """WRITE OUT YAML FILE""" - # initialize output dictionary - assert style.lower() in ['yaml', 'hdf5'], ( - "style must be either 'yaml', or 'hdf5'; you gave '%s'" % style - ) - - par_dict = {} - - par_dict['id'] = self.id - - # Multi beam writer - beam_dict = {} - for beam_name, beam in self.beam_dict.items(): - azim, polar = calc_angles_from_beam_vec(beam['vector']) - beam_dict[beam_name] = { - 'energy': beam['energy'], - 'vector': { - 'azimuth': azim, - 'polar_angle': polar, - }, - } - if beam['distance'] != np.inf: - beam_dict[beam_name]['source_distance'] = beam['distance'] - - if len(beam_dict) == 1: - # Just write out a single beam (the classical way) - beam_dict = next(iter(beam_dict.values())) - - par_dict['beam'] = beam_dict - - if calibration_dict: - par_dict['calibration_crystal'] = calibration_dict - - ostage = dict(chi=self.chi, translation=self.tvec.tolist()) - par_dict['oscillation_stage'] = ostage - - det_dict = dict.fromkeys(self.detectors) - for det_name, detector in self.detectors.items(): - # grab panel config - # !!! don't need beam or tvec - # !!!
have vetted style - pdict = detector.config_dict( - chi=self.chi, - tvec=self.tvec, - beam_energy=self.beam_energy, - beam_vector=self.beam_vector, - style=style, - ) - det_dict[det_name] = pdict['detector'] - par_dict['detectors'] = det_dict - - # handle output file if requested - if file is not None: - if style.lower() == 'yaml': - with open(file, 'w') as f: - yaml.dump(par_dict, stream=f, Dumper=NumpyToNativeDumper) - else: - - def _write_group(file): - instr_grp = file.create_group('instrument') - unwrap_dict_to_h5(instr_grp, par_dict, asattr=False) - - # hdf5 - if isinstance(file, str): - with h5py.File(file, 'w') as f: - _write_group(f) - elif isinstance(file, h5py.File): - _write_group(file) - else: - raise TypeError("Unexpected file type.") - - return par_dict - - def extract_polar_maps( - self, - plane_data, - imgser_dict, - active_hkls=None, - threshold=None, - tth_tol=None, - eta_tol=0.25, - ): - """ - Extract eta-omega maps from an imageseries. - - Quick and dirty way to histogram angular patch data for making - pole figures suitable for fiber generation - - TODO: streamline projection code - TODO: normalization - !!!: images must be non-negative! - !!!: plane_data is NOT a copy! - """ - if tth_tol is not None: - plane_data.tThWidth = np.radians(tth_tol) - else: - tth_tol = np.degrees(plane_data.tThWidth) - - # make rings clipped to panel - # !!! eta_idx has the same length as plane_data.exclusions - # each entry are the integer indices into the bins - # !!! eta_edges is the list of eta bin EDGES; same for all - # detectors, so calculate it once - # !!! grab first panel - panel = next(iter(self.detectors.values())) - pow_angs, pow_xys, tth_ranges, eta_idx, eta_edges = ( - panel.make_powder_rings( - plane_data, - merge_hkls=False, - delta_eta=eta_tol, - full_output=True, - ) - ) - - if active_hkls is not None: - assert hasattr( - active_hkls, '__len__' - ), "active_hkls must be an iterable with __len__" - - # need to re-cast for element-wise operations - active_hkls = np.array(active_hkls) - - # these are all active reflection unique hklIDs - active_hklIDs = plane_data.getHKLID(plane_data.hkls, master=True) - - # find indices - idx = np.zeros_like(active_hkls, dtype=int) - for i, input_hklID in enumerate(active_hkls): - try: - idx[i] = np.where(active_hklIDs == input_hklID)[0] - except ValueError: - raise RuntimeError(f"hklID '{input_hklID}' is invalid") - tth_ranges = tth_ranges[idx] - - delta_eta = eta_edges[1] - eta_edges[0] - ncols_eta = len(eta_edges) - 1 - - ring_maps_panel = dict.fromkeys(self.detectors) - for i_d, det_key in enumerate(self.detectors): - print("working on detector '%s'..."
% det_key) - - # grab panel - panel = self.detectors[det_key] - # native_area = panel.pixel_area # pixel ref area - - # pixel angular coords for the detector panel - ptth, peta = panel.pixel_angles() - - # grab imageseries for this detector - ims = _parse_imgser_dict(imgser_dict, det_key, roi=panel.roi) - - # grab omegas from imageseries and squawk if missing - try: - omegas = ims.metadata['omega'] - except KeyError: - raise RuntimeError( - f"imageseries for '{det_key}' has no omega info" - ) - - # initialize maps and assign by row (omega/frame) - nrows_ome = len(omegas) - - # init map with NaNs - shape = (len(tth_ranges), nrows_ome, ncols_eta) - ring_maps = np.full(shape, np.nan) - - # Generate ring parameters once, and re-use them for each image - ring_params = [] - for tthr in tth_ranges: - kwargs = { - 'tthr': tthr, - 'ptth': ptth, - 'peta': peta, - 'eta_edges': eta_edges, - 'delta_eta': delta_eta, - } - ring_params.append(_generate_ring_params(**kwargs)) - - # Divide up the images among processes - tasks = distribute_tasks(len(ims), self.max_workers) - func = partial( - _run_histograms, - ims=ims, - tth_ranges=tth_ranges, - ring_maps=ring_maps, - ring_params=ring_params, - threshold=threshold, - ) - - max_workers = self.max_workers - if max_workers == 1 or len(tasks) == 1: - # Just execute it serially. - for task in tasks: - func(task) - else: - with ThreadPoolExecutor(max_workers=max_workers) as executor: - # Evaluate the results via `list()`, so that if an - # exception is raised in a thread, it will be re-raised - # and visible to the user. - list(executor.map(func, tasks)) - - ring_maps_panel[det_key] = ring_maps - - return ring_maps_panel, eta_edges - - def extract_line_positions( - self, - plane_data, - imgser_dict, - tth_tol=None, - eta_tol=1.0, - npdiv=2, - eta_centers=None, - collapse_eta=True, - collapse_tth=False, - do_interpolation=True, - do_fitting=False, - tth_distortion=None, - fitting_kwargs=None, - ): - """ - Perform annular interpolation on diffraction images. - - Provides data for extracting the line positions from powder diffraction - images, pole figure patches from imageseries, or Bragg peaks from - Laue diffraction images. - - Parameters - ---------- - plane_data : hexrd.crystallography.PlaneData object or array_like - Object determining the 2theta positions for the integration - sectors. If PlaneData, this will be all non-excluded reflections, - subject to merging within PlaneData.tThWidth. If array_like, - interpreted as a list of 2theta angles IN DEGREES. - imgser_dict : dict - Dictionary of powder diffraction images, one for each detector. - tth_tol : scalar, optional - The radial (i.e. 2theta) width of the integration sectors - IN DEGREES. This arg is required if plane_data is array_like. - The default is None. - eta_tol : scalar, optional - The azimuthal (i.e. eta) width of the integration sectors - IN DEGREES. The default is 1. - npdiv : int, optional - The number of oversampling pixel subdivisions (see notes). - The default is 2. - eta_centers : array_like, optional - The desired azimuthal sector centers. The default is None. If - None, then bins are distributed sequentially from (-180, 180). - collapse_eta : bool, optional - Flag for summing sectors in eta. The default is True. - collapse_tth : bool, optional - Flag for summing sectors in 2theta. The default is False. - do_interpolation : bool, optional - If True, perform bilinear interpolation. The default is True.
- do_fitting : bool, optional - If True, then perform spectrum fitting, and append the results - to the returned data. collapse_eta must also be True for this - to have any effect. The default is False. - tth_distortion : special class, optional - for special case of pinhole camera distortions. See - hexrd.hedm.xrdutil.phutil.SampleLayerDistortion (only type supported) - fitting_kwargs : dict, optional - kwargs passed to hexrd.core.fitting.utils.fit_ring if do_fitting is True - - Raises - ------ - RuntimeError - DESCRIPTION. - - Returns - ------- - panel_data : dict - Dictionary over the detectors with the following structure: - [list over (merged) 2theta ranges] - [list over valid eta sectors] - [angle data, - bin intensities, - fitting results] - - Notes - ----- - TODO: May change the array_like input units to degrees. - TODO: rename function. - - """ - - if fitting_kwargs is None: - fitting_kwargs = {} - - # ===================================================================== - # LOOP OVER DETECTORS - # ===================================================================== - logger.info("Interpolating ring data") - pbar_dets = partial( - tqdm, - total=self.num_panels, - desc="Detector", - position=self.num_panels, - ) - - # Split up the workers among the detectors - max_workers_per_detector = max(1, self.max_workers // self.num_panels) - - kwargs = { - 'plane_data': plane_data, - 'tth_tol': tth_tol, - 'eta_tol': eta_tol, - 'eta_centers': eta_centers, - 'npdiv': npdiv, - 'collapse_tth': collapse_tth, - 'collapse_eta': collapse_eta, - 'do_interpolation': do_interpolation, - 'do_fitting': do_fitting, - 'fitting_kwargs': fitting_kwargs, - 'tth_distortion': tth_distortion, - 'max_workers': max_workers_per_detector, - } - func = partial(_extract_detector_line_positions, **kwargs) - - def make_instr_cfg(panel): - return panel.config_dict( - chi=self.chi, - tvec=self.tvec, - beam_energy=self.beam_energy, - beam_vector=self.beam_vector, - style='hdf5', - ) - - images = [] - for detector_id, panel in self.detectors.items(): - images.append( - _parse_imgser_dict(imgser_dict, detector_id, roi=panel.roi) - ) - - panels = [self.detectors[k] for k in self.detectors] - instr_cfgs = [make_instr_cfg(x) for x in panels] - pbp_array = np.arange(self.num_panels) - iter_args = zip(panels, instr_cfgs, images, pbp_array) - with ProcessPoolExecutor( - mp_context=constants.mp_context, max_workers=self.num_panels - ) as executor: - results = list(pbar_dets(executor.map(func, iter_args))) - - panel_data = {} - for det, res in zip(self.detectors, results): - panel_data[det] = res - - return panel_data - - def simulate_powder_pattern( - self, mat_list, params=None, bkgmethod=None, origin=None, noise=None - ): - """ - Generate powder diffraction images from specified materials. - - Parameters - ---------- - mat_list : array_like (n, ) - List of Material classes. - params : dict, optional - Dictionary of LeBail parameters (see Notes). The default is None. - bkgmethod : dict, optional - Background function specification. The default is None. - origin : array_like (3,), optional - Vector describing the origin of the diffraction volume. - The default is None, which is equivalent to [0, 0, 0]. - noise : str, optional - Flag describing type of noise to be applied. The default is None. - - Returns - ------- - img_dict : dict - Dictionary of diffraction images over the detectors. - - Notes - ----- - TODO: add more controls for noise function. - TODO: modify hooks to LeBail parameters.
- TODO: add optional volume fraction weights for phases in mat_list - """ - """ - >> @AUTHOR: Saransh Singh, Lawrence Livermore National Lab, - saransh1@llnl.gov - >> @DATE: 01/22/2021 SS 1.0 original - >> @DETAILS: adding hook to WPPF class. this changes the input list - significantly - """ - if origin is None: - origin = self.tvec - origin = np.asarray(origin).squeeze() - assert len(origin) == 3, "origin must be a 3-element sequence" - - if bkgmethod is None: - bkgmethod = {'chebyshev': 3} - - ''' - if params is None, fill in some sane default values - only the first value is used. the rest of the values are - the upper, lower bounds and vary flag for refinement which - are not used but required for interfacing with WPPF - - zero_error : zero shift error - U, V, W : Cagliotti parameters - P, X, Y : Lorentzian parameters - eta1, eta2, eta3 : Mixing parameters - ''' - if params is None: - # params = {'zero_error': [0.0, -1., 1., True], - # 'U': [2e-1, -1., 1., True], - # 'V': [2e-2, -1., 1., True], - # 'W': [2e-2, -1., 1., True], - # 'X': [2e-1, -1., 1., True], - # 'Y': [2e-1, -1., 1., True] - # } - params = wppfsupport._generate_default_parameters_LeBail( - mat_list, - 1, - bkgmethod, - ) - ''' - use the material list to obtain the dictionary of initial intensities - we need to make sure that the intensities are properly scaled by the - lorentz polarization factor. since the calculation is done in the - LeBail class, all that means is the initial intensity needs that factor - in there - ''' - img_dict = dict.fromkeys(self.detectors) - - # find min and max tth over all panels - tth_mi = np.inf - tth_ma = 0.0 - ptth_dict = dict.fromkeys(self.detectors) - for det_key, panel in self.detectors.items(): - ptth, peta = panel.pixel_angles(origin=origin) - tth_mi = min(tth_mi, ptth.min()) - tth_ma = max(tth_ma, ptth.max()) - ptth_dict[det_key] = ptth - - ''' - now make a list of two theta and dummy ones for the experimental - spectrum; this is never really used, so any values should be okay. We - could also pass the integrated detector image if we would like to - simulate some realistic background. But that's for another day. - ''' - # convert angles to degrees because that's what the WPPF expects - tth_mi = np.degrees(tth_mi) - tth_ma = np.degrees(tth_ma) - - # get tth angular resolution for instrument - ang_res = max_resolution(self) - - # !!!
calc nsteps by oversampling - nsteps = int(np.ceil(2 * (tth_ma - tth_mi) / np.degrees(ang_res[0]))) - - # evaluation vector for LeBail - tth = np.linspace(tth_mi, tth_ma, nsteps) - - expt = np.vstack([tth, np.ones_like(tth)]).T - - wavelength = [ - valWUnit('lp', 'length', self.beam_wavelength, 'angstrom'), - 1.0, - ] - - ''' - now go through the material list and get the intensity dictionary - ''' - intensity = {} - for mat in mat_list: - - multiplicity = mat.planeData.getMultiplicity() - - tth = mat.planeData.getTTh() - - LP = ( - (1 + np.cos(tth) ** 2) - / np.cos(0.5 * tth) - / np.sin(0.5 * tth) ** 2 - ) - - intensity[mat.name] = {} - intensity[mat.name]['synchrotron'] = ( - mat.planeData.structFact * LP * multiplicity - ) - - kwargs = { - 'expt_spectrum': expt, - 'params': params, - 'phases': mat_list, - 'wavelength': {'synchrotron': wavelength}, - 'bkgmethod': bkgmethod, - 'intensity_init': intensity, - 'peakshape': 'pvtch', - } - - self.WPPFclass = LeBail(**kwargs) - - self.simulated_spectrum = self.WPPFclass.spectrum_sim - self.background = self.WPPFclass.background - - ''' - now that we have the simulated intensities, it's time to get the - two theta for the detector pixels and interpolate what the intensity - for each pixel should be - ''' - - img_dict = dict.fromkeys(self.detectors) - for det_key, panel in self.detectors.items(): - ptth = ptth_dict[det_key] - - img = np.interp( - np.degrees(ptth), - self.simulated_spectrum.x, - self.simulated_spectrum.y + self.background.y, - ) - - if noise is None: - img_dict[det_key] = img - - else: - # Rescale to be between 0 and 1 so random_noise() will work - prev_max = img.max() - img /= prev_max - - if noise.lower() == 'poisson': - im_noise = random_noise(img, mode='poisson', clip=True) - mi = im_noise.min() - ma = im_noise.max() - if ma > mi: - im_noise = (im_noise - mi) / (ma - mi) - - elif noise.lower() == 'gaussian': - im_noise = random_noise(img, mode='gaussian', clip=True) - - elif noise.lower() == 'salt': - im_noise = random_noise(img, mode='salt') - - elif noise.lower() == 'pepper': - im_noise = random_noise(img, mode='pepper') - - elif noise.lower() == 's&p': - im_noise = random_noise(img, mode='s&p') - - elif noise.lower() == 'speckle': - im_noise = random_noise(img, mode='speckle', clip=True) - - # Now scale back up - img_dict[det_key] = im_noise * prev_max - - return img_dict - - def simulate_laue_pattern( - self, - crystal_data, - minEnergy=5.0, - maxEnergy=35.0, - rmat_s=None, - grain_params=None, - ): - """ - Simulate Laue diffraction over the instrument. - - Parameters - ---------- - crystal_data : TYPE - DESCRIPTION. - minEnergy : TYPE, optional - DESCRIPTION. The default is 5.0. - maxEnergy : TYPE, optional - DESCRIPTION. The default is 35.0. - rmat_s : TYPE, optional - DESCRIPTION. The default is None. - grain_params : TYPE, optional - DESCRIPTION. The default is None. - - Returns - ------- - results : TYPE - DESCRIPTION. - - xy_det, hkls_in, angles, dspacing, energy - - TODO: revisit output; dict, or concatenated list?
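- - A minimal calling sketch (``instr`` and ``xtal_data`` are assumed, - illustrative names); per the note above, each per-detector entry - unpacks as (xy_det, hkls_in, angles, dspacing, energy): - - >>> sims = instr.simulate_laue_pattern(xtal_data, minEnergy=5.0, maxEnergy=25.0) - >>> for det_key, (xy_det, hkls_in, angles, dspacing, energy) in sims.items(): - ...     pass  # one result tuple per detector panel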
- """ - results = dict.fromkeys(self.detectors) - for det_key, panel in self.detectors.items(): - results[det_key] = panel.simulate_laue_pattern( - crystal_data, - minEnergy=minEnergy, - maxEnergy=maxEnergy, - rmat_s=rmat_s, - tvec_s=self.tvec, - grain_params=grain_params, - beam_vec=self.beam_vector, - ) - return results - - def simulate_rotation_series( - self, - plane_data, - grain_param_list, - eta_ranges=[ - (-np.pi, np.pi), - ], - ome_ranges=[ - (-np.pi, np.pi), - ], - ome_period=(-np.pi, np.pi), - wavelength=None, - ): - """ - Simulate a monochromatic rotation series over the instrument. - - Parameters - ---------- - plane_data : TYPE - DESCRIPTION. - grain_param_list : TYPE - DESCRIPTION. - eta_ranges : TYPE, optional - DESCRIPTION. The default is [(-np.pi, np.pi), ]. - ome_ranges : TYPE, optional - DESCRIPTION. The default is [(-np.pi, np.pi), ]. - ome_period : TYPE, optional - DESCRIPTION. The default is (-np.pi, np.pi). - wavelength : TYPE, optional - DESCRIPTION. The default is None. - - Returns - ------- - results : TYPE - DESCRIPTION. - - TODO: revisit output; dict, or concatenated list? - """ - results = dict.fromkeys(self.detectors) - for det_key, panel in self.detectors.items(): - results[det_key] = panel.simulate_rotation_series( - plane_data, - grain_param_list, - eta_ranges=eta_ranges, - ome_ranges=ome_ranges, - ome_period=ome_period, - chi=self.chi, - tVec_s=self.tvec, - wavelength=wavelength, - ) - return results - - def pull_spots( - self, - plane_data, - grain_params, - imgser_dict, - tth_tol=0.25, - eta_tol=1.0, - ome_tol=1.0, - npdiv=2, - threshold=10, - eta_ranges=[ - (-np.pi, np.pi), - ], - ome_period=None, - dirname='results', - filename=None, - output_format='text', - return_spot_list=False, - quiet=True, - check_only=False, - interp='nearest', - ): - """ - Extract reflection info from a rotation series. - - Input must be encoded as an OmegaImageSeries object. - - Parameters - ---------- - plane_data : TYPE - DESCRIPTION. - grain_params : TYPE - DESCRIPTION. - imgser_dict : TYPE - DESCRIPTION. - tth_tol : TYPE, optional - DESCRIPTION. The default is 0.25. - eta_tol : TYPE, optional - DESCRIPTION. The default is 1.0. - ome_tol : TYPE, optional - DESCRIPTION. The default is 1.0. - npdiv : TYPE, optional - DESCRIPTION. The default is 2. - threshold : TYPE, optional - DESCRIPTION. The default is 10. - eta_ranges : TYPE, optional - DESCRIPTION. The default is [(-np.pi, np.pi), ]. - ome_period : TYPE, optional - DESCRIPTION. The default is (-np.pi, np.pi). - dirname : TYPE, optional - DESCRIPTION. The default is 'results'. - filename : TYPE, optional - DESCRIPTION. The default is None. - output_format : TYPE, optional - DESCRIPTION. The default is 'text'. - return_spot_list : TYPE, optional - DESCRIPTION. The default is False. - quiet : TYPE, optional - DESCRIPTION. The default is True. - check_only : TYPE, optional - DESCRIPTION. The default is False. - interp : TYPE, optional - DESCRIPTION. The default is 'nearest'. - - Returns - ------- - compl : TYPE - DESCRIPTION. - output : TYPE - DESCRIPTION. - - """ - # grain parameters - rMat_c = make_rmat_of_expmap(grain_params[:3]) - tVec_c = grain_params[3:6] - - # grab omega ranges from first imageseries - # - # WARNING: all imageseries AND all wedges within are assumed to have - # the same omega values; put in a check that they are all the same???
- oims0 = next(iter(imgser_dict.values())) - ome_ranges = [ - np.radians([i['ostart'], i['ostop']]) - for i in oims0.omegawedges.wedges - ] - if ome_period is None: - ims = next(iter(imgser_dict.values())) - ostart = ims.omega[0, 0] - ome_period = np.radians(ostart + np.r_[0.0, 360.0]) - - # delta omega in DEGREES grabbed from first imageseries in the dict - delta_ome = oims0.omega[0, 1] - oims0.omega[0, 0] - - # make omega grid for frame expansion around reference frame - # in DEGREES - ndiv_ome, ome_del = make_tolerance_grid( - delta_ome, - ome_tol, - 1, - adjust_window=True, - ) - - # generate structuring element for connected component labeling - if ndiv_ome == 1: - label_struct = ndimage.generate_binary_structure(2, 2) - else: - label_struct = ndimage.generate_binary_structure(3, 3) - - # simulate rotation series - sim_results = self.simulate_rotation_series( - plane_data, - [ - grain_params, - ], - eta_ranges=eta_ranges, - ome_ranges=ome_ranges, - ome_period=ome_period, - ) - - # patch vertex generator (global for instrument) - tol_vec = 0.5 * np.radians( - [ - -tth_tol, - -eta_tol, - -tth_tol, - eta_tol, - tth_tol, - eta_tol, - tth_tol, - -eta_tol, - ] - ) - - # prepare output if requested - if filename is not None and output_format.lower() == 'hdf5': - this_filename = os.path.join(dirname, filename) - writer = GrainDataWriter_h5( - os.path.join(dirname, filename), - self.write_config(), - grain_params, - ) - - # ===================================================================== - # LOOP OVER PANELS - # ===================================================================== - iRefl = 0 - next_invalid_peak_id = -100 - compl = [] - output = dict.fromkeys(self.detectors) - for detector_id, panel in self.detectors.items(): - # initialize text-based output writer - if filename is not None and output_format.lower() == 'text': - output_dir = os.path.join(dirname, detector_id) - os.makedirs(output_dir, exist_ok=True) - this_filename = os.path.join(output_dir, filename) - writer = PatchDataWriter(this_filename) - - # grab panel - instr_cfg = panel.config_dict( - self.chi, - self.tvec, - beam_energy=self.beam_energy, - beam_vector=self.beam_vector, - style='hdf5', - ) - native_area = panel.pixel_area # pixel ref area - - # pull out the OmegaImageSeries for this panel from input dict - ome_imgser = _parse_imgser_dict( - imgser_dict, detector_id, roi=panel.roi - ) - - # extract simulation results - sim_results_p = sim_results[detector_id] - hkl_ids = sim_results_p[0][0] - hkls_p = sim_results_p[1][0] - ang_centers = sim_results_p[2][0] - xy_centers = sim_results_p[3][0] - ang_pixel_size = sim_results_p[4][0] - - # now verify that full patch falls on detector... - # ???: strictly necessary? - # - # patch vertex array from sim - nangs = len(ang_centers) - patch_vertices = ( - np.tile(ang_centers[:, :2], (1, 4)) - + np.tile(tol_vec, (nangs, 1)) - ).reshape(4 * nangs, 2) - ome_dupl = np.tile(ang_centers[:, 2], (4, 1)).T.reshape( - len(patch_vertices), 1 - ) - - # find vertices that all fall on the panel - det_xy, rmats_s, on_plane = xrdutil._project_on_detector_plane( - np.hstack([patch_vertices, ome_dupl]), - panel.rmat, - rMat_c, - self.chi, - panel.tvec, - tVec_c, - self.tvec, - panel.distortion, - ) - _, on_panel = panel.clip_to_panel(det_xy, buffer_edges=True) - - # all vertices must be on... - patch_is_on = np.all(on_panel.reshape(nangs, 4), axis=1) - patch_xys = det_xy.reshape(nangs, 4, 2)[patch_is_on] - - # re-filter... 
- hkl_ids = hkl_ids[patch_is_on] - hkls_p = hkls_p[patch_is_on, :] - ang_centers = ang_centers[patch_is_on, :] - xy_centers = xy_centers[patch_is_on, :] - ang_pixel_size = ang_pixel_size[patch_is_on, :] - - # TODO: add polygon testing right here! - # done - if check_only: - patch_output = [] - for i_pt, angs in enumerate(ang_centers): - # the evaluation omegas; - # expand about the central value using tol vector - ome_eval = np.degrees(angs[2]) + ome_del - - # ...vectorize the omega_to_frame function to avoid loop? - frame_indices = [ - ome_imgser.omega_to_frame(ome)[0] for ome in ome_eval - ] - if -1 in frame_indices: - if not quiet: - msg = """ - window for (%d %d %d) falls outside omega range - """ % tuple( - hkls_p[i_pt, :] - ) - print(msg) - continue - else: - these_vertices = patch_xys[i_pt] - ijs = panel.cartToPixel(these_vertices) - ii, jj = polygon(ijs[:, 0], ijs[:, 1]) - contains_signal = False - for i_frame in frame_indices: - contains_signal = contains_signal or np.any( - ome_imgser[i_frame][ii, jj] > threshold - ) - compl.append(contains_signal) - patch_output.append((ii, jj, frame_indices)) - else: - # make the tth,eta patches for interpolation - patches = xrdutil.make_reflection_patches( - instr_cfg, - ang_centers[:, :2], - ang_pixel_size, - omega=ang_centers[:, 2], - tth_tol=tth_tol, - eta_tol=eta_tol, - rmat_c=rMat_c, - tvec_c=tVec_c, - npdiv=npdiv, - quiet=True, - ) - - # GRAND LOOP over reflections for this panel - patch_output = [] - for i_pt, patch in enumerate(patches): - - # strip relevant objects out of current patch - vtx_angs, vtx_xy, conn, areas, xy_eval, ijs = patch - - prows, pcols = areas.shape - nrm_fac = areas / float(native_area) - nrm_fac = nrm_fac / np.min(nrm_fac) - - # grab hkl info - hkl = hkls_p[i_pt, :] - hkl_id = hkl_ids[i_pt] - - # edge arrays - tth_edges = vtx_angs[0][0, :] - delta_tth = tth_edges[1] - tth_edges[0] - eta_edges = vtx_angs[1][:, 0] - delta_eta = eta_edges[1] - eta_edges[0] - - # need to reshape eval pts for interpolation - xy_eval = np.vstack( - [xy_eval[0].flatten(), xy_eval[1].flatten()] - ).T - - # the evaluation omegas; - # expand about the central value using tol vector - ome_eval = np.degrees(ang_centers[i_pt, 2]) + ome_del - - # ???: vectorize the omega_to_frame function to avoid loop? - frame_indices = [ - ome_imgser.omega_to_frame(ome)[0] for ome in ome_eval - ] - - if -1 in frame_indices: - if not quiet: - msg = """ - window for (%d%d%d) falls outside omega range - """ % tuple( - hkl - ) - print(msg) - continue - else: - # initialize spot data parameters - # !!! 
maybe change these to nan to not fuck up writer - peak_id = next_invalid_peak_id - sum_int = np.nan - max_int = np.nan - meas_angs = np.nan * np.ones(3) - meas_xy = np.nan * np.ones(2) - - # quick check for intensity - contains_signal = False - patch_data_raw = [] - for i_frame in frame_indices: - tmp = ome_imgser[i_frame][ijs[0], ijs[1]] - contains_signal = contains_signal or np.any( - tmp > threshold - ) - patch_data_raw.append(tmp) - patch_data_raw = np.stack(patch_data_raw, axis=0) - compl.append(contains_signal) - - if contains_signal: - # initialize patch data array for intensities - if interp.lower() == 'bilinear': - patch_data = np.zeros( - (len(frame_indices), prows, pcols) - ) - for i, i_frame in enumerate(frame_indices): - patch_data[i] = panel.interpolate_bilinear( - xy_eval, - ome_imgser[i_frame], - pad_with_nans=False, - ).reshape( - prows, pcols - ) # * nrm_fac - elif interp.lower() == 'nearest': - patch_data = patch_data_raw # * nrm_fac - else: - msg = ( - "interpolation option " - + "'%s' not understood" - ) - raise RuntimeError(msg % interp) - - # now have interpolated patch data... - labels, num_peaks = ndimage.label( - patch_data > threshold, structure=label_struct - ) - slabels = np.arange(1, num_peaks + 1) - - if num_peaks > 0: - peak_id = iRefl - props = regionprops(labels, patch_data) - coms = np.vstack( - [x.weighted_centroid for x in props] - ) - if num_peaks > 1: - center = np.r_[patch_data.shape] * 0.5 - center_t = np.tile(center, (num_peaks, 1)) - com_diff = coms - center_t - closest_peak_idx = np.argmin( - np.sum(com_diff**2, axis=1) - ) - else: - closest_peak_idx = 0 - coms = coms[closest_peak_idx] - # meas_omes = \ - # ome_edges[0] + (0.5 + coms[0])*delta_ome - meas_omes = ome_eval[0] + coms[0] * delta_ome - meas_angs = np.hstack( - [ - tth_edges[0] - + (0.5 + coms[2]) * delta_tth, - eta_edges[0] - + (0.5 + coms[1]) * delta_eta, - mapAngle( - np.radians(meas_omes), ome_period - ), - ] - ) - - # intensities - # - summed is 'integrated' over interpolated - # data - # - max is max of raw input data - sum_int = np.sum( - patch_data[ - labels == slabels[closest_peak_idx] - ] - ) - max_int = np.max( - patch_data_raw[ - labels == slabels[closest_peak_idx] - ] - ) - # ???: Should this only use labeled pixels? - # Those are segmented from interpolated data, - # not raw; likely ok in most cases. - - # need MEASURED xy coords - # FIXME: overload angles_to_cart? - gvec_c = angles_to_gvec( - meas_angs, - chi=self.chi, - rmat_c=rMat_c, - beam_vec=self.beam_vector, - ) - rMat_s = make_sample_rmat( - self.chi, meas_angs[2] - ) - meas_xy = gvec_to_xy( - gvec_c, - panel.rmat, - rMat_s, - rMat_c, - panel.tvec, - self.tvec, - tVec_c, - beam_vec=self.beam_vector, - ) - if panel.distortion is not None: - meas_xy = panel.distortion.apply_inverse( - np.atleast_2d(meas_xy) - ).flatten() - # FIXME: why is this suddenly necessary??? - meas_xy = meas_xy.squeeze() - else: - patch_data = patch_data_raw - - if peak_id < 0: - # The peak is invalid. - # Decrement the next invalid peak ID. 
- next_invalid_peak_id -= 1 - - # write output - if filename is not None: - if output_format.lower() == 'text': - writer.dump_patch( - peak_id, - hkl_id, - hkl, - sum_int, - max_int, - ang_centers[i_pt], - meas_angs, - xy_centers[i_pt], - meas_xy, - ) - elif output_format.lower() == 'hdf5': - xyc_arr = xy_eval.reshape( - prows, pcols, 2 - ).transpose(2, 0, 1) - writer.dump_patch( - detector_id, - iRefl, - peak_id, - hkl_id, - hkl, - tth_edges, - eta_edges, - np.radians(ome_eval), - xyc_arr, - ijs, - frame_indices, - patch_data, - ang_centers[i_pt], - xy_centers[i_pt], - meas_angs, - meas_xy, - ) - - if return_spot_list: - # Full output - xyc_arr = xy_eval.reshape( - prows, pcols, 2 - ).transpose(2, 0, 1) - _patch_output = [ - detector_id, - iRefl, - peak_id, - hkl_id, - hkl, - tth_edges, - eta_edges, - np.radians(ome_eval), - xyc_arr, - ijs, - frame_indices, - patch_data, - ang_centers[i_pt], - xy_centers[i_pt], - meas_angs, - meas_xy, - ] - else: - # Trimmed output - _patch_output = [ - peak_id, - hkl_id, - hkl, - sum_int, - max_int, - ang_centers[i_pt], - meas_angs, - meas_xy, - ] - patch_output.append(_patch_output) - iRefl += 1 - output[detector_id] = patch_output - if filename is not None and output_format.lower() == 'text': - writer.close() - if filename is not None and output_format.lower() == 'hdf5': - writer.close() - return compl, output - - def update_memoization_sizes(self): - # Resize all known memoization functions to have a cache at least - # the size of the number of detectors. - all_panels = list(self.detectors.values()) - PlanarDetector.update_memoization_sizes(all_panels) - CylindricalDetector.update_memoization_sizes(all_panels) - - def calc_transmission( - self, rMat_s: np.ndarray = None - ) -> dict[str, np.ndarray]: - """calculate the transmission from the - filter and polymer coating. the inverse of this - number is the intensity correction that needs - to be applied. 
actual computation is done inside - the detector class - """ - if rMat_s is None: - rMat_s = ct.identity_3x3 - - energy = self.beam_energy - transmissions = {} - for det_name, det in self.detectors.items(): - transmission_filter, transmission_phosphor = ( - det.calc_filter_coating_transmission(energy) - ) - - transmission = transmission_filter * transmission_phosphor - - if self.physics_package is not None: - transmission_physics_package = ( - det.calc_physics_package_transmission( - energy, rMat_s, self.physics_package - ) - ) - effective_pinhole_area = det.calc_effective_pinhole_area( - self.physics_package - ) - - transmission = ( - transmission - * transmission_physics_package - * effective_pinhole_area - ) - - transmissions[det_name] = transmission - return transmissions - - -# ============================================================================= -# UTILITIES -# ============================================================================= - - -class PatchDataWriter(object): - """Class for dumping Bragg reflection data.""" - - def __init__(self, filename): - self._delim = ' ' - # fmt: off - header_items = ( - '# ID', 'PID', - 'H', 'K', 'L', - 'sum(int)', 'max(int)', - 'pred tth', 'pred eta', 'pred ome', - 'meas tth', 'meas eta', 'meas ome', - 'pred X', 'pred Y', - 'meas X', 'meas Y' - ) - self._header = self._delim.join([ - self._delim.join(np.tile('{:<6}', 5)).format(*header_items[:5]), - self._delim.join(np.tile('{:<12}', 2)).format(*header_items[5:7]), - self._delim.join(np.tile('{:<23}', 10)).format(*header_items[7:17]) - ]) - # fmt: on - if isinstance(filename, IOBase): - self.fid = filename - else: - self.fid = open(filename, 'w') - print(self._header, file=self.fid) - - def __del__(self): - self.close() - - def close(self): - self.fid.close() - - def dump_patch( - self, peak_id, hkl_id, hkl, spot_int, max_int, pangs, mangs, pxy, mxy - ): - """ - !!! maybe need to check that last four inputs are arrays - """ - if mangs is None: - spot_int = np.nan - max_int = np.nan - mangs = np.nan * np.ones(3) - mxy = np.nan * np.ones(2) - - res = ( - [int(peak_id), int(hkl_id)] - + np.array(hkl, dtype=int).tolist() - + [spot_int, max_int] - + pangs.tolist() - + mangs.tolist() - + pxy.tolist() - + mxy.tolist() - ) - - output_str = self._delim.join( - [ - self._delim.join(np.tile('{:<6d}', 5)).format(*res[:5]), - self._delim.join(np.tile('{:<12e}', 2)).format(*res[5:7]), - self._delim.join(np.tile('{:<23.16e}', 10)).format(*res[7:]), - ] - ) - print(output_str, file=self.fid) - return output_str - - -class GrainDataWriter(object): - """Class for dumping grain data.""" - - def __init__(self, filename=None, array=None): - """Writes to either file or np array - - Array must be initialized with number of rows to be written. 
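- - A minimal sketch of the array mode (``n_grains``, ``expmap``, ``tvec`` - and ``vinv`` are assumed, illustrative names; ``dump_grain`` expects - the 12 grain parameters as expmap + tvec + inv(V) stretch): - - >>> table = np.full((n_grains, 21), np.nan) # 21 columns per grain - >>> gw = GrainDataWriter(array=table) - >>> gw.dump_grain(0, 1.0, 0.0, np.hstack([expmap, tvec, vinv]))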
- """ - if filename is None and array is None: - raise RuntimeError( - 'GrainDataWriter must be specified with filename or array' - ) - - self.array = None - self.fid = None - - # array supersedes filename - if array is not None: - assert ( - array.shape[1] == 21 - ), f'grain data table must have 21 columns, not {array.shape[1]}' - self.array = array - self._array_row = 0 - return - - self._delim = ' ' - # fmt: off - header_items = ( - '# grain ID', 'completeness', 'chi^2', - 'exp_map_c[0]', 'exp_map_c[1]', 'exp_map_c[2]', - 't_vec_c[0]', 't_vec_c[1]', 't_vec_c[2]', - 'inv(V_s)[0,0]', 'inv(V_s)[1,1]', 'inv(V_s)[2,2]', - 'inv(V_s)[1,2]*sqrt(2)', - 'inv(V_s)[0,2]*sqrt(2)', - 'inv(V_s)[0,1]*sqrt(2)', - 'ln(V_s)[0,0]', 'ln(V_s)[1,1]', 'ln(V_s)[2,2]', - 'ln(V_s)[1,2]', 'ln(V_s)[0,2]', 'ln(V_s)[0,1]' - ) - self._header = self._delim.join( - [self._delim.join( - np.tile('{:<12}', 3) - ).format(*header_items[:3]), - self._delim.join( - np.tile('{:<23}', len(header_items) - 3) - ).format(*header_items[3:])] - ) - # fmt: on - if isinstance(filename, IOBase): - self.fid = filename - else: - self.fid = open(filename, 'w') - print(self._header, file=self.fid) - - def __del__(self): - self.close() - - def close(self): - if self.fid is not None: - self.fid.close() - - def dump_grain(self, grain_id, completeness, chisq, grain_params): - assert ( - len(grain_params) == 12 - ), "len(grain_params) must be 12, not %d" % len(grain_params) - - # extract strain - emat = logm(np.linalg.inv(mutil.vecMVToSymm(grain_params[6:]))) - evec = mutil.symmToVecMV(emat, scale=False) - - res = ( - [int(grain_id), completeness, chisq] - + grain_params.tolist() - + evec.tolist() - ) - - if self.array is not None: - row = self._array_row - assert ( - row < self.array.shape[0] - ), f'invalid row {row} in array table' - self.array[row] = res - self._array_row += 1 - return res - - # (else) format and write to file - output_str = self._delim.join( - [ - self._delim.join(['{:<12d}', '{:<12f}', '{:<12e}']).format( - *res[:3] - ), - self._delim.join(np.tile('{:<23.16e}', len(res) - 3)).format( - *res[3:] - ), - ] - ) - print(output_str, file=self.fid) - return output_str - - - class GrainDataWriter_h5(object): - """Class for dumping grain results to an HDF5 archive.
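- - A minimal construction sketch (``instr`` and ``grain_params`` are - assumed, illustrative names; a ".hdf5" suffix is appended to a string - filename): - - >>> writer = GrainDataWriter_h5('spots', instr.write_config(), grain_params) - >>> writer.close()  # one dump_patch call per reflection goes in between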
- - TODO: add material spec - """ - - def __init__(self, filename, instr_cfg, grain_params, use_attr=False): - if isinstance(filename, h5py.File): - self.fid = filename - else: - self.fid = h5py.File(filename + ".hdf5", "w") - icfg = dict(instr_cfg) - - # add instrument groups and attributes - self.instr_grp = self.fid.create_group('instrument') - unwrap_dict_to_h5(self.instr_grp, icfg, asattr=use_attr) - - # add grain group - self.grain_grp = self.fid.create_group('grain') - rmat_c = make_rmat_of_expmap(grain_params[:3]) - tvec_c = np.array(grain_params[3:6]).flatten() - vinv_s = np.array(grain_params[6:]).flatten() - vmat_s = np.linalg.inv(mutil.vecMVToSymm(vinv_s)) - - if use_attr: # attribute version - self.grain_grp.attrs.create('rmat_c', rmat_c) - self.grain_grp.attrs.create('tvec_c', tvec_c) - self.grain_grp.attrs.create('inv(V)_s', vinv_s) - self.grain_grp.attrs.create('vmat_s', vmat_s) - else: # dataset version - self.grain_grp.create_dataset('rmat_c', data=rmat_c) - self.grain_grp.create_dataset('tvec_c', data=tvec_c) - self.grain_grp.create_dataset('inv(V)_s', data=vinv_s) - self.grain_grp.create_dataset('vmat_s', data=vmat_s) - - data_key = 'reflection_data' - self.data_grp = self.fid.create_group(data_key) - - for det_key in self.instr_grp['detectors'].keys(): - self.data_grp.create_group(det_key) - - # FIXME: throws exception when called after close method - # def __del__(self): - # self.close() - - def close(self): - self.fid.close() - - def dump_patch( - self, - panel_id, - i_refl, - peak_id, - hkl_id, - hkl, - tth_edges, - eta_edges, - ome_centers, - xy_centers, - ijs, - frame_indices, - spot_data, - pangs, - pxy, - mangs, - mxy, - gzip=1, - ): - """ - to be called inside loop over patches - - default GZIP level for data arrays is 1 - """ - fi = np.array(frame_indices, dtype=int) - - panel_grp = self.data_grp[panel_id] - spot_grp = panel_grp.create_group("spot_%05d" % i_refl) - spot_grp.attrs.create('peak_id', int(peak_id)) - spot_grp.attrs.create('hkl_id', int(hkl_id)) - spot_grp.attrs.create('hkl', np.array(hkl, dtype=int)) - spot_grp.attrs.create('predicted_angles', pangs) - spot_grp.attrs.create('predicted_xy', pxy) - if mangs is None: - mangs = np.nan * np.ones(3) - spot_grp.attrs.create('measured_angles', mangs) - if mxy is None: - mxy = np.nan * np.ones(3) - spot_grp.attrs.create('measured_xy', mxy) - - # get centers crds from edge arrays - # FIXME: export full coordinate arrays, or just center vectors??? - # - # ome_crd, eta_crd, tth_crd = np.meshgrid( - # ome_centers, - # centers_of_edge_vec(eta_edges), - # centers_of_edge_vec(tth_edges), - # indexing='ij') - # - # ome_dim, eta_dim, tth_dim = spot_data.shape - - # !!! 
for now just exporting center vectors for spot_data - tth_crd = centers_of_edge_vec(tth_edges) - eta_crd = centers_of_edge_vec(eta_edges) - - shuffle_data = True # reduces size by 20% - spot_grp.create_dataset( - 'tth_crd', - data=tth_crd, - compression="gzip", - compression_opts=gzip, - shuffle=shuffle_data, - ) - spot_grp.create_dataset( - 'eta_crd', - data=eta_crd, - compression="gzip", - compression_opts=gzip, - shuffle=shuffle_data, - ) - spot_grp.create_dataset( - 'ome_crd', - data=ome_centers, - compression="gzip", - compression_opts=gzip, - shuffle=shuffle_data, - ) - spot_grp.create_dataset( - 'xy_centers', - data=xy_centers, - compression="gzip", - compression_opts=gzip, - shuffle=shuffle_data, - ) - spot_grp.create_dataset( - 'ij_centers', - data=ijs, - compression="gzip", - compression_opts=gzip, - shuffle=shuffle_data, - ) - spot_grp.create_dataset( - 'frame_indices', - data=fi, - compression="gzip", - compression_opts=gzip, - shuffle=shuffle_data, - ) - spot_grp.create_dataset( - 'intensities', - data=spot_data, - compression="gzip", - compression_opts=gzip, - shuffle=shuffle_data, - ) - return - - -class GenerateEtaOmeMaps(object): - """ - eta-ome map class derived from new image_series and YAML config - - ...for now... - - must provide: - - self.dataStore - self.planeData - self.iHKLList - self.etaEdges # IN RADIANS - self.omeEdges # IN RADIANS - self.etas # IN RADIANS - self.omegas # IN RADIANS - - """ - - def __init__( - self, - image_series_dict, - instrument, - plane_data, - active_hkls=None, - eta_step=0.25, - threshold=None, - ome_period=(0, 360), - ): - """ - image_series must be OmegaImageSeries class - instrument_params must be a dict (loaded from yaml spec) - active_hkls must be a list (required for now) - - FIXME: get rid of omega period; should get it from imageseries - """ - - self._planeData = plane_data - - # ???: change name of iHKLList? - # ???: can we change the behavior of iHKLList? - if active_hkls is None: - self._iHKLList = plane_data.getHKLID(plane_data.hkls, master=True) - n_rings = len(self._iHKLList) - else: - assert hasattr( - active_hkls, '__len__' - ), "active_hkls must be an iterable with __len__" - self._iHKLList = active_hkls - n_rings = len(active_hkls) - - # grab a det key and corresponding imageseries (first will do) - # !!! assuming that the imageseries for all panels - # have the same length and omegas - det_key, this_det_ims = next(iter(image_series_dict.items())) - - # handle omegas - # !!! for multi wedge, enforncing monotonicity - # !!! wedges also cannot overlap or span more than 360 - omegas_array = this_det_ims.metadata['omega'] # !!! DEGREES - delta_ome = omegas_array[0][-1] - omegas_array[0][0] - frame_mask = None - ome_period = omegas_array[0, 0] + np.r_[0.0, 360.0] # !!! be careful - if this_det_ims.omegawedges.nwedges > 1: - delta_omes = [ - (i['ostop'] - i['ostart']) / i['nsteps'] - for i in this_det_ims.omegawedges.wedges - ] - check_wedges = mutil.uniqueVectors( - np.atleast_2d(delta_omes), tol=1e-6 - ).squeeze() - assert ( - check_wedges.size == 1 - ), "all wedges must have the same delta omega to 1e-6" - # grab representative delta ome - # !!! assuming positive delta consistent with OmegaImageSeries - delta_ome = delta_omes[0] - - # grab full-range start/stop - # !!! be sure to map to the same period to enable arithmatic - # ??? safer to do this way rather than just pulling from - # the omegas attribute? - owedges = this_det_ims.omegawedges.wedges - ostart = owedges[0]['ostart'] # !!! 
DEGREES - ostop = float( - mapAngle(owedges[-1]['ostop'], ome_period, units='degrees') - ) - # compute total nsteps - # FIXME: need check for roundoff badness - nsteps = int((ostop - ostart) / delta_ome) - ome_edges_full = np.linspace( - ostart, ostop, num=nsteps + 1, endpoint=True - ) - omegas_array = np.vstack( - [ome_edges_full[:-1], ome_edges_full[1:]] - ).T - ome_centers = np.average(omegas_array, axis=1) - - # use OmegaImageSeries method to determine which bins have data - # !!! this array has -1 outside a wedge - # !!! again assuming the valid frame order increases monotonically - frame_mask = np.array( - [ - this_det_ims.omega_to_frame(ome)[0] != -1 - for ome in ome_centers - ] - ) - - # ???: need to pass a threshold? - eta_mapping, etas = instrument.extract_polar_maps( - plane_data, - image_series_dict, - active_hkls=active_hkls, - threshold=threshold, - tth_tol=None, - eta_tol=eta_step, - ) - - # for convenience grab map shape from first - map_shape = next(iter(eta_mapping.values())).shape[1:] - - # pack all detectors with masking - # FIXME: add omega masking - data_store = [] - for i_ring in range(n_rings): - # first handle etas - full_map = np.zeros(map_shape, dtype=float) - nan_mask_full = np.zeros( - (len(eta_mapping), map_shape[0], map_shape[1]) - ) - i_p = 0 - for det_key, eta_map in eta_mapping.items(): - nan_mask = ~np.isnan(eta_map[i_ring]) - nan_mask_full[i_p] = nan_mask - full_map[nan_mask] += eta_map[i_ring][nan_mask] - i_p += 1 - re_nan_these = np.sum(nan_mask_full, axis=0) == 0 - full_map[re_nan_these] = np.nan - - # now omegas - if frame_mask is not None: - # !!! must expand row dimension to include - # skipped omegas - tmp = np.ones((len(frame_mask), map_shape[1])) * np.nan - tmp[frame_mask, :] = full_map - full_map = tmp - data_store.append(full_map) - self._dataStore = data_store - - # set required attributes - self._omegas = mapAngle( - np.radians(np.average(omegas_array, axis=1)), - np.radians(ome_period), - ) - self._omeEdges = mapAngle( - np.radians(np.r_[omegas_array[:, 0], omegas_array[-1, 1]]), - np.radians(ome_period), - ) - - # !!! must avoid the case where omeEdges[0] = omeEdges[-1] for the - # indexer to work properly - if abs(self._omeEdges[0] - self._omeEdges[-1]) <= ct.sqrt_epsf: - # !!! 
SIGNED delta ome
-            del_ome = np.radians(omegas_array[0, 1] - omegas_array[0, 0])
-            self._omeEdges[-1] = self._omeEdges[-2] + del_ome
-
-        # handle etas
-        # WARNING: unlike the omegas in imageseries metadata,
-        # these are in RADIANS and represent bin centers
-        self._etaEdges = etas
-        self._etas = self._etaEdges[:-1] + 0.5 * np.radians(eta_step)
-
-    @property
-    def dataStore(self):
-        return self._dataStore
-
-    @property
-    def planeData(self):
-        return self._planeData
-
-    @property
-    def iHKLList(self):
-        return np.atleast_1d(self._iHKLList).flatten()
-
-    @property
-    def etaEdges(self):
-        return self._etaEdges
-
-    @property
-    def omeEdges(self):
-        return self._omeEdges
-
-    @property
-    def etas(self):
-        return self._etas
-
-    @property
-    def omegas(self):
-        return self._omegas
-
-    def save(self, filename):
-        xrdutil.EtaOmeMaps.save_eta_ome_maps(self, filename)
-
-
-def _generate_ring_params(tthr, ptth, peta, eta_edges, delta_eta):
-    # mark pixels in the spec'd tth range
-    pixels_in_tthr = np.logical_and(ptth >= tthr[0], ptth <= tthr[1])
-
-    # catch case where ring isn't on detector
-    if not np.any(pixels_in_tthr):
-        return None
-
-    pixel_ids = np.where(pixels_in_tthr)
-
-    # grab relevant eta coords using histogram
-    pixel_etas = peta[pixel_ids]
-    reta_hist = histogram(pixel_etas, eta_edges)
-    bins_on_detector = np.where(reta_hist)[0]
-
-    return pixel_etas, eta_edges, pixel_ids, bins_on_detector
-
-
-def run_fast_histogram(x, bins, weights=None):
-    return histogram1d(x, len(bins) - 1, (bins[0], bins[-1]), weights=weights)
-
-
-def run_numpy_histogram(x, bins, weights=None):
-    return np.histogram(x, bins=bins, weights=weights)[0]
-
-
-histogram = run_fast_histogram if fast_histogram else run_numpy_histogram
-
-
-def _run_histograms(rows, ims, tth_ranges, ring_maps, ring_params, threshold):
-    for i_row in range(*rows):
-        image = ims[i_row]
-
-        # handle threshold if specified
-        if threshold is not None:
-            # !!! NaNs get preserved
-            image = np.array(image)
-            image[image < threshold] = 0.0
-
-        for i_r, tthr in enumerate(tth_ranges):
-            this_map = ring_maps[i_r]
-            params = ring_params[i_r]
-            if not params:
-                # We are supposed to skip this ring...
-                continue
-
-            # Unpack the params
-            pixel_etas, eta_edges, pixel_ids, bins_on_detector = params
-            result = histogram(pixel_etas, eta_edges, weights=image[pixel_ids])
-
-            # Note that this preserves nan values for bins not on the detector.
-            this_map[i_row, bins_on_detector] = result[bins_on_detector]
-
-
-def _extract_detector_line_positions(
-    iter_args,
-    plane_data,
-    tth_tol,
-    eta_tol,
-    eta_centers,
-    npdiv,
-    collapse_tth,
-    collapse_eta,
-    do_interpolation,
-    do_fitting,
-    fitting_kwargs,
-    tth_distortion,
-    max_workers,
-):
-    panel, instr_cfg, images, pbp = iter_args
-
-    if images.ndim == 2:
-        images = np.tile(images, (1, 1, 1))
-    elif images.ndim != 3:
-        raise RuntimeError("images must be 2- or 3-d")
-
-    # make rings
-    # !!! adding tth_distortion pass-through; comes in as dict over panels
-    tth_distr_cls = None
-    if tth_distortion is not None:
-        tth_distr_cls = tth_distortion[panel.name]
-
-    pow_angs, pow_xys, tth_ranges = panel.make_powder_rings(
-        plane_data,
-        merge_hkls=True,
-        delta_tth=tth_tol,
-        delta_eta=eta_tol,
-        eta_list=eta_centers,
-        tth_distortion=tth_distr_cls,
-    )
-
-    tth_tols = np.degrees(np.hstack([i[1] - i[0] for i in tth_ranges]))
-
-    # !!!
this is only needed if doing fitting - if isinstance(plane_data, PlaneData): - tth_idx, tth_ranges = plane_data.getMergedRanges(cullDupl=True) - tth_ref = plane_data.getTTh() - tth0 = [np.degrees(tth_ref[i]) for i in tth_idx] - else: - tth0 = plane_data - - # ================================================================= - # LOOP OVER RING SETS - # ================================================================= - pbar_rings = partial( - tqdm, total=len(pow_angs), desc="Ringset", position=pbp - ) - - kwargs = { - 'instr_cfg': instr_cfg, - 'panel': panel, - 'eta_tol': eta_tol, - 'npdiv': npdiv, - 'collapse_tth': collapse_tth, - 'collapse_eta': collapse_eta, - 'images': images, - 'do_interpolation': do_interpolation, - 'do_fitting': do_fitting, - 'fitting_kwargs': fitting_kwargs, - 'tth_distortion': tth_distr_cls, - } - func = partial(_extract_ring_line_positions, **kwargs) - iter_arg = zip(pow_angs, pow_xys, tth_tols, tth0) - with ProcessPoolExecutor( - mp_context=constants.mp_context, max_workers=max_workers - ) as executor: - return list(pbar_rings(executor.map(func, iter_arg))) - - -def _extract_ring_line_positions( - iter_args, - instr_cfg, - panel, - eta_tol, - npdiv, - collapse_tth, - collapse_eta, - images, - do_interpolation, - do_fitting, - fitting_kwargs, - tth_distortion, -): - """ - Extracts data for a single Debye-Scherrer ring . - - Parameters - ---------- - iter_args : tuple - (angs [radians], - xys [mm], - tth_tol [deg], - this_tth0 [deg]) - instr_cfg : TYPE - DESCRIPTION. - panel : TYPE - DESCRIPTION. - eta_tol : TYPE - DESCRIPTION. - npdiv : TYPE - DESCRIPTION. - collapse_tth : TYPE - DESCRIPTION. - collapse_eta : TYPE - DESCRIPTION. - images : TYPE - DESCRIPTION. - do_interpolation : TYPE - DESCRIPTION. - do_fitting : TYPE - DESCRIPTION. - fitting_kwargs : TYPE - DESCRIPTION. - tth_distortion : TYPE - DESCRIPTION. - - Yields - ------ - patch_data : TYPE - DESCRIPTION. - - """ - # points are already checked to fall on detector - angs, xys, tth_tol, this_tth0 = iter_args - - # SS 01/31/25 noticed some nans in xys even after clipping - # going to do another round of masking to get rid of those - nan_mask = ~np.logical_or(np.isnan(xys), np.isnan(angs)) - nan_mask = np.logical_or.reduce(nan_mask, 1) - if angs.ndim > 1 and xys.ndim > 1: - angs = angs[nan_mask, :] - xys = xys[nan_mask, :] - - n_images = len(images) - native_area = panel.pixel_area - - # make the tth,eta patches for interpolation - patches = xrdutil.make_reflection_patches( - instr_cfg, - angs, - panel.angularPixelSize(xys), - tth_tol=tth_tol, - eta_tol=eta_tol, - npdiv=npdiv, - quiet=True, - ) - - # loop over patches - # FIXME: fix initialization - if collapse_tth: - patch_data = np.zeros((len(angs), n_images)) - else: - patch_data = [] - for i_p, patch in enumerate(patches): - # strip relevant objects out of current patch - vtx_angs, vtx_xys, conn, areas, xys_eval, ijs = patch - - # need to reshape eval pts for interpolation - xy_eval = np.vstack([xys_eval[0].flatten(), xys_eval[1].flatten()]).T - - _, on_panel = panel.clip_to_panel(xy_eval) - - if np.any(~on_panel): - continue - - if collapse_tth: - ang_data = (vtx_angs[0][0, [0, -1]], vtx_angs[1][[0, -1], 0]) - elif collapse_eta: - # !!! 
yield the tth bin centers - tth_centers = np.average( - np.vstack([vtx_angs[0][0, :-1], vtx_angs[0][0, 1:]]), axis=0 - ) - ang_data = (tth_centers, angs[i_p][-1]) - if do_fitting: - fit_data = [] - else: - ang_data = vtx_angs - - prows, pcols = areas.shape - area_fac = areas / float(native_area) - - # interpolate - if not collapse_tth: - ims_data = [] - for j_p in np.arange(len(images)): - # catch interpolation type - image = images[j_p] - if do_interpolation: - p_img = ( - panel.interpolate_bilinear( - xy_eval, - image, - ).reshape(prows, pcols) - * area_fac - ) - else: - p_img = image[ijs[0], ijs[1]] * area_fac - - # catch flat spectrum data, which will cause - # fitting to fail. - # ???: best here, or make fitting handle it? - mxval = np.max(p_img) - mnval = np.min(p_img) - if mxval == 0 or (1.0 - mnval / mxval) < 0.01: - continue - - # catch collapsing options - if collapse_tth: - patch_data[i_p, j_p] = np.average(p_img) - # ims_data.append(np.sum(p_img)) - else: - if collapse_eta: - lineout = np.average(p_img, axis=0) - ims_data.append(lineout) - if do_fitting: - if tth_distortion is not None: - # must correct tth0 - tmp = tth_distortion.apply( - panel.angles_to_cart( - np.vstack( - [ - np.radians(this_tth0), - np.tile( - ang_data[-1], len(this_tth0) - ), - ] - ).T - ), - return_nominal=True, - ) - pk_centers = np.degrees(tmp[:, 0]) - else: - pk_centers = this_tth0 - kwargs = { - 'tth_centers': np.degrees(tth_centers), - 'lineout': lineout, - 'tth_pred': pk_centers, - **fitting_kwargs, - } - result = fit_ring(**kwargs) - fit_data.append(result) - else: - ims_data.append(p_img) - if not collapse_tth: - output = [ang_data, ims_data] - if do_fitting: - output.append(fit_data) - patch_data.append(output) - - return patch_data - - -DETECTOR_TYPES = { - 'planar': PlanarDetector, - 'cylindrical': CylindricalDetector, -} - - -class BufferShapeMismatchError(RuntimeError): - # This is raised when the buffer shape does not match the detector shape - pass - - -@contextmanager -def switch_xray_source(instr: HEDMInstrument, xray_source: Optional[str]): - if xray_source is None: - # If the x-ray source is None, leave it as the current active one - yield - return - - prev_beam_name = instr.active_beam_name - instr.active_beam_name = xray_source - try: - yield - finally: - instr.active_beam_name = prev_beam_name diff --git a/hexrd/laue/material/crystallography.py b/hexrd/laue/material/crystallography.py deleted file mode 100644 index 29e621972..000000000 --- a/hexrd/laue/material/crystallography.py +++ /dev/null @@ -1,2260 +0,0 @@ -# -*- coding: utf-8 -*- -# ============================================================================= -# Copyright (c) 2012, Lawrence Livermore National Security, LLC. -# Produced at the Lawrence Livermore National Laboratory. -# Written by Joel Bernier and others. -# LLNL-CODE-529294. -# All rights reserved. -# -# This file is part of HEXRD. For details on dowloading the source, -# see the file COPYING. -# -# Please also see the file LICENSE. -# -# This program is free software; you can redistribute it and/or modify it under -# the terms of the GNU Lesser General Public License (as published by the Free -# Software Foundation) version 2.1 dated February 1999. -# -# This program is distributed in the hope that it will be useful, but -# WITHOUT ANY WARRANTY; without even the IMPLIED WARRANTY OF MERCHANTABILITY -# or FITNESS FOR A PARTICULAR PURPOSE. See the terms and conditions of the -# GNU General Public License for more details. 
-# -# You should have received a copy of the GNU Lesser General Public -# License along with this program (see file LICENSE); if not, write to -# the Free Software Foundation, Inc., 59 Temple Place, Suite 330, -# Boston, MA 02111-1307 USA or visit . -# ============================================================================= -import re -import copy -import csv -import os -from math import pi -from typing import Optional, Union, Dict, List, Tuple - -import numpy as np - -from hexrd.core.material.unitcell import unitcell -from hexrd.core.deprecation import deprecated -from hexrd.core import constants -from hexrd.core.matrixutil import unitVector -from hexrd.core.rotations import ( - rotMatOfExpMap, - mapAngle, - applySym, - ltypeOfLaueGroup, - quatOfLaueGroup, -) -from hexrd.core.transforms import xfcapi -from hexrd.core import valunits -from hexrd.core.valunits import toFloat -from hexrd.core.constants import d2r, r2d, sqrt3by2, epsf, sqrt_epsf - -"""module vars""" - -# units -dUnit = 'angstrom' -outputDegrees = False -outputDegrees_bak = outputDegrees - - -def hklToStr(hkl: np.ndarray) -> str: - """ - Converts hkl representation to a string. - - Parameters - ---------- - hkl : np.ndarray - 3 element list of h, k, and l values (Miller indices). - - Returns - ------- - str - Space-separated string representation of h, k, and l values. - - """ - return re.sub(r'[\[\]\(\)\{\},]', '', str(hkl)) - - -def tempSetOutputDegrees(val: bool) -> None: - """ - Set the global outputDegrees flag temporarily. Can be reverted with - revertOutputDegrees(). - - Parameters - ---------- - val : bool - True to output angles in degrees, False to output angles in radians. - - Returns - ------- - None - - """ - global outputDegrees, outputDegrees_bak - outputDegrees_bak = outputDegrees - outputDegrees = val - - -def revertOutputDegrees() -> None: - """ - Revert the effect of tempSetOutputDegrees(), resetting the outputDegrees - flag to its previous value (True to output in degrees, False for radians). - - Returns - ------- - None - """ - global outputDegrees, outputDegrees_bak - outputDegrees = outputDegrees_bak - - -def cosineXform( - a: np.ndarray, b: np.ndarray, c: np.ndarray -) -> tuple[np.ndarray, np.ndarray]: - """ - Spherical trig transform to take alpha, beta, gamma to expressions - for cos(alpha*). See ref below. - - [1] R. J. Neustadt, F. W. Cagle, Jr., and J. Waser, ``Vector algebra and - the relations between direct and reciprocal lattice quantities''. Acta - Cryst. (1968), A24, 247--248 - - Parameters - ---------- - a : np.ndarray - List of alpha angle values (radians). - b : np.ndarray - List of beta angle values (radians). - c : np.ndarray - List of gamma angle values (radians). - - Returns - ------- - np.ndarray - List of cos(alpha*) values. - np.ndarray - List of sin(alpha*) values. - - """ - cosar = (np.cos(b) * np.cos(c) - np.cos(a)) / (np.sin(b) * np.sin(c)) - sinar = np.sqrt(1 - cosar**2) - return cosar, sinar - - -def processWavelength(arg: Union[valunits.valWUnit, float]) -> float: - """ - Convert an energy value to a wavelength. If argument has units of length - or energy, will convert to globally specified unit type for wavelength - (dUnit). If argument is a scalar, assumed input units are keV. 
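-
-    An illustrative sketch (not from the original source; output values are
-    approximate, since the exact keV-to-angstrom constant comes from
-    hexrd.core.constants):
-
-        processWavelength(80.0)  # bare scalar treated as keV -> ~0.155
-        processWavelength(valunits.valWUnit('w', 'length', 1.54, 'angstrom'))
-        # length-valued input is returned converted to dUnit (angstrom)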
- """ - if isinstance(arg, valunits.valWUnit): - # arg is a valunits.valWUnit object - if arg.isLength(): - return arg.getVal(dUnit) - elif arg.isEnergy(): - e = arg.getVal('keV') - return valunits.valWUnit( - 'wavelength', 'length', constants.keVToAngstrom(e), 'angstrom' - ).getVal(dUnit) - else: - raise RuntimeError('do not know what to do with ' + str(arg)) - else: - # !!! assuming arg is in keV - return valunits.valWUnit( - 'wavelength', 'length', constants.keVToAngstrom(arg), 'angstrom' - ).getVal(dUnit) - - -def latticeParameters(lvec): - """ - Generates direct and reciprocal lattice vector components in a - crystal-relative RHON basis, X. The convention for fixing X to the - lattice is such that a || x1 and c* || x3, where a and c* are - direct and reciprocal lattice vectors, respectively. - """ - lnorm = np.sqrt(np.sum(lvec**2, 0)) - - a = lnorm[0] - b = lnorm[1] - c = lnorm[2] - - ahat = lvec[:, 0] / a - bhat = lvec[:, 1] / b - chat = lvec[:, 2] / c - - gama = np.arccos(np.dot(ahat, bhat)) - beta = np.arccos(np.dot(ahat, chat)) - alfa = np.arccos(np.dot(bhat, chat)) - if outputDegrees: - gama = r2d * gama - beta = r2d * beta - alfa = r2d * alfa - - return [a, b, c, alfa, beta, gama] - - -def latticePlanes( - hkls: np.ndarray, - lparms: np.ndarray, - ltype: Optional[str] = 'cubic', - wavelength: Optional[float] = 1.54059292, - strainMag: Optional[float] = None, -) -> Dict[str, np.ndarray]: - """ - Generates lattice plane data in the direct lattice for a given set - of Miller indices. Vector components are written in the - crystal-relative RHON basis, X. The convention for fixing X to the - lattice is such that a || x1 and c* || x3, where a and c* are - direct and reciprocal lattice vectors, respectively. - - USAGE: - - planeInfo = latticePlanes(hkls, lparms, **kwargs) - - INPUTS: - - 1) hkls (3 x n float ndarray) is the array of Miller indices for - the planes of interest. The vectors are assumed to be - concatenated along the 1-axis (horizontal). - - 2) lparms (1 x m float list) is the array of lattice parameters, - where m depends on the symmetry group (see below). - - The following optional arguments are recognized: - - 3) ltype=(string) is a string representing the symmetry type of - the implied Laue group. The 11 available choices are shown - below. The default value is 'cubic'. Note that each group - expects a lattice parameter array of the indicated length - and order. - - latticeType lparms - ----------- ------------ - 'cubic' a - 'hexagonal' a, c - 'trigonal' a, c - 'rhombohedral' a, alpha (in degrees) - 'tetragonal' a, c - 'orthorhombic' a, b, c - 'monoclinic' a, b, c, beta (in degrees) - 'triclinic' a, b, c, alpha, beta, gamma (in degrees) - - 4) wavelength= is a value represented the wavelength in - Angstroms to calculate bragg angles for. The default value - is for Cu K-alpha radiation (1.54059292 Angstrom) - - 5) strainMag=None - - OUTPUTS: - - 1) planeInfo is a dictionary containing the following keys/items: - - normals (3, n) double array array of the components to the - unit normals for each {hkl} in - X (horizontally concatenated) - - dspacings (n, ) double array array of the d-spacings for - each {hkl} - - tThetas (n, ) double array array of the Bragg angles for - each {hkl} relative to the - specified wavelength - - NOTES: - - *) This function is effectively a wrapper to 'latticeVectors'. - See 'help(latticeVectors)' for additional info. 
- - *) Lattice plane d-spacings are calculated from the reciprocal - lattice vectors specified by {hkl} as shown in Appendix 1 of - [1]. - - REFERENCES: - - [1] B. D. Cullity, ``Elements of X-Ray Diffraction, 2 - ed.''. Addison-Wesley Publishing Company, Inc., 1978. ISBN - 0-201-01174-3 - - """ - location = 'latticePlanes' - - assert ( - hkls.shape[0] == 3 - ), f"hkls aren't column vectors in call to '{location}'!" - - tag = ltype - wlen = wavelength - - # get B - L = latticeVectors(lparms, tag) - - # get G-vectors -- reciprocal vectors in crystal frame - G = np.dot(L['B'], hkls) - - # magnitudes - d = 1 / np.sqrt(np.sum(G**2, 0)) - - aconv = 1.0 - if outputDegrees: - aconv = r2d - - # two thetas - sth = wlen / 2.0 / d - mask = np.abs(sth) < 1.0 - tth = np.zeros(sth.shape) - - tth[~mask] = np.nan - tth[mask] = aconv * 2.0 * np.arcsin(sth[mask]) - - p = dict(normals=unitVector(G), dspacings=d, tThetas=tth) - - if strainMag is not None: - p['tThetasLo'] = np.zeros(sth.shape) - p['tThetasHi'] = np.zeros(sth.shape) - - mask = (np.abs(wlen / 2.0 / (d * (1.0 + strainMag))) < 1.0) & ( - np.abs(wlen / 2.0 / (d * (1.0 - strainMag))) < 1.0 - ) - - p['tThetasLo'][~mask] = np.nan - p['tThetasHi'][~mask] = np.nan - - p['tThetasLo'][mask] = ( - aconv * 2 * np.arcsin(wlen / 2.0 / (d[mask] * (1.0 + strainMag))) - ) - p['tThetasHi'][mask] = ( - aconv * 2 * np.arcsin(wlen / 2.0 / (d[mask] * (1.0 - strainMag))) - ) - - return p - - -def latticeVectors( - lparms: np.ndarray, - tag: Optional[str] = 'cubic', - radians: Optional[bool] = False, -) -> Dict[str, Union[np.ndarray, float]]: - """ - Generates direct and reciprocal lattice vector components in a - crystal-relative RHON basis, X. The convention for fixing X to the - lattice is such that a || x1 and c* || x3, where a and c* are - direct and reciprocal lattice vectors, respectively. - - USAGE: - - lattice = LatticeVectors(lparms, ) - - INPUTS: - - 1) lparms (1 x n float list) is the array of lattice parameters, - where n depends on the symmetry group (see below). - - 2) tag (string) is a case-insensitive string representing the - symmetry type of the implied Laue group. The 11 available choices - are shown below. The default value is 'cubic'. Note that each - group expects a lattice parameter array of the indicated length - and order. - - latticeType lparms - ----------- ------------ - 'cubic' a - 'hexagonal' a, c - 'trigonal' a, c - 'rhombohedral' a, alpha (in degrees) - 'tetragonal' a, c - 'orthorhombic' a, b, c - 'monoclinic' a, b, c, beta (in degrees) - 'triclinic' a, b, c, alpha, beta, gamma (in degrees) - - The following optional arguments are recognized: - - 3) radians= is a boolean flag indicating usage of radians rather - than degrees, defaults to false. - - OUTPUTS: - - 1) lattice is a dictionary containing the following keys/items: - - F (3, 3) double array transformation matrix taking - componenents in the direct - lattice (i.e. {uvw}) to the - reference, X - - B (3, 3) double array transformation matrix taking - componenents in the reciprocal - lattice (i.e. 
{hkl}) to X - - BR (3, 3) double array transformation matrix taking - componenents in the reciprocal - lattice to the Fable reference - frame (see notes) - - U0 (3, 3) double array transformation matrix - (orthogonal) taking - componenents in the - Fable reference frame to X - - vol double the unit cell volume - - - dparms (6, ) double list the direct lattice parameters: - [a b c alpha beta gamma] - - rparms (6, ) double list the reciprocal lattice - parameters: - [a* b* c* alpha* beta* gamma*] - - NOTES: - - *) The conventions used for assigning a RHON basis, - X -> {x1, x2, x3}, to each point group are consistent with - those published in Appendix B of [1]. Namely: a || x1 and - c* || x3. This differs from the convention chosen by the Fable - group, where a* || x1 and c || x3 [2]. - - *) The unit cell angles are defined as follows: - alpha=acos(b'*c/|b||c|), beta=acos(c'*a/|c||a|), and - gamma=acos(a'*b/|a||b|). - - *) The reciprocal lattice vectors are calculated using the - crystallographic convention, where the prefactor of 2*pi is - omitted. In this convention, the reciprocal lattice volume is - 1/V. - - *) Several relations from [3] were employed in the component - calculations. - - REFERENCES: - - [1] J. F. Nye, ``Physical Properties of Crystals: Their - Representation by Tensors and Matrices''. Oxford University - Press, 1985. ISBN 0198511655 - - [2] E. M. Lauridsen, S. Schmidt, R. M. Suter, and H. F. Poulsen, - ``Tracking: a method for structural characterization of grains - in powders or polycrystals''. J. Appl. Cryst. (2001). 34, - 744--750 - - [3] R. J. Neustadt, F. W. Cagle, Jr., and J. Waser, ``Vector - algebra and the relations between direct and reciprocal - lattice quantities''. Acta Cryst. (1968), A24, 247--248 - - - """ - - # build index for sorting out lattice parameters - lattStrings = [ - 'cubic', - 'hexagonal', - 'trigonal', - 'rhombohedral', - 'tetragonal', - 'orthorhombic', - 'monoclinic', - 'triclinic', - ] - - if radians: - aconv = 1.0 - else: - aconv = pi / 180.0 # degToRad - deg90 = pi / 2.0 - deg120 = 2.0 * pi / 3.0 - # - if tag == lattStrings[0]: - # cubic - cellparms = np.r_[np.tile(lparms[0], (3,)), deg90 * np.ones((3,))] - elif tag == lattStrings[1] or tag == lattStrings[2]: - # hexagonal | trigonal (hex indices) - cellparms = np.r_[ - lparms[0], lparms[0], lparms[1], deg90, deg90, deg120 - ] - elif tag == lattStrings[3]: - # rhombohedral - cellparms = np.r_[ - np.tile(lparms[0], (3,)), np.tile(aconv * lparms[1], (3,)) - ] - elif tag == lattStrings[4]: - # tetragonal - cellparms = np.r_[lparms[0], lparms[0], lparms[1], deg90, deg90, deg90] - elif tag == lattStrings[5]: - # orthorhombic - cellparms = np.r_[lparms[0], lparms[1], lparms[2], deg90, deg90, deg90] - elif tag == lattStrings[6]: - # monoclinic - cellparms = np.r_[ - lparms[0], lparms[1], lparms[2], deg90, aconv * lparms[3], deg90 - ] - elif tag == lattStrings[7]: - # triclinic - cellparms = np.r_[ - lparms[0], - lparms[1], - lparms[2], - aconv * lparms[3], - aconv * lparms[4], - aconv * lparms[5], - ] - else: - raise RuntimeError(f'lattice tag "{tag}" is not recognized') - - alpha, beta, gamma = cellparms[3:6] - cosalfar, sinalfar = cosineXform(alpha, beta, gamma) - - a = cellparms[0] * np.r_[1, 0, 0] - b = cellparms[1] * np.r_[np.cos(gamma), np.sin(gamma), 0] - c = ( - cellparms[2] - * np.r_[ - np.cos(beta), -cosalfar * np.sin(beta), sinalfar * np.sin(beta) - ] - ) - - ad = np.sqrt(np.sum(a**2)) - bd = np.sqrt(np.sum(b**2)) - cd = np.sqrt(np.sum(c**2)) - - # Cell volume - V = np.dot(a, 
np.cross(b, c)) - - # F takes components in the direct lattice to X - F = np.c_[a, b, c] - - # Reciprocal lattice vectors - astar = np.cross(b, c) / V - bstar = np.cross(c, a) / V - cstar = np.cross(a, b) / V - - # and parameters - ar = np.sqrt(np.sum(astar**2)) - br = np.sqrt(np.sum(bstar**2)) - cr = np.sqrt(np.sum(cstar**2)) - - alfar = np.arccos(np.dot(bstar, cstar) / br / cr) - betar = np.arccos(np.dot(cstar, astar) / cr / ar) - gamar = np.arccos(np.dot(astar, bstar) / ar / br) - - # B takes components in the reciprocal lattice to X - B = np.c_[astar, bstar, cstar] - - cosalfar2, sinalfar2 = cosineXform(alfar, betar, gamar) - - afable = ar * np.r_[1, 0, 0] - bfable = br * np.r_[np.cos(gamar), np.sin(gamar), 0] - cfable = ( - cr - * np.r_[ - np.cos(betar), - -cosalfar2 * np.sin(betar), - sinalfar2 * np.sin(betar), - ] - ) - - BR = np.c_[afable, bfable, cfable] - U0 = np.dot(B, np.linalg.inv(BR)) - if outputDegrees: - dparms = np.r_[ad, bd, cd, r2d * np.r_[alpha, beta, gamma]] - rparms = np.r_[ar, br, cr, r2d * np.r_[alfar, betar, gamar]] - else: - dparms = np.r_[ad, bd, cd, np.r_[alpha, beta, gamma]] - rparms = np.r_[ar, br, cr, np.r_[alfar, betar, gamar]] - - return { - 'F': F, - 'B': B, - 'BR': BR, - 'U0': U0, - 'vol': V, - 'dparms': dparms, - 'rparms': rparms, - } - - -def hexagonalIndicesFromRhombohedral(hkl): - """ - converts rhombohedral hkl to hexagonal indices - """ - HKL = np.zeros((3, hkl.shape[1]), dtype='int') - - HKL[0, :] = hkl[0, :] - hkl[1, :] - HKL[1, :] = hkl[1, :] - hkl[2, :] - HKL[2, :] = hkl[0, :] + hkl[1, :] + hkl[2, :] - - return HKL - - -def rhombohedralIndicesFromHexagonal(HKL): - """ - converts hexagonal hkl to rhombohedral indices - """ - hkl = np.zeros((3, HKL.shape[1]), dtype='int') - - hkl[0, :] = 2 * HKL[0, :] + HKL[1, :] + HKL[2, :] - hkl[1, :] = -HKL[0, :] + HKL[1, :] + HKL[2, :] - hkl[2, :] = -HKL[0, :] - 2 * HKL[1, :] + HKL[2, :] - - hkl = hkl / 3.0 - return hkl - - -def rhombohedralParametersFromHexagonal(a_h, c_h): - """ - converts hexagonal lattice parameters (a, c) to rhombohedral - lattice parameters (a, alpha) - """ - a_r = np.sqrt(3 * a_h**2 + c_h**2) / 3.0 - alfa_r = 2 * np.arcsin(3.0 / (2 * np.sqrt(3 + (c_h / a_h) ** 2))) - if outputDegrees: - alfa_r = r2d * alfa_r - return a_r, alfa_r - - -def convert_Miller_direction_to_cartesian(uvw, a=1.0, c=1.0, normalize=False): - """ - Converts 3-index hexagonal Miller direction indices to components in the - crystal reference frame. - Parameters - ---------- - uvw : array_like - The (n, 3) array of 3-index hexagonal indices to convert. - a : scalar, optional - The `a` lattice parameter. The default value is 1. - c : scalar, optional - The `c` lattice parameter. The default value is 1. - normalize : bool, optional - Flag for whether or not to normalize output vectors - Returns - ------- - numpy.ndarray - The (n, 3) array of cartesian components associated with the input - direction indices. - Notes - ----- - 1) The [uv.w] the Miller-Bravais convention is in the hexagonal basis - {a1, a2, a3, c}. The basis for the output, {o1, o2, o3}, is - chosen such that - o1 || a1 - o3 || c - o2 = o3 ^ o1 - """ - u, v, w = np.atleast_2d(uvw).T - retval = np.vstack([1.5 * u * a, sqrt3by2 * a * (2 * v + u), w * c]) - if normalize: - return unitVector(retval).T - else: - return retval.T - - -def convert_Miller_direction_to_MillerBravias(uvw, suppress_redundant=True): - """ - Converts 3-index hexagonal Miller direction indices to 4-index - Miller-Bravais direction indices. 
- Parameters - ---------- - uvw : array_like - The (n, 3) array of 3-index hexagonal Miller indices to convert. - suppress_redundant : bool, optional - Flag to suppress the redundant 3rd index. The default is True. - Returns - ------- - numpy.ndarray - The (n, 3) or (n, 4) array -- depending on kwarg -- of Miller-Bravis - components associated with the input Miller direction indices. - Notes - ----- - * NOT for plane normals!!! - """ - u, v, w = np.atleast_2d(uvw).T - retval = np.vstack([(2 * u - v) / 3, (2 * v - u) / 3, w]).T - rem = np.vstack([np.mod(np.tile(i[0], 2), i[1:]) for i in retval]) - rem[abs(rem) < epsf] = np.nan - lcm = np.nanmin(rem, axis=1) - lcm[np.isnan(lcm)] = 1 - retval = retval / np.tile(lcm, (3, 1)).T - if suppress_redundant: - return retval - else: - t = np.atleast_2d(1 - np.sum(retval[:2], axis=1)).T - return np.hstack([retval[:, :2], t, np.atleast_2d(retval[:, 2]).T]) - - -def convert_MillerBravias_direction_to_Miller(UVW): - """ - Converts 4-index hexagonal Miller-Bravais direction indices to - 3-index Miller direction indices. - Parameters - ---------- - UVW : array_like - The (n, 3) array of **non-redundant** Miller-Bravais direction indices - to convert. - Returns - ------- - numpy.ndarray - The (n, 3) array of Miller direction indices associated with the - input Miller-Bravais indices. - Notes - ----- - * NOT for plane normals!!! - """ - U, V, W = np.atleast_2d(UVW).T - return np.vstack([2 * U + V, 2 * V + U, W]) - - -class PlaneData(object): - """ - Careful with ordering: Outputs are ordered by the 2-theta for the - hkl unless you get self._hkls directly, and this order can change - with changes in lattice parameters (lparms); setting and getting - exclusions works on the current hkl ordering, not the original - ordering (in self._hkls), but exclusions are stored in the - original ordering in case the hkl ordering does change with - lattice parameters - - if not None, tThWidth takes priority over strainMag in setting - two-theta ranges; changing strainMag automatically turns off - tThWidth - """ - - def __init__(self, hkls: Optional[np.ndarray], *args, **kwargs) -> None: - """ - Constructor for PlaneData - - Parameters - ---------- - hkls : np.ndarray - Miller indices to be used in the plane data. Can be None if - args is another PlaneData object - - *args - Unnamed arguments. Could be in the format of `lparms, laueGroup, - wavelength, strainMag`, or just a `PlaneData` object. - - **kwargs - Valid keyword arguments include: - - doTThSort - - exclusions - - tThMax - - tThWidth - """ - self._doTThSort = True - self._exclusions = None - self._tThMax = None - - if len(args) == 4: - lparms, laueGroup, wavelength, strainMag = args - tThWidth = None - self._wavelength = processWavelength(wavelength) - self._lparms = self._parseLParms(lparms) - elif len(args) == 1 and isinstance(args[0], PlaneData): - other = args[0] - lparms, laueGroup, wavelength, strainMag, tThWidth = ( - other.getParams() - ) - self._wavelength = wavelength - self._lparms = lparms - self._doTThSort = other._doTThSort - self._exclusions = other._exclusions - self._tThMax = other._tThMax - if hkls is None: - hkls = other._hkls - else: - raise NotImplementedError(f'args : {args}') - - self._laueGroup = laueGroup - self._hkls = copy.deepcopy(hkls) - self._strainMag = strainMag - self._structFact = np.ones(self._hkls.shape[1]) - self.tThWidth = tThWidth - - # ... 
need to implement tThMin too - if 'doTThSort' in kwargs: - self._doTThSort = kwargs.pop('doTThSort') - if 'exclusions' in kwargs: - self._exclusions = kwargs.pop('exclusions') - if 'tThMax' in kwargs: - self._tThMax = toFloat(kwargs.pop('tThMax'), 'radians') - if 'tThWidth' in kwargs: - self.tThWidth = kwargs.pop('tThWidth') - if len(kwargs) > 0: - raise RuntimeError( - f'have unparsed keyword arguments with keys: {kwargs.keys()}' - ) - - # This is only used to calculate the structure factor if invalidated - self._unitcell: unitcell = None - - self._calc() - - def _calc(self): - symmGroup = ltypeOfLaueGroup(self._laueGroup) - self._q_sym = quatOfLaueGroup(self._laueGroup) - _, latVecOps, hklDataList = PlaneData.makePlaneData( - self._hkls, - self._lparms, - self._q_sym, - symmGroup, - self._strainMag, - self.wavelength, - ) - 'sort by tTheta' - tThs = np.array( - [hklDataList[iHKL]['tTheta'] for iHKL in range(len(hklDataList))] - ) - if self._doTThSort: - # sorted hkl -> _hkl - # _hkl -> sorted hkl - self.tThSort = np.argsort(tThs) - self.tThSortInv = np.empty(len(hklDataList), dtype=int) - self.tThSortInv[self.tThSort] = np.arange(len(hklDataList)) - self.hklDataList = [hklDataList[iHKL] for iHKL in self.tThSort] - else: - self.tThSort = np.arange(len(hklDataList)) - self.tThSortInv = np.arange(len(hklDataList)) - self.hklDataList = hklDataList - self._latVecOps = latVecOps - self.nHKLs = len(self.getHKLs()) - - def __str__(self): - s = '========== plane data ==========\n' - s += 'lattice parameters:\n ' + str(self.lparms) + '\n' - s += f'two theta width: ({str(self.tThWidth)})\n' - s += f'strain magnitude: ({str(self.strainMag)})\n' - s += f'beam energy ({str(self.wavelength)})\n' - s += 'hkls: (%d)\n' % self.nHKLs - s += str(self.getHKLs()) - return s - - def getParams(self): - """ - Getter for the parameters of the plane data. - - Returns - ------- - tuple - The parameters of the plane data. In the order of - _lparams, _laueGroup, _wavelength, _strainMag, tThWidth - - """ - return ( - self._lparms, - self._laueGroup, - self._wavelength, - self._strainMag, - self.tThWidth, - ) - - def getNhklRef(self) -> int: - """ - Get the total number of hkl's in the plane data, not ignoring - ones that are excluded in exclusions. - - Returns - ------- - int - The total number of hkl's in the plane data. - """ - return len(self.hklDataList) - - @property - def hkls(self) -> np.ndarray: - """ - hStacked Hkls of the plane data (Miller indices). - """ - return self.getHKLs().T - - @hkls.setter - def hkls(self, hkls): - raise NotImplementedError('for now, not allowing hkls to be reset') - - @property - def tThMax(self) -> Optional[float]: - """ - Maximum 2-theta value of the plane data. - - float or None - """ - return self._tThMax - - @tThMax.setter - def tThMax(self, t_th_max: Union[float, valunits.valWUnit]) -> None: - self._tThMax = toFloat(t_th_max, 'radians') - - @property - def exclusions(self) -> np.ndarray: - """ - Excluded HKL's the plane data. - - Set as type np.ndarray, as a mask of length getNhklRef(), a list of - indices to be excluded, or a list of ranges of indices. - - Read as a mask of length getNhklRef(). 
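-
-        An illustrative sketch (pd is a hypothetical PlaneData instance; not
-        from the original source):
-
-            pd.exclusions = [0, 3]    # exclude hkls 0 and 3 by index
-            pd.exclusions = [[2, 5]]  # exclude the index range 2:5
-            pd.exclusions = None      # clear all exclusions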
- """ - retval = np.zeros(self.getNhklRef(), dtype=bool) - if self._exclusions is not None: - # report in current hkl ordering - retval[:] = self._exclusions[self.tThSortInv] - if self._tThMax is not None: - for iHKLr, hklData in enumerate(self.hklDataList): - if hklData['tTheta'] > self._tThMax: - retval[iHKLr] = True - return retval - - @exclusions.setter - def exclusions(self, new_exclusions: Optional[np.ndarray]) -> None: - excl = np.zeros(len(self.hklDataList), dtype=bool) - if new_exclusions is not None: - exclusions = np.atleast_1d(new_exclusions) - if len(exclusions) == len(self.hklDataList): - assert ( - exclusions.dtype == 'bool' - ), 'Exclusions should be bool if full length' - # convert from current hkl ordering to _hkl ordering - excl[:] = exclusions[self.tThSort] - else: - if len(exclusions.shape) == 1: - # treat exclusions as indices - excl[self.tThSort[exclusions]] = True - elif len(exclusions.shape) == 2: - # treat exclusions as ranges of indices - for r in exclusions: - excl[self.tThSort[r[0] : r[1]]] = True - else: - raise RuntimeError( - f'Unclear behavior for shape {exclusions.shape}' - ) - self._exclusions = excl - self.nHKLs = np.sum(np.logical_not(self._exclusions)) - - def exclude( - self, - dmin: Optional[float] = None, - dmax: Optional[float] = None, - tthmin: Optional[float] = None, - tthmax: Optional[float] = None, - sfacmin: Optional[float] = None, - sfacmax: Optional[float] = None, - pintmin: Optional[float] = None, - pintmax: Optional[float] = None, - ) -> None: - """ - Set exclusions according to various parameters - - Any hkl with a value below any min or above any max will be excluded. So - to be included, an hkl needs to have values between the min and max - for all of the conditions given. - - Note that method resets the tThMax attribute to None. 
- - PARAMETERS - ---------- - dmin: float > 0 - minimum lattice spacing (angstroms) - dmax: float > 0 - maximum lattice spacing (angstroms) - tthmin: float > 0 - minimum two theta (radians) - tthmax: float > 0 - maximum two theta (radians) - sfacmin: float > 0 - minimum structure factor as a proportion of maximum - sfacmax: float > 0 - maximum structure factor as a proportion of maximum - pintmin: float > 0 - minimum powder intensity as a proportion of maximum - pintmax: float > 0 - maximum powder intensity as a proportion of maximum - """ - excl = np.zeros(self.getNhklRef(), dtype=bool) - self.exclusions = None - self.tThMax = None - - if (dmin is not None) or (dmax is not None): - d = np.array(self.getPlaneSpacings()) - if dmin is not None: - excl[d < dmin] = True - if dmax is not None: - excl[d > dmax] = True - - if (tthmin is not None) or (tthmax is not None): - tth = self.getTTh() - if tthmin is not None: - excl[tth < tthmin] = True - if tthmax is not None: - excl[tth > tthmax] = True - - if (sfacmin is not None) or (sfacmax is not None): - sfac = self.structFact - sfac = sfac / sfac.max() - if sfacmin is not None: - excl[sfac < sfacmin] = True - if sfacmax is not None: - excl[sfac > sfacmax] = True - - if (pintmin is not None) or (pintmax is not None): - pint = self.powder_intensity - pint = pint / pint.max() - if pintmin is not None: - excl[pint < pintmin] = True - if pintmax is not None: - excl[pint > pintmax] = True - - self.exclusions = excl - - def _parseLParms( - self, lparms: List[Union[valunits.valWUnit, float]] - ) -> List[float]: - lparmsDUnit = [] - for lparmThis in lparms: - if isinstance(lparmThis, valunits.valWUnit): - if lparmThis.isLength(): - lparmsDUnit.append(lparmThis.getVal(dUnit)) - elif lparmThis.isAngle(): - # plumbing set up to default to degrees - # for lattice parameters - lparmsDUnit.append(lparmThis.getVal('degrees')) - else: - raise RuntimeError( - f'Do not know what to do with {lparmThis}' - ) - else: - lparmsDUnit.append(lparmThis) - return lparmsDUnit - - @property - def lparms(self) -> List[float]: - """ - Lattice parameters of the plane data. - - Can be set as a List[float | valWUnit], but will be converted to - List[float]. - """ - return self._lparms - - @lparms.setter - def lparms(self, lparms: List[Union[valunits.valWUnit, float]]) -> None: - self._lparms = self._parseLParms(lparms) - self._calc() - - @property - def strainMag(self) -> Optional[float]: - """ - Strain magnitude of the plane data. - - float or None - """ - return self._strainMag - - @strainMag.setter - def strainMag(self, strain_mag: float) -> None: - self._strainMag = strain_mag - self.tThWidth = None - self._calc() - - @property - def wavelength(self) -> float: - """ - Wavelength of the plane data. - - Set as float or valWUnit. - - Read as float - """ - return self._wavelength - - @wavelength.setter - def wavelength(self, wavelength: Union[float, valunits.valWUnit]) -> None: - wavelength = processWavelength(wavelength) - # Do not re-compute if it is almost the same - if np.isclose(self._wavelength, wavelength): - return - - self._wavelength = wavelength - self._calc() - - def invalidate_structure_factor(self, ucell: unitcell) -> None: - """ - It can be expensive to compute the structure factor - This method just invalidates it, providing a unit cell, - so that it can be lazily computed from the unit cell. 
- - Parameters: - ----------- - unitcell : unitcell - The unit cell to be used to compute the structure factor - """ - self._structFact = None - self._hedm_intensity = None - self._powder_intensity = None - self._unitcell = ucell - - def _compute_sf_if_needed(self): - any_invalid = ( - self._structFact is None - or self._hedm_intensity is None - or self._powder_intensity is None - ) - if any_invalid and self._unitcell is not None: - # Compute the structure factor first. - # This can be expensive to do, so we lazily compute it when needed. - hkls = self.getHKLs(allHKLs=True) - self.structFact = self._unitcell.CalcXRSF(hkls) - - @property - def structFact(self) -> np.ndarray: - """ - Structure factors for each hkl. - - np.ndarray - """ - self._compute_sf_if_needed() - return self._structFact[~self.exclusions] - - @structFact.setter - def structFact(self, structFact: np.ndarray) -> None: - self._structFact = structFact - multiplicity = self.getMultiplicity(allHKLs=True) - tth = self.getTTh(allHKLs=True) - - hedm_intensity = ( - structFact * lorentz_factor(tth) * polarization_factor(tth) - ) - - powderI = hedm_intensity * multiplicity - - # Now scale them - hedm_intensity = 100.0 * hedm_intensity / np.nanmax(hedm_intensity) - powderI = 100.0 * powderI / np.nanmax(powderI) - - self._hedm_intensity = hedm_intensity - self._powder_intensity = powderI - - @property - def powder_intensity(self) -> np.ndarray: - """ - Powder intensity for each hkl. - """ - self._compute_sf_if_needed() - return self._powder_intensity[~self.exclusions] - - @property - def hedm_intensity(self) -> np.ndarray: - """ - HEDM (high energy x-ray diffraction microscopy) intensity for each hkl. - """ - self._compute_sf_if_needed() - return self._hedm_intensity[~self.exclusions] - - @staticmethod - def makePlaneData( - hkls: np.ndarray, - lparms: np.ndarray, - qsym: np.ndarray, - symmGroup, - strainMag, - wavelength, - ) -> Tuple[ - Dict[str, np.ndarray], Dict[str, Union[np.ndarray, float]], List[Dict] - ]: - """ - Generate lattice plane data from inputs. - - Parameters: - ----------- - hkls: np.ndarray - Miller indices, as in crystallography.latticePlanes - lparms: np.ndarray - Lattice parameters, as in crystallography.latticePlanes - qsym: np.ndarray - (4, n) containing quaternions of symmetry - symmGroup: str - Tag for the symmetry (Laue) group of the lattice. Can generate from - ltypeOfLaueGroup - strainMag: float - Swag of strain magnitudes - wavelength: float - Wavelength - - Returns: - ------- - dict: - Dictionary containing lattice plane data - dict: - Dictionary containing lattice vector operators - list: - List of dictionaries, each containing the data for one hkl - """ - - tempSetOutputDegrees(False) - latPlaneData = latticePlanes( - hkls, - lparms, - ltype=symmGroup, - strainMag=strainMag, - wavelength=wavelength, - ) - - latVecOps = latticeVectors(lparms, symmGroup) - - hklDataList = [] - for iHKL in range(len(hkls.T)): - # need transpose because of convention for hkls ordering - - """ - latVec = latPlaneData['normals'][:,iHKL] - # ... 
if not spots, may be able to work with a subset of these - latPlnNrmlList = applySym( - np.c_[latVec], qsym, csFlag=True, cullPM=False - ) - """ - # returns UN-NORMALIZED lattice plane normals - latPlnNrmls = applySym( - np.dot(latVecOps['B'], hkls[:, iHKL].reshape(3, 1)), - qsym, - csFlag=True, - cullPM=False, - ) - - # check for +/- in symmetry group - latPlnNrmlsM = applySym( - np.dot(latVecOps['B'], hkls[:, iHKL].reshape(3, 1)), - qsym, - csFlag=False, - cullPM=False, - ) - - csRefl = latPlnNrmls.shape[1] == latPlnNrmlsM.shape[1] - - # added this so that I retain the actual symmetric - # integer hkls as well - symHKLs = np.array( - np.round(np.dot(latVecOps['F'].T, latPlnNrmls)), dtype='int' - ) - - hklDataList.append( - dict( - hklID=iHKL, - hkl=hkls[:, iHKL], - tTheta=latPlaneData['tThetas'][iHKL], - dSpacings=latPlaneData['dspacings'][iHKL], - tThetaLo=latPlaneData['tThetasLo'][iHKL], - tThetaHi=latPlaneData['tThetasHi'][iHKL], - latPlnNrmls=unitVector(latPlnNrmls), - symHKLs=symHKLs, - centrosym=csRefl, - ) - ) - - revertOutputDegrees() - return latPlaneData, latVecOps, hklDataList - - @property - def laueGroup(self) -> str: - """ - This is the Schoenflies tag, describing symmetry group of the lattice. - Note that setting this with incompatible lattice parameters will - cause an error. If changing both, use set_laue_and_lparms. - - str - """ - return self._laueGroup - - @laueGroup.setter - def laueGroup(self, laueGroup: str) -> None: - self._laueGroup = laueGroup - self._calc() - - def set_laue_and_lparms( - self, laueGroup: str, lparms: List[Union[valunits.valWUnit, float]] - ) -> None: - """ - Set the Laue group and lattice parameters simultaneously - - When the Laue group changes, the lattice parameters may be - incompatible, and cause an error in self._calc(). This function - allows us to update both the Laue group and lattice parameters - simultaneously to avoid this issue. - - Parameters: - ----------- - laueGroup : str - The symmetry (Laue) group to be set - lparms : List[valunits.valWUnit | float] - Lattice parameters to be set - """ - self._laueGroup = laueGroup - self._lparms = self._parseLParms(lparms) - self._calc() - - @property - def q_sym(self) -> np.ndarray: - """ - Quaternions of symmetry for each hkl, generated from the Laue group - - np.ndarray((4, n)) - """ - return self._q_sym # rotations.quatOfLaueGroup(self._laueGroup) - - def getPlaneSpacings(self) -> List[float]: - """ - Plane spacings for each hkl. 
- - Returns: - ------- - List[float] - List of plane spacings for each hkl - """ - dspacings = [] - for iHKLr, hklData in enumerate(self.hklDataList): - if not self._thisHKL(iHKLr): - continue - dspacings.append(hklData['dSpacings']) - return dspacings - - @property - def latVecOps(self) -> Dict[str, Union[np.ndarray, float]]: - """ - gets lattice vector operators as a new (deepcopy) - - Returns: - ------- - Dict[str, np.ndarray | float] - Dictionary containing lattice vector operators - """ - return copy.deepcopy(self._latVecOps) - - def _thisHKL(self, iHKLr: int) -> bool: - hklData = self.hklDataList[iHKLr] - if self._exclusions is not None: - if self._exclusions[self.tThSortInv[iHKLr]]: - return False - if self._tThMax is not None: - if hklData['tTheta'] > self._tThMax or np.isnan(hklData['tTheta']): - return False - return True - - def _getTThRange(self, iHKLr: int) -> Tuple[float, float]: - hklData = self.hklDataList[iHKLr] - if self.tThWidth is not None: # tThHi-tThLo < self.tThWidth - tTh = hklData['tTheta'] - tThHi = tTh + self.tThWidth * 0.5 - tThLo = tTh - self.tThWidth * 0.5 - else: - tThHi = hklData['tThetaHi'] - tThLo = hklData['tThetaLo'] - return (tThLo, tThHi) - - def getTThRanges(self, strainMag: Optional[float] = None) -> np.ndarray: - """ - Get the 2-theta ranges for included hkls - - Parameters: - ----------- - strainMag : Optional[float] - Optional swag of strain magnitude - - Returns: - ------- - np.ndarray: - hstacked array of hstacked tThLo and tThHi for each hkl (n x 2) - """ - tThRanges = [] - for iHKLr, hklData in enumerate(self.hklDataList): - if not self._thisHKL(iHKLr): - continue - if strainMag is None: - tThRanges.append(self._getTThRange(iHKLr)) - else: - hklData = self.hklDataList[iHKLr] - d = hklData['dSpacings'] - tThLo = 2.0 * np.arcsin( - self._wavelength / 2.0 / (d * (1.0 + strainMag)) - ) - tThHi = 2.0 * np.arcsin( - self._wavelength / 2.0 / (d * (1.0 - strainMag)) - ) - tThRanges.append((tThLo, tThHi)) - return np.array(tThRanges) - - def getMergedRanges( - self, cullDupl: Optional[bool] = False - ) -> Tuple[List[List[int]], List[List[float]]]: - """ - Return indices and ranges for specified planeData, merging where - there is overlap based on the tThWidth and line positions - - Parameters: - ----------- - cullDupl : (optional) bool - If True, cull duplicate 2-theta values (within sqrt_epsf). Defaults - to False. - - Returns: - -------- - List[List[int]] - List of indices for each merged range - - List[List[float]] - List of merged ranges, (n x 2) - """ - tThs = self.getTTh() - tThRanges = self.getTThRanges() - - # if you end exlcusions in a doublet (or multiple close rings) - # then this will 'fail'. May need to revisit... - nonoverlapNexts = np.hstack( - (tThRanges[:-1, 1] < tThRanges[1:, 0], True) - ) - iHKLLists = [] - mergedRanges = [] - hklsCur = [] - tThLoIdx = 0 - tThHiCur = 0.0 - for iHKL, nonoverlapNext in enumerate(nonoverlapNexts): - tThHi = tThRanges[iHKL, -1] - if not nonoverlapNext: - if cullDupl and abs(tThs[iHKL] - tThs[iHKL + 1]) < sqrt_epsf: - continue - else: - hklsCur.append(iHKL) - tThHiCur = tThHi - else: - hklsCur.append(iHKL) - tThHiCur = tThHi - iHKLLists.append(hklsCur) - mergedRanges.append([tThRanges[tThLoIdx, 0], tThHiCur]) - tThLoIdx = iHKL + 1 - hklsCur = [] - return iHKLLists, mergedRanges - - def getTTh(self, allHKLs: Optional[bool] = False) -> np.ndarray: - """ - Get the 2-theta values for each hkl. 
-
-        Parameters:
-        -----------
-        allHKLs : (optional) bool
-            If True, return all 2-theta values, even if they are excluded in
-            the current planeData. Default is False.
-
-        Returns:
-        -------
-        np.ndarray
-            Array of 2-theta values for each hkl
-        """
-        tTh = []
-        for iHKLr, hklData in enumerate(self.hklDataList):
-            if not allHKLs and not self._thisHKL(iHKLr):
-                continue
-            tTh.append(hklData['tTheta'])
-        return np.array(tTh)
-
-    def getMultiplicity(self, allHKLs: Optional[bool] = False) -> np.ndarray:
-        """
-        Get the multiplicity for each hkl (number of symHKLs).
-
-        Parameters:
-        ----------
-        allHKLs : (optional) bool
-            If True, return all multiplicities, even if they are excluded in
-            the current planeData. Defaults to False.
-
-        Returns
-        -------
-        np.ndarray
-            Array of multiplicities for each hkl
-        """
-        # ... JVB: is this incorrect?
-        multip = []
-        for iHKLr, hklData in enumerate(self.hklDataList):
-            if allHKLs or self._thisHKL(iHKLr):
-                multip.append(hklData['symHKLs'].shape[1])
-        return np.array(multip)
-
-    def getHKLID(
-        self,
-        hkl: Union[int, Tuple[int, int, int], np.ndarray],
-        master: Optional[bool] = False,
-    ) -> Union[List[int], int]:
-        """
-        Return the unique ID of a list of hkls.
-
-        Parameters
-        ----------
-        hkl : int | tuple | list | numpy.ndarray
-            The input hkl. If an int, or a list of ints, it just passes
-            through (FIXME).
-            If a tuple, treated as a single (h, k, l).
-            If a list of lists/tuples, each is treated as an (h, k, l).
-            If a numpy.ndarray, it is assumed to have shape (3, N) with the
-            N (h, k, l) vectors stacked column-wise
-
-        master : bool, optional
-            If True, return the master hklID, else return the index from the
-            external (sorted and reduced) list.
-
-        Returns
-        -------
-        hkl_ids : list
-            The list of requested hklID values associated with the input.
-
-        Notes
-        -----
-        TODO: revisit this weird API???
-
-        Changes:
-        -------
-        2020-05-21 (JVB) -- modified to handle all symmetric equivalent reprs.
-        """
-        if hasattr(hkl, '__setitem__'):  # tuple does not have __setitem__
-            if isinstance(hkl, np.ndarray):
-                # if is ndarray, assume is 3xN
-                return [self._getHKLID(x, master=master) for x in hkl.T]
-            else:
-                return [self._getHKLID(x, master=master) for x in hkl]
-        else:
-            return self._getHKLID(hkl, master=master)
-
-    def _getHKLID(
-        self,
-        hkl: Union[int, Tuple[int, int, int], np.ndarray],
-        master: Optional[bool] = False,
-    ) -> int:
-        """
-        for hkl that is a tuple, return externally visible hkl index
-        """
-        if isinstance(hkl, int):
-            return hkl
-        else:
-            hklList = self.getSymHKLs()  # !!! list, reduced by exclusions
-            intl_hklIDs = np.asarray([i['hklID'] for i in self.hklDataList])
-            intl_hklIDs_sorted = intl_hklIDs[~self.exclusions[self.tThSortInv]]
-            dHKLInv = {}
-            for iHKL, symHKLs in enumerate(hklList):
-                idx = intl_hklIDs_sorted[iHKL] if master else iHKL
-                for thisHKL in symHKLs.T:
-                    dHKLInv[tuple(thisHKL)] = idx
-            try:
-                return dHKLInv[tuple(hkl)]
-            except KeyError:
-                raise RuntimeError(
-                    f"hkl '{tuple(hkl)}' is not present in this material!"
-                )
-
-    def getHKLs(self, *hkl_ids: int, **kwargs) -> Union[List[str], np.ndarray]:
-        """
-        Returns the powder HKLs subject to specified options.
-
-        Parameters
-        ----------
-        *hkl_ids : int
-            Optional list of specific master hklIDs.
-        **kwargs : dict
-            One or more of the following keyword arguments:
-                asStr : bool
-                    If True, return a list of strings. The default is False.
-                thisTTh : scalar | None
-                    If not None, only return hkls overlapping the specified
-                    2-theta (in radians). The default is None.
-            allHKLs : bool
-                If True, then ignore exclusions. The default is False.
-
-        Raises
-        ------
-        TypeError
-            If an unknown kwarg is passed.
-        RuntimeError
-            If an invalid hklID is passed.
-
-        Returns
-        -------
-        hkls : list | numpy.ndarray
-            Either a list of hkls as strings (if asStr=True) or a vstacked
-            array of hkls.
-
-        Notes
-        -----
-        !!! the shape of the return value when asStr=False is the _transpose_
-            of the typical return value for self.get_hkls() and self.hkls!
-            This _may_ change to avoid confusion, but going to leave it for
-            now so as not to break anything.
-
-        2022/08/05 JVB:
-            - Added functionality to handle optional hklID args
-            - Updated docstring
-        """
-        # kwarg parsing
-        opts = dict(asStr=False, thisTTh=None, allHKLs=False)
-        if len(kwargs) > 0:
-            # check keys
-            for k, v in kwargs.items():
-                if k not in opts:
-                    raise TypeError(
-                        f"getHKLs() got an unexpected keyword argument '{k}'"
-                    )
-            opts.update(kwargs)
-
-        hkls = []
-        if len(hkl_ids) == 0:
-            for iHKLr, hklData in enumerate(self.hklDataList):
-                if not opts['allHKLs']:
-                    if not self._thisHKL(iHKLr):
-                        continue
-                if opts['thisTTh'] is not None:
-                    tThLo, tThHi = self._getTThRange(iHKLr)
-                    if opts['thisTTh'] < tThHi and opts['thisTTh'] > tThLo:
-                        hkls.append(hklData['hkl'])
-                else:
-                    hkls.append(hklData['hkl'])
-        else:
-            # !!! changing behavior here; if the hkl_id is invalid, raises
-            #     RuntimeError, and if allHKLs=False and the hkl_id is
-            #     excluded, it also raises a RuntimeError
-            all_hkl_ids = np.asarray([i['hklID'] for i in self.hklDataList])
-            sorted_excl = self.exclusions[self.tThSortInv]
-            idx = np.zeros(len(self.hklDataList), dtype=int)
-            for i, hkl_id in enumerate(hkl_ids):
-                # find ordinal index of current hklID
-                try:
-                    idx[i] = int(np.where(all_hkl_ids == hkl_id)[0])
-                except TypeError:
-                    raise RuntimeError(
-                        f"Requested hklID '{hkl_id}' is invalid!"
-                    )
-                if sorted_excl[idx[i]] and not opts['allHKLs']:
-                    raise RuntimeError(
-                        f"Requested hklID '{hkl_id}' is excluded!"
-                    )
-                hkls.append(self.hklDataList[idx[i]]['hkl'])
-
-        # handle output kwarg
-        if opts['asStr']:
-            return list(map(hklToStr, np.array(hkls)))
-        else:
-            return np.array(hkls)
-
-    def getSymHKLs(
-        self,
-        asStr: Optional[bool] = False,
-        withID: Optional[bool] = False,
-        indices: Optional[List[int]] = None,
-    ) -> Union[List[List[str]], List[np.ndarray]]:
-        """
-        Return all symmetry HKLs.
-
-        Parameters
-        ----------
-        asStr : bool, optional
-            If True, return the symmetry HKLs as strings. The default is False.
-        withID : bool, optional
-            If True, return the symmetry HKLs with the hklID. The default is
-            False. Does nothing if asStr is True.
-        indices : list[int], optional
-            Optional list of indices of hkls to include.
-
-        Returns
-        -------
-        sym_hkls : list of lists of strings, or list of numpy.ndarray
-            List of symmetry HKLs for each HKL, either as strings or as a
-            vstacked array.
- """ - sym_hkls = [] - hkl_index = 0 - if indices is not None: - indB = np.zeros(self.nHKLs, dtype=bool) - indB[np.array(indices)] = True - else: - indB = np.ones(self.nHKLs, dtype=bool) - for iHKLr, hklData in enumerate(self.hklDataList): - if not self._thisHKL(iHKLr): - continue - if indB[hkl_index]: - hkls = hklData['symHKLs'] - if asStr: - sym_hkls.append(list(map(hklToStr, np.array(hkls).T))) - elif withID: - sym_hkls.append( - np.vstack( - [ - np.tile(hklData['hklID'], (1, hkls.shape[1])), - hkls, - ] - ) - ) - else: - sym_hkls.append(np.array(hkls)) - hkl_index += 1 - return sym_hkls - - @staticmethod - def makeScatteringVectors( - hkls: np.ndarray, - rMat_c: np.ndarray, - bMat: np.ndarray, - wavelength: float, - chiTilt: Optional[float] = None, - ) -> Tuple[np.ndarray, np.ndarray, np.ndarray]: - """ - Static method for calculating g-vectors and scattering vector angles - for specified hkls, subject to the bragg conditions specified by - lattice vectors, orientation matrix, and wavelength - - Parameters - ---------- - hkls : np.ndarray - (3, n) array of hkls. - rMat_c : np.ndarray - (3, 3) rotation matrix from the crystal to the sample frame. - bMat : np.ndarray, optional - (3, 3) COB from reciprocal lattice frame to the crystal frame. - wavelength : float - xray wavelength in Angstroms. - chiTilt : float, optional - 0 <= chiTilt <= 90 degrees, defaults to 0 - - Returns - ------- - gVec_s : np.ndarray - (3, n) array of g-vectors (reciprocal lattice) in the sample frame. - oangs0 : np.ndarray - (3, n) array containing the feasible (2-theta, eta, ome) triplets - for each input hkl (first solution) - oangs1 : np.ndarray - (3, n) array containing the feasible (2-theta, eta, ome) triplets - for each input hkl (second solution) - - FIXME: must do testing on strained bMat - """ - # arg munging - chi = float(chiTilt) if chiTilt is not None else 0.0 - rMat_c = rMat_c.squeeze() - - # these are the reciprocal lattice vectors in the SAMPLE FRAME - # ** NOTE ** - # if strained, assumes that you handed it a bMat calculated from - # strained [a, b, c] in the CRYSTAL FRAME - gVec_s = np.dot(rMat_c, np.dot(bMat, hkls)) - - dim0 = gVec_s.shape[0] - if dim0 != 3: - raise ValueError(f'Number of lattice plane normal dims is {dim0}') - - # call model from transforms now - oangs0, oangs1 = xfcapi.oscill_angles_of_hkls( - hkls.T, chi, rMat_c, bMat, wavelength - ) - - return gVec_s, oangs0.T, oangs1.T - - def _makeScatteringVectors( - self, - rMat: np.ndarray, - bMat: Optional[np.ndarray] = None, - chiTilt: Optional[float] = None, - ) -> Tuple[List[np.ndarray], List[np.ndarray], List[np.ndarray]]: - """ - modeled after QFromU.m - """ - - if bMat is None: - bMat = self._latVecOps['B'] - - Qs_vec = [] - Qs_ang0 = [] - Qs_ang1 = [] - for iHKLr, hklData in enumerate(self.hklDataList): - if not self._thisHKL(iHKLr): - continue - thisQs, thisAng0, thisAng1 = PlaneData.makeScatteringVectors( - hklData['symHKLs'], - rMat, - bMat, - self._wavelength, - chiTilt=chiTilt, - ) - Qs_vec.append(thisQs) - Qs_ang0.append(thisAng0) - Qs_ang1.append(thisAng1) - - return Qs_vec, Qs_ang0, Qs_ang1 - - def calcStructFactor(self, atominfo): - """ - Calculates unit cell structure factors as a function of hkl - USAGE: - FSquared = calcStructFactor(atominfo,hkls,B) - INPUTS: - 1) atominfo (m x 1 float ndarray) the first threee columns of the - matrix contain fractional atom positions [uvw] of atoms in the unit - cell. 
The last column contains the number of electrons for a given atom - 2) hkls (3 x n float ndarray) is the array of Miller indices for - the planes of interest. The vectors are assumed to be - concatenated along the 1-axis (horizontal) - 3) B (3 x 3 float ndarray) is a matrix of reciprocal lattice basis - vectors,where each column contains a reciprocal lattice basis vector - ({g}=[B]*{hkl}) - OUTPUTS: - 1) FSquared (n x 1 float ndarray) array of structure factors, - one for each hkl passed into the function - """ - r = atominfo[:, 0:3] - elecNum = atominfo[:, 3] - hkls = self.hkls - B = self.latVecOps['B'] - sinThOverLamdaList, ffDataList = LoadFormFactorData() - FSquared = np.zeros(hkls.shape[1]) - - for jj in np.arange(0, hkls.shape[1]): - # ???: probably have other functions for this - # Calculate G for each hkl - # Calculate magnitude of G for each hkl - G = ( - hkls[0, jj] * B[:, 0] - + hkls[1, jj] * B[:, 1] - + hkls[2, jj] * B[:, 2] - ) - magG = np.sqrt(G[0] ** 2 + G[1] ** 2 + G[2] ** 2) - - # Begin calculating form factor - F = 0 - for ii in np.arange(0, r.shape[0]): - ff = RetrieveAtomicFormFactor( - elecNum[ii], magG, sinThOverLamdaList, ffDataList - ) - exparg = complex( - 0.0, - 2.0 - * np.pi - * ( - hkls[0, jj] * r[ii, 0] - + hkls[1, jj] * r[ii, 1] - + hkls[2, jj] * r[ii, 2] - ), - ) - F += ff * np.exp(exparg) - - """ - F = sum_atoms(ff(Q)*e^(2*pi*i(hu+kv+lw))) - """ - FSquared[jj] = np.real(F * np.conj(F)) - - return FSquared - - # OLD DEPRECATED PLANE_DATA STUFF ==================================== - @deprecated(new_func="len(self.hkls.T)", removal_date="2025-08-01") - def getNHKLs(self): - return len(self.getHKLs()) - - @deprecated(new_func="self.exclusions", removal_date="2025-08-01") - def get_exclusions(self): - return self.exclusions - - @deprecated(new_func="self.exclusions=...", removal_date="2025-08-01") - def set_exclusions(self, exclusions): - self.exclusions = exclusions - - @deprecated( - new_func="rotations.ltypeOfLaueGroup(self.laueGroup)", - removal_date="2025-08-01", - ) - def getLatticeType(self): - return ltypeOfLaueGroup(self.laueGroup) - - @deprecated(new_func="self.q_sym", removal_date="2025-08-01") - def getQSym(self): - return self.q_sym - - -@deprecated(removal_date='2025-01-01') -def getFriedelPair(tth0, eta0, *ome0, **kwargs): - """ - Get the diffractometer angular coordinates in degrees for - the Friedel pair of a given reflection (min angular distance). - - AUTHORS: - - J. V. Bernier -- 10 Nov 2009 - - USAGE: - - ome1, eta1 = getFriedelPair(tth0, eta0, *ome0, - display=False, - units='degrees', - convention='hexrd') - - INPUTS: - - 1) tth0 is a list (or ndarray) of 1 or n the bragg angles (2theta) for - the n reflections (tiled to match eta0 if only 1 is given). - - 2) eta0 is a list (or ndarray) of 1 or n azimuthal coordinates for the n - reflections (tiled to match tth0 if only 1 is given). - - 3) ome0 is a list (or ndarray) of 1 or n reference oscillation - angles for the n reflections (denoted omega in [1]). This argument - is optional. 
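-
-# Editor's aside: the structure-factor sum of calcStructFactor above in
-# vectorized form, F = sum_j ff_j * exp(2*pi*i*(h*u + k*v + l*w)), using
-# constant toy form factors; the real code interpolates ff at each |G|.
-import numpy as np
-
-r = np.array([[0.0, 0.0, 0.0],   # fractional positions (u, v, w) of a
-              [0.5, 0.5, 0.5]])  # bcc-like two-atom basis
-ff = np.array([26.0, 26.0])      # hypothetical constant form factors
-
-for hkl in (np.array([1.0, 0.0, 0.0]), np.array([1.0, 1.0, 0.0])):
-    F = np.sum(ff * np.exp(2.0j * np.pi * (r @ hkl)))
-    print(hkl, np.real(F * np.conj(F)))  # (100) -> ~0 (extinct); (110) -> 2704
-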
-
-    4) Keyword arguments may be one of the following:
-
-    Keyword           Values|{default}        Action
-    --------------    --------------          --------------
-    'display'         True|{False}            toggles display to cmd line
-    'units'           'radians'|{'degrees'}   sets units for input angles
-    'convention'      'fable'|{'hexrd'}       sets conventions defining
-                                              the angles (see below)
-    'chiTilt'         None                    the inclination (about Xlab) of
-                                              the oscillation axis
-
-    OUTPUTS:
-
-    1) ome1 contains the oscillation angle coordinates of the
-    Friedel pairs associated with the n input reflections, relative to ome0
-    (i.e. the returned ome1 includes the ome0 offset). Output is in DEGREES!
-
-    2) eta1 contains the azimuthal coordinates of the Friedel
-    pairs associated with the n input reflections. Output units are
-    controlled via the module variable 'outputDegrees'
-
-    NOTES:
-
-    !!!: The outputs ome1, eta1 are written using the selected convention, but
-         the units are always degrees. May change this to work with Nathan's
-         global...
-
-    !!!: In the 'fable' convention [1], {XYZ} form a RHON basis where X is
-         downstream, Z is vertical, and eta is CCW with +Z defining eta = 0.
-
-    !!!: In the 'hexrd' convention [2], {XYZ} form a RHON basis where Z is
-         upstream, Y is vertical, and eta is CCW with +X defining eta = 0.
-
-    REFERENCES:
-
-    [1] E. M. Lauridsen, S. Schmidt, R. M. Suter, and H. F. Poulsen,
-        ``Tracking: a method for structural characterization of grains in
-        powders or polycrystals''. J. Appl. Cryst. (2001). 34, 744--750
-
-    [2] J. V. Bernier, M. P. Miller, J.-S. Park, and U. Lienert,
-        ``Quantitative Stress Analysis of Recrystallized OFHC Cu Subject
-        to Deformed In Situ'', J. Eng. Mater. Technol. (2008). 130.
-        DOI:10.1115/1.2870234
-    """
-
-    dispFlag = False
-    fableFlag = False
-    chi = None
-    c1 = 1.0
-    c2 = pi / 180.0
-
-    eta0 = np.atleast_1d(eta0)
-    tth0 = np.atleast_1d(tth0)
-    ome0 = np.atleast_1d(ome0)
-
-    if eta0.ndim != 1:
-        raise RuntimeError('azimuthal input must be 1-D')
-
-    npts = len(eta0)
-
-    if tth0.ndim != 1:
-        raise RuntimeError('Bragg angle input must be 1-D')
-    else:
-        if len(tth0) != npts:
-            if len(tth0) == 1:
-                tth0 *= np.ones(npts)
-            elif npts == 1:
-                npts = len(tth0)
-                eta0 *= np.ones(npts)
-            else:
-                raise RuntimeError(
-                    'the azimuthal and Bragg angle inputs are inconsistent'
-                )
-
-    if len(ome0) == 0:
-        ome0 = np.zeros(npts)  # dummy ome0
-    elif len(ome0) == 1 and npts > 1:
-        ome0 *= np.ones(npts)
-    else:
-        if len(ome0) != npts:
-            raise RuntimeError(
-                'the oscillation angle input is inconsistent; '
-                + f'it has length {len(ome0)} while it should be {npts}'
-            )
-
-    # keyword args processing
-    kwarglen = len(kwargs)
-    if kwarglen > 0:
-        argkeys = list(kwargs.keys())
-        for i in range(kwarglen):
-            if argkeys[i] == 'display':
-                dispFlag = kwargs[argkeys[i]]
-            elif argkeys[i] == 'convention':
-                if kwargs[argkeys[i]].lower() == 'fable':
-                    fableFlag = True
-            elif argkeys[i] == 'units':
-                if kwargs[argkeys[i]] == 'radians':
-                    c1 = 180.0 / pi
-                    c2 = 1.0
-            elif argkeys[i] == 'chiTilt':
-                if kwargs[argkeys[i]] is not None:
-                    chi = kwargs[argkeys[i]]
-
-    # a little talkback...
- if dispFlag: - if fableFlag: - print('\nUsing Fable angle convention\n') - else: - print('\nUsing image-based angle convention\n') - - # mapped eta input - # - in DEGREES, thanks to c1 - eta0 = mapAngle(c1 * eta0, [-180, 180], units='degrees') - if fableFlag: - eta0 = 90 - eta0 - - # must put args into RADIANS - # - eta0 is in DEGREES, - # - the others are in whatever was entered, hence c2 - eta0 = d2r * eta0 - tht0 = c2 * tth0 / 2 - if chi is not None: - chi = c2 * chi - else: - chi = 0 - - """ - SYSTEM SOLVE - - - cos(chi)cos(eta)cos(theta)sin(x) - cos(chi)sin(theta)cos(x) \ - = sin(theta) - sin(chi)sin(eta)cos(theta) - - - Identity: a sin x + b cos x = sqrt(a**2 + b**2) sin (x + alpha) - - / - | atan(b/a) for a > 0 - alpha < - | pi + atan(b/a) for a < 0 - \ - - => sin (x + alpha) = c / sqrt(a**2 + b**2) - - must use both branches for sin(x) = n: - x = u (+ 2k*pi) | x = pi - u (+ 2k*pi) - """ - cchi = np.cos(chi) - schi = np.sin(chi) - ceta = np.cos(eta0) - seta = np.sin(eta0) - ctht = np.cos(tht0) - stht = np.sin(tht0) - - nchi = np.c_[0.0, cchi, schi].T - - gHat0_l = -np.vstack([ceta * ctht, seta * ctht, stht]) - - a = cchi * ceta * ctht - b = -cchi * stht - c = stht + schi * seta * ctht - - # form solution - abMag = np.sqrt(a * a + b * b) - assert np.all(abMag > 0), "Beam vector specification is infeasible!" - phaseAng = np.arctan2(b, a) - rhs = c / abMag - rhs[abs(rhs) > 1.0] = np.nan - rhsAng = np.arcsin(rhs) - - # write ome angle output arrays (NaNs persist here) - ome1 = rhsAng - phaseAng - ome2 = np.pi - rhsAng - phaseAng - - ome1 = mapAngle(ome1, [-np.pi, np.pi], units='radians') - ome2 = mapAngle(ome2, [-np.pi, np.pi], units='radians') - - ome_stack = np.vstack([ome1, ome2]) - - min_idx = np.argmin(abs(ome_stack), axis=0) - - ome_min = ome_stack[min_idx, list(range(len(ome1)))] - eta_min = np.nan * np.ones_like(ome_min) - - # mark feasible reflections - goodOnes = ~np.isnan(ome_min) - - numGood = np.sum(goodOnes) - tmp_eta = np.empty(numGood) - tmp_gvec = gHat0_l[:, goodOnes] - for i in range(numGood): - rchi = rotMatOfExpMap(np.tile(ome_min[goodOnes][i], (3, 1)) * nchi) - gHat_l = np.dot(rchi, tmp_gvec[:, i].reshape(3, 1)) - tmp_eta[i] = np.arctan2(gHat_l[1], gHat_l[0]) - eta_min[goodOnes] = tmp_eta - - # everybody back to DEGREES! - # - ome1 is in RADIANS here - # - convert and put into [-180, 180] - ome1 = mapAngle( - mapAngle(r2d * ome_min, [-180, 180], units='degrees') + c1 * ome0, - [-180, 180], - units='degrees', - ) - - # put eta1 in [-180, 180] - eta1 = mapAngle(r2d * eta_min, [-180, 180], units='degrees') - - if not outputDegrees: - ome1 *= d2r - eta1 *= d2r - - return ome1, eta1 - - -def getDparms( - lp: np.ndarray, lpTag: str, radians: Optional[bool] = True -) -> np.ndarray: - """ - Utility routine for getting dparms, that is the lattice parameters - without symmetry -- 'triclinic' - - Parameters - ---------- - lp : np.ndarray - Parsed lattice parameters - lpTag : str - Tag for the symmetry group of the lattice (from Laue group) - radians : bool, optional - Whether or not to use radians for angles, default is True - - Returns - ------- - np.ndarray - The lattice parameters without symmetry. - """ - latVecOps = latticeVectors(lp, tag=lpTag, radians=radians) - return latVecOps['dparms'] - - -def LoadFormFactorData(): - """ - Script to read in a csv file containing information relating the - magnitude of Q (sin(th)/lambda) to atomic form factor - Notes: - Atomic form factor data gathered from the International Tables of - Crystallography: - P. J. Brown, A. G. Fox, E. 
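-
-# Editor's aside: a numerical check of the identity used in the SYSTEM SOLVE
-# block above, a*sin(x) + b*cos(x) = sqrt(a^2 + b^2)*sin(x + alpha) with
-# alpha = arctan2(b, a), and of the two solution branches for sin(x) = n
-# (cf. ome1 and ome2); the coefficients are arbitrary test values.
-import numpy as np
-
-a, b, c = 0.7, -0.4, 0.3
-alpha = np.arctan2(b, a)
-rhs = c / np.hypot(a, b)             # feasible only when |rhs| <= 1
-
-x1 = np.arcsin(rhs) - alpha          # first branch
-x2 = np.pi - np.arcsin(rhs) - alpha  # second branch
-for x in (x1, x2):
-    print(np.isclose(a * np.sin(x) + b * np.cos(x), c))  # True, True
-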
N. Maslen, M. A. O'Keefec and B. T. M. Willis, - "Chapter 6.1. Intensity of diffracted intensities", International Tables - for Crystallography (2006). Vol. C, ch. 6.1, pp. 554-595 - """ - - dir1 = os.path.split(valunits.__file__) - dataloc = os.path.join(dir1[0], 'data', 'FormFactorVsQ.csv') - - data = np.zeros((62, 99), float) - - # FIXME: marked broken by DP - jj = 0 - with open(dataloc, 'rU') as csvfile: - datareader = csv.reader(csvfile, dialect=csv.excel) - for row in datareader: - ii = 0 - for val in row: - data[jj, ii] = float(val) - ii += 1 - jj += 1 - - sinThOverLamdaList = data[:, 0] - ffDataList = data[:, 1:] - - return sinThOverLamdaList, ffDataList - - -def RetrieveAtomicFormFactor(elecNum, magG, sinThOverLamdaList, ffDataList): - """Interpolates between tabulated data to find the atomic form factor - for an atom with elecNum electrons for a given magnitude of Q - USAGE: - ff = RetrieveAtomicFormFactor(elecNum,magG,sinThOverLamdaList,ffDataList) - INPUTS: - 1) elecNum, (1 x 1 float) number of electrons for atom of interest - 2) magG (1 x 1 float) magnitude of G - 3) sinThOverLamdaList (n x 1 float ndarray) form factor data is tabulated - in terms of sin(theta)/lambda (A^-1). - 3) ffDataList (n x m float ndarray) form factor data is tabulated in terms - of sin(theta)/lambda (A^-1). Each column corresponds to a different - number of electrons - OUTPUTS: - 1) ff (n x 1 float) atomic form factor for atom and hkl of interest - NOTES: - Data should be calculated in terms of G at some point - """ - sinThOverLambda = 0.5 * magG - # lambda=2*d*sin(th) - # lambda=2*sin(th)/G - # 1/2*G=sin(th)/lambda - - ff = np.interp( - sinThOverLambda, sinThOverLamdaList, ffDataList[:, (elecNum - 1)] - ) - - return ff - - -def lorentz_factor(tth: np.ndarray) -> np.ndarray: - """ - 05/26/2022 SS adding lorentz factor computation - to the detector so that it can be compenstated for in the - intensity correction - - Parameters - ---------- - tth: np.ndarray - 2-theta of every pixel in radians - - Returns - ------- - np.ndarray - Lorentz factor for each pixel - """ - - theta = 0.5 * tth - - cth = np.cos(theta) - sth2 = np.sin(theta) ** 2 - - return 1.0 / (4.0 * cth * sth2) - - -def polarization_factor( - tth: np.ndarray, - unpolarized: Optional[bool] = True, - eta: Optional[np.ndarray] = None, - f_hor: Optional[float] = None, - f_vert: Optional[float] = None, -) -> np.ndarray: - """ - 06/14/2021 SS adding lorentz polarization factor computation - to the detector so that it can be compenstated for in the - intensity correction - - 05/26/2022 decoupling lorentz factor from polarization factor - - parameters: tth two theta of every pixel in radians - if unpolarized is True, all subsequent arguments are optional - eta azimuthal angle of every pixel - f_hor fraction of horizontal polarization - (~1 for XFELs) - f_vert fraction of vertical polarization - (~0 for XFELs) - notice f_hor + f_vert = 1 - - FIXME, called without parameters like eta, f_hor, f_vert, but they default - to none in the current implementation, which will throw an error. 
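-
-# Editor's aside: lorentz_factor above, evaluated at a few arbitrary
-# two-theta values; with theta = tth / 2, L = 1 / (4 cos(theta) sin(theta)^2),
-# so the correction grows rapidly toward low scattering angle.
-import numpy as np
-
-tth = np.radians([10.0, 30.0, 60.0])
-theta = 0.5 * tth
-print(1.0 / (4.0 * np.cos(theta) * np.sin(theta) ** 2))  # ~[33.03, 3.86, 1.15]
-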
- """ - - ctth2 = np.cos(tth) ** 2 - - if unpolarized: - return (1 + ctth2) / 2 - - seta2 = np.sin(eta) ** 2 - ceta2 = np.cos(eta) ** 2 - return f_hor * (seta2 + ceta2 * ctth2) + f_vert * (ceta2 + seta2 * ctth2) diff --git a/hexrd/laue/xrdutil/utils.py b/hexrd/laue/xrdutil/utils.py index 1528403ec..f66e9ca2c 100644 --- a/hexrd/laue/xrdutil/utils.py +++ b/hexrd/laue/xrdutil/utils.py @@ -27,25 +27,14 @@ # ============================================================ -from typing import Optional, Union, Any, Generator -from hexrd.laue.material.crystallography import PlaneData -from hexrd.core.distortion.distortionabc import DistortionABC - -import numba import numpy as np -import numba from hexrd.core import constants from hexrd.core import matrixutil as mutil -from hexrd.core import rotations as rot -from hexrd.core import gridutil as gutil -from hexrd.laue.material.crystallography import processWavelength, PlaneData +from hexrd.core.material.crystallography import processWavelength from hexrd.core.transforms import xfcapi -from hexrd.core.valunits import valWUnit - -from hexrd.core import distortion as distortion_pkg from hexrd.core.deprecation import deprecated @@ -70,995 +59,9 @@ nans_1x2 = np.nan * np.ones((1, 2)) -# ============================================================================= -# CLASSES -# ============================================================================= - - -class EtaOmeMaps(object): - """ - find-orientations loads pickled eta-ome data, but CollapseOmeEta is not - pickleable, because it holds a list of ReadGE, each of which holds a - reference to an open file object, which is not pickleable. - """ - - def __init__(self, ome_eta_archive: str): - ome_eta: np.ndarray = np.load(ome_eta_archive, allow_pickle=True) - - planeData_args = ome_eta['planeData_args'] - planeData_hkls = ome_eta['planeData_hkls'] - self.planeData = PlaneData(planeData_hkls, *planeData_args) - self.planeData.exclusions = ome_eta['planeData_excl'] - self.dataStore = ome_eta['dataStore'] - self.iHKLList = ome_eta['iHKLList'] - self.etaEdges = ome_eta['etaEdges'] - self.omeEdges = ome_eta['omeEdges'] - self.etas = ome_eta['etas'] - self.omegas = ome_eta['omegas'] - - def save_eta_ome_maps(self, filename: str) -> None: - """ - eta_ome.dataStore - eta_ome.planeData - eta_ome.iHKLList - eta_ome.etaEdges - eta_ome.omeEdges - eta_ome.etas - eta_ome.omegas - """ - args = np.array(self.planeData.getParams(), dtype=object)[:4] - args[2] = valWUnit('wavelength', 'length', args[2], 'angstrom') - hkls = np.vstack([i['hkl'] for i in self.planeData.hklDataList]).T - save_dict = { - 'dataStore': self.dataStore, - 'etas': self.etas, - 'etaEdges': self.etaEdges, - 'iHKLList': self.iHKLList, - 'omegas': self.omegas, - 'omeEdges': self.omeEdges, - 'planeData_args': args, - 'planeData_hkls': hkls, - 'planeData_excl': self.planeData.exclusions, - } - np.savez_compressed(filename, **save_dict) - - -# ============================================================================= -# FUNCTIONS -# ============================================================================= - - -def _zproject(x: np.ndarray, y: np.ndarray): - return np.cos(x) * np.sin(y) - np.sin(x) * np.cos(y) - - -def zproject_sph_angles( - invecs: np.ndarray, - chi: float = 0.0, - method: str = 'stereographic', - source: str = 'd', - use_mask: bool = False, - invert_z: bool = False, - rmat: Optional[np.ndarray] = None, -) -> Union[np.ndarray, tuple[np.ndarray, np.ndarray]]: - """ - Projects spherical angles to 2-d mapping. 
-
-    Parameters
-    ----------
-    invecs : array_like
-        The (n, 3) array of input points, interpreted via the 'source' kwarg.
-    chi : scalar, optional
-        The inclination angle of the sample frame. The default is 0.0.
-    method : str, optional
-        Mapping type spec, either 'stereographic' or 'equal-area'.
-        The default is 'stereographic'.
-    source : str, optional
-        The type specifier of the input vectors, either 'd', 'q', or 'g'.
-        'd' signifies unit diffraction vectors as (2theta, eta, omega),
-        'q' specifies unit scattering vectors as (2theta, eta, omega),
-        'g' specifies unit vectors in the sample frame as (x, y, z).
-        The default is 'd'.
-    use_mask : bool, optional
-        If True, trim points not on the +z hemisphere (polar angles > 90).
-        The default is False.
-    invert_z : bool, optional
-        If True, invert the Z-coordinates of the unit vectors calculated from
-        the input angles. The default is False.
-    rmat : numpy.ndarray, shape=(3, 3), optional
-        Array representing a change of basis (rotation) to apply to the
-        calculated unit vectors. The default is None.
-
-    Raises
-    ------
-    RuntimeError
-        If method not in ('stereographic', 'equal-area').
-
-    Returns
-    -------
-    numpy.ndarray or tuple
-        If use_mask = False, then the array of n mapped input points with shape
-        (n, 2). If use_mask = True, then the first element is the ndarray of
-        mapped points with shape (<=n, 2), and the second is a bool array with
-        shape (n,) marking the points that fell on the upper hemisphere.
-
-    Notes
-    -----
-    CAVEAT: +Z axis projections only!!!
-    TODO: check mask application.
-    """
-    assert isinstance(source, str), "source kwarg must be a string"
-
-    invecs = np.atleast_2d(invecs)
-    if source.lower() == 'd':
-        spts_s = xfcapi.angles_to_dvec(invecs, chi=chi)
-    elif source.lower() == 'q':
-        spts_s = xfcapi.angles_to_gvec(invecs, chi=chi)
-    elif source.lower() == 'g':
-        spts_s = invecs
-
-    if rmat is not None:
-        spts_s = np.dot(spts_s, rmat.T)
-
-    if invert_z:
-        spts_s[:, 2] = -spts_s[:, 2]
-
-    # filter based on hemisphere
-    if use_mask:
-        pzi = spts_s[:, 2] <= 0
-        spts_s = spts_s[pzi, :]
-
-    if method.lower() == 'stereographic':
-        ppts = np.vstack(
-            [
-                spts_s[:, 0] / (1.0 - spts_s[:, 2]),
-                spts_s[:, 1] / (1.0 - spts_s[:, 2]),
-            ]
-        ).T
-    elif method.lower() == 'equal-area':
-        chords = spts_s + np.tile([0, 0, 1], (len(spts_s), 1))
-        scl = np.tile(mutil.rowNorm(chords), (2, 1)).T
-        ucrd = mutil.unitVector(
-            np.hstack([chords[:, :2], np.zeros((len(spts_s), 1))]).T
-        )
-
-        ppts = ucrd[:2, :].T * scl
-    else:
-        raise RuntimeError(f"method '{method}' not recognized")
-
-    if use_mask:
-        return ppts, pzi
-    else:
-        return ppts
-
-
-def make_polar_net(
-    ndiv: int = 24, projection: str = 'stereographic', max_angle: float = 120.0
-) -> np.ndarray:
-    """
-    TODO: options for generating net boundaries; fixed to Z proj.
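-
-# Editor's aside: the two mapping branches of zproject_sph_angles above,
-# applied to one hypothetical unit vector with z < 0 (the source='g' path,
-# so no transforms machinery is needed).
-import numpy as np
-
-v = np.array([0.3, 0.4, -np.sqrt(1.0 - 0.25)])  # unit vector, z < 0
-
-# stereographic branch: scale (x, y) by 1 / (1 - z)
-print(v[:2] / (1.0 - v[2]))                     # ~[0.1608, 0.2144]
-
-# equal-area branch: unit in-plane direction scaled by the chord |v + zhat|
-chord = v + np.array([0.0, 0.0, 1.0])
-inplane = np.array([chord[0], chord[1], 0.0])
-print(inplane[:2] / np.linalg.norm(inplane) * np.linalg.norm(chord))
-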
- """ - ndiv_tth = int(np.floor(0.5 * ndiv)) + 1 - wtths = np.radians( - np.linspace(0, 1, num=ndiv_tth, endpoint=True) * max_angle - ) - wetas = np.radians(np.linspace(-1, 1, num=ndiv + 1, endpoint=True) * 180.0) - weta_gen = np.radians(np.linspace(-1, 1, num=181, endpoint=True) * 180.0) - pts = [] - for eta in wetas: - net_ang = np.vstack( - [[wtths[0], wtths[-1]], np.tile(eta, 2), np.zeros(2)] - ).T - pts.append(zproject_sph_angles(net_ang, method=projection, source='d')) - pts.append(np.nan * np.ones((1, 2))) - for tth in wtths[1:]: - net_ang = np.vstack( - [tth * np.ones_like(weta_gen), weta_gen, np.zeros_like(weta_gen)] - ).T - pts.append(zproject_sph_angles(net_ang, method=projection, source='d')) - pts.append(nans_1x2) - - return np.vstack(pts) - - validateAngleRanges = xfcapi.validate_angle_ranges - -@deprecated(removal_date='2025-01-01') -def simulateOmeEtaMaps( - omeEdges, - etaEdges, - planeData, - expMaps, - chi=0.0, - etaTol=None, - omeTol=None, - etaRanges=None, - omeRanges=None, - bVec=constants.beam_vec, - eVec=constants.eta_vec, - vInv=constants.identity_6x1, -): - """ - Simulate spherical maps. - - Parameters - ---------- - omeEdges : TYPE - DESCRIPTION. - etaEdges : TYPE - DESCRIPTION. - planeData : TYPE - DESCRIPTION. - expMaps : (3, n) ndarray - DESCRIPTION. - chi : TYPE, optional - DESCRIPTION. The default is 0.. - etaTol : TYPE, optional - DESCRIPTION. The default is None. - omeTol : TYPE, optional - DESCRIPTION. The default is None. - etaRanges : TYPE, optional - DESCRIPTION. The default is None. - omeRanges : TYPE, optional - DESCRIPTION. The default is None. - bVec : TYPE, optional - DESCRIPTION. The default is [0, 0, -1]. - eVec : TYPE, optional - DESCRIPTION. The default is [1, 0, 0]. - vInv : TYPE, optional - DESCRIPTION. The default is [1, 1, 1, 0, 0, 0]. - - Returns - ------- - eta_ome : TYPE - DESCRIPTION. - - Notes - ----- - all angular info is entered in degrees - - ??? might want to creat module-level angluar unit flag - ??? 
might want to allow resvers delta omega - - """ - # convert to radians - etaEdges = np.radians(np.sort(etaEdges)) - omeEdges = np.radians(np.sort(omeEdges)) - - omeIndices = list(range(len(omeEdges))) - etaIndices = list(range(len(etaEdges))) - - i_max = omeIndices[-1] - j_max = etaIndices[-1] - - etaMin = etaEdges[0] - etaMax = etaEdges[-1] - omeMin = omeEdges[0] - omeMax = omeEdges[-1] - if omeRanges is None: - omeRanges = [ - [omeMin, omeMax], - ] - - if etaRanges is None: - etaRanges = [ - [etaMin, etaMax], - ] - - # signed deltas IN RADIANS - del_ome = omeEdges[1] - omeEdges[0] - del_eta = etaEdges[1] - etaEdges[0] - - delOmeSign = np.sign(del_eta) - - # tolerances are in degrees (easier) - if omeTol is None: - omeTol = abs(del_ome) - else: - omeTol = np.radians(omeTol) - if etaTol is None: - etaTol = abs(del_eta) - else: - etaTol = np.radians(etaTol) - - # pixel dialtions - dpix_ome = round(omeTol / abs(del_ome)) - dpix_eta = round(etaTol / abs(del_eta)) - - i_dil, j_dil = np.meshgrid( - np.arange(-dpix_ome, dpix_ome + 1), np.arange(-dpix_eta, dpix_eta + 1) - ) - - # get symmetrically expanded hkls from planeData - sym_hkls = planeData.getSymHKLs() - nhkls = len(sym_hkls) - - # make things C-contiguous for use in xfcapi functions - expMaps = np.array(expMaps.T, order='C') - nOrs = len(expMaps) - - bMat = np.array(planeData.latVecOps['B'], order='C') - wlen = planeData.wavelength - - bVec = np.array(bVec.flatten(), order='C') - eVec = np.array(eVec.flatten(), order='C') - vInv = np.array(vInv.flatten(), order='C') - - eta_ome = np.zeros((nhkls, max(omeIndices), max(etaIndices)), order='C') - for iHKL in range(nhkls): - these_hkls = np.ascontiguousarray(sym_hkls[iHKL].T, dtype=float) - for iOr in range(nOrs): - rMat_c = xfcapi.make_rmat_of_expmap(expMaps[iOr, :]) - angList = np.vstack( - xfcapi.oscill_angles_of_hkls( - these_hkls, - chi, - rMat_c, - bMat, - wlen, - beam_vec=bVec, - eta_vec=eVec, - v_inv=vInv, - ) - ) - if not np.all(np.isnan(angList)): - # - angList[:, 1] = rot.mapAngle( - angList[:, 1], [etaEdges[0], etaEdges[0] + 2 * np.pi] - ) - angList[:, 2] = rot.mapAngle( - angList[:, 2], [omeEdges[0], omeEdges[0] + 2 * np.pi] - ) - # - # do eta ranges - angMask_eta = np.zeros(len(angList), dtype=bool) - for etas in etaRanges: - angMask_eta = np.logical_or( - angMask_eta, - xfcapi.validate_angle_ranges( - angList[:, 1], etas[0], etas[1] - ), - ) - - # do omega ranges - ccw = True - angMask_ome = np.zeros(len(angList), dtype=bool) - for omes in omeRanges: - if omes[1] - omes[0] < 0: - ccw = False - angMask_ome = np.logical_or( - angMask_ome, - xfcapi.validate_angle_ranges( - angList[:, 2], omes[0], omes[1], ccw=ccw - ), - ) - - # mask angles list, hkls - angMask = np.logical_and(angMask_eta, angMask_ome) - - culledTTh = angList[angMask, 0] - culledEta = angList[angMask, 1] - culledOme = angList[angMask, 2] - - for iTTh in range(len(culledTTh)): - culledEtaIdx = np.where(etaEdges - culledEta[iTTh] > 0)[0] - if len(culledEtaIdx) > 0: - culledEtaIdx = culledEtaIdx[0] - 1 - if culledEtaIdx < 0: - culledEtaIdx = None - else: - culledEtaIdx = None - culledOmeIdx = np.where(omeEdges - culledOme[iTTh] > 0)[0] - if len(culledOmeIdx) > 0: - if delOmeSign > 0: - culledOmeIdx = culledOmeIdx[0] - 1 - else: - culledOmeIdx = culledOmeIdx[-1] - if culledOmeIdx < 0: - culledOmeIdx = None - else: - culledOmeIdx = None - - if culledEtaIdx is not None and culledOmeIdx is not None: - if dpix_ome > 0 or dpix_eta > 0: - i_sup = omeIndices[culledOmeIdx] + np.array( - [i_dil.flatten()], dtype=int - ) - 
j_sup = etaIndices[culledEtaIdx] + np.array( - [j_dil.flatten()], dtype=int - ) - - # catch shit that falls off detector... - # maybe make this fancy enough to wrap at 2pi? - idx_mask = np.logical_and( - np.logical_and(i_sup >= 0, i_sup < i_max), - np.logical_and(j_sup >= 0, j_sup < j_max), - ) - eta_ome[iHKL, i_sup[idx_mask], j_sup[idx_mask]] = ( - 1.0 - ) - else: - eta_ome[ - iHKL, - omeIndices[culledOmeIdx], - etaIndices[culledEtaIdx], - ] = 1.0 - return eta_ome - - -def _fetch_hkls_from_planedata(pd: PlaneData): - return np.hstack(pd.getSymHKLs(withID=True)).T - - -def _filter_hkls_eta_ome( - hkls: np.ndarray, - angles: np.ndarray, - eta_range: list[tuple[float]], - ome_range: list[tuple[float]], - return_mask: bool = False, -) -> Union[ - tuple[np.ndarray, np.ndarray], tuple[np.ndarray, np.ndarray, np.ndarray] -]: - """ - given a set of hkls and angles, filter them by the - eta and omega ranges - """ - angMask_eta = np.zeros(len(angles), dtype=bool) - for etas in eta_range: - angMask_eta = np.logical_or( - angMask_eta, - xfcapi.validate_angle_ranges(angles[:, 1], etas[0], etas[1]), - ) - - ccw = True - angMask_ome = np.zeros(len(angles), dtype=bool) - for omes in ome_range: - if omes[1] - omes[0] < 0: - ccw = False - angMask_ome = np.logical_or( - angMask_ome, - xfcapi.validate_angle_ranges( - angles[:, 2], omes[0], omes[1], ccw=ccw - ), - ) - - angMask = np.logical_and(angMask_eta, angMask_ome) - - allAngs = angles[angMask, :] - allHKLs = np.vstack([hkls, hkls])[angMask, :] - - if return_mask: - return allAngs, allHKLs, angMask - else: - return allAngs, allHKLs - - -def _project_on_detector_plane( - allAngs: np.ndarray, - rMat_d: np.ndarray, - rMat_c: np.ndarray, - chi: float, - tVec_d: np.ndarray, - tVec_c: np.ndarray, - tVec_s: np.ndarray, - distortion: DistortionABC, - beamVec: np.ndarray = constants.beam_vec, -) -> tuple[np.ndarray, np.ndarray, np.ndarray]: - """ - utility routine for projecting a list of (tth, eta, ome) onto the - detector plane parameterized by the args - """ - gVec_cs = xfcapi.angles_to_gvec( - allAngs, chi=chi, rmat_c=rMat_c, beam_vec=beamVec - ) - - rMat_ss = xfcapi.make_sample_rmat(chi, allAngs[:, 2]) - - tmp_xys = xfcapi.gvec_to_xy( - gVec_cs, - rMat_d, - rMat_ss, - rMat_c, - tVec_d, - tVec_s, - tVec_c, - beam_vec=beamVec, - ) - - valid_mask = ~(np.isnan(tmp_xys[:, 0]) | np.isnan(tmp_xys[:, 1])) - - det_xy = np.atleast_2d(tmp_xys[valid_mask, :]) - - # apply distortion if specified - if distortion is not None: - det_xy = distortion.apply_inverse(det_xy) - - return det_xy, rMat_ss, valid_mask - - -def _project_on_detector_cylinder( - allAngs: np.ndarray, - chi: float, - tVec_d: np.ndarray, - caxis: np.ndarray, - paxis: np.ndarray, - radius: float, - physical_size: np.ndarray, - angle_extent: float, - distortion: DistortionABC = None, - beamVec: np.ndarray = constants.beam_vec, - etaVec: np.ndarray = constants.eta_vec, - tVec_s: np.ndarray = constants.zeros_3x1, - rmat_s: np.ndarray = constants.identity_3x3, - tVec_c: np.ndarray = constants.zeros_3x1, -) -> tuple[np.ndarray, np.ndarray, np.ndarray]: - """ - utility routine for projecting a list of (tth, eta, ome) onto the - detector plane parameterized by the args. 
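-
-# Editor's aside: the OR-accumulated range masking used in
-# _filter_hkls_eta_ome above, with a plain-numpy stand-in for
-# xfcapi.validate_angle_ranges (simplified: CCW ranges, no wrap handling).
-import numpy as np
-
-def in_range(ang, lo, hi):  # the real helper also handles periodicity
-    return (ang >= lo) & (ang <= hi)
-
-eta = np.radians([-170.0, -30.0, 10.0, 95.0])
-eta_ranges = [(-np.pi, -np.pi / 2), (0.0, np.pi / 2)]
-
-mask = np.zeros(len(eta), dtype=bool)
-for lo, hi in eta_ranges:
-    mask |= in_range(eta, lo, hi)
-print(mask)  # [ True False  True False]
-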
this function does the - computation for a cylindrical detector - """ - dVec_cs = xfcapi.angles_to_dvec( - allAngs, chi=chi, rmat_c=np.eye(3), beam_vec=beamVec, eta_vec=etaVec - ) - - rMat_ss = np.tile(rmat_s, [allAngs.shape[0], 1, 1]) - - tmp_xys, valid_mask = _dvecToDetectorXYcylinder( - dVec_cs, - tVec_d, - caxis, - paxis, - radius, - physical_size, - angle_extent, - tVec_s=tVec_s, - rmat_s=rmat_s, - tVec_c=tVec_c, - ) - - det_xy = np.atleast_2d(tmp_xys[valid_mask, :]) - - # apply distortion if specified - if distortion is not None: - det_xy = distortion.apply_inverse(det_xy) - - return det_xy, rMat_ss, valid_mask - - -def _dvecToDetectorXYcylinder( - dVec_cs: np.ndarray, - tVec_d: np.ndarray, - caxis: np.ndarray, - paxis: np.ndarray, - radius: float, - physical_size: np.ndarray, - angle_extent: float, - tVec_s: np.ndarray = constants.zeros_3x1, - tVec_c: np.ndarray = constants.zeros_3x1, - rmat_s: np.ndarray = constants.identity_3x3, -) -> tuple[np.ndarray, np.ndarray]: - - cvec = _unitvec_to_cylinder( - dVec_cs, - caxis, - paxis, - radius, - tVec_d, - tVec_s=tVec_s, - tVec_c=tVec_c, - rmat_s=rmat_s, - ) - - cvec_det, valid_mask = _clip_to_cylindrical_detector( - cvec, - tVec_d, - caxis, - paxis, - radius, - physical_size, - angle_extent, - tVec_s=tVec_s, - tVec_c=tVec_c, - rmat_s=rmat_s, - ) - - xy_det = _dewarp_from_cylinder( - cvec_det, - tVec_d, - caxis, - paxis, - radius, - tVec_s=tVec_s, - tVec_c=tVec_c, - rmat_s=rmat_s, - ) - - return xy_det, valid_mask - - -def _unitvec_to_cylinder( - uvw: np.ndarray, - caxis: np.ndarray, - paxis: np.ndarray, - radius: float, - tvec: np.ndarray, - tVec_s: np.ndarray = constants.zeros_3x1, - tVec_c: np.ndarray = constants.zeros_3x1, - rmat_s: np.ndarray = constants.identity_3x3, -) -> np.ndarray: - """ - get point where unitvector uvw - intersect the cylindrical detector. - this will give points which are - outside the actual panel. 
the points - will be clipped to the panel later - - Parameters - ---------- - uvw : numpy.ndarray - unit vectors stacked row wise (nx3) shape - - Returns - ------- - numpy.ndarray - (x,y,z) vectors point which intersect with - the cylinder with (nx3) shape - """ - naxis = np.cross(caxis, paxis) - naxis = naxis / np.linalg.norm(naxis) - - tvec_c_l = np.dot(rmat_s, tVec_c) - - delta = tvec - (radius * naxis + np.squeeze(tVec_s) + np.squeeze(tvec_c_l)) - num = uvw.shape[0] - cx = np.atleast_2d(caxis).T - - delta_t = np.tile(delta, [num, 1]) - - t1 = np.dot(uvw, delta.T) - t2 = np.squeeze(np.dot(uvw, cx)) - t3 = np.squeeze(np.dot(delta, cx)) - t4 = np.dot(uvw, cx) - - A = np.squeeze(1 - t4**2) - B = t1 - t2 * t3 - C = radius**2 - np.linalg.norm(delta) ** 2 + t3**2 - - mask = np.abs(A) < 1e-10 - beta = np.zeros( - [ - num, - ] - ) - - beta[~mask] = (B[~mask] + np.sqrt(B[~mask] ** 2 + A[~mask] * C)) / A[~mask] - - beta[mask] = np.nan - return np.tile(beta, [3, 1]).T * uvw - - -def _clip_to_cylindrical_detector( - uvw: np.ndarray, - tVec_d: np.ndarray, - caxis: np.ndarray, - paxis: np.ndarray, - radius: float, - physical_size: np.ndarray, - angle_extent: float, - tVec_s: np.ndarray = constants.zeros_3x1, - tVec_c: np.ndarray = constants.zeros_3x1, - rmat_s: np.ndarray = constants.identity_3x3, -) -> tuple[np.ndarray, np.ndarray]: - """ - takes in the intersection points uvw - with the cylindrical detector and - prunes out points which don't actually - hit the actual panel - - Parameters - ---------- - uvw : numpy.ndarray - unit vectors stacked row wise (nx3) shape - - Returns - ------- - numpy.ndarray - (x,y,z) vectors point which fall on panel - with (mx3) shape - """ - # first get rid of points which are above - # or below the detector - naxis = np.cross(caxis, paxis) - num = uvw.shape[0] - - cx = np.atleast_2d(caxis).T - nx = np.atleast_2d(naxis).T - - tvec_c_l = np.dot(rmat_s, tVec_c) - - delta = tVec_d - ( - radius * naxis + np.squeeze(tVec_s) + np.squeeze(tvec_c_l) - ) - - delta_t = np.tile(delta, [num, 1]) - - uvwp = uvw - delta_t - dp = np.dot(uvwp, cx) - - uvwpxy = uvwp - np.tile(dp, [1, 3]) * np.tile(cx, [1, num]).T - - size = physical_size - tvec = np.atleast_2d(tVec_d).T - - # ycomp = uvwp - np.tile(tVec_d,[num, 1]) - mask1 = np.squeeze(np.abs(dp) > size[0] * 0.5) - uvwp[mask1, :] = np.nan - - # next get rid of points that fall outside - # the polar angle range - - ang = np.dot(uvwpxy, nx) / radius - ang[np.abs(ang) > 1.0] = np.sign(ang[np.abs(ang) > 1.0]) - - ang = np.arccos(ang) - mask2 = np.squeeze(ang >= angle_extent) - mask = np.logical_or(mask1, mask2) - res = uvw.copy() - res[mask, :] = np.nan - - return res, ~mask - - -def _dewarp_from_cylinder( - uvw: np.ndarray, - tVec_d: np.ndarray, - caxis: np.ndarray, - paxis: np.ndarray, - radius: float, - tVec_s: np.ndarray = constants.zeros_3x1, - tVec_c: np.ndarray = constants.zeros_3x1, - rmat_s: np.ndarray = constants.identity_3x3, -): - """ - routine to convert cylindrical coordinates - to cartesian coordinates in image frame - """ - naxis = np.cross(caxis, paxis) - naxis = naxis / np.linalg.norm(naxis) - - cx = np.atleast_2d(caxis).T - px = np.atleast_2d(paxis).T - nx = np.atleast_2d(naxis).T - num = uvw.shape[0] - - tvec_c_l = np.dot(rmat_s, tVec_c) - - delta = tVec_d - ( - radius * naxis + np.squeeze(tVec_s) + np.squeeze(tvec_c_l) - ) - - delta_t = np.tile(delta, [num, 1]) - - uvwp = uvw - delta_t - - uvwpxy = uvwp - np.tile(np.dot(uvwp, cx), [1, 3]) * np.tile(cx, [1, num]).T - - sgn = np.sign(np.dot(uvwpxy, px)) - sgn[sgn == 
0.0] = 1.0 - ang = np.dot(uvwpxy, nx) / radius - ang[np.abs(ang) > 1.0] = np.sign(ang[np.abs(ang) > 1.0]) - ang = np.arccos(ang) - xcrd = np.squeeze(radius * ang * sgn) - ycrd = np.squeeze(np.dot(uvwp, cx)) - return np.vstack((xcrd, ycrd)).T - - -def _warp_to_cylinder( - cart: np.ndarray, - tVec_d: np.ndarray, - radius: float, - caxis: np.ndarray, - paxis: np.ndarray, - tVec_s: np.ndarray = constants.zeros_3x1, - rmat_s: np.ndarray = constants.identity_3x3, - tVec_c: np.ndarray = constants.zeros_3x1, - normalize: bool = True, -) -> np.ndarray: - """ - routine to convert cartesian coordinates - in image frame to cylindrical coordinates - """ - tvec = np.atleast_2d(tVec_d).T - if tVec_s.ndim == 1: - tVec_s = np.atleast_2d(tVec_s).T - if tVec_c.ndim == 1: - tVec_c = np.atleast_2d(tVec_c).T - num = cart.shape[0] - naxis = np.cross(paxis, caxis) - x = cart[:, 0] - y = cart[:, 1] - th = x / radius - xp = radius * np.sin(th) - xn = radius * (1 - np.cos(th)) - - ccomp = np.tile(y, [3, 1]).T * np.tile(caxis, [num, 1]) - pcomp = np.tile(xp, [3, 1]).T * np.tile(paxis, [num, 1]) - ncomp = np.tile(xn, [3, 1]).T * np.tile(naxis, [num, 1]) - cart3d = pcomp + ccomp + ncomp - - tVec_c_l = np.dot(rmat_s, tVec_c) - - res = cart3d + np.tile(tvec - tVec_s - tVec_c_l, [1, num]).T - - if normalize: - return res / np.tile(np.linalg.norm(res, axis=1), [3, 1]).T - else: - return res - - -def _dvec_to_angs( - dvecs: np.ndarray, bvec: np.ndarray, evec: np.ndarray -) -> tuple[np.ndarray, np.ndarray]: - """ - convert diffraction vectors to (tth, eta) - angles in the 'eta' frame - dvecs is assumed to have (nx3) shape - """ - num = dvecs.shape[0] - exb = np.cross(evec, bvec) - exb = exb / np.linalg.norm(exb) - bxexb = np.cross(bvec, exb) - bxexb = bxexb / np.linalg.norm(bxexb) - - dp = np.dot(bvec, dvecs.T) - dp[np.abs(dp) > 1.0] = np.sign(dp[np.abs(dp) > 1.0]) - tth = np.arccos(dp) - - dvecs_p = dvecs - np.tile(dp, [3, 1]).T * np.tile(bvec, [num, 1]) - - dpx = np.dot(bxexb, dvecs_p.T) - dpy = np.dot(exb, dvecs_p.T) - eta = np.arctan2(dpy, dpx) - - return tth, eta - - -def simulateGVecs( - pd: PlaneData, - detector_params: np.ndarray, - grain_params: np.ndarray, - ome_range: list[tuple[float]] = [ - (-np.pi, np.pi), - ], - ome_period: tuple[float] = (-np.pi, np.pi), - eta_range: list[tuple[float]] = [ - (-np.pi, np.pi), - ], - panel_dims: list[tuple[float]] = [(-204.8, -204.8), (204.8, 204.8)], - pixel_pitch: tuple[float] = (0.2, 0.2), - distortion: DistortionABC = None, - beam_vector: np.ndarray = constants.beam_vec, -) -> tuple[np.ndarray, np.ndarray, np.ndarray, np.ndarray, np.ndarray]: - """ - returns valid_ids, valid_hkl, valid_ang, valid_xy, ang_ps - - panel_dims are [(xmin, ymin), (xmax, ymax)] in mm - - pixel_pitch is [row_size, column_size] in mm - - simulate the monochormatic scattering for a specified - - - space group - - wavelength - - orientation - - strain - - position - - detector parameters - - oscillation axis tilt (chi) - - subject to - - - omega (oscillation) ranges (list of (min, max) tuples) - - eta (azimuth) ranges - - pd................a hexrd.crystallography.PlaneData instance - detector_params...a (10,) ndarray containing the tilt angles (3), - translation (3), chi (1), and sample frame translation - (3) parameters - grain_params......a (12,) ndarray containing the exponential map (3), - translation (3), and inverse stretch tensor compnents - in Mandel-Voigt notation (6). 
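-
-# Editor's aside: _dvec_to_angs above for the default geometry (beam along
-# -z, eta measured from +x), applied to one hypothetical unit diffraction
-# vector tilted 0.2 rad off the beam.
-import numpy as np
-
-bvec = np.array([0.0, 0.0, -1.0])                  # beam direction
-evec = np.array([1.0, 0.0, 0.0])                   # eta reference
-dvec = np.array([np.sin(0.2), 0.0, -np.cos(0.2)])  # unit vector
-
-tth = np.arccos(np.dot(bvec, dvec))                # angle from the beam
-dperp = dvec - np.dot(bvec, dvec) * bvec           # component normal to beam
-exb = np.cross(evec, bvec) / np.linalg.norm(np.cross(evec, bvec))
-bxexb = np.cross(bvec, exb) / np.linalg.norm(np.cross(bvec, exb))
-eta = np.arctan2(np.dot(exb, dperp), np.dot(bxexb, dperp))
-print(tth, eta)                                    # 0.2 0.0
-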
- - * currently only one panel is supported, but this will likely change soon - """ - bMat = pd.latVecOps['B'] - wlen = pd.wavelength - full_hkls = _fetch_hkls_from_planedata(pd) - - # extract variables for convenience - rMat_d = xfcapi.make_detector_rmat(detector_params[:3]) - tVec_d = np.ascontiguousarray(detector_params[3:6]) - chi = detector_params[6] - tVec_s = np.ascontiguousarray(detector_params[7:10]) - rMat_c = xfcapi.make_rmat_of_expmap(grain_params[:3]) - tVec_c = np.ascontiguousarray(grain_params[3:6]) - vInv_s = np.ascontiguousarray(grain_params[6:12]) - beam_vector = np.ascontiguousarray(beam_vector) - - # first find valid G-vectors - angList = np.vstack( - xfcapi.oscill_angles_of_hkls( - full_hkls[:, 1:], - chi, - rMat_c, - bMat, - wlen, - v_inv=vInv_s, - beam_vec=beam_vector, - ) - ) - allAngs, allHKLs = _filter_hkls_eta_ome( - full_hkls, angList, eta_range, ome_range - ) - - if len(allAngs) == 0: - valid_ids = [] - valid_hkl = [] - valid_ang = [] - valid_xy = [] - ang_ps = [] - else: - # ??? preallocate for speed? - det_xy, rMat_ss, _ = _project_on_detector_plane( - allAngs, - rMat_d, - rMat_c, - chi, - tVec_d, - tVec_c, - tVec_s, - distortion, - beamVec=beam_vector, - ) - - on_panel = np.logical_and( - np.logical_and( - det_xy[:, 0] >= panel_dims[0][0], - det_xy[:, 0] <= panel_dims[1][0], - ), - np.logical_and( - det_xy[:, 1] >= panel_dims[0][1], - det_xy[:, 1] <= panel_dims[1][1], - ), - ) - - op_idx = np.where(on_panel)[0] - - valid_ang = allAngs[op_idx, :] - valid_ang[:, 2] = xfcapi.mapAngle(valid_ang[:, 2], ome_period) - valid_ids = allHKLs[op_idx, 0] - valid_hkl = allHKLs[op_idx, 1:] - valid_xy = det_xy[op_idx, :] - ang_ps = angularPixelSize( - valid_xy, - pixel_pitch, - rMat_d, - # Provide only the first sample rotation matrix to angularPixelSize - # Perhaps this is something that can be improved in the future? 
- rMat_ss[0], - tVec_d, - tVec_s, - tVec_c, - distortion=distortion, - beamVec=beam_vector, - ) - - return valid_ids, valid_hkl, valid_ang, valid_xy, ang_ps - - -@deprecated(new_func=simlp, removal_date='2025-01-01') +@deprecated(new_func=simlp, removal_date='2026-01-01') def simulateLauePattern( hkls, bMat, @@ -1193,335 +196,3 @@ def simulateLauePattern( dspacing[iG, keepers] = dsp[keepers] energy[iG, keepers] = processWavelength(wlen[keepers]) return xy_det, hkls_in, angles, dspacing, energy - - -@numba.njit(nogil=True, cache=True) -def _expand_pixels( - original: np.ndarray, w: float, h: float, result: np.ndarray -) -> np.ndarray: - hw = 0.5 * w - hh = 0.5 * h - for el in range(len(original)): - x, y = original[el, 0], original[el, 1] - result[el * 4 + 0, 0] = x - hw - result[el * 4 + 0, 1] = y - hh - result[el * 4 + 1, 0] = x + hw - result[el * 4 + 1, 1] = y - hh - result[el * 4 + 2, 0] = x + hw - result[el * 4 + 2, 1] = y + hh - result[el * 4 + 3, 0] = x - hw - result[el * 4 + 3, 1] = y + hh - - return result - - -@numba.njit(nogil=True, cache=True) -def _compute_max( - tth: np.ndarray, eta: np.ndarray, result: np.ndarray -) -> np.ndarray: - period = 2.0 * np.pi - hperiod = np.pi - for el in range(0, len(tth), 4): - max_tth = np.abs(tth[el + 0] - tth[el + 3]) - eta_diff = eta[el + 0] - eta[el + 3] - max_eta = np.abs(np.remainder(eta_diff + hperiod, period) - hperiod) - for i in range(3): - curr_tth = np.abs(tth[el + i] - tth[el + i + 1]) - eta_diff = eta[el + i] - eta[el + i + 1] - curr_eta = np.abs( - np.remainder(eta_diff + hperiod, period) - hperiod - ) - max_tth = np.maximum(curr_tth, max_tth) - max_eta = np.maximum(curr_eta, max_eta) - result[el // 4, 0] = max_tth - result[el // 4, 1] = max_eta - - return result - - -def angularPixelSize( - xy_det: np.ndarray, - xy_pixelPitch: tuple[float], - rMat_d: np.ndarray, - rMat_s: np.ndarray, - tVec_d: np.ndarray, - tVec_s: np.ndarray, - tVec_c: np.ndarray, - distortion: DistortionABC = None, - beamVec: np.ndarray = None, - etaVec: np.ndarray = None, -) -> np.ndarray: - """ - Calculate angular pixel sizes on a detector. - - * choices to beam vector and eta vector specs have been supressed - * assumes xy_det in UNWARPED configuration - """ - xy_det = np.atleast_2d(xy_det) - if distortion is not None: # !!! check this logic - xy_det = distortion.apply(xy_det) - if beamVec is None: - beamVec = constants.beam_vec - if etaVec is None: - etaVec = constants.eta_vec - - # Verify that rMat_s is only 2D (a single matrix). - # Arrays of matrices were previously provided, which `xy_to_gvec` - # cannot currently handle. 
- if rMat_s.ndim != 2: - msg = ( - f'rMat_s should have 2 dimensions, but has {rMat_s.ndim} ' - 'dimensions instead' - ) - raise ValueError(msg) - - xy_expanded = np.empty((len(xy_det) * 4, 2), dtype=xy_det.dtype) - xy_expanded = _expand_pixels( - xy_det, xy_pixelPitch[0], xy_pixelPitch[1], xy_expanded - ) - - rmat_b = xfcapi.make_beam_rmat(beamVec, etaVec) - - gvec_space, _ = xfcapi.xy_to_gvec( - xy_expanded, - rMat_d, - rMat_s, - tVec_d, - tVec_s, - tVec_c, - rmat_b=rmat_b, - ) - result = np.empty_like(xy_det) - return _compute_max(gvec_space[0], gvec_space[1], result) - - -def make_reflection_patches( - instr_cfg: dict[str, Any], - tth_eta: np.ndarray, - ang_pixel_size: np.ndarray, - omega: Optional[np.ndarray] = None, - tth_tol: float = 0.2, - eta_tol: float = 1.0, - rmat_c: np.ndarray = np.eye(3), - tvec_c: np.ndarray = np.zeros((3, 1)), - npdiv: int = 1, - quiet: bool = False, # TODO: Remove this parameter - it isn't used - compute_areas_func: np.ndarray = gutil.compute_areas, -) -> Generator[ - tuple[ - np.ndarray, np.ndarray, np.ndarray, np.ndarray, np.ndarray, np.ndarray - ], - None, - None, -]: - """Make angular patches on a detector. - - panel_dims are [(xmin, ymin), (xmax, ymax)] in mm - - pixel_pitch is [row_size, column_size] in mm - - FIXME: DISTORTION HANDING IS STILL A KLUDGE!!! - - patches are: - - delta tth - d ------------- ... ------------- - e | x | x | x | ... | x | x | x | - l ------------- ... ------------- - t . - a . - . - e ------------- ... ------------- - t | x | x | x | ... | x | x | x | - a ------------- ... ------------- - - outputs are: - (tth_vtx, eta_vtx), - (x_vtx, y_vtx), - connectivity, - subpixel_areas, - (x_center, y_center), - (i_row, j_col) - """ - - # detector quantities - rmat_d = xfcapi.make_rmat_of_expmap( - np.r_[instr_cfg['detector']['transform']['tilt']] - ) - tvec_d = np.r_[instr_cfg['detector']['transform']['translation']] - pixel_size = instr_cfg['detector']['pixels']['size'] - - frame_nrows = instr_cfg['detector']['pixels']['rows'] - frame_ncols = instr_cfg['detector']['pixels']['columns'] - - panel_dims = ( - -0.5 * np.r_[frame_ncols * pixel_size[1], frame_nrows * pixel_size[0]], - 0.5 * np.r_[frame_ncols * pixel_size[1], frame_nrows * pixel_size[0]], - ) - row_edges = ( - np.arange(frame_nrows + 1)[::-1] * pixel_size[1] + panel_dims[0][1] - ) - col_edges = np.arange(frame_ncols + 1) * pixel_size[0] + panel_dims[0][0] - - # handle distortion - distortion = None - if distortion_key in instr_cfg['detector']: - distortion_cfg = instr_cfg['detector'][distortion_key] - if distortion_cfg is not None: - try: - func_name = distortion_cfg['function_name'] - dparams = distortion_cfg['parameters'] - distortion = distortion_pkg.get_mapping(func_name, dparams) - except KeyError: - raise RuntimeError("problem with distortion specification") - - # sample frame - chi = instr_cfg['oscillation_stage']['chi'] - tvec_s = np.r_[instr_cfg['oscillation_stage']['translation']] - bvec = np.r_[instr_cfg['beam']['vector']] - - # data to loop - # ??? WOULD IT BE CHEAPER TO CARRY ZEROS OR USE CONDITIONAL? 
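-
-# Editor's aside: the corner-expansion idea behind _expand_pixels and
-# _compute_max above, reduced to one hypothetical pixel; a linear map stands
-# in for xy_to_gvec, and the real _compute_max additionally unwraps eta
-# across the +/-pi seam.
-import numpy as np
-
-center = np.array([1.25, -0.40])  # pixel center (x, y) on the detector, mm
-w = h = 0.2                       # pixel pitch, mm
-corners = center + 0.5 * np.array([[-w, -h], [w, -h], [w, h], [-w, h]])
-
-tth, eta = 0.01 * corners[:, 0], 0.02 * corners[:, 1]  # toy angular map
-print(np.ptp(tth), np.ptp(eta))   # corner-to-corner spreads: 0.002 0.004
-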
- if omega is None: - full_angs = np.hstack([tth_eta, np.zeros((len(tth_eta), 1))]) - else: - full_angs = np.hstack([tth_eta, omega.reshape(len(tth_eta), 1)]) - - for angs, pix in zip(full_angs, ang_pixel_size): - # calculate bin edges for patch based on local angular pixel size - # tth - ntths, tth_edges = gutil.make_tolerance_grid( - bin_width=np.degrees(pix[0]), - window_width=tth_tol, - num_subdivisions=npdiv, - ) - - # eta - netas, eta_edges = gutil.make_tolerance_grid( - bin_width=np.degrees(pix[1]), - window_width=eta_tol, - num_subdivisions=npdiv, - ) - - # FOR ANGULAR MESH - conn = gutil.cellConnectivity(netas, ntths, origin='ll') - - # meshgrid args are (cols, rows), a.k.a (fast, slow) - m_tth, m_eta = np.meshgrid(tth_edges, eta_edges) - npts_patch = m_tth.size - - # calculate the patch XY coords from the (tth, eta) angles - # !!! will CHEAT and ignore the small perturbation the different - # omega angle values causes and simply use the central value - gVec_angs_vtx = np.tile(angs, (npts_patch, 1)) + np.radians( - np.vstack( - [m_tth.flatten(), m_eta.flatten(), np.zeros(npts_patch)] - ).T - ) - - xy_eval_vtx, _, _ = _project_on_detector_plane( - gVec_angs_vtx, - rmat_d, - rmat_c, - chi, - tvec_d, - tvec_c, - tvec_s, - distortion, - beamVec=bvec, - ) - - areas = compute_areas_func(xy_eval_vtx, conn) - - # EVALUATION POINTS - # !!! for lack of a better option will use centroids - tth_eta_cen = gutil.cellCentroids( - np.atleast_2d(gVec_angs_vtx[:, :2]), conn - ) - - gVec_angs = np.hstack( - [tth_eta_cen, np.tile(angs[2], (len(tth_eta_cen), 1))] - ) - - xy_eval, _, _ = _project_on_detector_plane( - gVec_angs, - rmat_d, - rmat_c, - chi, - tvec_d, - tvec_c, - tvec_s, - distortion, - beamVec=bvec, - ) - - row_indices = gutil.cellIndices(row_edges, xy_eval[:, 1]) - col_indices = gutil.cellIndices(col_edges, xy_eval[:, 0]) - - yield ( - ( - ( - gVec_angs_vtx[:, 0].reshape(m_tth.shape), - gVec_angs_vtx[:, 1].reshape(m_tth.shape), - ), - ( - xy_eval_vtx[:, 0].reshape(m_tth.shape), - xy_eval_vtx[:, 1].reshape(m_tth.shape), - ), - conn, - areas.reshape(netas, ntths), - ( - xy_eval[:, 0].reshape(netas, ntths), - xy_eval[:, 1].reshape(netas, ntths), - ), - ( - row_indices.reshape(netas, ntths), - col_indices.reshape(netas, ntths), - ), - ) - ) - - -def extract_detector_transformation( - detector_params: Union[dict[str, Any], np.ndarray], -) -> tuple[np.ndarray, np.ndarray, float, np.ndarray]: - """ - Construct arrays from detector parameters. - - goes from 10 vector of detector parames OR instrument config dictionary - (from YAML spec) to affine transformation arrays - - Parameters - ---------- - detector_params : TYPE - DESCRIPTION. - - Returns - ------- - rMat_d : TYPE - DESCRIPTION. - tVec_d : TYPE - DESCRIPTION. - chi : TYPE - DESCRIPTION. - tVec_s : TYPE - DESCRIPTION. 
- - """ - # extract variables for convenience - if isinstance(detector_params, dict): - rMat_d = xfcapi.make_rmat_of_expmap( - np.array(detector_params['detector']['transform']['tilt']) - ) - tVec_d = np.r_[detector_params['detector']['transform']['translation']] - chi = detector_params['oscillation_stage']['chi'] - tVec_s = np.r_[detector_params['oscillation_stage']['translation']] - else: - assert len( - detector_params >= 10 - ), "list of detector parameters must have length >= 10" - rMat_d = xfcapi.make_rmat_of_expmap(detector_params[:3]) - tVec_d = np.ascontiguousarray(detector_params[3:6]) - chi = detector_params[6] - tVec_s = np.ascontiguousarray(detector_params[7:10]) - return rMat_d, tVec_d, chi, tVec_s diff --git a/hexrd/powder/instrument/__init__.py b/hexrd/powder/instrument/__init__.py index b5414013c..396a0d078 100644 --- a/hexrd/powder/instrument/__init__.py +++ b/hexrd/powder/instrument/__init__.py @@ -10,4 +10,4 @@ unwrap_dict_to_h5, unwrap_h5_to_dict, ) -from .detector import Detector +from hexrd.core.instrument.detector import Detector diff --git a/hexrd/powder/instrument/detector.py b/hexrd/powder/instrument/detector.py deleted file mode 100644 index 37cb8fcfe..000000000 --- a/hexrd/powder/instrument/detector.py +++ /dev/null @@ -1,2127 +0,0 @@ -from abc import abstractmethod -import copy -import os -from typing import Optional - -from hexrd.core.instrument.constants import ( - COATING_DEFAULT, - FILTER_DEFAULTS, - PHOSPHOR_DEFAULT, -) -from hexrd.core.instrument.physics_package import AbstractPhysicsPackage -import numpy as np -import numba - -from hexrd.core import constants as ct -from hexrd.core import distortion as distortion_pkg -from hexrd.core import matrixutil as mutil - -from hexrd.hedm import xrdutil -from hexrd.core.rotations import mapAngle - -from hexrd.core.material import crystallography -from hexrd.core.material.crystallography import PlaneData - -from hexrd.core.transforms.xfcapi import ( - xy_to_gvec, - gvec_to_xy, - make_beam_rmat, - make_rmat_of_expmap, - oscill_angles_of_hkls, - angles_to_dvec, -) - -from hexrd.core.utils.decorators import memoize -from hexrd.core.gridutil import cellIndices -from hexrd.core.instrument import detector_coatings -from hexrd.core.material.utils import ( - calculate_linear_absorption_length, - calculate_incoherent_scattering, -) - -distortion_registry = distortion_pkg.Registry() - -max_workers_DFLT = max(1, os.cpu_count() - 1) - -beam_energy_DFLT = 65.351 - -# Memoize these, so each detector can avoid re-computing if nothing -# has changed. -_lorentz_factor = memoize(crystallography.lorentz_factor) -_polarization_factor = memoize(crystallography.polarization_factor) - - -class Detector: - """ - Base class for 2D detectors with functions and properties - common to planar and cylindrical detectors. This class - will be inherited by both those classes. - """ - - __pixelPitchUnit = 'mm' - - # Abstract methods that must be redefined in derived classes - @property - @abstractmethod - def detector_type(self): - raise NotImplementedError - - @abstractmethod - def cart_to_angles( - self, - xy_data, - rmat_s=None, - tvec_s=None, - tvec_c=None, - apply_distortion=False, - ): - """ - Transform cartesian coordinates to angular. - - Parameters - ---------- - xy_data : TYPE - The (n, 2) array of n (x, y) coordinates to be transformed in - either the raw or ideal cartesian plane (see `apply_distortion` - kwarg below). - rmat_s : array_like, optional - The (3, 3) COB matrix for the sample frame. The default is None. 
- tvec_s : array_like, optional - The (3, ) translation vector for the sample frame. - The default is None. - tvec_c : array_like, optional - The (3, ) translation vector for the crystal frame. - The default is None. - apply_distortion : bool, optional - If True, apply distortion to the input cartesian coordinates. - The default is False. - - Returns - ------- - tth_eta : TYPE - DESCRIPTION. - g_vec : TYPE - DESCRIPTION. - - """ - raise NotImplementedError - - @abstractmethod - def angles_to_cart( - self, - tth_eta, - rmat_s=None, - tvec_s=None, - rmat_c=None, - tvec_c=None, - apply_distortion=False, - ): - """ - Transform angular coordinates to cartesian. - - Parameters - ---------- - tth_eta : array_like - The (n, 2) array of n (tth, eta) coordinates to be transformed. - rmat_s : array_like, optional - The (3, 3) COB matrix for the sample frame. The default is None. - tvec_s : array_like, optional - The (3, ) translation vector for the sample frame. - The default is None. - rmat_c : array_like, optional - (3, 3) COB matrix for the crystal frame. - The default is None. - tvec_c : array_like, optional - The (3, ) translation vector for the crystal frame. - The default is None. - apply_distortion : bool, optional - If True, apply distortion to take cartesian coordinates to the - "warped" configuration. The default is False. - - Returns - ------- - xy_det : array_like - The (n, 2) array of the n input coordinates in the cartesian - detector frame. - - """ - raise NotImplementedError - - @abstractmethod - def cart_to_dvecs(self, xy_data): - """Convert cartesian coordinates to dvectors""" - raise NotImplementedError - - @abstractmethod - def pixel_angles(self, origin=ct.zeros_3): - raise NotImplementedError - - @abstractmethod - def pixel_tth_gradient(self, origin=ct.zeros_3): - raise NotImplementedError - - @abstractmethod - def pixel_eta_gradient(self, origin=ct.zeros_3): - raise NotImplementedError - - @abstractmethod - def calc_filter_coating_transmission(self, energy): - pass - - @property - @abstractmethod - def beam_position(self): - """ - returns the coordinates of the beam in the cartesian detector - frame {Xd, Yd, Zd}. NaNs if no intersection. - """ - raise NotImplementedError - - @property - def extra_config_kwargs(self): - return {} - - # End of abstract methods - - def __init__( - self, - rows=2048, - cols=2048, - pixel_size=(0.2, 0.2), - tvec=np.r_[0.0, 0.0, -1000.0], - tilt=ct.zeros_3, - name='default', - bvec=ct.beam_vec, - xrs_dist=None, - evec=ct.eta_vec, - saturation_level=None, - panel_buffer=None, - tth_distortion=None, - roi=None, - group=None, - distortion=None, - max_workers=max_workers_DFLT, - detector_filter: Optional[detector_coatings.Filter] = None, - detector_coating: Optional[detector_coatings.Coating] = None, - phosphor: Optional[detector_coatings.Phosphor] = None, - ): - """ - Instantiate a detector object. - - Parameters - ---------- - rows : TYPE, optional - DESCRIPTION. The default is 2048. - cols : TYPE, optional - DESCRIPTION. The default is 2048. - pixel_size : TYPE, optional - DESCRIPTION. The default is (0.2, 0.2). - tvec : TYPE, optional - DESCRIPTION. The default is np.r_[0., 0., -1000.]. - tilt : TYPE, optional - DESCRIPTION. The default is ct.zeros_3. - name : TYPE, optional - DESCRIPTION. The default is 'default'. - bvec : TYPE, optional - DESCRIPTION. The default is ct.beam_vec. - evec : TYPE, optional - DESCRIPTION. The default is ct.eta_vec. - saturation_level : TYPE, optional - DESCRIPTION. The default is None.
- panel_buffer : TYPE, optional - If a scalar or len(2) array_like, the interpretation is a border - in mm. If an array with shape (nrows, ncols), interpretation is a - boolean with True marking valid pixels. The default is None. - roi : TYPE, optional - DESCRIPTION. The default is None. - group : TYPE, optional - DESCRIPTION. The default is None. - distortion : TYPE, optional - DESCRIPTION. The default is None. - detector_filter : detector_coatings.Filter, optional - filter specifications including material type, - density and thickness. Used for absorption correction - calculations. - detector_coating : detector_coatings.Coating, optional - coating specifications including material type, - density and thickness. Used for absorption correction - calculations. - phosphor : detector_coatings.Phosphor, optional - phosphor specifications including material type, - density and thickness. Used for absorption correction - calculations. - - Returns - ------- - None. - - """ - self._name = name - - self._rows = rows - self._cols = cols - - self._pixel_size_row = pixel_size[0] - self._pixel_size_col = pixel_size[1] - - self._saturation_level = saturation_level - - self._panel_buffer = panel_buffer - - self._tth_distortion = tth_distortion - - if roi is None: - self._roi = roi - else: - assert len(roi) == 2, "roi is set via (start_row, start_col)" - self._roi = ( - (roi[0], roi[0] + self._rows), - (roi[1], roi[1] + self._cols), - ) - - self._tvec = np.array(tvec).flatten() - self._tilt = np.array(tilt).flatten() - - self._bvec = np.array(bvec).flatten() - self._xrs_dist = xrs_dist - - self._evec = np.array(evec).flatten() - - self._distortion = distortion - - self.max_workers = max_workers - - self.group = group - - if detector_filter is None: - detector_filter = detector_coatings.Filter( - **FILTER_DEFAULTS.TARDIS - ) - self.filter = detector_filter - - if detector_coating is None: - detector_coating = detector_coatings.Coating(**COATING_DEFAULT) - self.coating = detector_coating - - if phosphor is None: - phosphor = detector_coatings.Phosphor(**PHOSPHOR_DEFAULT) - self.phosphor = phosphor - - # detector ID - @property - def name(self): - return self._name - - @name.setter - def name(self, s): - assert isinstance(s, str), "requires string input" - self._name = s - - @property - def lmfit_name(self): - # lmfit requires underscores instead of dashes - return self.name.replace('-', '_') - - # properties for physical size of rectangular detector - @property - def rows(self): - return self._rows - - @rows.setter - def rows(self, x): - assert isinstance(x, int) - self._rows = x - - @property - def cols(self): - return self._cols - - @cols.setter - def cols(self, x): - assert isinstance(x, int) - self._cols = x - - @property - def pixel_size_row(self): - return self._pixel_size_row - - @pixel_size_row.setter - def pixel_size_row(self, x): - self._pixel_size_row = float(x) - - @property - def pixel_size_col(self): - return self._pixel_size_col - - @pixel_size_col.setter - def pixel_size_col(self, x): - self._pixel_size_col = float(x) - - @property - def pixel_area(self): - return self.pixel_size_row * self.pixel_size_col - - @property - def saturation_level(self): - return self._saturation_level - - @saturation_level.setter - def saturation_level(self, x): - if x is not None: - assert np.isreal(x) - self._saturation_level = x - - @property - def panel_buffer(self): - return self._panel_buffer - - @panel_buffer.setter - def panel_buffer(self, x): - """if not None, a buffer in mm (x, y)""" - if x is not 
None: - assert len(x) == 2 or x.ndim == 2 - self._panel_buffer = x - - @property - def tth_distortion(self): - return self._tth_distortion - - @tth_distortion.setter - def tth_distortion(self, x): - """if not None, a (rows, cols) array matching the panel shape""" - if x is not None: - assert x.ndim == 2 and x.shape == self.shape - self._tth_distortion = x - - @property - def roi(self): - return self._roi - - @roi.setter - def roi(self, vertex_array): - """ - !!! vertex array must be (r0, c0) - """ - if vertex_array is not None: - assert ( - len(vertex_array) == 2 - ), "roi is set via (start_row, start_col)" - self._roi = ( - (vertex_array[0], vertex_array[0] + self.rows), - (vertex_array[1], vertex_array[1] + self.cols), - ) - - @property - def row_dim(self): - return self.rows * self.pixel_size_row - - @property - def col_dim(self): - return self.cols * self.pixel_size_col - - @property - def row_pixel_vec(self): - return self.pixel_size_row * ( - 0.5 * (self.rows - 1) - np.arange(self.rows) - ) - - @property - def row_edge_vec(self): - return _row_edge_vec(self.rows, self.pixel_size_row) - - @property - def col_pixel_vec(self): - return self.pixel_size_col * ( - np.arange(self.cols) - 0.5 * (self.cols - 1) - ) - - @property - def col_edge_vec(self): - return _col_edge_vec(self.cols, self.pixel_size_col) - - @property - def corner_ul(self): - return np.r_[-0.5 * self.col_dim, 0.5 * self.row_dim] - - @property - def corner_ll(self): - return np.r_[-0.5 * self.col_dim, -0.5 * self.row_dim] - - @property - def corner_lr(self): - return np.r_[0.5 * self.col_dim, -0.5 * self.row_dim] - - @property - def corner_ur(self): - return np.r_[0.5 * self.col_dim, 0.5 * self.row_dim] - - @property - def shape(self): - return (self.rows, self.cols) - - @property - def tvec(self): - return self._tvec - - @tvec.setter - def tvec(self, x): - x = np.array(x).flatten() - assert len(x) == 3, 'input must have length = 3' - self._tvec = x - - @property - def tilt(self): - return self._tilt - - @tilt.setter - def tilt(self, x): - assert len(x) == 3, 'input must have length = 3' - self._tilt = np.array(x).squeeze() - - @property - def bvec(self): - return self._bvec - - @bvec.setter - def bvec(self, x): - x = np.array(x).flatten() - assert ( - len(x) == 3 and sum(x * x) > 1 - ct.sqrt_epsf - ), 'input must have length = 3 and have unit magnitude' - self._bvec = x - - @property - def xrs_dist(self): - return self._xrs_dist - - @xrs_dist.setter - def xrs_dist(self, x): - assert x is None or np.isscalar( - x - ), f"'source_distance' must be None or scalar; you input '{x}'" - self._xrs_dist = x - - @property - def evec(self): - return self._evec - - @evec.setter - def evec(self, x): - x = np.array(x).flatten() - assert ( - len(x) == 3 and sum(x * x) > 1 - ct.sqrt_epsf - ), 'input must have length = 3 and have unit magnitude' - self._evec = x - - @property - def distortion(self): - return self._distortion - - @distortion.setter - def distortion(self, x): - if x is not None: - registry = distortion_registry.distortion_registry - check_arg = np.zeros(len(registry), dtype=bool) - for i, dcls in enumerate(registry.values()): - check_arg[i] = isinstance(x, dcls) - assert np.any(check_arg), 'input distortion is not in registry!' - self._distortion = x - - @property - def rmat(self): - return make_rmat_of_expmap(self.tilt) - - @property - def normal(self): - return self.rmat[:, 2] - - # ...memoize???
- @property - def pixel_coords(self): - pix_i, pix_j = np.meshgrid( - self.row_pixel_vec, self.col_pixel_vec, indexing='ij' - ) - return pix_i, pix_j - - # ========================================================================= - # METHODS - # ========================================================================= - - def pixel_Q( - self, energy: np.floating, origin: np.ndarray = ct.zeros_3 - ) -> np.ndarray: - '''get the equivalent momentum transfer - for the angles. - - Parameters - ---------- - energy: float - incident photon energy in keV - origin: np.ndarray - origin of diffraction volume - - Returns - ------- - np.ndarray - pixel wise Q in A^-1 - - ''' - lam = ct.keVToAngstrom(energy) - tth, _ = self.pixel_angles(origin=origin) - return 4.0 * np.pi * np.sin(tth * 0.5) / lam - - def pixel_compton_energy_loss( - self, - energy: np.floating, - origin: np.ndarray = ct.zeros_3, - ) -> np.ndarray: - '''inelastic compton scattering leads - to energy loss of the incident photons. - compute the final energy of the photons - for each pixel. - - Parameters - ---------- - energy: float - incident photon energy in keV - origin: np.ndarray - origin of diffraction volume - - Returns - ------- - np.ndarray - pixel wise energy of inelastically - scattered photons in keV - ''' - energy = np.asarray(energy) - tth, _ = self.pixel_angles() - ang_fact = 1 - np.cos(tth) - beta = energy / ct.cRestmasskeV - return energy / (1 + beta * ang_fact) - - def pixel_compton_attenuation_length( - self, - energy: np.floating, - density: np.floating, - formula: str, - origin: np.ndarray = ct.zeros_3, - ) -> np.ndarray: - '''each pixel intercepts inelastically - scattered photons of different energy. - the attenuation length and the transmission - for these photons are different. this function - calculates the attenuation length for each pixel - on the detector. - - Parameters - ---------- - energy: float - incident photon energy in keV - density: float - density of material in g/cc - formula: str - formula of the material scattering - origin: np.ndarray - origin of diffraction volume - - Returns - ------- - np.ndarray - pixel wise attenuation length of compton - scattered photons - ''' - pixel_energy = self.pixel_compton_energy_loss(energy) - - pixel_attenuation_length = calculate_linear_absorption_length( - density, - formula, - pixel_energy.flatten(), - ) - return pixel_attenuation_length.reshape(self.shape) - - def compute_compton_scattering_intensity( - self, - energy: np.floating, - rMat_s: np.array, - physics_package: AbstractPhysicsPackage, - origin: np.array = ct.zeros_3, - ) -> tuple[np.ndarray, np.ndarray, np.ndarray]: - '''compute the theoretical compton scattering - signal on the detector.
this value is corrected - for the transmission of compton scattered photons - and normalized before being subtracted from the - raw intensity - - Parameters - ----------- - energy: float - energy of incident photon - rMat_s: np.ndarray - rotation matrix of sample orientation - physics_package: AbstractPhysicsPackage - physics package information - Returns - ------- - compton_intensity: np.ndarray - transmission corrected compton scattering - intensity - ''' - - q = self.pixel_Q(energy) - inc_s = calculate_incoherent_scattering( - physics_package.sample_material, q.flatten() - ).reshape(self.shape) - - inc_w = calculate_incoherent_scattering( - physics_package.window_material, q.flatten() - ).reshape(self.shape) - - t_s = self.calc_compton_physics_package_transmission( - energy, rMat_s, physics_package - ) - - t_w = self.calc_compton_window_transmission( - energy, rMat_s, physics_package - ) - - return inc_s * t_s + inc_w * t_w, t_s, t_w - - def polarization_factor(self, f_hor, f_vert, unpolarized=False): - """ - Calculate the polarization factor for every pixel. - - Parameters - ---------- - f_hor : float - the fraction of horizontal polarization. for XFELs - this is close to 1. - f_vert : TYPE - the fraction of vertical polarization, which is ~0 for XFELs. - - Raises - ------ - RuntimeError - DESCRIPTION. - - Returns - ------- - TYPE - DESCRIPTION. - - """ - s = f_hor + f_vert - if np.abs(s - 1) > ct.sqrt_epsf: - msg = ( - "sum of fraction of " - "horizontal and vertical polarizations " - "must be equal to 1." - ) - raise RuntimeError(msg) - - if f_hor < 0 or f_vert < 0: - msg = ( - "fraction of polarization in horizontal " - "or vertical directions can't be negative." - ) - raise RuntimeError(msg) - - tth, eta = self.pixel_angles() - kwargs = { - 'tth': tth, - 'eta': eta, - 'f_hor': f_hor, - 'f_vert': f_vert, - 'unpolarized': unpolarized, - } - - return _polarization_factor(**kwargs) - - def lorentz_factor(self): - """ - calculate the lorentz factor for every pixel - - Parameters - ---------- - None - - Raises - ------ - None - - Returns - ------- - numpy.ndarray - returns an array the same size as the detector panel - with each element containing the lorentz factor of the - corresponding pixel - """ - tth, eta = self.pixel_angles() - return _lorentz_factor(tth) - - def config_dict( - self, - chi=0, - tvec=ct.zeros_3, - beam_energy=beam_energy_DFLT, - beam_vector=ct.beam_vec, - sat_level=None, - panel_buffer=None, - style='yaml', - ): - """ - Return a dictionary of detector parameters. - - Optional instrument level parameters. This is a convenience function - to work with the APIs in several functions in xrdutil. - - Parameters - ---------- - chi : float, optional - DESCRIPTION. The default is 0. - tvec : array_like (3,), optional - DESCRIPTION. The default is ct.zeros_3. - beam_energy : float, optional - DESCRIPTION. The default is beam_energy_DFLT. - beam_vector : array_like (3,), optional - DESCRIPTION. The default is ct.beam_vec. - sat_level : scalar, optional - DESCRIPTION. The default is None. - panel_buffer : scalar, array_like (2,), optional - DESCRIPTION. The default is None. - - Returns - ------- - config_dict : dict - DESCRIPTION.
- - """ - assert style.lower() in ['yaml', 'hdf5'], ( - "style must be either 'yaml', or 'hdf5'; you gave '%s'" % style - ) - - config_dict = {} - - # ===================================================================== - # DETECTOR PARAMETERS - # ===================================================================== - # transform and pixels - # - # assign local vars; listify if necessary - tilt = self.tilt - translation = self.tvec - roi = ( - None - if self.roi is None - else np.array([self.roi[0][0], self.roi[1][0]]).flatten() - ) - if style.lower() == 'yaml': - tilt = tilt.tolist() - translation = translation.tolist() - tvec = tvec.tolist() - roi = None if roi is None else roi.tolist() - - det_dict = dict( - detector_type=self.detector_type, - transform=dict( - tilt=tilt, - translation=translation, - ), - pixels=dict( - rows=int(self.rows), - columns=int(self.cols), - size=[float(self.pixel_size_row), float(self.pixel_size_col)], - ), - ) - - if roi is not None: - # Only add roi if it is not None - det_dict['pixels']['roi'] = roi - - if self.group is not None: - # Only add group if it is not None - det_dict['group'] = self.group - - # distortion - if self.distortion is not None: - dparams = self.distortion.params - if style.lower() == 'yaml': - dparams = dparams.tolist() - dist_d = dict( - function_name=self.distortion.maptype, parameters=dparams - ) - det_dict['distortion'] = dist_d - - # saturation level - if sat_level is None: - sat_level = self.saturation_level - det_dict['saturation_level'] = float(sat_level) - - # panel buffer - if panel_buffer is None: - # could be none, a 2-element list, or a 2-d array (rows, cols) - panel_buffer = copy.deepcopy(self.panel_buffer) - # !!! now we have to do some style-dependent munging of panel_buffer - if isinstance(panel_buffer, np.ndarray): - if panel_buffer.ndim == 1: - assert len(panel_buffer) == 2, "length of 1-d buffer must be 2" - # if here is a 2-element array - if style.lower() == 'yaml': - panel_buffer = panel_buffer.tolist() - elif panel_buffer.ndim == 2: - if style.lower() == 'yaml': - # !!! can't practically write array-like buffers to YAML - # so forced to clobber - print("clobbering panel buffer array in yaml-ready output") - panel_buffer = [0.0, 0.0] - else: - raise RuntimeError( - "panel buffer ndim must be 1 or 2; you specified %d" - % panel_buffer.ndmin - ) - elif panel_buffer is None: - # still None on self - # !!! this gets handled by unwrap_dict_to_h5 now - - # if style.lower() == 'hdf5': - # # !!! can't write None to hdf5; substitute with zeros - # panel_buffer = np.r_[0., 0.] - pass - det_dict['buffer'] = panel_buffer - - det_dict.update(self.extra_config_kwargs) - - # ===================================================================== - # SAMPLE STAGE PARAMETERS - # ===================================================================== - stage_dict = dict(chi=chi, translation=tvec) - - # ===================================================================== - # BEAM PARAMETERS - # ===================================================================== - # !!! 
make_reflection_patches is still using the vector - # azim, pola = calc_angles_from_beam_vec(beam_vector) - # beam_dict = dict( - # energy=beam_energy, - # vector=dict( - # azimuth=azim, - # polar_angle=pola - # ) - # ) - beam_dict = dict(energy=beam_energy, vector=beam_vector) - - config_dict['detector'] = det_dict - config_dict['oscillation_stage'] = stage_dict - config_dict['beam'] = beam_dict - - return config_dict - - def cartToPixel(self, xy_det, pixels=False, apply_distortion=False): - """ - Converts cartesian coordinates to pixel coordinates - - Parameters - ---------- - xy_det : array_like - The (n, 2) vstacked array of (x, y) pairs in the reference - cartesian frame (possibly subject to distortion). - pixels : bool, optional - If True, return discrete pixel indices; otherwise fractional pixel - coordinates are returned. The default is False. - apply_distortion : bool, optional - If True, apply self.distortion to the input (if applicable). - The default is False. - - Returns - ------- - ij_det : array_like - The (n, 2) array of vstacked (i, j) coordinates in the pixel - reference frame where i is the (slow) row dimension and j is the - (fast) column dimension. - - """ - xy_det = np.atleast_2d(xy_det) - if apply_distortion and self.distortion is not None: - xy_det = self.distortion.apply(xy_det) - - npts = len(xy_det) - - tmp_ji = xy_det - np.tile(self.corner_ul, (npts, 1)) - i_pix = -tmp_ji[:, 1] / self.pixel_size_row - 0.5 - j_pix = tmp_ji[:, 0] / self.pixel_size_col - 0.5 - - ij_det = np.vstack([i_pix, j_pix]).T - if pixels: - # Hide any runtime warnings in this conversion. Their output values - # will certainly be off the detector, which is fine. - with np.errstate(invalid='ignore'): - ij_det = np.array(np.round(ij_det), dtype=int) - - return ij_det - - def pixelToCart(self, ij_det): - """ - Convert a vstacked array or list of [i,j] pixel indices - (or UL corner-based points) to (x,y) in the - cartesian frame {Xd, Yd, Zd} - """ - ij_det = np.atleast_2d(ij_det) - - x = (ij_det[:, 1] + 0.5) * self.pixel_size_col + self.corner_ll[0] - y = ( - self.rows - ij_det[:, 0] - 0.5 - ) * self.pixel_size_row + self.corner_ll[1] - return np.vstack([x, y]).T - - def angularPixelSize(self, xy, rMat_s=None, tVec_s=None, tVec_c=None): - """ - Notes - ----- - !!! assumes xy are in raw (distorted) frame, if applicable - """ - # munge kwargs - if rMat_s is None: - rMat_s = ct.identity_3x3 - if tVec_s is None: - tVec_s = ct.zeros_3x1 - if tVec_c is None: - tVec_c = ct.zeros_3x1 - - # FIXME: perhaps not necessary, but safe... - xy = np.atleast_2d(xy) - - ''' - # --------------------------------------------------------------------- - # TODO: needs testing and memoized gradient arrays!
- # --------------------------------------------------------------------- - # need origin arg - origin = np.dot(rMat_s, tVec_c).flatten() + tVec_s.flatten() - - # get pixel indices - i_crds = cellIndices(self.row_edge_vec, xy[:, 1]) - j_crds = cellIndices(self.col_edge_vec, xy[:, 0]) - - ptth_grad = self.pixel_tth_gradient(origin=origin)[i_crds, j_crds] - peta_grad = self.pixel_eta_gradient(origin=origin)[i_crds, j_crds] - - return np.vstack([ptth_grad, peta_grad]).T - ''' - # call xrdutil function - ang_ps = xrdutil.angularPixelSize( - xy, - (self.pixel_size_row, self.pixel_size_col), - self.rmat, - rMat_s, - self.tvec, - tVec_s, - tVec_c, - distortion=self.distortion, - beamVec=self.bvec, - etaVec=self.evec, - ) - return ang_ps - - def clip_to_panel(self, xy, buffer_edges=True): - """ - if self.roi is not None, uses it by default - - TODO: check if need shape kwarg - TODO: optimize ROI search better than list comprehension below - TODO: panel_buffer can be a 2-d boolean mask, but needs testing - - """ - xy = np.atleast_2d(xy) - - ''' - # !!! THIS LOGIC IS OBSOLETE - if self.roi is not None: - ij_crds = self.cartToPixel(xy, pixels=True) - ii, jj = polygon(self.roi[:, 0], self.roi[:, 1], - shape=(self.rows, self.cols)) - on_panel_rows = [i in ii for i in ij_crds[:, 0]] - on_panel_cols = [j in jj for j in ij_crds[:, 1]] - on_panel = np.logical_and(on_panel_rows, on_panel_cols) - else: - ''' - xlim = 0.5 * self.col_dim - ylim = 0.5 * self.row_dim - if buffer_edges and self.panel_buffer is not None: - if self.panel_buffer.ndim == 2: - pix = self.cartToPixel(xy, pixels=True) - - roff = np.logical_or(pix[:, 0] < 0, pix[:, 0] >= self.rows) - coff = np.logical_or(pix[:, 1] < 0, pix[:, 1] >= self.cols) - - idx = np.logical_or(roff, coff) - - on_panel = np.full(pix.shape[0], False) - valid_pix = pix[~idx, :] - on_panel[~idx] = self.panel_buffer[ - valid_pix[:, 0], valid_pix[:, 1] - ] - else: - xlim -= self.panel_buffer[0] - ylim -= self.panel_buffer[1] - on_panel_x = np.logical_and( - xy[:, 0] >= -xlim, xy[:, 0] <= xlim - ) - on_panel_y = np.logical_and( - xy[:, 1] >= -ylim, xy[:, 1] <= ylim - ) - on_panel = np.logical_and(on_panel_x, on_panel_y) - elif not buffer_edges or self.panel_buffer is None: - on_panel_x = np.logical_and(xy[:, 0] >= -xlim, xy[:, 0] <= xlim) - on_panel_y = np.logical_and(xy[:, 1] >= -ylim, xy[:, 1] <= ylim) - on_panel = np.logical_and(on_panel_x, on_panel_y) - return xy[on_panel, :], on_panel - - def interpolate_nearest(self, xy, img, pad_with_nans=True): - """ - TODO: revisit normalization in here? - - """ - is_2d = img.ndim == 2 - right_shape = img.shape[0] == self.rows and img.shape[1] == self.cols - assert ( - is_2d and right_shape - ), "input image must be 2-d with shape (%d, %d)" % ( - self.rows, - self.cols, - ) - - # initialize output with nans - if pad_with_nans: - int_xy = np.nan * np.ones(len(xy)) - else: - int_xy = np.zeros(len(xy)) - - # clip away points too close to or off the edges of the detector - xy_clip, on_panel = self.clip_to_panel(xy, buffer_edges=True) - - # get pixel indices of clipped points - i_src = cellIndices(self.row_pixel_vec, xy_clip[:, 1]) - j_src = cellIndices(self.col_pixel_vec, xy_clip[:, 0]) - - # next interpolate across cols - int_vals = img[i_src, j_src] - int_xy[on_panel] = int_vals - return int_xy - - def interpolate_bilinear( - self, - xy, - img, - pad_with_nans=True, - clip_to_panel=True, - on_panel: Optional[np.ndarray] = None, - ): - """ - Interpolate an image array at the specified cartesian points. 
- - Parameters - ---------- - xy : array_like, (n, 2) - Array of cartesian coordinates in the image plane at which - to evaluate intensity. - img : array_like - 2-dimensional image array. - pad_with_nans : bool, optional - Toggle for assigning NaN to points that fall off the detector. - The default is True. - on_panel : np.ndarray, optional - If you want to skip clip_to_panel() for performance reasons, - just provide an array of which pixels are on the panel. - - Returns - ------- - int_xy : array_like, (n,) - The array of interpolated intensities at each of the n input - coordinates. - - Notes - ----- - TODO: revisit normalization in here? - """ - - is_2d = img.ndim == 2 - right_shape = img.shape[0] == self.rows and img.shape[1] == self.cols - assert ( - is_2d and right_shape - ), "input image must be 2-d with shape (%d, %d)" % ( - self.rows, - self.cols, - ) - - # initialize output with nans - if pad_with_nans: - int_xy = np.nan * np.ones(len(xy)) - else: - int_xy = np.zeros(len(xy)) - - if on_panel is None: - # clip away points too close to or off the edges of the detector - xy_clip, on_panel = self.clip_to_panel(xy, buffer_edges=True) - else: - xy_clip = xy[on_panel] - - # grab fractional pixel indices of clipped points - ij_frac = self.cartToPixel(xy_clip) - - # get floors/ceils from array of pixel _centers_ - # and fix indices running off the pixel centers - # !!! notice we already clipped points to the panel! - i_floor = cellIndices(self.row_pixel_vec, xy_clip[:, 1]) - i_floor_img = _fix_indices(i_floor, 0, self.rows - 1) - - j_floor = cellIndices(self.col_pixel_vec, xy_clip[:, 0]) - j_floor_img = _fix_indices(j_floor, 0, self.cols - 1) - - # ceilings from floors - i_ceil = i_floor + 1 - i_ceil_img = _fix_indices(i_ceil, 0, self.rows - 1) - - j_ceil = j_floor + 1 - j_ceil_img = _fix_indices(j_ceil, 0, self.cols - 1) - - # first interpolate at top/bottom rows - row_floor_int = (j_ceil - ij_frac[:, 1]) * img[ - i_floor_img, j_floor_img - ] + (ij_frac[:, 1] - j_floor) * img[i_floor_img, j_ceil_img] - row_ceil_int = (j_ceil - ij_frac[:, 1]) * img[ - i_ceil_img, j_floor_img - ] + (ij_frac[:, 1] - j_floor) * img[i_ceil_img, j_ceil_img] - - # next interpolate across cols - int_vals = (i_ceil - ij_frac[:, 0]) * row_floor_int + ( - ij_frac[:, 0] - i_floor - ) * row_ceil_int - int_xy[on_panel] = int_vals - return int_xy - - def make_powder_rings( - self, - pd, - merge_hkls=False, - delta_tth=None, - delta_eta=10.0, - eta_period=None, - eta_list=None, - rmat_s=ct.identity_3x3, - tvec_s=ct.zeros_3, - tvec_c=ct.zeros_3, - full_output=False, - tth_distortion=None, - ): - """ - Generate points on Debye-Scherrer rings over the detector. - - !!! it is assuming that rmat_s is built from (chi, ome) as is the case - for HEDM! - - Parameters - ---------- - pd : TYPE - DESCRIPTION. - merge_hkls : TYPE, optional - DESCRIPTION. The default is False. - delta_tth : TYPE, optional - DESCRIPTION. The default is None. - delta_eta : TYPE, optional - DESCRIPTION. The default is 10.0. - eta_period : TYPE, optional - DESCRIPTION. The default is None. - eta_list : TYPE, optional - DESCRIPTION. The default is None. - rmat_s : TYPE, optional - DESCRIPTION. The default is ct.identity_3x3. - tvec_s : TYPE, optional - DESCRIPTION. The default is ct.zeros_3. - tvec_c : TYPE, optional - DESCRIPTION. The default is ct.zeros_3. - full_output : TYPE, optional - DESCRIPTION. The default is False. - tth_distortion : special class, optional - Special distortion class. The default is None.
- - Raises - ------ - RuntimeError - DESCRIPTION. - - Returns - ------- - TYPE - DESCRIPTION. - - """ - if tth_distortion is not None: - tnorms = mutil.rowNorm(np.vstack([tvec_s, tvec_c])) - assert ( - np.all(tnorms < ct.sqrt_epsf) - ), "If using a distortion function, translations must be zero" - - # in case you want to give it tth angles directly - if isinstance(pd, PlaneData): - pd = PlaneData(None, pd) - if delta_tth is not None: - pd.tThWidth = np.radians(delta_tth) - else: - delta_tth = np.degrees(pd.tThWidth) - - # !!! conversions, meh... - del_eta = np.radians(delta_eta) - - # do merging if asked - if merge_hkls: - _, tth_ranges = pd.getMergedRanges(cullDupl=True) - tth = np.average(tth_ranges, axis=1) - else: - tth_ranges = pd.getTThRanges() - tth = pd.getTTh() - tth_pm = tth_ranges - np.tile(tth, (2, 1)).T - sector_vertices = np.vstack( - [ - [ - i[0], - -del_eta, - i[0], - del_eta, - i[1], - del_eta, - i[1], - -del_eta, - 0.0, - 0.0, - ] - for i in tth_pm - ] - ) - else: - # Okay, we have an array-like tth specification - tth = np.array(pd).flatten() - if delta_tth is None: - raise RuntimeError( - "If supplying a 2theta list as first arg, " - + "must supply a delta_tth" - ) - tth_pm = 0.5 * delta_tth * np.r_[-1.0, 1.0] - tth_ranges = np.radians([i + tth_pm for i in tth]) # !!! units - sector_vertices = np.tile( - 0.5 - * np.radians( - [ - -delta_tth, - -delta_eta, - -delta_tth, - delta_eta, - delta_tth, - delta_eta, - delta_tth, - -delta_eta, - 0.0, - 0.0, - ] - ), - (len(tth), 1), - ) - # !! conversions, meh... - tth = np.radians(tth) - del_eta = np.radians(delta_eta) - - # for generating rings, make eta vector in correct period - if eta_period is None: - eta_period = (-np.pi, np.pi) - - if eta_list is None: - neta = int(360.0 / float(delta_eta)) - # this is the vector of ETA EDGES - eta_edges = mapAngle( - np.radians(delta_eta * np.linspace(0.0, neta, num=neta + 1)) - + eta_period[0], - eta_period, - ) - - # get eta bin centers from edges - """ - # !!! this way is probably overkill, since we have delta eta - eta_centers = np.average( - np.vstack([eta[:-1], eta[1:]), - axis=0) - """ - # !!! should be safe as eta_edges are monotonic - eta_centers = eta_edges[:-1] + 0.5 * del_eta - else: - eta_centers = np.radians(eta_list).flatten() - neta = len(eta_centers) - eta_edges = ( - np.tile(eta_centers, (2, 1)) - + np.tile(0.5 * del_eta * np.r_[-1, 1], (neta, 1)).T - ).T.flatten() - - # get chi and ome from rmat_s - # !!! API ambiguity - # !!! this assumes rmat_s was made from the composition - # !!! rmat_s = R(Xl, chi) * R(Yl, ome) - ome = np.arctan2(rmat_s[0, 2], rmat_s[0, 0]) - - # make list of angle tuples - angs = [ - np.vstack([i * np.ones(neta), eta_centers, ome * np.ones(neta)]) - for i in tth - ] - - # need xy coords and pixel sizes - valid_ang = [] - valid_xy = [] - map_indices = [] - npp = 5 # [ll, ul, ur, lr, center] - for i_ring in range(len(angs)): - # expand angles to patch vertices - these_angs = angs[i_ring].T - - # push to vertices to see who falls off - # FIXME: clipping is not checking if masked regions are on the - # patch interior - patch_vertices = ( - np.tile(these_angs[:, :2], (1, npp)) - + np.tile(sector_vertices[i_ring], (neta, 1)) - ).reshape(npp * neta, 2) - - # find vertices that all fall on the panel - # !!!
note API ambiguity regarding rmat_s above - all_xy = self.angles_to_cart( - patch_vertices, - rmat_s=rmat_s, - tvec_s=tvec_s, - rmat_c=None, - tvec_c=tvec_c, - apply_distortion=True, - ) - - _, on_panel = self.clip_to_panel(all_xy) - - # all vertices must be on... - - patch_is_on = np.all(on_panel.reshape(neta, npp), axis=1) - patch_xys = all_xy.reshape(neta, 5, 2)[patch_is_on] - - # !!! Have to apply after clipping, distortion can get wonky near - # the edge of the panel, and it is assumed to be <~1 deg - # !!! The tth_ranges are NOT correct! - if tth_distortion is not None: - patch_valid_angs = tth_distortion.apply( - self.angles_to_cart(these_angs[patch_is_on, :2]), - return_nominal=True, - ) - patch_valid_xys = self.angles_to_cart( - patch_valid_angs, apply_distortion=True - ) - else: - patch_valid_angs = these_angs[patch_is_on, :2] - patch_valid_xys = patch_xys[:, -1, :].squeeze() - - # form output arrays - valid_ang.append(patch_valid_angs) - valid_xy.append(patch_valid_xys) - map_indices.append(patch_is_on) - # ??? is this option necessary? - if full_output: - return valid_ang, valid_xy, tth_ranges, map_indices, eta_edges - else: - return valid_ang, valid_xy, tth_ranges - - def map_to_plane(self, pts, rmat, tvec): - """ - Map detector points to the specified plane. - - Parameters - ---------- - pts : TYPE - DESCRIPTION. - rmat : TYPE - DESCRIPTION. - tvec : TYPE - DESCRIPTION. - - Returns - ------- - TYPE - DESCRIPTION. - - Notes - ----- - by convention: - - n * (u*pts_l - tvec) = 0 - - [pts]_l = rmat*[pts]_m + tvec - - """ - # arg munging - pts = np.atleast_2d(pts) - npts = len(pts) - - # map plane normal & translation vector, LAB FRAME - nvec_map_lab = rmat[:, 2].reshape(3, 1) - tvec_map_lab = np.atleast_2d(tvec).reshape(3, 1) - tvec_d_lab = np.atleast_2d(self.tvec).reshape(3, 1) - - # put pts as 3-d in panel CS and transform to 3-d lab coords - pts_det = np.hstack([pts, np.zeros((npts, 1))]) - pts_lab = np.dot(self.rmat, pts_det.T) + tvec_d_lab - - # scaling along pts vectors to hit map plane - u = np.dot(nvec_map_lab.T, tvec_map_lab) / np.dot( - nvec_map_lab.T, pts_lab - ) - - # pts on map plane, in LAB FRAME - pts_map_lab = np.tile(u, (3, 1)) * pts_lab - - return np.dot(rmat.T, pts_map_lab - tvec_map_lab)[:2, :].T - - def simulate_rotation_series( - self, - plane_data, - grain_param_list, - eta_ranges=[ - (-np.pi, np.pi), - ], - ome_ranges=[ - (-np.pi, np.pi), - ], - ome_period=(-np.pi, np.pi), - chi=0.0, - tVec_s=ct.zeros_3, - wavelength=None, - ): - """ - Simulate a monochromatic rotation series for a list of grains. - - Parameters - ---------- - plane_data : TYPE - DESCRIPTION. - grain_param_list : TYPE - DESCRIPTION. - eta_ranges : TYPE, optional - DESCRIPTION. The default is [(-np.pi, np.pi), ]. - ome_ranges : TYPE, optional - DESCRIPTION. The default is [(-np.pi, np.pi), ]. - ome_period : TYPE, optional - DESCRIPTION. The default is (-np.pi, np.pi). - chi : TYPE, optional - DESCRIPTION. The default is 0.0. - tVec_s : TYPE, optional - DESCRIPTION. The default is ct.zeros_3. - wavelength : TYPE, optional - DESCRIPTION. The default is None. - - Returns - ------- - valid_ids : TYPE - DESCRIPTION. - valid_hkls : TYPE - DESCRIPTION. - valid_angs : TYPE - DESCRIPTION. - valid_xys : TYPE - DESCRIPTION. - ang_pixel_size : TYPE - DESCRIPTION.
- - """ - # grab B-matrix from plane data - bMat = plane_data.latVecOps['B'] - - # reconcile wavelength - # * added sanity check on exclusions here; possible to - # * make some reflections invalid (NaN) - if wavelength is None: - wavelength = plane_data.wavelength - else: - if plane_data.wavelength != wavelength: - plane_data.wavelength = ct.keVToAngstrom(wavelength) - assert not np.any( - np.isnan(plane_data.getTTh()) - ), "plane data exclusions incompatible with wavelength" - - # vstacked G-vector id, h, k, l - full_hkls = xrdutil._fetch_hkls_from_planedata(plane_data) - - """ LOOP OVER GRAINS """ - valid_ids = [] - valid_hkls = [] - valid_angs = [] - valid_xys = [] - ang_pixel_size = [] - for gparm in grain_param_list: - - # make useful parameters - rMat_c = make_rmat_of_expmap(gparm[:3]) - tVec_c = gparm[3:6] - vInv_s = gparm[6:] - - # All possible bragg conditions as vstacked [tth, eta, ome] - # for each omega solution - angList = np.vstack( - oscill_angles_of_hkls( - full_hkls[:, 1:], - chi, - rMat_c, - bMat, - wavelength, - v_inv=vInv_s, - beam_vec=self.bvec, - ) - ) - - # filter by eta and omega ranges - # ??? get eta range from detector? - allAngs, allHKLs = xrdutil._filter_hkls_eta_ome( - full_hkls, angList, eta_ranges, ome_ranges - ) - allAngs[:, 2] = mapAngle(allAngs[:, 2], ome_period) - - # find points that fall on the panel - det_xy, rMat_s, on_plane = xrdutil._project_on_detector_plane( - allAngs, - self.rmat, - rMat_c, - chi, - self.tvec, - tVec_c, - tVec_s, - self.distortion, - self.bvec, - ) - xys_p, on_panel = self.clip_to_panel(det_xy) - valid_xys.append(xys_p) - - # filter angs and hkls that are on the detector plane - # !!! check this -- seems unnecessary but the results of - # _project_on_detector_plane() can have len < the input? - # the output of _project_on_detector_plane has been modified to - # hand back the index array to remedy this JVB 2020-05-27 - if np.any(~on_plane): - allAngs = np.atleast_2d(allAngs[on_plane, :]) - allHKLs = np.atleast_2d(allHKLs[on_plane, :]) - - # grab hkls and gvec ids for this panel - valid_hkls.append(allHKLs[on_panel, 1:]) - valid_ids.append(allHKLs[on_panel, 0]) - - # reflection angles (voxel centers) and pixel size in (tth, eta) - valid_angs.append(allAngs[on_panel, :]) - ang_pixel_size.append(self.angularPixelSize(xys_p)) - return valid_ids, valid_hkls, valid_angs, valid_xys, ang_pixel_size - - def simulate_laue_pattern( - self, - crystal_data, - minEnergy=5.0, - maxEnergy=35.0, - rmat_s=None, - tvec_s=None, - grain_params=None, - beam_vec=None, - ): - """ """ - if isinstance(crystal_data, PlaneData): - - plane_data = crystal_data - - # grab the expanded list of hkls from plane_data - hkls = np.hstack(plane_data.getSymHKLs()) - - # and the unit plane normals (G-vectors) in CRYSTAL FRAME - gvec_c = np.dot(plane_data.latVecOps['B'], hkls) - - # Filter out g-vectors going in the wrong direction. `gvec_to_xy()` used - # to do this, but not anymore. - to_keep = np.dot(gvec_c.T, self.bvec) <= 0 - - hkls = hkls[:, to_keep] - gvec_c = gvec_c[:, to_keep] - elif len(crystal_data) == 2: - # !!! 
should clean this up - hkls = np.array(crystal_data[0]) - bmat = crystal_data[1] - gvec_c = np.dot(bmat, hkls) - else: - raise RuntimeError( - f'argument list not understood: {crystal_data=}' - ) - nhkls_tot = hkls.shape[1] - - # parse energy ranges - # TODO: allow for spectrum parsing - multipleEnergyRanges = False - if hasattr(maxEnergy, '__len__'): - assert len(maxEnergy) == len( - minEnergy - ), 'energy cutoff ranges must have the same length' - multipleEnergyRanges = True - lmin = [] - lmax = [] - for i in range(len(maxEnergy)): - lmin.append(ct.keVToAngstrom(maxEnergy[i])) - lmax.append(ct.keVToAngstrom(minEnergy[i])) - else: - lmin = ct.keVToAngstrom(maxEnergy) - lmax = ct.keVToAngstrom(minEnergy) - - # parse grain parameters kwarg - if grain_params is None: - grain_params = np.atleast_2d( - np.hstack([np.zeros(6), ct.identity_6x1]) - ) - n_grains = len(grain_params) - - # sample rotation - if rmat_s is None: - rmat_s = ct.identity_3x3 - - # dummy translation vector... make input - if tvec_s is None: - tvec_s = ct.zeros_3 - - # beam vector - if beam_vec is None: - beam_vec = ct.beam_vec - - # ========================================================================= - # LOOP OVER GRAINS - # ========================================================================= - - # pre-allocate output arrays - xy_det = np.nan * np.ones((n_grains, nhkls_tot, 2)) - hkls_in = np.nan * np.ones((n_grains, 3, nhkls_tot)) - angles = np.nan * np.ones((n_grains, nhkls_tot, 2)) - dspacing = np.nan * np.ones((n_grains, nhkls_tot)) - energy = np.nan * np.ones((n_grains, nhkls_tot)) - for iG, gp in enumerate(grain_params): - rmat_c = make_rmat_of_expmap(gp[:3]) - tvec_c = gp[3:6].reshape(3, 1) - vInv_s = mutil.vecMVToSymm(gp[6:].reshape(6, 1)) - - # stretch them: V^(-1) * R * Gc - gvec_s_str = np.dot(vInv_s, np.dot(rmat_c, gvec_c)) - ghat_c_str = mutil.unitVector(np.dot(rmat_c.T, gvec_s_str)) - - # project - dpts = gvec_to_xy( - ghat_c_str.T, - self.rmat, - rmat_s, - rmat_c, - self.tvec, - tvec_s, - tvec_c, - beam_vec=beam_vec, - ) - - # check intersections with detector plane - canIntersect = ~np.isnan(dpts[:, 0]) - npts_in = sum(canIntersect) - - if np.any(canIntersect): - dpts = dpts[canIntersect, :].reshape(npts_in, 2) - dhkl = hkls[:, canIntersect].reshape(3, npts_in) - - rmat_b = make_beam_rmat(beam_vec, ct.eta_vec) - # back to angles - tth_eta, gvec_l = xy_to_gvec( - dpts, - self.rmat, - rmat_s, - self.tvec, - tvec_s, - tvec_c, - rmat_b=rmat_b, - ) - tth_eta = np.vstack(tth_eta).T - - # warp measured points - if self.distortion is not None: - dpts = self.distortion.apply_inverse(dpts) - - # plane spacings and energies - dsp = 1.0 / mutil.rowNorm(gvec_s_str[:, canIntersect].T) - wlen = 2 * dsp * np.sin(0.5 * tth_eta[:, 0]) - - # clip to detector panel - _, on_panel = self.clip_to_panel(dpts, buffer_edges=True) - - if multipleEnergyRanges: - validEnergy = np.zeros(len(wlen), dtype=bool) - for i in range(len(lmin)): - in_energy_range = np.logical_and( - wlen >= lmin[i], wlen <= lmax[i] - ) - validEnergy = validEnergy | in_energy_range - else: - validEnergy = np.logical_and(wlen >= lmin, wlen <= lmax) - - # index for valid reflections - keepers = np.where(np.logical_and(on_panel, validEnergy))[0] - - # assign output arrays - xy_det[iG][keepers, :] = dpts[keepers, :] - hkls_in[iG][:, keepers] = dhkl[:, keepers] - angles[iG][keepers, :] = tth_eta[keepers, :] - dspacing[iG, keepers] = dsp[keepers] - energy[iG, keepers] = ct.keVToAngstrom(wlen[keepers]) - return xy_det, hkls_in, angles, dspacing, energy - - 
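The docstring of `simulate_laue_pattern` above is empty, so a brief usage sketch may help. The snippet below is illustrative only and is not part of this patch: it assumes the post-remap import path `hexrd.core.instrument.planar_detector` used elsewhere in this PR, and it exercises the two-element `(hkls, bmat)` form of `crystal_data` handled by the `elif` branch above; the hkl list and cubic lattice parameter are hypothetical.

# Hedged usage sketch (assumptions: import path per this PR's remapping;
# made-up cubic cell). Not part of the patch itself.
import numpy as np

from hexrd.core.instrument.planar_detector import PlanarDetector

# a generic 2048 x 2048 panel with 0.2 mm pixels, 1 m from the origin
panel = PlanarDetector(
    rows=2048, cols=2048, pixel_size=(0.2, 0.2),
    tvec=np.r_[0.0, 0.0, -1000.0],
)

a0 = 4.05  # hypothetical cubic lattice parameter in Angstrom
hkls = np.array([[1, 1, 1], [2, 0, 0], [2, 2, 0]]).T  # (3, n) column hkls
bmat = np.eye(3) / a0  # B matrix of a primitive cubic reciprocal cell

# outputs are (n_grains, ...) arrays; entries stay NaN for reflections
# that miss the panel or fall outside the [minEnergy, maxEnergy] window
xy_det, hkls_in, angles, dspacing, energy = panel.simulate_laue_pattern(
    (hkls, bmat), minEnergy=5.0, maxEnergy=35.0
)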
@staticmethod - def update_memoization_sizes(all_panels): - funcs = [ - _polarization_factor, - _lorentz_factor, - ] - - min_size = len(all_panels) - return Detector.increase_memoization_sizes(funcs, min_size) - - @staticmethod - def increase_memoization_sizes(funcs, min_size): - for f in funcs: - cache_info = f.cache_info() - if cache_info['maxsize'] < min_size: - f.set_cache_maxsize(min_size) - - def calc_physics_package_transmission( - self, - energy: np.floating, - rMat_s: np.array, - physics_package: AbstractPhysicsPackage, - ) -> np.float64: - """get the transmission from the physics package - need to consider HED and HEDM samples separately - """ - bvec = self.bvec - sample_normal = np.dot(rMat_s, [0.0, 0.0, np.sign(bvec[2])]) - seca = 1.0 / np.dot(bvec, sample_normal) - - tth, eta = self.pixel_angles() - angs = np.vstack( - (tth.flatten(), eta.flatten(), np.zeros(tth.flatten().shape)) - ).T - - dvecs = angles_to_dvec(angs, beam_vec=bvec) - - cosb = np.dot(dvecs, sample_normal) - '''angles for which secb <= 0 or close are diffracted beams - almost parallel to the sample surface or backscattered, we - can mask out these values by setting secb to nan - ''' - mask = np.logical_or( - cosb < 0, - np.isclose( - cosb, - 0.0, - atol=5e-2, - ), - ) - cosb[mask] = np.nan - secb = 1.0 / cosb.reshape(self.shape) - - T_sample = self.calc_transmission_sample( - seca, secb, energy, physics_package - ) - T_window = self.calc_transmission_window(secb, energy, physics_package) - - transmission_physics_package = T_sample * T_window - return transmission_physics_package - - def calc_compton_physics_package_transmission( - self, - energy: np.floating, - rMat_s: np.array, - physics_package: AbstractPhysicsPackage, - ) -> np.ndarray: - '''calculate the attenuation of inelastically - scattered photons. since these photons lose energy, - the attenuation length is angle dependent, hence a separate - routine from elastically scattered absorption. - ''' - bvec = self.bvec - sample_normal = np.dot(rMat_s, [0.0, 0.0, np.sign(bvec[2])]) - seca = 1.0 / np.dot(bvec, sample_normal) - - tth, eta = self.pixel_angles() - angs = np.vstack( - (tth.flatten(), eta.flatten(), np.zeros(tth.flatten().shape)) - ).T - - dvecs = angles_to_dvec(angs, beam_vec=bvec) - - cosb = np.dot(dvecs, sample_normal) - '''angles for which secb <= 0 or close are diffracted beams - almost parallel to the sample surface or backscattered, we - can mask out these values by setting secb to nan - ''' - mask = np.logical_or( - cosb < 0, - np.isclose( - cosb, - 0.0, - atol=5e-2, - ), - ) - cosb[mask] = np.nan - secb = 1.0 / cosb.reshape(self.shape) - - T_sample = self.calc_compton_transmission( - seca, secb, energy, physics_package, 'sample' - ) - T_window = self.calc_compton_transmission_window( - secb, energy, physics_package - ) - - return T_sample * T_window - - def calc_compton_window_transmission( - self, - energy: np.floating, - rMat_s: np.ndarray, - physics_package: AbstractPhysicsPackage, - ) -> np.ndarray: - '''calculate the attenuation of inelastically - scattered photons just from the window. - since these photons lose energy, the attenuation length - is angle dependent, hence a separate routine from - elastically scattered absorption.
- ''' - bvec = self.bvec - sample_normal = np.dot(rMat_s, [0.0, 0.0, np.sign(bvec[2])]) - seca = 1.0 / np.dot(bvec, sample_normal) - - tth, eta = self.pixel_angles() - angs = np.vstack( - (tth.flatten(), eta.flatten(), np.zeros(tth.flatten().shape)) - ).T - - dvecs = angles_to_dvec(angs, beam_vec=bvec) - - cosb = np.dot(dvecs, sample_normal) - '''angles for which secb <= 0 or close are diffracted beams - almost parallel to the sample surface or backscattered, we - can mask out these values by setting secb to nan - ''' - mask = np.logical_or( - cosb < 0, - np.isclose( - cosb, - 0.0, - atol=5e-2, - ), - ) - cosb[mask] = np.nan - secb = 1.0 / cosb.reshape(self.shape) - - T_window = self.calc_compton_transmission( - seca, secb, energy, physics_package, 'window' - ) - T_sample = self.calc_compton_transmission_sample( - seca, energy, physics_package - ) - - return T_sample * T_window - - def calc_transmission_sample( - self, - seca: np.array, - secb: np.array, - energy: np.floating, - physics_package: AbstractPhysicsPackage, - ) -> np.array: - thickness_s = physics_package.sample_thickness # in microns - if np.isclose(thickness_s, 0): - return np.ones(self.shape) - - # in microns^-1 - mu_s = 1.0 / physics_package.sample_absorption_length(energy) - x = mu_s * thickness_s - pre = 1.0 / x / (secb - seca) - num = np.exp(-x * seca) - np.exp(-x * secb) - return pre * num - - def calc_transmission_window( - self, - secb: np.array, - energy: np.floating, - physics_package: AbstractPhysicsPackage, - ) -> np.array: - material_w = physics_package.window_material - thickness_w = physics_package.window_thickness # in microns - if material_w is None or np.isclose(thickness_w, 0): - return np.ones(self.shape) - - # in microns^-1 - mu_w = 1.0 / physics_package.window_absorption_length(energy) - return np.exp(-thickness_w * mu_w * secb) - - def calc_compton_transmission( - self, - seca: np.ndarray, - secb: np.ndarray, - energy: np.floating, - physics_package: AbstractPhysicsPackage, - pp_layer: str, - ) -> np.ndarray: - - if pp_layer == 'sample': - formula = physics_package.sample_material - density = physics_package.sample_density - thickness = physics_package.sample_thickness - mu = 1.0 / physics_package.sample_absorption_length(energy) - mu_prime = 1.0 / self.pixel_compton_attenuation_length( - energy, - density, - formula, - ) - elif pp_layer == 'window': - formula = physics_package.window_material - if formula is None: - return np.ones(self.shape) - - density = physics_package.window_density - thickness = physics_package.window_thickness - mu = 1.0 / physics_package.window_absorption_length(energy) - mu_prime = 1.0 / self.pixel_compton_attenuation_length( - energy, density, formula - ) - - if thickness <= 0: - return np.ones(self.shape) - - x1 = mu * thickness * seca - x2 = mu_prime * thickness * secb - num = np.exp(-x1) - np.exp(-x2) - return -num / (x1 - x2) - - def calc_compton_transmission_sample( - self, - seca: np.ndarray, - energy: np.floating, - physics_package: AbstractPhysicsPackage, - ) -> np.ndarray: - thickness_s = physics_package.sample_thickness # in microns - - mu_s = 1.0 / physics_package.sample_absorption_length(energy) - return np.exp(-mu_s * thickness_s * seca) - - def calc_compton_transmission_window( - self, - secb: np.ndarray, - energy: np.floating, - physics_package: AbstractPhysicsPackage, - ) -> np.ndarray: - formula = physics_package.window_material - if formula is None: - return np.ones(self.shape) - - density = physics_package.window_density # in g/cc - thickness_w =
physics_package.window_thickness # in microns - - mu_w_prime = 1.0 / self.pixel_compton_attenuation_length( - energy, density, formula - ) - return np.exp(-mu_w_prime * thickness_w * secb) - - def calc_effective_pinhole_area( - self, physics_package: AbstractPhysicsPackage - ) -> np.array: - """get the effective pinhole area correction""" - if np.isclose(physics_package.pinhole_diameter, 0) or np.isclose( - physics_package.pinhole_thickness, 0 - ): - return np.ones(self.shape) - - hod = ( - physics_package.pinhole_thickness - / physics_package.pinhole_diameter - ) - bvec = self.bvec - - tth, eta = self.pixel_angles() - angs = np.vstack( - (tth.flatten(), eta.flatten(), np.zeros(tth.flatten().shape)) - ).T - dvecs = angles_to_dvec(angs, beam_vec=bvec) - - cth = -dvecs[:, 2].reshape(self.shape) - tanth = np.tan(np.arccos(cth)) - f = hod * tanth - f[np.abs(f) > 1.0] = np.nan - asinf = np.arcsin(f) - return 2 / np.pi * cth * (np.pi / 2 - asinf - f * np.cos(asinf)) - - def calc_transmission_generic( - self, - secb: np.array, - thickness: np.floating, - absorption_length: np.floating, - ) -> np.array: - if np.isclose(thickness, 0): - return np.ones(self.shape) - - mu = 1.0 / absorption_length # in microns^-1 - return np.exp(-thickness * mu * secb) - - def calc_transmission_phosphor( - self, - secb: np.array, - thickness: np.floating, - readout_length: np.floating, - absorption_length: np.floating, - energy: np.floating, - pre_U0: np.floating, - ) -> np.array: - if np.isclose(thickness, 0): - return np.ones(self.shape) - - f1 = absorption_length * thickness - f2 = absorption_length * readout_length - arg = secb + 1 / f2 - return pre_U0 * energy * ((1.0 - np.exp(-f1 * arg)) / arg) - - -# ============================================================================= -# UTILITY METHODS -# ============================================================================= - - -def _fix_indices(idx, lo, hi): - nidx = np.array(idx) - off_lo = nidx < lo - off_hi = nidx > hi - nidx[off_lo] = lo - nidx[off_hi] = hi - return nidx - - -def _row_edge_vec(rows, pixel_size_row): - return pixel_size_row * (0.5 * rows - np.arange(rows + 1)) - - -def _col_edge_vec(cols, pixel_size_col): - return pixel_size_col * (np.arange(cols + 1) - 0.5 * cols) - - -# FIXME find a better place for this, and maybe include loop over pixels -@numba.njit(nogil=True, cache=True) -def _solid_angle_of_triangle(vtx_list): - norms = np.sqrt(np.sum(vtx_list * vtx_list, axis=1)) - norms_prod = norms[0] * norms[1] * norms[2] - scalar_triple_product = np.dot( - vtx_list[0], np.cross(vtx_list[2], vtx_list[1]) - ) - denominator = ( - norms_prod - + norms[0] * np.dot(vtx_list[1], vtx_list[2]) - + norms[1] * np.dot(vtx_list[2], vtx_list[0]) - + norms[2] * np.dot(vtx_list[0], vtx_list[1]) - ) - - return 2.0 * np.arctan2(scalar_triple_product, denominator) diff --git a/hexrd/powder/instrument/hedm_instrument.py b/hexrd/powder/instrument/hedm_instrument.py deleted file mode 100644 index 520586804..000000000 --- a/hexrd/powder/instrument/hedm_instrument.py +++ /dev/null @@ -1,3007 +0,0 @@ -# -*- coding: utf-8 -*- -# ============================================================================= -# Copyright (c) 2012, Lawrence Livermore National Security, LLC. -# Produced at the Lawrence Livermore National Laboratory. -# Written by Joel Bernier and others. -# LLNL-CODE-529294. -# All rights reserved. -# -# This file is part of HEXRD. For details on downloading the source, -# see the file COPYING. -# -# Please also see the file LICENSE.
-# -# This program is free software; you can redistribute it and/or modify it under -# the terms of the GNU Lesser General Public License (as published by the Free -# Software Foundation) version 2.1 dated February 1999. -# -# This program is distributed in the hope that it will be useful, but -# WITHOUT ANY WARRANTY; without even the IMPLIED WARRANTY OF MERCHANTABILITY -# or FITNESS FOR A PARTICULAR PURPOSE. See the terms and conditions of the -# GNU General Public License for more details. -# -# You should have received a copy of the GNU Lesser General Public -# License along with this program (see file LICENSE); if not, write to -# the Free Software Foundation, Inc., 59 Temple Place, Suite 330, -# Boston, MA 02111-1307 USA or visit <http://www.gnu.org/licenses/>. -# ============================================================================= -""" -Created on Fri Dec 9 13:05:27 2016 - -@author: bernier2 -""" -from contextlib import contextmanager -import copy -import logging -import os -from concurrent.futures import ProcessPoolExecutor, ThreadPoolExecutor -from functools import partial -from typing import Optional - -from tqdm import tqdm - -import yaml - -import h5py - -import numpy as np - -from io import IOBase - -from scipy import ndimage -from scipy.linalg import logm -from skimage.measure import regionprops - -from hexrd.core import constants -from hexrd.core.imageseries import ImageSeries -from hexrd.core.imageseries.process import ProcessedImageSeries -from hexrd.core.imageseries.omega import OmegaImageSeries -from hexrd.core.fitting.utils import fit_ring -from hexrd.core.gridutil import make_tolerance_grid -from hexrd.core import matrixutil as mutil -from hexrd.core.transforms.xfcapi import ( - angles_to_gvec, - gvec_to_xy, - make_sample_rmat, - make_rmat_of_expmap, - unit_vector, -) - -# TODO: Resolve extra-workflow dependency -from hexrd.hedm import xrdutil -from hexrd.powder.material.crystallography import PlaneData -from hexrd.core import constants as ct -from hexrd.core.rotations import angleAxisOfRotMat, RotMatEuler, mapAngle -from hexrd.core import distortion as distortion_pkg -from hexrd.core.utils.concurrent import distribute_tasks -from hexrd.core.utils.hdf5 import unwrap_dict_to_h5, unwrap_h5_to_dict -from hexrd.core.utils.yaml import NumpyToNativeDumper -from hexrd.core.valunits import valWUnit -from hexrd.powder.wppf import LeBail - -from hexrd.core.instrument.cylindrical_detector import CylindricalDetector -from hexrd.core.instrument.detector import ( - beam_energy_DFLT, - max_workers_DFLT, - Detector, -) -from hexrd.core.instrument.planar_detector import PlanarDetector - -from skimage.draw import polygon -from skimage.util import random_noise -from hexrd.powder.wppf import wppfsupport - -try: - from fast_histogram import histogram1d - - fast_histogram = True -except ImportError: - from numpy import histogram as histogram1d - - fast_histogram = False - -logger = logging.getLogger() -logger.setLevel('INFO') - -# ============================================================================= -# PARAMETERS -# ============================================================================= - -instrument_name_DFLT = 'instrument' - -beam_vec_DFLT = ct.beam_vec -source_distance_DFLT = np.inf - -eta_vec_DFLT = ct.eta_vec - -panel_id_DFLT = 'generic' -nrows_DFLT = 2048 -ncols_DFLT = 2048 -pixel_size_DFLT = (0.2, 0.2) - -tilt_params_DFLT = np.zeros(3) -t_vec_d_DFLT = np.r_[0.0, 0.0, -1000.0] - -chi_DFLT = 0.0 -t_vec_s_DFLT = np.zeros(3) - -multi_ims_key = ct.shared_ims_key -ims_classes = (ImageSeries,
ProcessedImageSeries, OmegaImageSeries) - -buffer_key = 'buffer' -distortion_key = 'distortion' - -# ============================================================================= -# UTILITY METHODS -# ============================================================================= - - -def generate_chunks( - nrows, ncols, base_nrows, base_ncols, row_gap=0, col_gap=0 -): - """ - Generate chunking data for regularly tiled composite detectors. - - Parameters - ---------- - nrows : int - DESCRIPTION. - ncols : int - DESCRIPTION. - base_nrows : int - DESCRIPTION. - base_ncols : int - DESCRIPTION. - row_gap : int, optional - DESCRIPTION. The default is 0. - col_gap : int, optional - DESCRIPTION. The default is 0. - - Returns - ------- - rects : array_like - The (nrows*ncols, ) list of ROI specs (see Notes). - labels : array_like - The (nrows*ncols, ) list of ROI (i, j) matrix indexing labels 'i_j'. - - Notes - ----- - ProcessedImageSeries needs a (2, 2) array for the 'rect' kwarg: - [[row_start, row_stop], - [col_start, col_stop]] - """ - row_starts = np.array([i * (base_nrows + row_gap) for i in range(nrows)]) - col_starts = np.array([i * (base_ncols + col_gap) for i in range(ncols)]) - rr = np.vstack([row_starts, row_starts + base_nrows]) - cc = np.vstack([col_starts, col_starts + base_ncols]) - rects = [] - labels = [] - for i in range(nrows): - for j in range(ncols): - this_rect = np.array([[rr[0, i], rr[1, i]], [cc[0, j], cc[1, j]]]) - rects.append(this_rect) - labels.append('%d_%d' % (i, j)) - return rects, labels - - -def chunk_instrument(instr, rects, labels, use_roi=False): - """ - Generate chunked config for regularly tiled composite detectors. - - Parameters - ---------- - instr : TYPE - DESCRIPTION. - rects : TYPE - DESCRIPTION. - labels : TYPE - DESCRIPTION. - - Returns - ------- - new_icfg_dict : TYPE - DESCRIPTION. - - """ - icfg_dict = instr.write_config() - new_icfg_dict = dict( - beam=icfg_dict['beam'], - oscillation_stage=icfg_dict['oscillation_stage'], - detectors={}, - ) - for panel_id, panel in instr.detectors.items(): - pcfg_dict = panel.config_dict(instr.chi, instr.tvec)['detector'] - - for pnum, pdata in enumerate(zip(rects, labels)): - rect, label = pdata - panel_name = f'{panel_id}_{label}' - - row_col_dim = np.diff(rect) # (2, 1) - shape = tuple(row_col_dim.flatten()) - center = rect[:, 0].reshape(2, 1) + 0.5 * row_col_dim - - sp_tvec = np.concatenate( - [panel.pixelToCart(center.T).flatten(), np.zeros(1)] - ) - - tvec = np.dot(panel.rmat, sp_tvec) + panel.tvec - - # new config dict - tmp_cfg = copy.deepcopy(pcfg_dict) - - # fix sizes - tmp_cfg['pixels']['rows'] = shape[0] - tmp_cfg['pixels']['columns'] = shape[1] - if use_roi: - tmp_cfg['pixels']['roi'] = (rect[0][0], rect[1][0]) - - # update tvec - tmp_cfg['transform']['translation'] = tvec.tolist() - - new_icfg_dict['detectors'][panel_name] = copy.deepcopy(tmp_cfg) - - if panel.panel_buffer is not None: - if panel.panel_buffer.ndim == 2: # have a mask array! - submask = panel.panel_buffer[ - rect[0, 0] : rect[0, 1], rect[1, 0] : rect[1, 1] - ] - new_icfg_dict['detectors'][panel_name]['buffer'] = submask - return new_icfg_dict - - -def _parse_imgser_dict(imgser_dict, det_key, roi=None): - """ - Associates a dict of imageseries to the target panel(s). - - Parameters - ---------- - imgser_dict : dict - The input dict of imageseries. Either `det_key` is in imgser_dict, or - the shared key is. Entries can be an ImageSeries object or a 2- or 3-d - ndarray of images. - det_key : str - The target detector key.
-    roi : tuple or None, optional
-        The roi of the target images.  Format is
-        ((row_start, row_stop), (col_start, col_stop)).
-        The stops are used in the normal sense of a slice.
-        The default is None.
-
-    Raises
-    ------
-    RuntimeError
-        If neither `det_key` nor the shared key is in the input imgser_dict;
-        also raised if a shared or matched key is used but `roi` is None.
-
-    Returns
-    -------
-    ims : hexrd.core.imageseries
-        The desired imageseries object.
-
-    """
-    # grab imageseries for this detector
-    try:
-        ims = imgser_dict[det_key]
-    except KeyError:
-        matched_det_keys = [det_key in k for k in imgser_dict]
-        if multi_ims_key in imgser_dict:
-            images_in = imgser_dict[multi_ims_key]
-        elif np.any(matched_det_keys):
-            if sum(matched_det_keys) != 1:
-                raise RuntimeError(f"multiple entries found for '{det_key}'")
-            # use boolean array to index the proper key
-            # !!! these should be in the same order
-            img_keys = np.asarray(list(imgser_dict.keys()))
-            matched_det_key = img_keys[matched_det_keys][0]  # !!! only one
-            images_in = imgser_dict[matched_det_key]
-        else:
-            raise RuntimeError(
-                f"neither '{det_key}' nor '{multi_ims_key}' found "
-                "in imageseries input"
-            )
-
-        # have images now
-        if roi is None:
-            raise RuntimeError(
-                "roi must be specified to use shared imageseries"
-            )
-
-        if isinstance(images_in, ims_classes):
-            # input is an imageseries of some kind
-            ims = ProcessedImageSeries(
-                images_in,
-                [
-                    ('rectangle', roi),
-                ],
-            )
-            if isinstance(images_in, OmegaImageSeries):
-                # if it was an OmegaImageSeries, must re-cast
-                ims = OmegaImageSeries(ims)
-        elif isinstance(images_in, np.ndarray):
-            # 2- or 3-d array of images
-            ndim = images_in.ndim
-            if ndim == 2:
-                ims = images_in[roi[0][0] : roi[0][1], roi[1][0] : roi[1][1]]
-            elif ndim == 3:
-                nrows = roi[0][1] - roi[0][0]
-                ncols = roi[1][1] - roi[1][0]
-                n_images = len(images_in)
-                ims = np.empty((n_images, nrows, ncols), dtype=images_in.dtype)
-                # crop each frame of the stack into the output array
-                for i, image in enumerate(images_in):
-                    ims[i, :, :] = image[
-                        roi[0][0] : roi[0][1], roi[1][0] : roi[1][1]
-                    ]
-            else:
-                raise RuntimeError(
-                    f"image input dim must be 2 or 3; you gave {ndim}"
-                )
-    return ims
-
-
-def calc_beam_vec(azim, pola):
-    """
-    Calculate the unit beam propagation vector from its
-    spherical coordinate spec in DEGREES.
-
-    ...MAY CHANGE; THIS IS ALSO LOCATED IN XRDUTIL!
- """ - tht = np.radians(azim) - phi = np.radians(pola) - bv = np.r_[ - np.sin(phi) * np.cos(tht), np.cos(phi), np.sin(phi) * np.sin(tht) - ] - return -bv - - -def calc_angles_from_beam_vec(bvec): - """ - Return the azimuth and polar angle from a beam - vector - """ - bvec = np.atleast_1d(bvec).flatten() - nvec = unit_vector(-bvec) - azim = float(np.degrees(np.arctan2(nvec[2], nvec[0]))) - pola = float(np.degrees(np.arccos(nvec[1]))) - return azim, pola - - -def migrate_instrument_config(instrument_config): - """utility function to generate old instrument config dictionary""" - cfg_list = [] - for detector_id in instrument_config['detectors']: - cfg_list.append( - dict( - detector=instrument_config['detectors'][detector_id], - oscillation_stage=instrument_config['oscillation_stage'], - ) - ) - return cfg_list - - -def angle_in_range(angle, ranges, ccw=True, units='degrees'): - """ - Return the index of the first wedge the angle is found in - - WARNING: always clockwise; assumes wedges are not overlapping - """ - tau = 360.0 - if units.lower() == 'radians': - tau = 2 * np.pi - w = np.nan - for i, wedge in enumerate(ranges): - amin = wedge[0] - amax = wedge[1] - check = amin + np.mod(angle - amin, tau) - if check < amax: - w = i - break - return w - - -# ???: move to gridutil? -def centers_of_edge_vec(edges): - assert np.asarray(edges).ndim == 1, "edges must be 1-d" - return np.average(np.vstack([edges[:-1], edges[1:]]), axis=0) - - -def max_tth(instr): - """ - Return the maximum Bragg angle (in radians) subtended by the instrument. - - Parameters - ---------- - instr : hexrd.hedm.instrument.HEDMInstrument instance - the instrument class to evalutate. - - Returns - ------- - tth_max : float - The maximum observable Bragg angle by the instrument in radians. - """ - tth_max = 0.0 - for det in instr.detectors.values(): - ptth, peta = det.pixel_angles() - tth_max = max(np.max(ptth), tth_max) - return tth_max - - -def pixel_resolution(instr): - """ - Return the minimum, median, and maximum angular - resolution of the instrument. - - Parameters - ---------- - instr : HEDMInstrument instance - An instrument. - - Returns - ------- - tth_stats : float - min/median/max tth resolution in radians. - eta_stats : TYPE - min/median/max eta resolution in radians. - - """ - max_tth = np.inf - max_eta = np.inf - min_tth = -np.inf - min_eta = -np.inf - ang_ps_full = [] - for panel in instr.detectors.values(): - angps = panel.angularPixelSize( - np.stack(panel.pixel_coords, axis=0) - .reshape(2, np.cumprod(panel.shape)[-1]) - .T - ) - ang_ps_full.append(angps) - max_tth = min(max_tth, np.min(angps[:, 0])) - max_eta = min(max_eta, np.min(angps[:, 1])) - min_tth = max(min_tth, np.max(angps[:, 0])) - min_eta = max(min_eta, np.max(angps[:, 1])) - med_tth, med_eta = np.median(np.vstack(ang_ps_full), axis=0).flatten() - return (min_tth, med_tth, max_tth), (min_eta, med_eta, max_eta) - - -def max_resolution(instr): - """ - Return the maximum angular resolution of the instrument. - - Parameters - ---------- - instr : HEDMInstrument instance - An instrument. - - Returns - ------- - max_tth : float - Maximum tth resolution in radians. - max_eta : TYPE - maximum eta resolution in radians. 
- - """ - max_tth = np.inf - max_eta = np.inf - for panel in instr.detectors.values(): - angps = panel.angularPixelSize( - np.stack(panel.pixel_coords, axis=0) - .reshape(2, np.cumprod(panel.shape)[-1]) - .T - ) - max_tth = min(max_tth, np.min(angps[:, 0])) - max_eta = min(max_eta, np.min(angps[:, 1])) - return max_tth, max_eta - - -def _gaussian_dist(x, cen, fwhm): - sigm = fwhm / (2 * np.sqrt(2 * np.log(2))) - return np.exp(-0.5 * (x - cen) ** 2 / sigm**2) - - -def _sigma_to_fwhm(sigm): - return sigm * ct.sigma_to_fwhm - - -def _fwhm_to_sigma(fwhm): - return fwhm / ct.sigma_to_fwhm - - -# ============================================================================= -# CLASSES -# ============================================================================= - - -class HEDMInstrument(object): - """ - Abstraction of XRD instrument. - - * Distortion needs to be moved to a class with registry; tuple unworkable - * where should reference eta be defined? currently set to default config - """ - - def __init__( - self, - instrument_config=None, - image_series=None, - eta_vector=None, - instrument_name=None, - tilt_calibration_mapping=None, - max_workers=max_workers_DFLT, - physics_package=None, - active_beam_name: Optional[str] = None, - ): - self._id = instrument_name_DFLT - - self._active_beam_name = active_beam_name - self._beam_dict = {} - - if eta_vector is None: - self._eta_vector = eta_vec_DFLT - else: - self._eta_vector = eta_vector - - self.max_workers = max_workers - - self.physics_package = physics_package - - if instrument_config is None: - # Default instrument - if instrument_name is not None: - self._id = instrument_name - self._num_panels = 1 - self._create_default_beam() - - # FIXME: must add cylindrical - self._detectors = dict( - panel_id_DFLT=PlanarDetector( - rows=nrows_DFLT, - cols=ncols_DFLT, - pixel_size=pixel_size_DFLT, - tvec=t_vec_d_DFLT, - tilt=tilt_params_DFLT, - bvec=self.beam_vector, - xrs_dist=self.source_distance, - evec=self._eta_vector, - distortion=None, - roi=None, - group=None, - max_workers=self.max_workers, - ), - ) - - self._tvec = t_vec_s_DFLT - self._chi = chi_DFLT - else: - if isinstance(instrument_config, h5py.File): - tmp = {} - unwrap_h5_to_dict(instrument_config, tmp) - instrument_config = tmp['instrument'] - elif not isinstance(instrument_config, dict): - raise RuntimeError( - "instrument_config must be either an HDF5 file object" - + "or a dictionary. You gave a %s" - % type(instrument_config) - ) - if instrument_name is None: - if 'id' in instrument_config: - self._id = instrument_config['id'] - else: - self._id = instrument_name - - self._num_panels = len(instrument_config['detectors']) - - if instrument_config.get('physics_package', None) is not None: - self.physics_package = instrument_config['physics_package'] - - xrs_config = instrument_config['beam'] - is_single_beam = 'energy' in xrs_config and 'vector' in xrs_config - if is_single_beam: - # Assume single beam. 
Load the same way as multibeam - self._create_default_beam() - xrs_config = {self.active_beam_name: xrs_config} - - # Multi beam load - for beam_name, beam in xrs_config.items(): - self._beam_dict[beam_name] = { - 'energy': beam['energy'], - 'vector': calc_beam_vec( - beam['vector']['azimuth'], - beam['vector']['polar_angle'], - ), - 'distance': beam.get('source_distance', np.inf), - } - - # Set the active beam name if not set already - if self._active_beam_name is None: - self._active_beam_name = next(iter(self._beam_dict)) - - # now build detector dict - detectors_config = instrument_config['detectors'] - det_dict = dict.fromkeys(detectors_config) - for det_id, det_info in detectors_config.items(): - det_group = det_info.get('group') # optional detector group - pixel_info = det_info['pixels'] - affine_info = det_info['transform'] - detector_type = det_info.get('detector_type', 'planar') - filter = det_info.get('filter', None) - coating = det_info.get('coating', None) - phosphor = det_info.get('phosphor', None) - try: - saturation_level = det_info['saturation_level'] - except KeyError: - saturation_level = 2**16 - shape = (pixel_info['rows'], pixel_info['columns']) - - panel_buffer = None - if buffer_key in det_info: - det_buffer = det_info[buffer_key] - if det_buffer is not None: - if isinstance(det_buffer, np.ndarray): - if det_buffer.ndim == 2: - if det_buffer.shape != shape: - msg = ( - f'Buffer shape for {det_id} ' - f'({det_buffer.shape}) does not match ' - f'detector shape ({shape})' - ) - raise BufferShapeMismatchError(msg) - else: - assert len(det_buffer) == 2 - panel_buffer = det_buffer - elif isinstance(det_buffer, list): - panel_buffer = np.asarray(det_buffer) - elif np.isscalar(det_buffer): - panel_buffer = det_buffer * np.ones(2) - else: - raise RuntimeError( - "panel buffer spec invalid for %s" % det_id - ) - - # optional roi - roi = pixel_info.get('roi') - - # handle distortion - distortion = None - if distortion_key in det_info: - distortion_cfg = det_info[distortion_key] - if distortion_cfg is not None: - try: - func_name = distortion_cfg['function_name'] - dparams = distortion_cfg['parameters'] - distortion = distortion_pkg.get_mapping( - func_name, dparams - ) - except KeyError: - raise RuntimeError( - "problem with distortion specification" - ) - if detector_type.lower() not in DETECTOR_TYPES: - msg = f'Unknown detector type: {detector_type}' - raise NotImplementedError(msg) - - DetectorClass = DETECTOR_TYPES[detector_type.lower()] - kwargs = dict( - name=det_id, - rows=pixel_info['rows'], - cols=pixel_info['columns'], - pixel_size=pixel_info['size'], - panel_buffer=panel_buffer, - saturation_level=saturation_level, - tvec=affine_info['translation'], - tilt=affine_info['tilt'], - bvec=self.beam_vector, - xrs_dist=self.source_distance, - evec=self._eta_vector, - distortion=distortion, - roi=roi, - group=det_group, - max_workers=self.max_workers, - detector_filter=filter, - detector_coating=coating, - phosphor=phosphor, - ) - - if DetectorClass is CylindricalDetector: - # Add cylindrical detector kwargs - kwargs['radius'] = det_info.get('radius', 49.51) - - det_dict[det_id] = DetectorClass(**kwargs) - - self._detectors = det_dict - - self._tvec = np.r_[ - instrument_config['oscillation_stage']['translation'] - ] - self._chi = instrument_config['oscillation_stage']['chi'] - - # grab angles from beam vec - # !!! these are in DEGREES! 
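# --- Aside (editorial, hedged): illustrative sketch of the beam-angle
# convention used just below; not part of the patched module.  The beam
# orientation is stored as (azimuth, polar_angle) in DEGREES and converted
# to a unit propagation vector; assuming the module imports as shown, the
# two helpers round-trip, and the (90, 90) spec maps to the conventional
# downstream beam [0, 0, -1]:
#
#     import numpy as np
#     from hexrd.core.instrument.hedm_instrument import (
#         calc_angles_from_beam_vec,
#         calc_beam_vec,
#     )
#
#     bvec = calc_beam_vec(90.0, 90.0)
#     assert np.allclose(bvec, [0.0, 0.0, -1.0])
#     assert np.allclose(calc_angles_from_beam_vec(bvec), (90.0, 90.0))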
-        azim, pola = calc_angles_from_beam_vec(self.beam_vector)
-
-        self.update_memoization_sizes()
-
-    @property
-    def mean_detector_center(self) -> np.ndarray:
-        """Return the mean center for all detectors"""
-        centers = np.array([panel.tvec for panel in self.detectors.values()])
-        return centers.sum(axis=0) / len(centers)
-
-    def mean_group_center(self, group: str) -> np.ndarray:
-        """Return the mean center for detectors belonging to a group"""
-        centers = np.array(
-            [x.tvec for x in self.detectors_in_group(group).values()]
-        )
-        return centers.sum(axis=0) / len(centers)
-
-    @property
-    def detector_groups(self) -> list[str]:
-        groups = []
-        for panel in self.detectors.values():
-            group = panel.group
-            if group is not None and group not in groups:
-                groups.append(group)
-
-        return groups
-
-    def detectors_in_group(self, group: str) -> dict[str, Detector]:
-        return {k: v for k, v in self.detectors.items() if v.group == group}
-
-    # properties for physical size of rectangular detector
-    @property
-    def id(self):
-        return self._id
-
-    @property
-    def num_panels(self):
-        return self._num_panels
-
-    @property
-    def detectors(self):
-        return self._detectors
-
-    @property
-    def detector_parameters(self):
-        pdict = {}
-        for key, panel in self.detectors.items():
-            pdict[key] = panel.config_dict(
-                self.chi,
-                self.tvec,
-                beam_energy=self.beam_energy,
-                beam_vector=self.beam_vector,
-                style='hdf5',
-            )
-        return pdict
-
-    @property
-    def tvec(self):
-        return self._tvec
-
-    @tvec.setter
-    def tvec(self, x):
-        x = np.array(x).flatten()
-        assert len(x) == 3, 'input must have length = 3'
-        self._tvec = x
-
-    @property
-    def chi(self):
-        return self._chi
-
-    @chi.setter
-    def chi(self, x):
-        self._chi = float(x)
-
-    @property
-    def beam_energy(self) -> float:
-        return self.active_beam['energy']
-
-    @beam_energy.setter
-    def beam_energy(self, x: float):
-        self.active_beam['energy'] = float(x)
-        self.beam_dict_modified()
-
-    @property
-    def beam_wavelength(self):
-        return ct.keVToAngstrom(self.beam_energy)
-
-    @property
-    def has_multi_beam(self) -> bool:
-        return len(self.beam_dict) > 1
-
-    @property
-    def beam_dict(self) -> dict:
-        return self._beam_dict
-
-    def _create_default_beam(self):
-        name = 'XRS1'
-        self._beam_dict[name] = {
-            'energy': beam_energy_DFLT,
-            'vector': beam_vec_DFLT.copy(),
-            'distance': np.inf,
-        }
-
-        if self._active_beam_name is None:
-            self._active_beam_name = name
-
-    @property
-    def beam_names(self) -> list[str]:
-        return list(self.beam_dict)
-
-    def xrs_beam_energy(self, beam_name: Optional[str]) -> float:
-        if beam_name is None:
-            beam_name = self.active_beam_name
-
-        return self.beam_dict[beam_name]['energy']
-
-    @property
-    def active_beam_name(self) -> str:
-        return self._active_beam_name
-
-    @active_beam_name.setter
-    def active_beam_name(self, name: str):
-        # validate the *requested* name before switching
-        if name not in self.beam_dict:
-            raise RuntimeError(
-                f'"{name}" is not present in "{self.beam_names}"'
-            )
-
-        self._active_beam_name = name
-
-        # Update anything beam related where we need to
-        self._update_panel_beams()
-
-    def beam_dict_modified(self):
-        # A function to call to indicate that the beam dict was modified.
-        # Update anything beam related where we need to
-        self._update_panel_beams()
-
-    @property
-    def active_beam(self) -> dict:
-        return self.beam_dict[self.active_beam_name]
-
-    def _update_panel_beams(self):
-        # FIXME: maybe we shouldn't store these on the panels?
-        # Might be hard to fix, though...
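# --- Aside (editorial, hedged): usage sketch, not part of the patched
# module.  Every beam mutation (energy, vector, source distance, or active
# beam switch) funnels through _update_panel_beams(), so the panels always
# see the active beam's geometry; e.g., on a hypothetical two-beam
# instrument:
#
#     instr.active_beam_name = instr.beam_names[1]
#     assert all(
#         np.allclose(panel.bvec, instr.beam_vector)
#         for panel in instr.detectors.values()
#     )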
- for panel in self.detectors.values(): - panel.bvec = self.beam_vector - panel.xrs_dist = self.source_distance - - @property - def beam_vector(self) -> np.ndarray: - return self.active_beam['vector'] - - @beam_vector.setter - def beam_vector(self, x: np.ndarray): - x = np.array(x).flatten() - if len(x) == 3: - assert ( - sum(x * x) > 1 - ct.sqrt_epsf - ), 'input must have length = 3 and have unit magnitude' - bvec = x - elif len(x) == 2: - bvec = calc_beam_vec(*x) - else: - raise RuntimeError("input must be a unit vector or angle pair") - - # Modify the beam vector for the active beam dict - self.active_beam['vector'] = bvec - self.beam_dict_modified() - - @property - def source_distance(self): - return self.active_beam['distance'] - - @source_distance.setter - def source_distance(self, x): - assert np.isscalar( - x - ), f"'source_distance' must be a scalar; you input '{x}'" - self.active_beam['distance'] = x - self.beam_dict_modified() - - @property - def eta_vector(self): - return self._eta_vector - - @eta_vector.setter - def eta_vector(self, x): - x = np.array(x).flatten() - assert ( - len(x) == 3 and sum(x * x) > 1 - ct.sqrt_epsf - ), 'input must have length = 3 and have unit magnitude' - self._eta_vector = x - # ...maybe change dictionary item behavior for 3.x compatibility? - for detector_id in self.detectors: - panel = self.detectors[detector_id] - panel.evec = self._eta_vector - - # ========================================================================= - # METHODS - # ========================================================================= - - def write_config(self, file=None, style='yaml', calibration_dict={}): - """WRITE OUT YAML FILE""" - # initialize output dictionary - assert style.lower() in ['yaml', 'hdf5'], ( - "style must be either 'yaml', or 'hdf5'; you gave '%s'" % style - ) - - par_dict = {} - - par_dict['id'] = self.id - - # Multi beam writer - beam_dict = {} - for beam_name, beam in self.beam_dict.items(): - azim, polar = calc_angles_from_beam_vec(beam['vector']) - beam_dict[beam_name] = { - 'energy': beam['energy'], - 'vector': { - 'azimuth': azim, - 'polar_angle': polar, - }, - } - if beam['distance'] != np.inf: - beam_dict[beam_name]['source_distance'] = beam['distance'] - - if len(beam_dict) == 1: - # Just write it out a single beam (classical way) - beam_dict = next(iter(beam_dict.values())) - - par_dict['beam'] = beam_dict - - if calibration_dict: - par_dict['calibration_crystal'] = calibration_dict - - ostage = dict(chi=self.chi, translation=self.tvec.tolist()) - par_dict['oscillation_stage'] = ostage - - det_dict = dict.fromkeys(self.detectors) - for det_name, detector in self.detectors.items(): - # grab panel config - # !!! don't need beam or tvec - # !!! 
have vetted style - pdict = detector.config_dict( - chi=self.chi, - tvec=self.tvec, - beam_energy=self.beam_energy, - beam_vector=self.beam_vector, - style=style, - ) - det_dict[det_name] = pdict['detector'] - par_dict['detectors'] = det_dict - - # handle output file if requested - if file is not None: - if style.lower() == 'yaml': - with open(file, 'w') as f: - yaml.dump(par_dict, stream=f, Dumper=NumpyToNativeDumper) - else: - - def _write_group(file): - instr_grp = file.create_group('instrument') - unwrap_dict_to_h5(instr_grp, par_dict, asattr=False) - - # hdf5 - if isinstance(file, str): - with h5py.File(file, 'w') as f: - _write_group(f) - elif isinstance(file, h5py.File): - _write_group(file) - else: - raise TypeError("Unexpected file type.") - - return par_dict - - def extract_polar_maps( - self, - plane_data, - imgser_dict, - active_hkls=None, - threshold=None, - tth_tol=None, - eta_tol=0.25, - ): - """ - Extract eta-omega maps from an imageseries. - - Quick and dirty way to histogram angular patch data for make - pole figures suitable for fiber generation - - TODO: streamline projection code - TODO: normalization - !!!: images must be non-negative! - !!!: plane_data is NOT a copy! - """ - if tth_tol is not None: - plane_data.tThWidth = np.radians(tth_tol) - else: - tth_tol = np.degrees(plane_data.tThWidth) - - # make rings clipped to panel - # !!! eta_idx has the same length as plane_data.exclusions - # each entry are the integer indices into the bins - # !!! eta_edges is the list of eta bin EDGES; same for all - # detectors, so calculate it once - # !!! grab first panel - panel = next(iter(self.detectors.values())) - pow_angs, pow_xys, tth_ranges, eta_idx, eta_edges = ( - panel.make_powder_rings( - plane_data, - merge_hkls=False, - delta_eta=eta_tol, - full_output=True, - ) - ) - - if active_hkls is not None: - assert hasattr( - active_hkls, '__len__' - ), "active_hkls must be an iterable with __len__" - - # need to re-cast for element-wise operations - active_hkls = np.array(active_hkls) - - # these are all active reflection unique hklIDs - active_hklIDs = plane_data.getHKLID(plane_data.hkls, master=True) - - # find indices - idx = np.zeros_like(active_hkls, dtype=int) - for i, input_hklID in enumerate(active_hkls): - try: - idx[i] = np.where(active_hklIDs == input_hklID)[0] - except ValueError: - raise RuntimeError(f"hklID '{input_hklID}' is invalid") - tth_ranges = tth_ranges[idx] - - delta_eta = eta_edges[1] - eta_edges[0] - ncols_eta = len(eta_edges) - 1 - - ring_maps_panel = dict.fromkeys(self.detectors) - for i_d, det_key in enumerate(self.detectors): - print("working on detector '%s'..." 
% det_key) - - # grab panel - panel = self.detectors[det_key] - # native_area = panel.pixel_area # pixel ref area - - # pixel angular coords for the detector panel - ptth, peta = panel.pixel_angles() - - # grab imageseries for this detector - ims = _parse_imgser_dict(imgser_dict, det_key, roi=panel.roi) - - # grab omegas from imageseries and squawk if missing - try: - omegas = ims.metadata['omega'] - except KeyError: - raise RuntimeError( - f"imageseries for '{det_key}' has no omega info" - ) - - # initialize maps and assing by row (omega/frame) - nrows_ome = len(omegas) - - # init map with NaNs - shape = (len(tth_ranges), nrows_ome, ncols_eta) - ring_maps = np.full(shape, np.nan) - - # Generate ring parameters once, and re-use them for each image - ring_params = [] - for tthr in tth_ranges: - kwargs = { - 'tthr': tthr, - 'ptth': ptth, - 'peta': peta, - 'eta_edges': eta_edges, - 'delta_eta': delta_eta, - } - ring_params.append(_generate_ring_params(**kwargs)) - - # Divide up the images among processes - tasks = distribute_tasks(len(ims), self.max_workers) - func = partial( - _run_histograms, - ims=ims, - tth_ranges=tth_ranges, - ring_maps=ring_maps, - ring_params=ring_params, - threshold=threshold, - ) - - max_workers = self.max_workers - if max_workers == 1 or len(tasks) == 1: - # Just execute it serially. - for task in tasks: - func(task) - else: - with ThreadPoolExecutor(max_workers=max_workers) as executor: - # Evaluate the results via `list()`, so that if an - # exception is raised in a thread, it will be re-raised - # and visible to the user. - list(executor.map(func, tasks)) - - ring_maps_panel[det_key] = ring_maps - - return ring_maps_panel, eta_edges - - def extract_line_positions( - self, - plane_data, - imgser_dict, - tth_tol=None, - eta_tol=1.0, - npdiv=2, - eta_centers=None, - collapse_eta=True, - collapse_tth=False, - do_interpolation=True, - do_fitting=False, - tth_distortion=None, - fitting_kwargs=None, - ): - """ - Perform annular interpolation on diffraction images. - - Provides data for extracting the line positions from powder diffraction - images, pole figure patches from imageseries, or Bragg peaks from - Laue diffraction images. - - Parameters - ---------- - plane_data : hexrd.crystallography.PlaneData object or array_like - Object determining the 2theta positions for the integration - sectors. If PlaneData, this will be all non-excluded reflections, - subject to merging within PlaneData.tThWidth. If array_like, - interpreted as a list of 2theta angles IN DEGREES. - imgser_dict : dict - Dictionary of powder diffraction images, one for each detector. - tth_tol : scalar, optional - The radial (i.e. 2theta) width of the integration sectors - IN DEGREES. This arg is required if plane_data is array_like. - The default is None. - eta_tol : scalar, optional - The azimuthal (i.e. eta) width of the integration sectors - IN DEGREES. The default is 1. - npdiv : int, optional - The number of oversampling pixel subdivision (see notes). - The default is 2. - eta_centers : array_like, optional - The desired azimuthal sector centers. The default is None. If - None, then bins are distrubted sequentially from (-180, 180). - collapse_eta : bool, optional - Flag for summing sectors in eta. The default is True. - collapse_tth : bool, optional - Flag for summing sectors in 2theta. The default is False. - do_interpolation : bool, optional - If True, perform bilinear interpolation. The default is True. 
-        do_fitting : bool, optional
-            If True, then perform spectrum fitting and append the results
-            to the returned data.  collapse_eta must also be True for this
-            to have any effect.  The default is False.
-        tth_distortion : special class, optional
-            For the special case of pinhole camera distortions.  See
-            hexrd.hedm.xrdutil.phutil.SampleLayerDistortion (only type
-            supported).
-        fitting_kwargs : dict, optional
-            kwargs passed to hexrd.core.fitting.utils.fit_ring if
-            do_fitting is True.
-
-        Raises
-        ------
-        RuntimeError
-            If the imageseries input cannot be resolved for a detector, or
-            if a per-detector image input is not a 2- or 3-d array.
-
-        Returns
-        -------
-        panel_data : dict
-            Dictionary over the detectors with the following structure:
-                [list over (merged) 2theta ranges]
-                    [list over valid eta sectors]
-                        [angle data (input-dependent),
-                         bin intensities (input-dependent),
-                         fitting results (input-dependent)]
-
-        Notes
-        -----
-        TODO: May change the array_like input units to degrees.
-        TODO: rename function.
-
-        """
-
-        if fitting_kwargs is None:
-            fitting_kwargs = {}
-
-        # =====================================================================
-        # LOOP OVER DETECTORS
-        # =====================================================================
-        logger.info("Interpolating ring data")
-        pbar_dets = partial(
-            tqdm,
-            total=self.num_panels,
-            desc="Detector",
-            position=self.num_panels,
-        )
-
-        # Split up the workers among the detectors
-        max_workers_per_detector = max(1, self.max_workers // self.num_panels)
-
-        kwargs = {
-            'plane_data': plane_data,
-            'tth_tol': tth_tol,
-            'eta_tol': eta_tol,
-            'eta_centers': eta_centers,
-            'npdiv': npdiv,
-            'collapse_tth': collapse_tth,
-            'collapse_eta': collapse_eta,
-            'do_interpolation': do_interpolation,
-            'do_fitting': do_fitting,
-            'fitting_kwargs': fitting_kwargs,
-            'tth_distortion': tth_distortion,
-            'max_workers': max_workers_per_detector,
-        }
-        func = partial(_extract_detector_line_positions, **kwargs)
-
-        def make_instr_cfg(panel):
-            return panel.config_dict(
-                chi=self.chi,
-                tvec=self.tvec,
-                beam_energy=self.beam_energy,
-                beam_vector=self.beam_vector,
-                style='hdf5',
-            )
-
-        images = []
-        for detector_id, panel in self.detectors.items():
-            images.append(
-                _parse_imgser_dict(imgser_dict, detector_id, roi=panel.roi)
-            )
-
-        panels = [self.detectors[k] for k in self.detectors]
-        instr_cfgs = [make_instr_cfg(x) for x in panels]
-        pbp_array = np.arange(self.num_panels)
-        iter_args = zip(panels, instr_cfgs, images, pbp_array)
-        with ProcessPoolExecutor(
-            mp_context=constants.mp_context, max_workers=self.num_panels
-        ) as executor:
-            results = list(pbar_dets(executor.map(func, iter_args)))
-
-        panel_data = {}
-        for det, res in zip(self.detectors, results):
-            panel_data[det] = res
-
-        return panel_data
-
-    def simulate_powder_pattern(
-        self, mat_list, params=None, bkgmethod=None, origin=None, noise=None
-    ):
-        """
-        Generate powder diffraction images from specified materials.
-
-        Parameters
-        ----------
-        mat_list : array_like (n, )
-            List of Material classes.
-        params : dict, optional
-            Dictionary of LeBail parameters (see Notes).  The default is
-            None.
-        bkgmethod : dict, optional
-            Background function specification.  The default is None.
-        origin : array_like (3,), optional
-            Vector describing the origin of the diffraction volume.  The
-            default is None, which falls back to the instrument translation
-            self.tvec.
-        noise : str, optional
-            Flag describing the type of noise to be applied.  The default
-            is None.
-
-        Returns
-        -------
-        img_dict : dict
-            Dictionary of diffraction images over the detectors.
-
-        Notes
-        -----
-        TODO: add more controls for noise function.
-        TODO: modify hooks to LeBail parameters.
-        TODO: add optional volume fraction weights for phases in mat_list
-        """
-        """
-        >> @AUTHOR: Saransh Singh, Lawrence Livermore National Lab,
-                    saransh1@llnl.gov
-        >> @DATE: 01/22/2021 SS 1.0 original
-        >> @DETAILS: adding hook to WPPF class. this changes the input list
-                     significantly
-        """
-        if origin is None:
-            origin = self.tvec
-        origin = np.asarray(origin).squeeze()
-        assert len(origin) == 3, "origin must be a 3-element sequence"
-
-        if bkgmethod is None:
-            bkgmethod = {'chebyshev': 3}
-
-        '''
-        if params is None, fill in some sane default values.
-        only the first value is used; the rest of the values are
-        the upper and lower bounds and the vary flag for refinement, which
-        are not used here but are required for interfacing with WPPF
-
-        zero_error : zero shift error
-        U, V, W : Caglioti parameters
-        P, X, Y : Lorentzian parameters
-        eta1, eta2, eta3 : mixing parameters
-        '''
-        if params is None:
-            # params = {'zero_error': [0.0, -1., 1., True],
-            #           'U': [2e-1, -1., 1., True],
-            #           'V': [2e-2, -1., 1., True],
-            #           'W': [2e-2, -1., 1., True],
-            #           'X': [2e-1, -1., 1., True],
-            #           'Y': [2e-1, -1., 1., True]
-            #           }
-            params = wppfsupport._generate_default_parameters_LeBail(
-                mat_list,
-                1,
-                bkgmethod,
-            )
-        '''
-        use the material list to obtain the dictionary of initial
-        intensities. we need to make sure that the intensities are properly
-        scaled by the Lorentz polarization factor; since the calculation is
-        done in the LeBail class, all that means is the initial intensity
-        needs that factor in there
-        '''
-        img_dict = dict.fromkeys(self.detectors)
-
-        # find min and max tth over all panels
-        tth_mi = np.inf
-        tth_ma = 0.0
-        ptth_dict = dict.fromkeys(self.detectors)
-        for det_key, panel in self.detectors.items():
-            ptth, peta = panel.pixel_angles(origin=origin)
-            tth_mi = min(tth_mi, ptth.min())
-            tth_ma = max(tth_ma, ptth.max())
-            ptth_dict[det_key] = ptth
-
-        '''
-        now make a list of two theta and dummy ones for the experimental
-        spectrum. this is never really used, so any values should be okay; we
-        could also pass the integrated detector image if we would like to
-        simulate some realistic background, but that's for another day.
-        '''
-        # convert angles to degrees because that's what the WPPF expects
-        tth_mi = np.degrees(tth_mi)
-        tth_ma = np.degrees(tth_ma)
-
-        # get tth angular resolution for instrument
-        ang_res = max_resolution(self)
-
-        # !!!
calc nsteps by oversampling - nsteps = int(np.ceil(2 * (tth_ma - tth_mi) / np.degrees(ang_res[0]))) - - # evaulation vector for LeBail - tth = np.linspace(tth_mi, tth_ma, nsteps) - - expt = np.vstack([tth, np.ones_like(tth)]).T - - wavelength = [ - valWUnit('lp', 'length', self.beam_wavelength, 'angstrom'), - 1.0, - ] - - ''' - now go through the material list and get the intensity dictionary - ''' - intensity = {} - for mat in mat_list: - - multiplicity = mat.planeData.getMultiplicity() - - tth = mat.planeData.getTTh() - - LP = ( - (1 + np.cos(tth) ** 2) - / np.cos(0.5 * tth) - / np.sin(0.5 * tth) ** 2 - ) - - intensity[mat.name] = {} - intensity[mat.name]['synchrotron'] = ( - mat.planeData.structFact * LP * multiplicity - ) - - kwargs = { - 'expt_spectrum': expt, - 'params': params, - 'phases': mat_list, - 'wavelength': {'synchrotron': wavelength}, - 'bkgmethod': bkgmethod, - 'intensity_init': intensity, - 'peakshape': 'pvtch', - } - - self.WPPFclass = LeBail(**kwargs) - - self.simulated_spectrum = self.WPPFclass.spectrum_sim - self.background = self.WPPFclass.background - - ''' - now that we have the simulated intensities, its time to get the - two theta for the detector pixels and interpolate what the intensity - for each pixel should be - ''' - - img_dict = dict.fromkeys(self.detectors) - for det_key, panel in self.detectors.items(): - ptth = ptth_dict[det_key] - - img = np.interp( - np.degrees(ptth), - self.simulated_spectrum.x, - self.simulated_spectrum.y + self.background.y, - ) - - if noise is None: - img_dict[det_key] = img - - else: - # Rescale to be between 0 and 1 so random_noise() will work - prev_max = img.max() - img /= prev_max - - if noise.lower() == 'poisson': - im_noise = random_noise(img, mode='poisson', clip=True) - mi = im_noise.min() - ma = im_noise.max() - if ma > mi: - im_noise = (im_noise - mi) / (ma - mi) - - elif noise.lower() == 'gaussian': - im_noise = random_noise(img, mode='gaussian', clip=True) - - elif noise.lower() == 'salt': - im_noise = random_noise(img, mode='salt') - - elif noise.lower() == 'pepper': - im_noise = random_noise(img, mode='pepper') - - elif noise.lower() == 's&p': - im_noise = random_noise(img, mode='s&p') - - elif noise.lower() == 'speckle': - im_noise = random_noise(img, mode='speckle', clip=True) - - # Now scale back up - img_dict[det_key] = im_noise * prev_max - - return img_dict - - def simulate_laue_pattern( - self, - crystal_data, - minEnergy=5.0, - maxEnergy=35.0, - rmat_s=None, - grain_params=None, - ): - """ - Simulate Laue diffraction over the instrument. - - Parameters - ---------- - crystal_data : TYPE - DESCRIPTION. - minEnergy : TYPE, optional - DESCRIPTION. The default is 5.. - maxEnergy : TYPE, optional - DESCRIPTION. The default is 35.. - rmat_s : TYPE, optional - DESCRIPTION. The default is None. - grain_params : TYPE, optional - DESCRIPTION. The default is None. - - Returns - ------- - results : TYPE - DESCRIPTION. - - xy_det, hkls_in, angles, dspacing, energy - - TODO: revisit output; dict, or concatenated list? 
- """ - results = dict.fromkeys(self.detectors) - for det_key, panel in self.detectors.items(): - results[det_key] = panel.simulate_laue_pattern( - crystal_data, - minEnergy=minEnergy, - maxEnergy=maxEnergy, - rmat_s=rmat_s, - tvec_s=self.tvec, - grain_params=grain_params, - beam_vec=self.beam_vector, - ) - return results - - def simulate_rotation_series( - self, - plane_data, - grain_param_list, - eta_ranges=[ - (-np.pi, np.pi), - ], - ome_ranges=[ - (-np.pi, np.pi), - ], - ome_period=(-np.pi, np.pi), - wavelength=None, - ): - """ - Simulate a monochromatic rotation series over the instrument. - - Parameters - ---------- - plane_data : TYPE - DESCRIPTION. - grain_param_list : TYPE - DESCRIPTION. - eta_ranges : TYPE, optional - DESCRIPTION. The default is [(-np.pi, np.pi), ]. - ome_ranges : TYPE, optional - DESCRIPTION. The default is [(-np.pi, np.pi), ]. - ome_period : TYPE, optional - DESCRIPTION. The default is (-np.pi, np.pi). - wavelength : TYPE, optional - DESCRIPTION. The default is None. - - Returns - ------- - results : TYPE - DESCRIPTION. - - TODO: revisit output; dict, or concatenated list? - """ - results = dict.fromkeys(self.detectors) - for det_key, panel in self.detectors.items(): - results[det_key] = panel.simulate_rotation_series( - plane_data, - grain_param_list, - eta_ranges=eta_ranges, - ome_ranges=ome_ranges, - ome_period=ome_period, - chi=self.chi, - tVec_s=self.tvec, - wavelength=wavelength, - ) - return results - - def pull_spots( - self, - plane_data, - grain_params, - imgser_dict, - tth_tol=0.25, - eta_tol=1.0, - ome_tol=1.0, - npdiv=2, - threshold=10, - eta_ranges=[ - (-np.pi, np.pi), - ], - ome_period=None, - dirname='results', - filename=None, - output_format='text', - return_spot_list=False, - quiet=True, - check_only=False, - interp='nearest', - ): - """ - Exctract reflection info from a rotation series. - - Input must be encoded as an OmegaImageseries object. - - Parameters - ---------- - plane_data : TYPE - DESCRIPTION. - grain_params : TYPE - DESCRIPTION. - imgser_dict : TYPE - DESCRIPTION. - tth_tol : TYPE, optional - DESCRIPTION. The default is 0.25. - eta_tol : TYPE, optional - DESCRIPTION. The default is 1.. - ome_tol : TYPE, optional - DESCRIPTION. The default is 1.. - npdiv : TYPE, optional - DESCRIPTION. The default is 2. - threshold : TYPE, optional - DESCRIPTION. The default is 10. - eta_ranges : TYPE, optional - DESCRIPTION. The default is [(-np.pi, np.pi), ]. - ome_period : TYPE, optional - DESCRIPTION. The default is (-np.pi, np.pi). - dirname : TYPE, optional - DESCRIPTION. The default is 'results'. - filename : TYPE, optional - DESCRIPTION. The default is None. - output_format : TYPE, optional - DESCRIPTION. The default is 'text'. - return_spot_list : TYPE, optional - DESCRIPTION. The default is False. - quiet : TYPE, optional - DESCRIPTION. The default is True. - check_only : TYPE, optional - DESCRIPTION. The default is False. - interp : TYPE, optional - DESCRIPTION. The default is 'nearest'. - - Returns - ------- - compl : TYPE - DESCRIPTION. - output : TYPE - DESCRIPTION. - - """ - # grain parameters - rMat_c = make_rmat_of_expmap(grain_params[:3]) - tVec_c = grain_params[3:6] - - # grab omega ranges from first imageseries - # - # WARNING: all imageseries AND all wedges within are assumed to have - # the same omega values; put in a check that they are all the same??? 
- oims0 = next(iter(imgser_dict.values())) - ome_ranges = [ - np.radians([i['ostart'], i['ostop']]) - for i in oims0.omegawedges.wedges - ] - if ome_period is None: - ims = next(iter(imgser_dict.values())) - ostart = ims.omega[0, 0] - ome_period = np.radians(ostart + np.r_[0.0, 360.0]) - - # delta omega in DEGREES grabbed from first imageseries in the dict - delta_ome = oims0.omega[0, 1] - oims0.omega[0, 0] - - # make omega grid for frame expansion around reference frame - # in DEGREES - ndiv_ome, ome_del = make_tolerance_grid( - delta_ome, - ome_tol, - 1, - adjust_window=True, - ) - - # generate structuring element for connected component labeling - if ndiv_ome == 1: - label_struct = ndimage.generate_binary_structure(2, 2) - else: - label_struct = ndimage.generate_binary_structure(3, 3) - - # simulate rotation series - sim_results = self.simulate_rotation_series( - plane_data, - [ - grain_params, - ], - eta_ranges=eta_ranges, - ome_ranges=ome_ranges, - ome_period=ome_period, - ) - - # patch vertex generator (global for instrument) - tol_vec = 0.5 * np.radians( - [ - -tth_tol, - -eta_tol, - -tth_tol, - eta_tol, - tth_tol, - eta_tol, - tth_tol, - -eta_tol, - ] - ) - - # prepare output if requested - if filename is not None and output_format.lower() == 'hdf5': - this_filename = os.path.join(dirname, filename) - writer = GrainDataWriter_h5( - os.path.join(dirname, filename), - self.write_config(), - grain_params, - ) - - # ===================================================================== - # LOOP OVER PANELS - # ===================================================================== - iRefl = 0 - next_invalid_peak_id = -100 - compl = [] - output = dict.fromkeys(self.detectors) - for detector_id, panel in self.detectors.items(): - # initialize text-based output writer - if filename is not None and output_format.lower() == 'text': - output_dir = os.path.join(dirname, detector_id) - os.makedirs(output_dir, exist_ok=True) - this_filename = os.path.join(output_dir, filename) - writer = PatchDataWriter(this_filename) - - # grab panel - instr_cfg = panel.config_dict( - self.chi, - self.tvec, - beam_energy=self.beam_energy, - beam_vector=self.beam_vector, - style='hdf5', - ) - native_area = panel.pixel_area # pixel ref area - - # pull out the OmegaImageSeries for this panel from input dict - ome_imgser = _parse_imgser_dict( - imgser_dict, detector_id, roi=panel.roi - ) - - # extract simulation results - sim_results_p = sim_results[detector_id] - hkl_ids = sim_results_p[0][0] - hkls_p = sim_results_p[1][0] - ang_centers = sim_results_p[2][0] - xy_centers = sim_results_p[3][0] - ang_pixel_size = sim_results_p[4][0] - - # now verify that full patch falls on detector... - # ???: strictly necessary? - # - # patch vertex array from sim - nangs = len(ang_centers) - patch_vertices = ( - np.tile(ang_centers[:, :2], (1, 4)) - + np.tile(tol_vec, (nangs, 1)) - ).reshape(4 * nangs, 2) - ome_dupl = np.tile(ang_centers[:, 2], (4, 1)).T.reshape( - len(patch_vertices), 1 - ) - - # find vertices that all fall on the panel - det_xy, rmats_s, on_plane = xrdutil._project_on_detector_plane( - np.hstack([patch_vertices, ome_dupl]), - panel.rmat, - rMat_c, - self.chi, - panel.tvec, - tVec_c, - self.tvec, - panel.distortion, - ) - _, on_panel = panel.clip_to_panel(det_xy, buffer_edges=True) - - # all vertices must be on... - patch_is_on = np.all(on_panel.reshape(nangs, 4), axis=1) - patch_xys = det_xy.reshape(nangs, 4, 2)[patch_is_on] - - # re-filter... 
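# --- Aside (editorial, hedged): the reshape-and-all pattern above keeps a
# reflection only if all four corners of its tolerance box land on the
# panel; a toy two-patch example of the same logic:
#
#     on_panel = np.array([True, True, True, False,
#                          True, True, True, True])
#     patch_is_on = np.all(on_panel.reshape(2, 4), axis=1)
#     # -> array([False,  True]): patch 0 loses a corner, patch 1 is kept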
- hkl_ids = hkl_ids[patch_is_on] - hkls_p = hkls_p[patch_is_on, :] - ang_centers = ang_centers[patch_is_on, :] - xy_centers = xy_centers[patch_is_on, :] - ang_pixel_size = ang_pixel_size[patch_is_on, :] - - # TODO: add polygon testing right here! - # done - if check_only: - patch_output = [] - for i_pt, angs in enumerate(ang_centers): - # the evaluation omegas; - # expand about the central value using tol vector - ome_eval = np.degrees(angs[2]) + ome_del - - # ...vectorize the omega_to_frame function to avoid loop? - frame_indices = [ - ome_imgser.omega_to_frame(ome)[0] for ome in ome_eval - ] - if -1 in frame_indices: - if not quiet: - msg = """ - window for (%d %d %d) falls outside omega range - """ % tuple( - hkls_p[i_pt, :] - ) - print(msg) - continue - else: - these_vertices = patch_xys[i_pt] - ijs = panel.cartToPixel(these_vertices) - ii, jj = polygon(ijs[:, 0], ijs[:, 1]) - contains_signal = False - for i_frame in frame_indices: - contains_signal = contains_signal or np.any( - ome_imgser[i_frame][ii, jj] > threshold - ) - compl.append(contains_signal) - patch_output.append((ii, jj, frame_indices)) - else: - # make the tth,eta patches for interpolation - patches = xrdutil.make_reflection_patches( - instr_cfg, - ang_centers[:, :2], - ang_pixel_size, - omega=ang_centers[:, 2], - tth_tol=tth_tol, - eta_tol=eta_tol, - rmat_c=rMat_c, - tvec_c=tVec_c, - npdiv=npdiv, - quiet=True, - ) - - # GRAND LOOP over reflections for this panel - patch_output = [] - for i_pt, patch in enumerate(patches): - - # strip relevant objects out of current patch - vtx_angs, vtx_xy, conn, areas, xy_eval, ijs = patch - - prows, pcols = areas.shape - nrm_fac = areas / float(native_area) - nrm_fac = nrm_fac / np.min(nrm_fac) - - # grab hkl info - hkl = hkls_p[i_pt, :] - hkl_id = hkl_ids[i_pt] - - # edge arrays - tth_edges = vtx_angs[0][0, :] - delta_tth = tth_edges[1] - tth_edges[0] - eta_edges = vtx_angs[1][:, 0] - delta_eta = eta_edges[1] - eta_edges[0] - - # need to reshape eval pts for interpolation - xy_eval = np.vstack( - [xy_eval[0].flatten(), xy_eval[1].flatten()] - ).T - - # the evaluation omegas; - # expand about the central value using tol vector - ome_eval = np.degrees(ang_centers[i_pt, 2]) + ome_del - - # ???: vectorize the omega_to_frame function to avoid loop? - frame_indices = [ - ome_imgser.omega_to_frame(ome)[0] for ome in ome_eval - ] - - if -1 in frame_indices: - if not quiet: - msg = """ - window for (%d%d%d) falls outside omega range - """ % tuple( - hkl - ) - print(msg) - continue - else: - # initialize spot data parameters - # !!! 
maybe change these to nan to not fuck up writer - peak_id = next_invalid_peak_id - sum_int = np.nan - max_int = np.nan - meas_angs = np.nan * np.ones(3) - meas_xy = np.nan * np.ones(2) - - # quick check for intensity - contains_signal = False - patch_data_raw = [] - for i_frame in frame_indices: - tmp = ome_imgser[i_frame][ijs[0], ijs[1]] - contains_signal = contains_signal or np.any( - tmp > threshold - ) - patch_data_raw.append(tmp) - patch_data_raw = np.stack(patch_data_raw, axis=0) - compl.append(contains_signal) - - if contains_signal: - # initialize patch data array for intensities - if interp.lower() == 'bilinear': - patch_data = np.zeros( - (len(frame_indices), prows, pcols) - ) - for i, i_frame in enumerate(frame_indices): - patch_data[i] = panel.interpolate_bilinear( - xy_eval, - ome_imgser[i_frame], - pad_with_nans=False, - ).reshape( - prows, pcols - ) # * nrm_fac - elif interp.lower() == 'nearest': - patch_data = patch_data_raw # * nrm_fac - else: - msg = ( - "interpolation option " - + "'%s' not understood" - ) - raise RuntimeError(msg % interp) - - # now have interpolated patch data... - labels, num_peaks = ndimage.label( - patch_data > threshold, structure=label_struct - ) - slabels = np.arange(1, num_peaks + 1) - - if num_peaks > 0: - peak_id = iRefl - props = regionprops(labels, patch_data) - coms = np.vstack( - [x.weighted_centroid for x in props] - ) - if num_peaks > 1: - center = np.r_[patch_data.shape] * 0.5 - center_t = np.tile(center, (num_peaks, 1)) - com_diff = coms - center_t - closest_peak_idx = np.argmin( - np.sum(com_diff**2, axis=1) - ) - else: - closest_peak_idx = 0 - coms = coms[closest_peak_idx] - # meas_omes = \ - # ome_edges[0] + (0.5 + coms[0])*delta_ome - meas_omes = ome_eval[0] + coms[0] * delta_ome - meas_angs = np.hstack( - [ - tth_edges[0] - + (0.5 + coms[2]) * delta_tth, - eta_edges[0] - + (0.5 + coms[1]) * delta_eta, - mapAngle( - np.radians(meas_omes), ome_period - ), - ] - ) - - # intensities - # - summed is 'integrated' over interpolated - # data - # - max is max of raw input data - sum_int = np.sum( - patch_data[ - labels == slabels[closest_peak_idx] - ] - ) - max_int = np.max( - patch_data_raw[ - labels == slabels[closest_peak_idx] - ] - ) - # ???: Should this only use labeled pixels? - # Those are segmented from interpolated data, - # not raw; likely ok in most cases. - - # need MEASURED xy coords - # FIXME: overload angles_to_cart? - gvec_c = angles_to_gvec( - meas_angs, - chi=self.chi, - rmat_c=rMat_c, - beam_vec=self.beam_vector, - ) - rMat_s = make_sample_rmat( - self.chi, meas_angs[2] - ) - meas_xy = gvec_to_xy( - gvec_c, - panel.rmat, - rMat_s, - rMat_c, - panel.tvec, - self.tvec, - tVec_c, - beam_vec=self.beam_vector, - ) - if panel.distortion is not None: - meas_xy = panel.distortion.apply_inverse( - np.atleast_2d(meas_xy) - ).flatten() - # FIXME: why is this suddenly necessary??? - meas_xy = meas_xy.squeeze() - else: - patch_data = patch_data_raw - - if peak_id < 0: - # The peak is invalid. - # Decrement the next invalid peak ID. 
- next_invalid_peak_id -= 1 - - # write output - if filename is not None: - if output_format.lower() == 'text': - writer.dump_patch( - peak_id, - hkl_id, - hkl, - sum_int, - max_int, - ang_centers[i_pt], - meas_angs, - xy_centers[i_pt], - meas_xy, - ) - elif output_format.lower() == 'hdf5': - xyc_arr = xy_eval.reshape( - prows, pcols, 2 - ).transpose(2, 0, 1) - writer.dump_patch( - detector_id, - iRefl, - peak_id, - hkl_id, - hkl, - tth_edges, - eta_edges, - np.radians(ome_eval), - xyc_arr, - ijs, - frame_indices, - patch_data, - ang_centers[i_pt], - xy_centers[i_pt], - meas_angs, - meas_xy, - ) - - if return_spot_list: - # Full output - xyc_arr = xy_eval.reshape( - prows, pcols, 2 - ).transpose(2, 0, 1) - _patch_output = [ - detector_id, - iRefl, - peak_id, - hkl_id, - hkl, - tth_edges, - eta_edges, - np.radians(ome_eval), - xyc_arr, - ijs, - frame_indices, - patch_data, - ang_centers[i_pt], - xy_centers[i_pt], - meas_angs, - meas_xy, - ] - else: - # Trimmed output - _patch_output = [ - peak_id, - hkl_id, - hkl, - sum_int, - max_int, - ang_centers[i_pt], - meas_angs, - meas_xy, - ] - patch_output.append(_patch_output) - iRefl += 1 - output[detector_id] = patch_output - if filename is not None and output_format.lower() == 'text': - writer.close() - if filename is not None and output_format.lower() == 'hdf5': - writer.close() - return compl, output - - def update_memoization_sizes(self): - # Resize all known memoization functions to have a cache at least - # the size of the number of detectors. - all_panels = list(self.detectors.values()) - PlanarDetector.update_memoization_sizes(all_panels) - CylindricalDetector.update_memoization_sizes(all_panels) - - def calc_transmission( - self, rMat_s: np.ndarray = None - ) -> dict[str, np.ndarray]: - """calculate the transmission from the - filter and polymer coating. the inverse of this - number is the intensity correction that needs - to be applied. 
actual computation is done inside - the detector class - """ - if rMat_s is None: - rMat_s = ct.identity_3x3 - - energy = self.beam_energy - transmissions = {} - for det_name, det in self.detectors.items(): - transmission_filter, transmission_phosphor = ( - det.calc_filter_coating_transmission(energy) - ) - - transmission = transmission_filter * transmission_phosphor - - if self.physics_package is not None: - transmission_physics_package = ( - det.calc_physics_package_transmission( - energy, rMat_s, self.physics_package - ) - ) - effective_pinhole_area = det.calc_effective_pinhole_area( - self.physics_package - ) - - transmission = ( - transmission - * transmission_physics_package - * effective_pinhole_area - ) - - transmissions[det_name] = transmission - return transmissions - - -# ============================================================================= -# UTILITIES -# ============================================================================= - - -class PatchDataWriter(object): - """Class for dumping Bragg reflection data.""" - - def __init__(self, filename): - self._delim = ' ' - # fmt: off - header_items = ( - '# ID', 'PID', - 'H', 'K', 'L', - 'sum(int)', 'max(int)', - 'pred tth', 'pred eta', 'pred ome', - 'meas tth', 'meas eta', 'meas ome', - 'pred X', 'pred Y', - 'meas X', 'meas Y' - ) - self._header = self._delim.join([ - self._delim.join(np.tile('{:<6}', 5)).format(*header_items[:5]), - self._delim.join(np.tile('{:<12}', 2)).format(*header_items[5:7]), - self._delim.join(np.tile('{:<23}', 10)).format(*header_items[7:17]) - ]) - # fmt: on - if isinstance(filename, IOBase): - self.fid = filename - else: - self.fid = open(filename, 'w') - print(self._header, file=self.fid) - - def __del__(self): - self.close() - - def close(self): - self.fid.close() - - def dump_patch( - self, peak_id, hkl_id, hkl, spot_int, max_int, pangs, mangs, pxy, mxy - ): - """ - !!! maybe need to check that last four inputs are arrays - """ - if mangs is None: - spot_int = np.nan - max_int = np.nan - mangs = np.nan * np.ones(3) - mxy = np.nan * np.ones(2) - - res = ( - [int(peak_id), int(hkl_id)] - + np.array(hkl, dtype=int).tolist() - + [spot_int, max_int] - + pangs.tolist() - + mangs.tolist() - + pxy.tolist() - + mxy.tolist() - ) - - output_str = self._delim.join( - [ - self._delim.join(np.tile('{:<6d}', 5)).format(*res[:5]), - self._delim.join(np.tile('{:<12e}', 2)).format(*res[5:7]), - self._delim.join(np.tile('{:<23.16e}', 10)).format(*res[7:]), - ] - ) - print(output_str, file=self.fid) - return output_str - - -class GrainDataWriter(object): - """Class for dumping grain data.""" - - def __init__(self, filename=None, array=None): - """Writes to either file or np array - - Array must be initialized with number of rows to be written. 
- """ - if filename is None and array is None: - raise RuntimeError( - 'GrainDataWriter must be specified with filename or array' - ) - - self.array = None - self.fid = None - - # array supersedes filename - if array is not None: - assert ( - array.shape[1] == 21 - ), f'grain data table must have 21 columns not {array.shape[21]}' - self.array = array - self._array_row = 0 - return - - self._delim = ' ' - # fmt: off - header_items = ( - '# grain ID', 'completeness', 'chi^2', - 'exp_map_c[0]', 'exp_map_c[1]', 'exp_map_c[2]', - 't_vec_c[0]', 't_vec_c[1]', 't_vec_c[2]', - 'inv(V_s)[0,0]', 'inv(V_s)[1,1]', 'inv(V_s)[2,2]', - 'inv(V_s)[1,2]*sqrt(2)', - 'inv(V_s)[0,2]*sqrt(2)', - 'inv(V_s)[0,1]*sqrt(2)', - 'ln(V_s)[0,0]', 'ln(V_s)[1,1]', 'ln(V_s)[2,2]', - 'ln(V_s)[1,2]', 'ln(V_s)[0,2]', 'ln(V_s)[0,1]' - ) - self._header = self._delim.join( - [self._delim.join( - np.tile('{:<12}', 3) - ).format(*header_items[:3]), - self._delim.join( - np.tile('{:<23}', len(header_items) - 3) - ).format(*header_items[3:])] - ) - # fmt: on - if isinstance(filename, IOBase): - self.fid = filename - else: - self.fid = open(filename, 'w') - print(self._header, file=self.fid) - - def __del__(self): - self.close() - - def close(self): - if self.fid is not None: - self.fid.close() - - def dump_grain(self, grain_id, completeness, chisq, grain_params): - assert ( - len(grain_params) == 12 - ), "len(grain_params) must be 12, not %d" % len(grain_params) - - # extract strain - emat = logm(np.linalg.inv(mutil.vecMVToSymm(grain_params[6:]))) - evec = mutil.symmToVecMV(emat, scale=False) - - res = ( - [int(grain_id), completeness, chisq] - + grain_params.tolist() - + evec.tolist() - ) - - if self.array is not None: - row = self._array_row - assert ( - row < self.array.shape[0] - ), f'invalid row {row} in array table' - self.array[row] = res - self._array_row += 1 - return res - - # (else) format and write to file - output_str = self._delim.join( - [ - self._delim.join(['{:<12d}', '{:<12f}', '{:<12e}']).format( - *res[:3] - ), - self._delim.join(np.tile('{:<23.16e}', len(res) - 3)).format( - *res[3:] - ), - ] - ) - print(output_str, file=self.fid) - return output_str - - -class GrainDataWriter_h5(object): - """Class for dumping grain results to an HDF5 archive. 
- - TODO: add material spec - """ - - def __init__(self, filename, instr_cfg, grain_params, use_attr=False): - if isinstance(filename, h5py.File): - self.fid = filename - else: - self.fid = h5py.File(filename + ".hdf5", "w") - icfg = dict(instr_cfg) - - # add instrument groups and attributes - self.instr_grp = self.fid.create_group('instrument') - unwrap_dict_to_h5(self.instr_grp, icfg, asattr=use_attr) - - # add grain group - self.grain_grp = self.fid.create_group('grain') - rmat_c = make_rmat_of_expmap(grain_params[:3]) - tvec_c = np.array(grain_params[3:6]).flatten() - vinv_s = np.array(grain_params[6:]).flatten() - vmat_s = np.linalg.inv(mutil.vecMVToSymm(vinv_s)) - - if use_attr: # attribute version - self.grain_grp.attrs.create('rmat_c', rmat_c) - self.grain_grp.attrs.create('tvec_c', tvec_c) - self.grain_grp.attrs.create('inv(V)_s', vinv_s) - self.grain_grp.attrs.create('vmat_s', vmat_s) - else: # dataset version - self.grain_grp.create_dataset('rmat_c', data=rmat_c) - self.grain_grp.create_dataset('tvec_c', data=tvec_c) - self.grain_grp.create_dataset('inv(V)_s', data=vinv_s) - self.grain_grp.create_dataset('vmat_s', data=vmat_s) - - data_key = 'reflection_data' - self.data_grp = self.fid.create_group(data_key) - - for det_key in self.instr_grp['detectors'].keys(): - self.data_grp.create_group(det_key) - - # FIXME: throws exception when called after close method - # def __del__(self): - # self.close() - - def close(self): - self.fid.close() - - def dump_patch( - self, - panel_id, - i_refl, - peak_id, - hkl_id, - hkl, - tth_edges, - eta_edges, - ome_centers, - xy_centers, - ijs, - frame_indices, - spot_data, - pangs, - pxy, - mangs, - mxy, - gzip=1, - ): - """ - to be called inside loop over patches - - default GZIP level for data arrays is 1 - """ - fi = np.array(frame_indices, dtype=int) - - panel_grp = self.data_grp[panel_id] - spot_grp = panel_grp.create_group("spot_%05d" % i_refl) - spot_grp.attrs.create('peak_id', int(peak_id)) - spot_grp.attrs.create('hkl_id', int(hkl_id)) - spot_grp.attrs.create('hkl', np.array(hkl, dtype=int)) - spot_grp.attrs.create('predicted_angles', pangs) - spot_grp.attrs.create('predicted_xy', pxy) - if mangs is None: - mangs = np.nan * np.ones(3) - spot_grp.attrs.create('measured_angles', mangs) - if mxy is None: - mxy = np.nan * np.ones(3) - spot_grp.attrs.create('measured_xy', mxy) - - # get centers crds from edge arrays - # FIXME: export full coordinate arrays, or just center vectors??? - # - # ome_crd, eta_crd, tth_crd = np.meshgrid( - # ome_centers, - # centers_of_edge_vec(eta_edges), - # centers_of_edge_vec(tth_edges), - # indexing='ij') - # - # ome_dim, eta_dim, tth_dim = spot_data.shape - - # !!! 
for now just exporting center vectors for spot_data - tth_crd = centers_of_edge_vec(tth_edges) - eta_crd = centers_of_edge_vec(eta_edges) - - shuffle_data = True # reduces size by 20% - spot_grp.create_dataset( - 'tth_crd', - data=tth_crd, - compression="gzip", - compression_opts=gzip, - shuffle=shuffle_data, - ) - spot_grp.create_dataset( - 'eta_crd', - data=eta_crd, - compression="gzip", - compression_opts=gzip, - shuffle=shuffle_data, - ) - spot_grp.create_dataset( - 'ome_crd', - data=ome_centers, - compression="gzip", - compression_opts=gzip, - shuffle=shuffle_data, - ) - spot_grp.create_dataset( - 'xy_centers', - data=xy_centers, - compression="gzip", - compression_opts=gzip, - shuffle=shuffle_data, - ) - spot_grp.create_dataset( - 'ij_centers', - data=ijs, - compression="gzip", - compression_opts=gzip, - shuffle=shuffle_data, - ) - spot_grp.create_dataset( - 'frame_indices', - data=fi, - compression="gzip", - compression_opts=gzip, - shuffle=shuffle_data, - ) - spot_grp.create_dataset( - 'intensities', - data=spot_data, - compression="gzip", - compression_opts=gzip, - shuffle=shuffle_data, - ) - return - - -class GenerateEtaOmeMaps(object): - """ - eta-ome map class derived from new image_series and YAML config - - ...for now... - - must provide: - - self.dataStore - self.planeData - self.iHKLList - self.etaEdges # IN RADIANS - self.omeEdges # IN RADIANS - self.etas # IN RADIANS - self.omegas # IN RADIANS - - """ - - def __init__( - self, - image_series_dict, - instrument, - plane_data, - active_hkls=None, - eta_step=0.25, - threshold=None, - ome_period=(0, 360), - ): - """ - image_series must be OmegaImageSeries class - instrument_params must be a dict (loaded from yaml spec) - active_hkls must be a list (required for now) - - FIXME: get rid of omega period; should get it from imageseries - """ - - self._planeData = plane_data - - # ???: change name of iHKLList? - # ???: can we change the behavior of iHKLList? - if active_hkls is None: - self._iHKLList = plane_data.getHKLID(plane_data.hkls, master=True) - n_rings = len(self._iHKLList) - else: - assert hasattr( - active_hkls, '__len__' - ), "active_hkls must be an iterable with __len__" - self._iHKLList = active_hkls - n_rings = len(active_hkls) - - # grab a det key and corresponding imageseries (first will do) - # !!! assuming that the imageseries for all panels - # have the same length and omegas - det_key, this_det_ims = next(iter(image_series_dict.items())) - - # handle omegas - # !!! for multi wedge, enforncing monotonicity - # !!! wedges also cannot overlap or span more than 360 - omegas_array = this_det_ims.metadata['omega'] # !!! DEGREES - delta_ome = omegas_array[0][-1] - omegas_array[0][0] - frame_mask = None - ome_period = omegas_array[0, 0] + np.r_[0.0, 360.0] # !!! be careful - if this_det_ims.omegawedges.nwedges > 1: - delta_omes = [ - (i['ostop'] - i['ostart']) / i['nsteps'] - for i in this_det_ims.omegawedges.wedges - ] - check_wedges = mutil.uniqueVectors( - np.atleast_2d(delta_omes), tol=1e-6 - ).squeeze() - assert ( - check_wedges.size == 1 - ), "all wedges must have the same delta omega to 1e-6" - # grab representative delta ome - # !!! assuming positive delta consistent with OmegaImageSeries - delta_ome = delta_omes[0] - - # grab full-range start/stop - # !!! be sure to map to the same period to enable arithmatic - # ??? safer to do this way rather than just pulling from - # the omegas attribute? - owedges = this_det_ims.omegawedges.wedges - ostart = owedges[0]['ostart'] # !!! 
DEGREES - ostop = float( - mapAngle(owedges[-1]['ostop'], ome_period, units='degrees') - ) - # compute total nsteps - # FIXME: need check for roundoff badness - nsteps = int((ostop - ostart) / delta_ome) - ome_edges_full = np.linspace( - ostart, ostop, num=nsteps + 1, endpoint=True - ) - omegas_array = np.vstack( - [ome_edges_full[:-1], ome_edges_full[1:]] - ).T - ome_centers = np.average(omegas_array, axis=1) - - # use OmegaImageSeries method to determine which bins have data - # !!! this array has -1 outside a wedge - # !!! again assuming the valid frame order increases monotonically - frame_mask = np.array( - [ - this_det_ims.omega_to_frame(ome)[0] != -1 - for ome in ome_centers - ] - ) - - # ???: need to pass a threshold? - eta_mapping, etas = instrument.extract_polar_maps( - plane_data, - image_series_dict, - active_hkls=active_hkls, - threshold=threshold, - tth_tol=None, - eta_tol=eta_step, - ) - - # for convenience grab map shape from first - map_shape = next(iter(eta_mapping.values())).shape[1:] - - # pack all detectors with masking - # FIXME: add omega masking - data_store = [] - for i_ring in range(n_rings): - # first handle etas - full_map = np.zeros(map_shape, dtype=float) - nan_mask_full = np.zeros( - (len(eta_mapping), map_shape[0], map_shape[1]) - ) - i_p = 0 - for det_key, eta_map in eta_mapping.items(): - nan_mask = ~np.isnan(eta_map[i_ring]) - nan_mask_full[i_p] = nan_mask - full_map[nan_mask] += eta_map[i_ring][nan_mask] - i_p += 1 - re_nan_these = np.sum(nan_mask_full, axis=0) == 0 - full_map[re_nan_these] = np.nan - - # now omegas - if frame_mask is not None: - # !!! must expand row dimension to include - # skipped omegas - tmp = np.ones((len(frame_mask), map_shape[1])) * np.nan - tmp[frame_mask, :] = full_map - full_map = tmp - data_store.append(full_map) - self._dataStore = data_store - - # set required attributes - self._omegas = mapAngle( - np.radians(np.average(omegas_array, axis=1)), - np.radians(ome_period), - ) - self._omeEdges = mapAngle( - np.radians(np.r_[omegas_array[:, 0], omegas_array[-1, 1]]), - np.radians(ome_period), - ) - - # !!! must avoid the case where omeEdges[0] = omeEdges[-1] for the - # indexer to work properly - if abs(self._omeEdges[0] - self._omeEdges[-1]) <= ct.sqrt_epsf: - # !!! 
SIGNED delta ome
-            del_ome = np.radians(omegas_array[0, 1] - omegas_array[0, 0])
-            self._omeEdges[-1] = self._omeEdges[-2] + del_ome
-
-        # handle etas
-        # WARNING: unlike the omegas in imageseries metadata,
-        # these are in RADIANS and represent bin centers
-        self._etaEdges = etas
-        self._etas = self._etaEdges[:-1] + 0.5 * np.radians(eta_step)
-
-    @property
-    def dataStore(self):
-        return self._dataStore
-
-    @property
-    def planeData(self):
-        return self._planeData
-
-    @property
-    def iHKLList(self):
-        return np.atleast_1d(self._iHKLList).flatten()
-
-    @property
-    def etaEdges(self):
-        return self._etaEdges
-
-    @property
-    def omeEdges(self):
-        return self._omeEdges
-
-    @property
-    def etas(self):
-        return self._etas
-
-    @property
-    def omegas(self):
-        return self._omegas
-
-    def save(self, filename):
-        xrdutil.EtaOmeMaps.save_eta_ome_maps(self, filename)
-
-
-def _generate_ring_params(tthr, ptth, peta, eta_edges, delta_eta):
-    # mark pixels in the spec'd tth range
-    pixels_in_tthr = np.logical_and(ptth >= tthr[0], ptth <= tthr[1])
-
-    # catch case where ring isn't on detector
-    if not np.any(pixels_in_tthr):
-        return None
-
-    pixel_ids = np.where(pixels_in_tthr)
-
-    # grab relevant eta coords using histogram
-    pixel_etas = peta[pixel_ids]
-    reta_hist = histogram(pixel_etas, eta_edges)
-    bins_on_detector = np.where(reta_hist)[0]
-
-    return pixel_etas, eta_edges, pixel_ids, bins_on_detector
-
-
-def run_fast_histogram(x, bins, weights=None):
-    return histogram1d(x, len(bins) - 1, (bins[0], bins[-1]), weights=weights)
-
-
-def run_numpy_histogram(x, bins, weights=None):
-    # NOTE: np.histogram returns (counts, bin_edges); keep the counts only
-    return np.histogram(x, bins=bins, weights=weights)[0]
-
-
-histogram = run_fast_histogram if fast_histogram else run_numpy_histogram
-
-
-def _run_histograms(rows, ims, tth_ranges, ring_maps, ring_params, threshold):
-    for i_row in range(*rows):
-        image = ims[i_row]
-
-        # handle threshold if specified
-        if threshold is not None:
-            # !!! NaNs get preserved
-            image = np.array(image)
-            image[image < threshold] = 0.0
-
-        for i_r, tthr in enumerate(tth_ranges):
-            this_map = ring_maps[i_r]
-            params = ring_params[i_r]
-            if not params:
-                # We are supposed to skip this ring...
-                continue
-
-            # Unpack the params
-            pixel_etas, eta_edges, pixel_ids, bins_on_detector = params
-            result = histogram(pixel_etas, eta_edges, weights=image[pixel_ids])
-
-            # Note that this preserves nan values for bins not on the detector.
-            this_map[i_row, bins_on_detector] = result[bins_on_detector]
-
-
-def _extract_detector_line_positions(
-    iter_args,
-    plane_data,
-    tth_tol,
-    eta_tol,
-    eta_centers,
-    npdiv,
-    collapse_tth,
-    collapse_eta,
-    do_interpolation,
-    do_fitting,
-    fitting_kwargs,
-    tth_distortion,
-    max_workers,
-):
-    panel, instr_cfg, images, pbp = iter_args
-
-    if images.ndim == 2:
-        images = np.tile(images, (1, 1, 1))
-    elif images.ndim != 3:
-        raise RuntimeError("images must be 2- or 3-d")
-
-    # make rings
-    # !!! adding tth_distortion pass-through; comes in as dict over panels
-    tth_distr_cls = None
-    if tth_distortion is not None:
-        tth_distr_cls = tth_distortion[panel.name]
-
-    pow_angs, pow_xys, tth_ranges = panel.make_powder_rings(
-        plane_data,
-        merge_hkls=True,
-        delta_tth=tth_tol,
-        delta_eta=eta_tol,
-        eta_list=eta_centers,
-        tth_distortion=tth_distr_cls,
-    )
-
-    tth_tols = np.degrees(np.hstack([i[1] - i[0] for i in tth_ranges]))
-
-    # !!!
this is only needed if doing fitting - if isinstance(plane_data, PlaneData): - tth_idx, tth_ranges = plane_data.getMergedRanges(cullDupl=True) - tth_ref = plane_data.getTTh() - tth0 = [np.degrees(tth_ref[i]) for i in tth_idx] - else: - tth0 = plane_data - - # ================================================================= - # LOOP OVER RING SETS - # ================================================================= - pbar_rings = partial( - tqdm, total=len(pow_angs), desc="Ringset", position=pbp - ) - - kwargs = { - 'instr_cfg': instr_cfg, - 'panel': panel, - 'eta_tol': eta_tol, - 'npdiv': npdiv, - 'collapse_tth': collapse_tth, - 'collapse_eta': collapse_eta, - 'images': images, - 'do_interpolation': do_interpolation, - 'do_fitting': do_fitting, - 'fitting_kwargs': fitting_kwargs, - 'tth_distortion': tth_distr_cls, - } - func = partial(_extract_ring_line_positions, **kwargs) - iter_arg = zip(pow_angs, pow_xys, tth_tols, tth0) - with ProcessPoolExecutor( - mp_context=constants.mp_context, max_workers=max_workers - ) as executor: - return list(pbar_rings(executor.map(func, iter_arg))) - - -def _extract_ring_line_positions( - iter_args, - instr_cfg, - panel, - eta_tol, - npdiv, - collapse_tth, - collapse_eta, - images, - do_interpolation, - do_fitting, - fitting_kwargs, - tth_distortion, -): - """ - Extracts data for a single Debye-Scherrer ring . - - Parameters - ---------- - iter_args : tuple - (angs [radians], - xys [mm], - tth_tol [deg], - this_tth0 [deg]) - instr_cfg : TYPE - DESCRIPTION. - panel : TYPE - DESCRIPTION. - eta_tol : TYPE - DESCRIPTION. - npdiv : TYPE - DESCRIPTION. - collapse_tth : TYPE - DESCRIPTION. - collapse_eta : TYPE - DESCRIPTION. - images : TYPE - DESCRIPTION. - do_interpolation : TYPE - DESCRIPTION. - do_fitting : TYPE - DESCRIPTION. - fitting_kwargs : TYPE - DESCRIPTION. - tth_distortion : TYPE - DESCRIPTION. - - Yields - ------ - patch_data : TYPE - DESCRIPTION. - - """ - # points are already checked to fall on detector - angs, xys, tth_tol, this_tth0 = iter_args - - # SS 01/31/25 noticed some nans in xys even after clipping - # going to do another round of masking to get rid of those - nan_mask = ~np.logical_or(np.isnan(xys), np.isnan(angs)) - nan_mask = np.logical_or.reduce(nan_mask, 1) - if angs.ndim > 1 and xys.ndim > 1: - angs = angs[nan_mask, :] - xys = xys[nan_mask, :] - - n_images = len(images) - native_area = panel.pixel_area - - # make the tth,eta patches for interpolation - patches = xrdutil.make_reflection_patches( - instr_cfg, - angs, - panel.angularPixelSize(xys), - tth_tol=tth_tol, - eta_tol=eta_tol, - npdiv=npdiv, - quiet=True, - ) - - # loop over patches - # FIXME: fix initialization - if collapse_tth: - patch_data = np.zeros((len(angs), n_images)) - else: - patch_data = [] - for i_p, patch in enumerate(patches): - # strip relevant objects out of current patch - vtx_angs, vtx_xys, conn, areas, xys_eval, ijs = patch - - # need to reshape eval pts for interpolation - xy_eval = np.vstack([xys_eval[0].flatten(), xys_eval[1].flatten()]).T - - _, on_panel = panel.clip_to_panel(xy_eval) - - if np.any(~on_panel): - continue - - if collapse_tth: - ang_data = (vtx_angs[0][0, [0, -1]], vtx_angs[1][[0, -1], 0]) - elif collapse_eta: - # !!! 
yield the tth bin centers - tth_centers = np.average( - np.vstack([vtx_angs[0][0, :-1], vtx_angs[0][0, 1:]]), axis=0 - ) - ang_data = (tth_centers, angs[i_p][-1]) - if do_fitting: - fit_data = [] - else: - ang_data = vtx_angs - - prows, pcols = areas.shape - area_fac = areas / float(native_area) - - # interpolate - if not collapse_tth: - ims_data = [] - for j_p in np.arange(len(images)): - # catch interpolation type - image = images[j_p] - if do_interpolation: - p_img = ( - panel.interpolate_bilinear( - xy_eval, - image, - ).reshape(prows, pcols) - * area_fac - ) - else: - p_img = image[ijs[0], ijs[1]] * area_fac - - # catch flat spectrum data, which will cause - # fitting to fail. - # ???: best here, or make fitting handle it? - mxval = np.max(p_img) - mnval = np.min(p_img) - if mxval == 0 or (1.0 - mnval / mxval) < 0.01: - continue - - # catch collapsing options - if collapse_tth: - patch_data[i_p, j_p] = np.average(p_img) - # ims_data.append(np.sum(p_img)) - else: - if collapse_eta: - lineout = np.average(p_img, axis=0) - ims_data.append(lineout) - if do_fitting: - if tth_distortion is not None: - # must correct tth0 - tmp = tth_distortion.apply( - panel.angles_to_cart( - np.vstack( - [ - np.radians(this_tth0), - np.tile( - ang_data[-1], len(this_tth0) - ), - ] - ).T - ), - return_nominal=True, - ) - pk_centers = np.degrees(tmp[:, 0]) - else: - pk_centers = this_tth0 - kwargs = { - 'tth_centers': np.degrees(tth_centers), - 'lineout': lineout, - 'tth_pred': pk_centers, - **fitting_kwargs, - } - result = fit_ring(**kwargs) - fit_data.append(result) - else: - ims_data.append(p_img) - if not collapse_tth: - output = [ang_data, ims_data] - if do_fitting: - output.append(fit_data) - patch_data.append(output) - - return patch_data - - -DETECTOR_TYPES = { - 'planar': PlanarDetector, - 'cylindrical': CylindricalDetector, -} - - -class BufferShapeMismatchError(RuntimeError): - # This is raised when the buffer shape does not match the detector shape - pass - - -@contextmanager -def switch_xray_source(instr: HEDMInstrument, xray_source: Optional[str]): - if xray_source is None: - # If the x-ray source is None, leave it as the current active one - yield - return - - prev_beam_name = instr.active_beam_name - instr.active_beam_name = xray_source - try: - yield - finally: - instr.active_beam_name = prev_beam_name diff --git a/hexrd/powder/material/crystallography.py b/hexrd/powder/material/crystallography.py deleted file mode 100644 index 29e621972..000000000 --- a/hexrd/powder/material/crystallography.py +++ /dev/null @@ -1,2260 +0,0 @@ -# -*- coding: utf-8 -*- -# ============================================================================= -# Copyright (c) 2012, Lawrence Livermore National Security, LLC. -# Produced at the Lawrence Livermore National Laboratory. -# Written by Joel Bernier and others. -# LLNL-CODE-529294. -# All rights reserved. -# -# This file is part of HEXRD. For details on dowloading the source, -# see the file COPYING. -# -# Please also see the file LICENSE. -# -# This program is free software; you can redistribute it and/or modify it under -# the terms of the GNU Lesser General Public License (as published by the Free -# Software Foundation) version 2.1 dated February 1999. -# -# This program is distributed in the hope that it will be useful, but -# WITHOUT ANY WARRANTY; without even the IMPLIED WARRANTY OF MERCHANTABILITY -# or FITNESS FOR A PARTICULAR PURPOSE. See the terms and conditions of the -# GNU General Public License for more details. 
-# -# You should have received a copy of the GNU Lesser General Public -# License along with this program (see file LICENSE); if not, write to -# the Free Software Foundation, Inc., 59 Temple Place, Suite 330, -# Boston, MA 02111-1307 USA or visit . -# ============================================================================= -import re -import copy -import csv -import os -from math import pi -from typing import Optional, Union, Dict, List, Tuple - -import numpy as np - -from hexrd.core.material.unitcell import unitcell -from hexrd.core.deprecation import deprecated -from hexrd.core import constants -from hexrd.core.matrixutil import unitVector -from hexrd.core.rotations import ( - rotMatOfExpMap, - mapAngle, - applySym, - ltypeOfLaueGroup, - quatOfLaueGroup, -) -from hexrd.core.transforms import xfcapi -from hexrd.core import valunits -from hexrd.core.valunits import toFloat -from hexrd.core.constants import d2r, r2d, sqrt3by2, epsf, sqrt_epsf - -"""module vars""" - -# units -dUnit = 'angstrom' -outputDegrees = False -outputDegrees_bak = outputDegrees - - -def hklToStr(hkl: np.ndarray) -> str: - """ - Converts hkl representation to a string. - - Parameters - ---------- - hkl : np.ndarray - 3 element list of h, k, and l values (Miller indices). - - Returns - ------- - str - Space-separated string representation of h, k, and l values. - - """ - return re.sub(r'[\[\]\(\)\{\},]', '', str(hkl)) - - -def tempSetOutputDegrees(val: bool) -> None: - """ - Set the global outputDegrees flag temporarily. Can be reverted with - revertOutputDegrees(). - - Parameters - ---------- - val : bool - True to output angles in degrees, False to output angles in radians. - - Returns - ------- - None - - """ - global outputDegrees, outputDegrees_bak - outputDegrees_bak = outputDegrees - outputDegrees = val - - -def revertOutputDegrees() -> None: - """ - Revert the effect of tempSetOutputDegrees(), resetting the outputDegrees - flag to its previous value (True to output in degrees, False for radians). - - Returns - ------- - None - """ - global outputDegrees, outputDegrees_bak - outputDegrees = outputDegrees_bak - - -def cosineXform( - a: np.ndarray, b: np.ndarray, c: np.ndarray -) -> tuple[np.ndarray, np.ndarray]: - """ - Spherical trig transform to take alpha, beta, gamma to expressions - for cos(alpha*). See ref below. - - [1] R. J. Neustadt, F. W. Cagle, Jr., and J. Waser, ``Vector algebra and - the relations between direct and reciprocal lattice quantities''. Acta - Cryst. (1968), A24, 247--248 - - Parameters - ---------- - a : np.ndarray - List of alpha angle values (radians). - b : np.ndarray - List of beta angle values (radians). - c : np.ndarray - List of gamma angle values (radians). - - Returns - ------- - np.ndarray - List of cos(alpha*) values. - np.ndarray - List of sin(alpha*) values. - - """ - cosar = (np.cos(b) * np.cos(c) - np.cos(a)) / (np.sin(b) * np.sin(c)) - sinar = np.sqrt(1 - cosar**2) - return cosar, sinar - - -def processWavelength(arg: Union[valunits.valWUnit, float]) -> float: - """ - Convert an energy value to a wavelength. If argument has units of length - or energy, will convert to globally specified unit type for wavelength - (dUnit). If argument is a scalar, assumed input units are keV. 
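-
-    Example (an illustrative sketch; the 80.725 keV value is an arbitrary
-    assumption, not from the original source):
-
-    >>> processWavelength(80.725)  # bare scalar is interpreted as keV
-    0.1535...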
- """ - if isinstance(arg, valunits.valWUnit): - # arg is a valunits.valWUnit object - if arg.isLength(): - return arg.getVal(dUnit) - elif arg.isEnergy(): - e = arg.getVal('keV') - return valunits.valWUnit( - 'wavelength', 'length', constants.keVToAngstrom(e), 'angstrom' - ).getVal(dUnit) - else: - raise RuntimeError('do not know what to do with ' + str(arg)) - else: - # !!! assuming arg is in keV - return valunits.valWUnit( - 'wavelength', 'length', constants.keVToAngstrom(arg), 'angstrom' - ).getVal(dUnit) - - -def latticeParameters(lvec): - """ - Generates direct and reciprocal lattice vector components in a - crystal-relative RHON basis, X. The convention for fixing X to the - lattice is such that a || x1 and c* || x3, where a and c* are - direct and reciprocal lattice vectors, respectively. - """ - lnorm = np.sqrt(np.sum(lvec**2, 0)) - - a = lnorm[0] - b = lnorm[1] - c = lnorm[2] - - ahat = lvec[:, 0] / a - bhat = lvec[:, 1] / b - chat = lvec[:, 2] / c - - gama = np.arccos(np.dot(ahat, bhat)) - beta = np.arccos(np.dot(ahat, chat)) - alfa = np.arccos(np.dot(bhat, chat)) - if outputDegrees: - gama = r2d * gama - beta = r2d * beta - alfa = r2d * alfa - - return [a, b, c, alfa, beta, gama] - - -def latticePlanes( - hkls: np.ndarray, - lparms: np.ndarray, - ltype: Optional[str] = 'cubic', - wavelength: Optional[float] = 1.54059292, - strainMag: Optional[float] = None, -) -> Dict[str, np.ndarray]: - """ - Generates lattice plane data in the direct lattice for a given set - of Miller indices. Vector components are written in the - crystal-relative RHON basis, X. The convention for fixing X to the - lattice is such that a || x1 and c* || x3, where a and c* are - direct and reciprocal lattice vectors, respectively. - - USAGE: - - planeInfo = latticePlanes(hkls, lparms, **kwargs) - - INPUTS: - - 1) hkls (3 x n float ndarray) is the array of Miller indices for - the planes of interest. The vectors are assumed to be - concatenated along the 1-axis (horizontal). - - 2) lparms (1 x m float list) is the array of lattice parameters, - where m depends on the symmetry group (see below). - - The following optional arguments are recognized: - - 3) ltype=(string) is a string representing the symmetry type of - the implied Laue group. The 11 available choices are shown - below. The default value is 'cubic'. Note that each group - expects a lattice parameter array of the indicated length - and order. - - latticeType lparms - ----------- ------------ - 'cubic' a - 'hexagonal' a, c - 'trigonal' a, c - 'rhombohedral' a, alpha (in degrees) - 'tetragonal' a, c - 'orthorhombic' a, b, c - 'monoclinic' a, b, c, beta (in degrees) - 'triclinic' a, b, c, alpha, beta, gamma (in degrees) - - 4) wavelength= is a value represented the wavelength in - Angstroms to calculate bragg angles for. The default value - is for Cu K-alpha radiation (1.54059292 Angstrom) - - 5) strainMag=None - - OUTPUTS: - - 1) planeInfo is a dictionary containing the following keys/items: - - normals (3, n) double array array of the components to the - unit normals for each {hkl} in - X (horizontally concatenated) - - dspacings (n, ) double array array of the d-spacings for - each {hkl} - - tThetas (n, ) double array array of the Bragg angles for - each {hkl} relative to the - specified wavelength - - NOTES: - - *) This function is effectively a wrapper to 'latticeVectors'. - See 'help(latticeVectors)' for additional info. 
- - *) Lattice plane d-spacings are calculated from the reciprocal - lattice vectors specified by {hkl} as shown in Appendix 1 of - [1]. - - REFERENCES: - - [1] B. D. Cullity, ``Elements of X-Ray Diffraction, 2 - ed.''. Addison-Wesley Publishing Company, Inc., 1978. ISBN - 0-201-01174-3 - - """ - location = 'latticePlanes' - - assert ( - hkls.shape[0] == 3 - ), f"hkls aren't column vectors in call to '{location}'!" - - tag = ltype - wlen = wavelength - - # get B - L = latticeVectors(lparms, tag) - - # get G-vectors -- reciprocal vectors in crystal frame - G = np.dot(L['B'], hkls) - - # magnitudes - d = 1 / np.sqrt(np.sum(G**2, 0)) - - aconv = 1.0 - if outputDegrees: - aconv = r2d - - # two thetas - sth = wlen / 2.0 / d - mask = np.abs(sth) < 1.0 - tth = np.zeros(sth.shape) - - tth[~mask] = np.nan - tth[mask] = aconv * 2.0 * np.arcsin(sth[mask]) - - p = dict(normals=unitVector(G), dspacings=d, tThetas=tth) - - if strainMag is not None: - p['tThetasLo'] = np.zeros(sth.shape) - p['tThetasHi'] = np.zeros(sth.shape) - - mask = (np.abs(wlen / 2.0 / (d * (1.0 + strainMag))) < 1.0) & ( - np.abs(wlen / 2.0 / (d * (1.0 - strainMag))) < 1.0 - ) - - p['tThetasLo'][~mask] = np.nan - p['tThetasHi'][~mask] = np.nan - - p['tThetasLo'][mask] = ( - aconv * 2 * np.arcsin(wlen / 2.0 / (d[mask] * (1.0 + strainMag))) - ) - p['tThetasHi'][mask] = ( - aconv * 2 * np.arcsin(wlen / 2.0 / (d[mask] * (1.0 - strainMag))) - ) - - return p - - -def latticeVectors( - lparms: np.ndarray, - tag: Optional[str] = 'cubic', - radians: Optional[bool] = False, -) -> Dict[str, Union[np.ndarray, float]]: - """ - Generates direct and reciprocal lattice vector components in a - crystal-relative RHON basis, X. The convention for fixing X to the - lattice is such that a || x1 and c* || x3, where a and c* are - direct and reciprocal lattice vectors, respectively. - - USAGE: - - lattice = LatticeVectors(lparms, ) - - INPUTS: - - 1) lparms (1 x n float list) is the array of lattice parameters, - where n depends on the symmetry group (see below). - - 2) tag (string) is a case-insensitive string representing the - symmetry type of the implied Laue group. The 11 available choices - are shown below. The default value is 'cubic'. Note that each - group expects a lattice parameter array of the indicated length - and order. - - latticeType lparms - ----------- ------------ - 'cubic' a - 'hexagonal' a, c - 'trigonal' a, c - 'rhombohedral' a, alpha (in degrees) - 'tetragonal' a, c - 'orthorhombic' a, b, c - 'monoclinic' a, b, c, beta (in degrees) - 'triclinic' a, b, c, alpha, beta, gamma (in degrees) - - The following optional arguments are recognized: - - 3) radians= is a boolean flag indicating usage of radians rather - than degrees, defaults to false. - - OUTPUTS: - - 1) lattice is a dictionary containing the following keys/items: - - F (3, 3) double array transformation matrix taking - componenents in the direct - lattice (i.e. {uvw}) to the - reference, X - - B (3, 3) double array transformation matrix taking - componenents in the reciprocal - lattice (i.e. 
{hkl}) to X - - BR (3, 3) double array transformation matrix taking - componenents in the reciprocal - lattice to the Fable reference - frame (see notes) - - U0 (3, 3) double array transformation matrix - (orthogonal) taking - componenents in the - Fable reference frame to X - - vol double the unit cell volume - - - dparms (6, ) double list the direct lattice parameters: - [a b c alpha beta gamma] - - rparms (6, ) double list the reciprocal lattice - parameters: - [a* b* c* alpha* beta* gamma*] - - NOTES: - - *) The conventions used for assigning a RHON basis, - X -> {x1, x2, x3}, to each point group are consistent with - those published in Appendix B of [1]. Namely: a || x1 and - c* || x3. This differs from the convention chosen by the Fable - group, where a* || x1 and c || x3 [2]. - - *) The unit cell angles are defined as follows: - alpha=acos(b'*c/|b||c|), beta=acos(c'*a/|c||a|), and - gamma=acos(a'*b/|a||b|). - - *) The reciprocal lattice vectors are calculated using the - crystallographic convention, where the prefactor of 2*pi is - omitted. In this convention, the reciprocal lattice volume is - 1/V. - - *) Several relations from [3] were employed in the component - calculations. - - REFERENCES: - - [1] J. F. Nye, ``Physical Properties of Crystals: Their - Representation by Tensors and Matrices''. Oxford University - Press, 1985. ISBN 0198511655 - - [2] E. M. Lauridsen, S. Schmidt, R. M. Suter, and H. F. Poulsen, - ``Tracking: a method for structural characterization of grains - in powders or polycrystals''. J. Appl. Cryst. (2001). 34, - 744--750 - - [3] R. J. Neustadt, F. W. Cagle, Jr., and J. Waser, ``Vector - algebra and the relations between direct and reciprocal - lattice quantities''. Acta Cryst. (1968), A24, 247--248 - - - """ - - # build index for sorting out lattice parameters - lattStrings = [ - 'cubic', - 'hexagonal', - 'trigonal', - 'rhombohedral', - 'tetragonal', - 'orthorhombic', - 'monoclinic', - 'triclinic', - ] - - if radians: - aconv = 1.0 - else: - aconv = pi / 180.0 # degToRad - deg90 = pi / 2.0 - deg120 = 2.0 * pi / 3.0 - # - if tag == lattStrings[0]: - # cubic - cellparms = np.r_[np.tile(lparms[0], (3,)), deg90 * np.ones((3,))] - elif tag == lattStrings[1] or tag == lattStrings[2]: - # hexagonal | trigonal (hex indices) - cellparms = np.r_[ - lparms[0], lparms[0], lparms[1], deg90, deg90, deg120 - ] - elif tag == lattStrings[3]: - # rhombohedral - cellparms = np.r_[ - np.tile(lparms[0], (3,)), np.tile(aconv * lparms[1], (3,)) - ] - elif tag == lattStrings[4]: - # tetragonal - cellparms = np.r_[lparms[0], lparms[0], lparms[1], deg90, deg90, deg90] - elif tag == lattStrings[5]: - # orthorhombic - cellparms = np.r_[lparms[0], lparms[1], lparms[2], deg90, deg90, deg90] - elif tag == lattStrings[6]: - # monoclinic - cellparms = np.r_[ - lparms[0], lparms[1], lparms[2], deg90, aconv * lparms[3], deg90 - ] - elif tag == lattStrings[7]: - # triclinic - cellparms = np.r_[ - lparms[0], - lparms[1], - lparms[2], - aconv * lparms[3], - aconv * lparms[4], - aconv * lparms[5], - ] - else: - raise RuntimeError(f'lattice tag "{tag}" is not recognized') - - alpha, beta, gamma = cellparms[3:6] - cosalfar, sinalfar = cosineXform(alpha, beta, gamma) - - a = cellparms[0] * np.r_[1, 0, 0] - b = cellparms[1] * np.r_[np.cos(gamma), np.sin(gamma), 0] - c = ( - cellparms[2] - * np.r_[ - np.cos(beta), -cosalfar * np.sin(beta), sinalfar * np.sin(beta) - ] - ) - - ad = np.sqrt(np.sum(a**2)) - bd = np.sqrt(np.sum(b**2)) - cd = np.sqrt(np.sum(c**2)) - - # Cell volume - V = np.dot(a, 
np.cross(b, c)) - - # F takes components in the direct lattice to X - F = np.c_[a, b, c] - - # Reciprocal lattice vectors - astar = np.cross(b, c) / V - bstar = np.cross(c, a) / V - cstar = np.cross(a, b) / V - - # and parameters - ar = np.sqrt(np.sum(astar**2)) - br = np.sqrt(np.sum(bstar**2)) - cr = np.sqrt(np.sum(cstar**2)) - - alfar = np.arccos(np.dot(bstar, cstar) / br / cr) - betar = np.arccos(np.dot(cstar, astar) / cr / ar) - gamar = np.arccos(np.dot(astar, bstar) / ar / br) - - # B takes components in the reciprocal lattice to X - B = np.c_[astar, bstar, cstar] - - cosalfar2, sinalfar2 = cosineXform(alfar, betar, gamar) - - afable = ar * np.r_[1, 0, 0] - bfable = br * np.r_[np.cos(gamar), np.sin(gamar), 0] - cfable = ( - cr - * np.r_[ - np.cos(betar), - -cosalfar2 * np.sin(betar), - sinalfar2 * np.sin(betar), - ] - ) - - BR = np.c_[afable, bfable, cfable] - U0 = np.dot(B, np.linalg.inv(BR)) - if outputDegrees: - dparms = np.r_[ad, bd, cd, r2d * np.r_[alpha, beta, gamma]] - rparms = np.r_[ar, br, cr, r2d * np.r_[alfar, betar, gamar]] - else: - dparms = np.r_[ad, bd, cd, np.r_[alpha, beta, gamma]] - rparms = np.r_[ar, br, cr, np.r_[alfar, betar, gamar]] - - return { - 'F': F, - 'B': B, - 'BR': BR, - 'U0': U0, - 'vol': V, - 'dparms': dparms, - 'rparms': rparms, - } - - -def hexagonalIndicesFromRhombohedral(hkl): - """ - converts rhombohedral hkl to hexagonal indices - """ - HKL = np.zeros((3, hkl.shape[1]), dtype='int') - - HKL[0, :] = hkl[0, :] - hkl[1, :] - HKL[1, :] = hkl[1, :] - hkl[2, :] - HKL[2, :] = hkl[0, :] + hkl[1, :] + hkl[2, :] - - return HKL - - -def rhombohedralIndicesFromHexagonal(HKL): - """ - converts hexagonal hkl to rhombohedral indices - """ - hkl = np.zeros((3, HKL.shape[1]), dtype='int') - - hkl[0, :] = 2 * HKL[0, :] + HKL[1, :] + HKL[2, :] - hkl[1, :] = -HKL[0, :] + HKL[1, :] + HKL[2, :] - hkl[2, :] = -HKL[0, :] - 2 * HKL[1, :] + HKL[2, :] - - hkl = hkl / 3.0 - return hkl - - -def rhombohedralParametersFromHexagonal(a_h, c_h): - """ - converts hexagonal lattice parameters (a, c) to rhombohedral - lattice parameters (a, alpha) - """ - a_r = np.sqrt(3 * a_h**2 + c_h**2) / 3.0 - alfa_r = 2 * np.arcsin(3.0 / (2 * np.sqrt(3 + (c_h / a_h) ** 2))) - if outputDegrees: - alfa_r = r2d * alfa_r - return a_r, alfa_r - - -def convert_Miller_direction_to_cartesian(uvw, a=1.0, c=1.0, normalize=False): - """ - Converts 3-index hexagonal Miller direction indices to components in the - crystal reference frame. - Parameters - ---------- - uvw : array_like - The (n, 3) array of 3-index hexagonal indices to convert. - a : scalar, optional - The `a` lattice parameter. The default value is 1. - c : scalar, optional - The `c` lattice parameter. The default value is 1. - normalize : bool, optional - Flag for whether or not to normalize output vectors - Returns - ------- - numpy.ndarray - The (n, 3) array of cartesian components associated with the input - direction indices. - Notes - ----- - 1) The [uv.w] the Miller-Bravais convention is in the hexagonal basis - {a1, a2, a3, c}. The basis for the output, {o1, o2, o3}, is - chosen such that - o1 || a1 - o3 || c - o2 = o3 ^ o1 - """ - u, v, w = np.atleast_2d(uvw).T - retval = np.vstack([1.5 * u * a, sqrt3by2 * a * (2 * v + u), w * c]) - if normalize: - return unitVector(retval).T - else: - return retval.T - - -def convert_Miller_direction_to_MillerBravias(uvw, suppress_redundant=True): - """ - Converts 3-index hexagonal Miller direction indices to 4-index - Miller-Bravais direction indices. 
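-
-    The relation used is the standard one, U = (2u - v)/3, V = (2v - u)/3,
-    T = -(U + V), W = w. For example (added for illustration), the a1 axis
-    [1 0 0] corresponds to the Miller-Bravais direction (1/3)[2 -1 -1 0].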
- Parameters - ---------- - uvw : array_like - The (n, 3) array of 3-index hexagonal Miller indices to convert. - suppress_redundant : bool, optional - Flag to suppress the redundant 3rd index. The default is True. - Returns - ------- - numpy.ndarray - The (n, 3) or (n, 4) array -- depending on kwarg -- of Miller-Bravis - components associated with the input Miller direction indices. - Notes - ----- - * NOT for plane normals!!! - """ - u, v, w = np.atleast_2d(uvw).T - retval = np.vstack([(2 * u - v) / 3, (2 * v - u) / 3, w]).T - rem = np.vstack([np.mod(np.tile(i[0], 2), i[1:]) for i in retval]) - rem[abs(rem) < epsf] = np.nan - lcm = np.nanmin(rem, axis=1) - lcm[np.isnan(lcm)] = 1 - retval = retval / np.tile(lcm, (3, 1)).T - if suppress_redundant: - return retval - else: - t = np.atleast_2d(1 - np.sum(retval[:2], axis=1)).T - return np.hstack([retval[:, :2], t, np.atleast_2d(retval[:, 2]).T]) - - -def convert_MillerBravias_direction_to_Miller(UVW): - """ - Converts 4-index hexagonal Miller-Bravais direction indices to - 3-index Miller direction indices. - Parameters - ---------- - UVW : array_like - The (n, 3) array of **non-redundant** Miller-Bravais direction indices - to convert. - Returns - ------- - numpy.ndarray - The (n, 3) array of Miller direction indices associated with the - input Miller-Bravais indices. - Notes - ----- - * NOT for plane normals!!! - """ - U, V, W = np.atleast_2d(UVW).T - return np.vstack([2 * U + V, 2 * V + U, W]) - - -class PlaneData(object): - """ - Careful with ordering: Outputs are ordered by the 2-theta for the - hkl unless you get self._hkls directly, and this order can change - with changes in lattice parameters (lparms); setting and getting - exclusions works on the current hkl ordering, not the original - ordering (in self._hkls), but exclusions are stored in the - original ordering in case the hkl ordering does change with - lattice parameters - - if not None, tThWidth takes priority over strainMag in setting - two-theta ranges; changing strainMag automatically turns off - tThWidth - """ - - def __init__(self, hkls: Optional[np.ndarray], *args, **kwargs) -> None: - """ - Constructor for PlaneData - - Parameters - ---------- - hkls : np.ndarray - Miller indices to be used in the plane data. Can be None if - args is another PlaneData object - - *args - Unnamed arguments. Could be in the format of `lparms, laueGroup, - wavelength, strainMag`, or just a `PlaneData` object. - - **kwargs - Valid keyword arguments include: - - doTThSort - - exclusions - - tThMax - - tThWidth - """ - self._doTThSort = True - self._exclusions = None - self._tThMax = None - - if len(args) == 4: - lparms, laueGroup, wavelength, strainMag = args - tThWidth = None - self._wavelength = processWavelength(wavelength) - self._lparms = self._parseLParms(lparms) - elif len(args) == 1 and isinstance(args[0], PlaneData): - other = args[0] - lparms, laueGroup, wavelength, strainMag, tThWidth = ( - other.getParams() - ) - self._wavelength = wavelength - self._lparms = lparms - self._doTThSort = other._doTThSort - self._exclusions = other._exclusions - self._tThMax = other._tThMax - if hkls is None: - hkls = other._hkls - else: - raise NotImplementedError(f'args : {args}') - - self._laueGroup = laueGroup - self._hkls = copy.deepcopy(hkls) - self._strainMag = strainMag - self._structFact = np.ones(self._hkls.shape[1]) - self.tThWidth = tThWidth - - # ... 
need to implement tThMin too - if 'doTThSort' in kwargs: - self._doTThSort = kwargs.pop('doTThSort') - if 'exclusions' in kwargs: - self._exclusions = kwargs.pop('exclusions') - if 'tThMax' in kwargs: - self._tThMax = toFloat(kwargs.pop('tThMax'), 'radians') - if 'tThWidth' in kwargs: - self.tThWidth = kwargs.pop('tThWidth') - if len(kwargs) > 0: - raise RuntimeError( - f'have unparsed keyword arguments with keys: {kwargs.keys()}' - ) - - # This is only used to calculate the structure factor if invalidated - self._unitcell: unitcell = None - - self._calc() - - def _calc(self): - symmGroup = ltypeOfLaueGroup(self._laueGroup) - self._q_sym = quatOfLaueGroup(self._laueGroup) - _, latVecOps, hklDataList = PlaneData.makePlaneData( - self._hkls, - self._lparms, - self._q_sym, - symmGroup, - self._strainMag, - self.wavelength, - ) - 'sort by tTheta' - tThs = np.array( - [hklDataList[iHKL]['tTheta'] for iHKL in range(len(hklDataList))] - ) - if self._doTThSort: - # sorted hkl -> _hkl - # _hkl -> sorted hkl - self.tThSort = np.argsort(tThs) - self.tThSortInv = np.empty(len(hklDataList), dtype=int) - self.tThSortInv[self.tThSort] = np.arange(len(hklDataList)) - self.hklDataList = [hklDataList[iHKL] for iHKL in self.tThSort] - else: - self.tThSort = np.arange(len(hklDataList)) - self.tThSortInv = np.arange(len(hklDataList)) - self.hklDataList = hklDataList - self._latVecOps = latVecOps - self.nHKLs = len(self.getHKLs()) - - def __str__(self): - s = '========== plane data ==========\n' - s += 'lattice parameters:\n ' + str(self.lparms) + '\n' - s += f'two theta width: ({str(self.tThWidth)})\n' - s += f'strain magnitude: ({str(self.strainMag)})\n' - s += f'beam energy ({str(self.wavelength)})\n' - s += 'hkls: (%d)\n' % self.nHKLs - s += str(self.getHKLs()) - return s - - def getParams(self): - """ - Getter for the parameters of the plane data. - - Returns - ------- - tuple - The parameters of the plane data. In the order of - _lparams, _laueGroup, _wavelength, _strainMag, tThWidth - - """ - return ( - self._lparms, - self._laueGroup, - self._wavelength, - self._strainMag, - self.tThWidth, - ) - - def getNhklRef(self) -> int: - """ - Get the total number of hkl's in the plane data, not ignoring - ones that are excluded in exclusions. - - Returns - ------- - int - The total number of hkl's in the plane data. - """ - return len(self.hklDataList) - - @property - def hkls(self) -> np.ndarray: - """ - hStacked Hkls of the plane data (Miller indices). - """ - return self.getHKLs().T - - @hkls.setter - def hkls(self, hkls): - raise NotImplementedError('for now, not allowing hkls to be reset') - - @property - def tThMax(self) -> Optional[float]: - """ - Maximum 2-theta value of the plane data. - - float or None - """ - return self._tThMax - - @tThMax.setter - def tThMax(self, t_th_max: Union[float, valunits.valWUnit]) -> None: - self._tThMax = toFloat(t_th_max, 'radians') - - @property - def exclusions(self) -> np.ndarray: - """ - Excluded HKL's the plane data. - - Set as type np.ndarray, as a mask of length getNhklRef(), a list of - indices to be excluded, or a list of ranges of indices. - - Read as a mask of length getNhklRef(). 
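-
-        Example (an illustrative sketch; ``pd`` stands in for an existing
-        PlaneData instance and is not defined in the original source):
-
-        >>> pd.exclusions = [1, 4]            # exclude by index
-        >>> pd.exclusions = [[0, 2], [5, 7]]  # exclude index ranges [lo, hi)
-        >>> pd.exclusions = None              # clear all exclusions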
- """ - retval = np.zeros(self.getNhklRef(), dtype=bool) - if self._exclusions is not None: - # report in current hkl ordering - retval[:] = self._exclusions[self.tThSortInv] - if self._tThMax is not None: - for iHKLr, hklData in enumerate(self.hklDataList): - if hklData['tTheta'] > self._tThMax: - retval[iHKLr] = True - return retval - - @exclusions.setter - def exclusions(self, new_exclusions: Optional[np.ndarray]) -> None: - excl = np.zeros(len(self.hklDataList), dtype=bool) - if new_exclusions is not None: - exclusions = np.atleast_1d(new_exclusions) - if len(exclusions) == len(self.hklDataList): - assert ( - exclusions.dtype == 'bool' - ), 'Exclusions should be bool if full length' - # convert from current hkl ordering to _hkl ordering - excl[:] = exclusions[self.tThSort] - else: - if len(exclusions.shape) == 1: - # treat exclusions as indices - excl[self.tThSort[exclusions]] = True - elif len(exclusions.shape) == 2: - # treat exclusions as ranges of indices - for r in exclusions: - excl[self.tThSort[r[0] : r[1]]] = True - else: - raise RuntimeError( - f'Unclear behavior for shape {exclusions.shape}' - ) - self._exclusions = excl - self.nHKLs = np.sum(np.logical_not(self._exclusions)) - - def exclude( - self, - dmin: Optional[float] = None, - dmax: Optional[float] = None, - tthmin: Optional[float] = None, - tthmax: Optional[float] = None, - sfacmin: Optional[float] = None, - sfacmax: Optional[float] = None, - pintmin: Optional[float] = None, - pintmax: Optional[float] = None, - ) -> None: - """ - Set exclusions according to various parameters - - Any hkl with a value below any min or above any max will be excluded. So - to be included, an hkl needs to have values between the min and max - for all of the conditions given. - - Note that method resets the tThMax attribute to None. 
- - PARAMETERS - ---------- - dmin: float > 0 - minimum lattice spacing (angstroms) - dmax: float > 0 - maximum lattice spacing (angstroms) - tthmin: float > 0 - minimum two theta (radians) - tthmax: float > 0 - maximum two theta (radians) - sfacmin: float > 0 - minimum structure factor as a proportion of maximum - sfacmax: float > 0 - maximum structure factor as a proportion of maximum - pintmin: float > 0 - minimum powder intensity as a proportion of maximum - pintmax: float > 0 - maximum powder intensity as a proportion of maximum - """ - excl = np.zeros(self.getNhklRef(), dtype=bool) - self.exclusions = None - self.tThMax = None - - if (dmin is not None) or (dmax is not None): - d = np.array(self.getPlaneSpacings()) - if dmin is not None: - excl[d < dmin] = True - if dmax is not None: - excl[d > dmax] = True - - if (tthmin is not None) or (tthmax is not None): - tth = self.getTTh() - if tthmin is not None: - excl[tth < tthmin] = True - if tthmax is not None: - excl[tth > tthmax] = True - - if (sfacmin is not None) or (sfacmax is not None): - sfac = self.structFact - sfac = sfac / sfac.max() - if sfacmin is not None: - excl[sfac < sfacmin] = True - if sfacmax is not None: - excl[sfac > sfacmax] = True - - if (pintmin is not None) or (pintmax is not None): - pint = self.powder_intensity - pint = pint / pint.max() - if pintmin is not None: - excl[pint < pintmin] = True - if pintmax is not None: - excl[pint > pintmax] = True - - self.exclusions = excl - - def _parseLParms( - self, lparms: List[Union[valunits.valWUnit, float]] - ) -> List[float]: - lparmsDUnit = [] - for lparmThis in lparms: - if isinstance(lparmThis, valunits.valWUnit): - if lparmThis.isLength(): - lparmsDUnit.append(lparmThis.getVal(dUnit)) - elif lparmThis.isAngle(): - # plumbing set up to default to degrees - # for lattice parameters - lparmsDUnit.append(lparmThis.getVal('degrees')) - else: - raise RuntimeError( - f'Do not know what to do with {lparmThis}' - ) - else: - lparmsDUnit.append(lparmThis) - return lparmsDUnit - - @property - def lparms(self) -> List[float]: - """ - Lattice parameters of the plane data. - - Can be set as a List[float | valWUnit], but will be converted to - List[float]. - """ - return self._lparms - - @lparms.setter - def lparms(self, lparms: List[Union[valunits.valWUnit, float]]) -> None: - self._lparms = self._parseLParms(lparms) - self._calc() - - @property - def strainMag(self) -> Optional[float]: - """ - Strain magnitude of the plane data. - - float or None - """ - return self._strainMag - - @strainMag.setter - def strainMag(self, strain_mag: float) -> None: - self._strainMag = strain_mag - self.tThWidth = None - self._calc() - - @property - def wavelength(self) -> float: - """ - Wavelength of the plane data. - - Set as float or valWUnit. - - Read as float - """ - return self._wavelength - - @wavelength.setter - def wavelength(self, wavelength: Union[float, valunits.valWUnit]) -> None: - wavelength = processWavelength(wavelength) - # Do not re-compute if it is almost the same - if np.isclose(self._wavelength, wavelength): - return - - self._wavelength = wavelength - self._calc() - - def invalidate_structure_factor(self, ucell: unitcell) -> None: - """ - It can be expensive to compute the structure factor - This method just invalidates it, providing a unit cell, - so that it can be lazily computed from the unit cell. 
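-
-        For example (an illustrative sketch; ``pd`` and ``ucell`` are
-        assumed, pre-existing objects):
-
-        >>> pd.invalidate_structure_factor(ucell)
-        >>> sf = pd.structFact  # first access triggers the deferred CalcXRSF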
- - Parameters: - ----------- - unitcell : unitcell - The unit cell to be used to compute the structure factor - """ - self._structFact = None - self._hedm_intensity = None - self._powder_intensity = None - self._unitcell = ucell - - def _compute_sf_if_needed(self): - any_invalid = ( - self._structFact is None - or self._hedm_intensity is None - or self._powder_intensity is None - ) - if any_invalid and self._unitcell is not None: - # Compute the structure factor first. - # This can be expensive to do, so we lazily compute it when needed. - hkls = self.getHKLs(allHKLs=True) - self.structFact = self._unitcell.CalcXRSF(hkls) - - @property - def structFact(self) -> np.ndarray: - """ - Structure factors for each hkl. - - np.ndarray - """ - self._compute_sf_if_needed() - return self._structFact[~self.exclusions] - - @structFact.setter - def structFact(self, structFact: np.ndarray) -> None: - self._structFact = structFact - multiplicity = self.getMultiplicity(allHKLs=True) - tth = self.getTTh(allHKLs=True) - - hedm_intensity = ( - structFact * lorentz_factor(tth) * polarization_factor(tth) - ) - - powderI = hedm_intensity * multiplicity - - # Now scale them - hedm_intensity = 100.0 * hedm_intensity / np.nanmax(hedm_intensity) - powderI = 100.0 * powderI / np.nanmax(powderI) - - self._hedm_intensity = hedm_intensity - self._powder_intensity = powderI - - @property - def powder_intensity(self) -> np.ndarray: - """ - Powder intensity for each hkl. - """ - self._compute_sf_if_needed() - return self._powder_intensity[~self.exclusions] - - @property - def hedm_intensity(self) -> np.ndarray: - """ - HEDM (high energy x-ray diffraction microscopy) intensity for each hkl. - """ - self._compute_sf_if_needed() - return self._hedm_intensity[~self.exclusions] - - @staticmethod - def makePlaneData( - hkls: np.ndarray, - lparms: np.ndarray, - qsym: np.ndarray, - symmGroup, - strainMag, - wavelength, - ) -> Tuple[ - Dict[str, np.ndarray], Dict[str, Union[np.ndarray, float]], List[Dict] - ]: - """ - Generate lattice plane data from inputs. - - Parameters: - ----------- - hkls: np.ndarray - Miller indices, as in crystallography.latticePlanes - lparms: np.ndarray - Lattice parameters, as in crystallography.latticePlanes - qsym: np.ndarray - (4, n) containing quaternions of symmetry - symmGroup: str - Tag for the symmetry (Laue) group of the lattice. Can generate from - ltypeOfLaueGroup - strainMag: float - Swag of strain magnitudes - wavelength: float - Wavelength - - Returns: - ------- - dict: - Dictionary containing lattice plane data - dict: - Dictionary containing lattice vector operators - list: - List of dictionaries, each containing the data for one hkl - """ - - tempSetOutputDegrees(False) - latPlaneData = latticePlanes( - hkls, - lparms, - ltype=symmGroup, - strainMag=strainMag, - wavelength=wavelength, - ) - - latVecOps = latticeVectors(lparms, symmGroup) - - hklDataList = [] - for iHKL in range(len(hkls.T)): - # need transpose because of convention for hkls ordering - - """ - latVec = latPlaneData['normals'][:,iHKL] - # ... 
if not spots, may be able to work with a subset of these - latPlnNrmlList = applySym( - np.c_[latVec], qsym, csFlag=True, cullPM=False - ) - """ - # returns UN-NORMALIZED lattice plane normals - latPlnNrmls = applySym( - np.dot(latVecOps['B'], hkls[:, iHKL].reshape(3, 1)), - qsym, - csFlag=True, - cullPM=False, - ) - - # check for +/- in symmetry group - latPlnNrmlsM = applySym( - np.dot(latVecOps['B'], hkls[:, iHKL].reshape(3, 1)), - qsym, - csFlag=False, - cullPM=False, - ) - - csRefl = latPlnNrmls.shape[1] == latPlnNrmlsM.shape[1] - - # added this so that I retain the actual symmetric - # integer hkls as well - symHKLs = np.array( - np.round(np.dot(latVecOps['F'].T, latPlnNrmls)), dtype='int' - ) - - hklDataList.append( - dict( - hklID=iHKL, - hkl=hkls[:, iHKL], - tTheta=latPlaneData['tThetas'][iHKL], - dSpacings=latPlaneData['dspacings'][iHKL], - tThetaLo=latPlaneData['tThetasLo'][iHKL], - tThetaHi=latPlaneData['tThetasHi'][iHKL], - latPlnNrmls=unitVector(latPlnNrmls), - symHKLs=symHKLs, - centrosym=csRefl, - ) - ) - - revertOutputDegrees() - return latPlaneData, latVecOps, hklDataList - - @property - def laueGroup(self) -> str: - """ - This is the Schoenflies tag, describing symmetry group of the lattice. - Note that setting this with incompatible lattice parameters will - cause an error. If changing both, use set_laue_and_lparms. - - str - """ - return self._laueGroup - - @laueGroup.setter - def laueGroup(self, laueGroup: str) -> None: - self._laueGroup = laueGroup - self._calc() - - def set_laue_and_lparms( - self, laueGroup: str, lparms: List[Union[valunits.valWUnit, float]] - ) -> None: - """ - Set the Laue group and lattice parameters simultaneously - - When the Laue group changes, the lattice parameters may be - incompatible, and cause an error in self._calc(). This function - allows us to update both the Laue group and lattice parameters - simultaneously to avoid this issue. - - Parameters: - ----------- - laueGroup : str - The symmetry (Laue) group to be set - lparms : List[valunits.valWUnit | float] - Lattice parameters to be set - """ - self._laueGroup = laueGroup - self._lparms = self._parseLParms(lparms) - self._calc() - - @property - def q_sym(self) -> np.ndarray: - """ - Quaternions of symmetry for each hkl, generated from the Laue group - - np.ndarray((4, n)) - """ - return self._q_sym # rotations.quatOfLaueGroup(self._laueGroup) - - def getPlaneSpacings(self) -> List[float]: - """ - Plane spacings for each hkl. 
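-
-        Each value is d = 1 / ||B @ hkl||, as computed through
-        latticePlanes(); for cubic symmetry this reduces to the familiar
-        d = a / sqrt(h**2 + k**2 + l**2). (The cubic special case is added
-        here for illustration.)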
- - Returns: - ------- - List[float] - List of plane spacings for each hkl - """ - dspacings = [] - for iHKLr, hklData in enumerate(self.hklDataList): - if not self._thisHKL(iHKLr): - continue - dspacings.append(hklData['dSpacings']) - return dspacings - - @property - def latVecOps(self) -> Dict[str, Union[np.ndarray, float]]: - """ - gets lattice vector operators as a new (deepcopy) - - Returns: - ------- - Dict[str, np.ndarray | float] - Dictionary containing lattice vector operators - """ - return copy.deepcopy(self._latVecOps) - - def _thisHKL(self, iHKLr: int) -> bool: - hklData = self.hklDataList[iHKLr] - if self._exclusions is not None: - if self._exclusions[self.tThSortInv[iHKLr]]: - return False - if self._tThMax is not None: - if hklData['tTheta'] > self._tThMax or np.isnan(hklData['tTheta']): - return False - return True - - def _getTThRange(self, iHKLr: int) -> Tuple[float, float]: - hklData = self.hklDataList[iHKLr] - if self.tThWidth is not None: # tThHi-tThLo < self.tThWidth - tTh = hklData['tTheta'] - tThHi = tTh + self.tThWidth * 0.5 - tThLo = tTh - self.tThWidth * 0.5 - else: - tThHi = hklData['tThetaHi'] - tThLo = hklData['tThetaLo'] - return (tThLo, tThHi) - - def getTThRanges(self, strainMag: Optional[float] = None) -> np.ndarray: - """ - Get the 2-theta ranges for included hkls - - Parameters: - ----------- - strainMag : Optional[float] - Optional swag of strain magnitude - - Returns: - ------- - np.ndarray: - hstacked array of hstacked tThLo and tThHi for each hkl (n x 2) - """ - tThRanges = [] - for iHKLr, hklData in enumerate(self.hklDataList): - if not self._thisHKL(iHKLr): - continue - if strainMag is None: - tThRanges.append(self._getTThRange(iHKLr)) - else: - hklData = self.hklDataList[iHKLr] - d = hklData['dSpacings'] - tThLo = 2.0 * np.arcsin( - self._wavelength / 2.0 / (d * (1.0 + strainMag)) - ) - tThHi = 2.0 * np.arcsin( - self._wavelength / 2.0 / (d * (1.0 - strainMag)) - ) - tThRanges.append((tThLo, tThHi)) - return np.array(tThRanges) - - def getMergedRanges( - self, cullDupl: Optional[bool] = False - ) -> Tuple[List[List[int]], List[List[float]]]: - """ - Return indices and ranges for specified planeData, merging where - there is overlap based on the tThWidth and line positions - - Parameters: - ----------- - cullDupl : (optional) bool - If True, cull duplicate 2-theta values (within sqrt_epsf). Defaults - to False. - - Returns: - -------- - List[List[int]] - List of indices for each merged range - - List[List[float]] - List of merged ranges, (n x 2) - """ - tThs = self.getTTh() - tThRanges = self.getTThRanges() - - # if you end exlcusions in a doublet (or multiple close rings) - # then this will 'fail'. May need to revisit... - nonoverlapNexts = np.hstack( - (tThRanges[:-1, 1] < tThRanges[1:, 0], True) - ) - iHKLLists = [] - mergedRanges = [] - hklsCur = [] - tThLoIdx = 0 - tThHiCur = 0.0 - for iHKL, nonoverlapNext in enumerate(nonoverlapNexts): - tThHi = tThRanges[iHKL, -1] - if not nonoverlapNext: - if cullDupl and abs(tThs[iHKL] - tThs[iHKL + 1]) < sqrt_epsf: - continue - else: - hklsCur.append(iHKL) - tThHiCur = tThHi - else: - hklsCur.append(iHKL) - tThHiCur = tThHi - iHKLLists.append(hklsCur) - mergedRanges.append([tThRanges[tThLoIdx, 0], tThHiCur]) - tThLoIdx = iHKL + 1 - hklsCur = [] - return iHKLLists, mergedRanges - - def getTTh(self, allHKLs: Optional[bool] = False) -> np.ndarray: - """ - Get the 2-theta values for each hkl. 
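-
-        These are Bragg angles, 2*theta = 2*arcsin(wavelength / (2*d)),
-        evaluated at the stored wavelength. As an illustrative check with
-        assumed values: d = 2.0 angstrom at wavelength 1.54059292 angstrom
-        gives 2*theta ~ 0.7908 rad (~45.3 deg).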
-
-        Parameters:
-        -----------
-        allHKLs : (optional) bool
-            If True, return all 2-theta values, even if they are excluded in
-            the current planeData. Default is False.
-
-        Returns:
-        -------
-        np.ndarray
-            Array of 2-theta values for each hkl
-        """
-        tTh = []
-        for iHKLr, hklData in enumerate(self.hklDataList):
-            if not allHKLs and not self._thisHKL(iHKLr):
-                continue
-            tTh.append(hklData['tTheta'])
-        return np.array(tTh)
-
-    def getMultiplicity(self, allHKLs: Optional[bool] = False) -> np.ndarray:
-        """
-        Get the multiplicity for each hkl (number of symHKLs).
-
-        Parameters:
-        ----------
-        allHKLs : (optional) bool
-            If True, return all multiplicities, even if they are excluded in
-            the current planeData. Defaults to False.
-
-        Returns
-        -------
-        np.ndarray
-            Array of multiplicities for each hkl
-        """
-        # ... JVB: is this incorrect?
-        multip = []
-        for iHKLr, hklData in enumerate(self.hklDataList):
-            if allHKLs or self._thisHKL(iHKLr):
-                multip.append(hklData['symHKLs'].shape[1])
-        return np.array(multip)
-
-    def getHKLID(
-        self,
-        hkl: Union[int, Tuple[int, int, int], np.ndarray],
-        master: Optional[bool] = False,
-    ) -> Union[List[int], int]:
-        """
-        Return the unique ID of a list of hkls.
-
-        Parameters
-        ----------
-        hkl : int | tuple | list | numpy.ndarray
-            The input hkl. If an int, or a list of ints, it just passes
-            through (FIXME).
-            If a tuple, treated as a single (h, k, l).
-            If a list of lists/tuples, each is treated as an (h, k, l).
-            If a numpy.ndarray, it is assumed to have shape (3, N) with the
-            N (h, k, l) vectors stacked column-wise
-
-        master : bool, optional
-            If True, return the master hklID, else return the index from the
-            external (sorted and reduced) list.
-
-        Returns
-        -------
-        hkl_ids : list
-            The list of requested hklID values associated with the input.
-
-        Notes
-        -----
-        TODO: revisit this weird API???
-
-        Changes:
-        -------
-        2020-05-21 (JVB) -- modified to handle all symmetric equivalent reprs.
-        """
-        if hasattr(hkl, '__setitem__'):  # tuple does not have __setitem__
-            if isinstance(hkl, np.ndarray):
-                # if is ndarray, assume is 3xN
-                return [self._getHKLID(x, master=master) for x in hkl.T]
-            else:
-                return [self._getHKLID(x, master=master) for x in hkl]
-        else:
-            return self._getHKLID(hkl, master=master)
-
-    def _getHKLID(
-        self,
-        hkl: Union[int, Tuple[int, int, int], np.ndarray],
-        master: Optional[bool] = False,
-    ) -> int:
-        """
-        for hkl that is a tuple, return externally visible hkl index
-        """
-        if isinstance(hkl, int):
-            return hkl
-        else:
-            hklList = self.getSymHKLs()  # !!! list, reduced by exclusions
-            intl_hklIDs = np.asarray([i['hklID'] for i in self.hklDataList])
-            intl_hklIDs_sorted = intl_hklIDs[~self.exclusions[self.tThSortInv]]
-            dHKLInv = {}
-            for iHKL, symHKLs in enumerate(hklList):
-                idx = intl_hklIDs_sorted[iHKL] if master else iHKL
-                for thisHKL in symHKLs.T:
-                    dHKLInv[tuple(thisHKL)] = idx
-            try:
-                return dHKLInv[tuple(hkl)]
-            except KeyError:
-                raise RuntimeError(
-                    f"hkl '{tuple(hkl)}' is not present in this material!"
-                )
-
-    def getHKLs(self, *hkl_ids: int, **kwargs) -> Union[List[str], np.ndarray]:
-        """
-        Returns the powder HKLs subject to specified options.
-
-        Parameters
-        ----------
-        *hkl_ids : int
-            Optional list of specific master hklIDs.
-        **kwargs : dict
-            One or more of the following keyword arguments:
-            asStr : bool
-                If True, return a list of strings. The default is False.
-            thisTTh : scalar | None
-                If not None, only return hkls overlapping the specified
-                2-theta (in radians). The default is None.
-            allHKLs : bool
-                If True, then ignore exclusions. The default is False.
-
-        Raises
-        ------
-        TypeError
-            If an unknown kwarg is passed.
-        RuntimeError
-            If an invalid hklID is passed.
-
-        Returns
-        -------
-        hkls : list | numpy.ndarray
-            Either a list of hkls as strings (if asStr=True) or a vstacked
-            array of hkls.
-
-        Notes
-        -----
-        !!! the shape of the return value when asStr=False is the _transpose_
-            of the typical return value for self.get_hkls() and self.hkls!
-            This _may_ change to avoid confusion, but going to leave it for
-            now so as not to break anything.
-
-        2022/08/05 JVB:
-            - Added functionality to handle optional hklID args
-            - Updated docstring
-        """
-        # kwarg parsing
-        opts = dict(asStr=False, thisTTh=None, allHKLs=False)
-        if len(kwargs) > 0:
-            # check keys
-            for k, v in kwargs.items():
-                if k not in opts:
-                    raise TypeError(
-                        f"getHKLs() got an unexpected keyword argument '{k}'"
-                    )
-            opts.update(kwargs)
-
-        hkls = []
-        if len(hkl_ids) == 0:
-            for iHKLr, hklData in enumerate(self.hklDataList):
-                if not opts['allHKLs']:
-                    if not self._thisHKL(iHKLr):
-                        continue
-                if opts['thisTTh'] is not None:
-                    tThLo, tThHi = self._getTThRange(iHKLr)
-                    if opts['thisTTh'] < tThHi and opts['thisTTh'] > tThLo:
-                        hkls.append(hklData['hkl'])
-                else:
-                    hkls.append(hklData['hkl'])
-        else:
-            # !!! changing behavior here; if the hkl_id is invalid, raises
-            #     RuntimeError, and if allHKLs=True and the hkl_id is
-            #     excluded, it also raises a RuntimeError
-            all_hkl_ids = np.asarray([i['hklID'] for i in self.hklDataList])
-            sorted_excl = self.exclusions[self.tThSortInv]
-            idx = np.zeros(len(self.hklDataList), dtype=int)
-            for i, hkl_id in enumerate(hkl_ids):
-                # find ordinal index of current hklID
-                try:
-                    idx[i] = int(np.where(all_hkl_ids == hkl_id)[0])
-                except TypeError:
-                    raise RuntimeError(
-                        f"Requested hklID '{hkl_id}' is invalid!"
-                    )
-                if sorted_excl[idx[i]] and not opts['allHKLs']:
-                    raise RuntimeError(
-                        f"Requested hklID '{hkl_id}' is excluded!"
-                    )
-                hkls.append(self.hklDataList[idx[i]]['hkl'])
-
-        # handle output kwarg
-        if opts['asStr']:
-            return list(map(hklToStr, np.array(hkls)))
-        else:
-            return np.array(hkls)
-
-    def getSymHKLs(
-        self,
-        asStr: Optional[bool] = False,
-        withID: Optional[bool] = False,
-        indices: Optional[List[int]] = None,
-    ) -> Union[List[List[str]], List[np.ndarray]]:
-        """
-        Return all symmetry HKLs.
-
-        Parameters
-        ----------
-        asStr : bool, optional
-            If True, return the symmetry HKLs as strings. The default is
-            False.
-        withID : bool, optional
-            If True, return the symmetry HKLs with the hklID. The default is
-            False. Does nothing if asStr is True.
-        indices : list[int], optional
-            Optional list of indices of hkls to include.
-
-        Returns
-        -------
-        sym_hkls : list of lists of strings, or list of numpy.ndarray
-            List of symmetry HKLs for each HKL, either as strings or as a
-            vstacked array.
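-
-        Example (an illustrative sketch; ``pd`` stands in for an existing
-        PlaneData instance and is not defined in the original source):
-
-        >>> rings = pd.getSymHKLs()              # list of (3, n) int arrays
-        >>> tagged = pd.getSymHKLs(withID=True)  # entries are (4, n): ID row + hkls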
- """ - sym_hkls = [] - hkl_index = 0 - if indices is not None: - indB = np.zeros(self.nHKLs, dtype=bool) - indB[np.array(indices)] = True - else: - indB = np.ones(self.nHKLs, dtype=bool) - for iHKLr, hklData in enumerate(self.hklDataList): - if not self._thisHKL(iHKLr): - continue - if indB[hkl_index]: - hkls = hklData['symHKLs'] - if asStr: - sym_hkls.append(list(map(hklToStr, np.array(hkls).T))) - elif withID: - sym_hkls.append( - np.vstack( - [ - np.tile(hklData['hklID'], (1, hkls.shape[1])), - hkls, - ] - ) - ) - else: - sym_hkls.append(np.array(hkls)) - hkl_index += 1 - return sym_hkls - - @staticmethod - def makeScatteringVectors( - hkls: np.ndarray, - rMat_c: np.ndarray, - bMat: np.ndarray, - wavelength: float, - chiTilt: Optional[float] = None, - ) -> Tuple[np.ndarray, np.ndarray, np.ndarray]: - """ - Static method for calculating g-vectors and scattering vector angles - for specified hkls, subject to the bragg conditions specified by - lattice vectors, orientation matrix, and wavelength - - Parameters - ---------- - hkls : np.ndarray - (3, n) array of hkls. - rMat_c : np.ndarray - (3, 3) rotation matrix from the crystal to the sample frame. - bMat : np.ndarray, optional - (3, 3) COB from reciprocal lattice frame to the crystal frame. - wavelength : float - xray wavelength in Angstroms. - chiTilt : float, optional - 0 <= chiTilt <= 90 degrees, defaults to 0 - - Returns - ------- - gVec_s : np.ndarray - (3, n) array of g-vectors (reciprocal lattice) in the sample frame. - oangs0 : np.ndarray - (3, n) array containing the feasible (2-theta, eta, ome) triplets - for each input hkl (first solution) - oangs1 : np.ndarray - (3, n) array containing the feasible (2-theta, eta, ome) triplets - for each input hkl (second solution) - - FIXME: must do testing on strained bMat - """ - # arg munging - chi = float(chiTilt) if chiTilt is not None else 0.0 - rMat_c = rMat_c.squeeze() - - # these are the reciprocal lattice vectors in the SAMPLE FRAME - # ** NOTE ** - # if strained, assumes that you handed it a bMat calculated from - # strained [a, b, c] in the CRYSTAL FRAME - gVec_s = np.dot(rMat_c, np.dot(bMat, hkls)) - - dim0 = gVec_s.shape[0] - if dim0 != 3: - raise ValueError(f'Number of lattice plane normal dims is {dim0}') - - # call model from transforms now - oangs0, oangs1 = xfcapi.oscill_angles_of_hkls( - hkls.T, chi, rMat_c, bMat, wavelength - ) - - return gVec_s, oangs0.T, oangs1.T - - def _makeScatteringVectors( - self, - rMat: np.ndarray, - bMat: Optional[np.ndarray] = None, - chiTilt: Optional[float] = None, - ) -> Tuple[List[np.ndarray], List[np.ndarray], List[np.ndarray]]: - """ - modeled after QFromU.m - """ - - if bMat is None: - bMat = self._latVecOps['B'] - - Qs_vec = [] - Qs_ang0 = [] - Qs_ang1 = [] - for iHKLr, hklData in enumerate(self.hklDataList): - if not self._thisHKL(iHKLr): - continue - thisQs, thisAng0, thisAng1 = PlaneData.makeScatteringVectors( - hklData['symHKLs'], - rMat, - bMat, - self._wavelength, - chiTilt=chiTilt, - ) - Qs_vec.append(thisQs) - Qs_ang0.append(thisAng0) - Qs_ang1.append(thisAng1) - - return Qs_vec, Qs_ang0, Qs_ang1 - - def calcStructFactor(self, atominfo): - """ - Calculates unit cell structure factors as a function of hkl - USAGE: - FSquared = calcStructFactor(atominfo,hkls,B) - INPUTS: - 1) atominfo (m x 1 float ndarray) the first threee columns of the - matrix contain fractional atom positions [uvw] of atoms in the unit - cell. 
The last column contains the number of electrons for a given atom - 2) hkls (3 x n float ndarray) is the array of Miller indices for - the planes of interest. The vectors are assumed to be - concatenated along the 1-axis (horizontal) - 3) B (3 x 3 float ndarray) is a matrix of reciprocal lattice basis - vectors,where each column contains a reciprocal lattice basis vector - ({g}=[B]*{hkl}) - OUTPUTS: - 1) FSquared (n x 1 float ndarray) array of structure factors, - one for each hkl passed into the function - """ - r = atominfo[:, 0:3] - elecNum = atominfo[:, 3] - hkls = self.hkls - B = self.latVecOps['B'] - sinThOverLamdaList, ffDataList = LoadFormFactorData() - FSquared = np.zeros(hkls.shape[1]) - - for jj in np.arange(0, hkls.shape[1]): - # ???: probably have other functions for this - # Calculate G for each hkl - # Calculate magnitude of G for each hkl - G = ( - hkls[0, jj] * B[:, 0] - + hkls[1, jj] * B[:, 1] - + hkls[2, jj] * B[:, 2] - ) - magG = np.sqrt(G[0] ** 2 + G[1] ** 2 + G[2] ** 2) - - # Begin calculating form factor - F = 0 - for ii in np.arange(0, r.shape[0]): - ff = RetrieveAtomicFormFactor( - elecNum[ii], magG, sinThOverLamdaList, ffDataList - ) - exparg = complex( - 0.0, - 2.0 - * np.pi - * ( - hkls[0, jj] * r[ii, 0] - + hkls[1, jj] * r[ii, 1] - + hkls[2, jj] * r[ii, 2] - ), - ) - F += ff * np.exp(exparg) - - """ - F = sum_atoms(ff(Q)*e^(2*pi*i(hu+kv+lw))) - """ - FSquared[jj] = np.real(F * np.conj(F)) - - return FSquared - - # OLD DEPRECATED PLANE_DATA STUFF ==================================== - @deprecated(new_func="len(self.hkls.T)", removal_date="2025-08-01") - def getNHKLs(self): - return len(self.getHKLs()) - - @deprecated(new_func="self.exclusions", removal_date="2025-08-01") - def get_exclusions(self): - return self.exclusions - - @deprecated(new_func="self.exclusions=...", removal_date="2025-08-01") - def set_exclusions(self, exclusions): - self.exclusions = exclusions - - @deprecated( - new_func="rotations.ltypeOfLaueGroup(self.laueGroup)", - removal_date="2025-08-01", - ) - def getLatticeType(self): - return ltypeOfLaueGroup(self.laueGroup) - - @deprecated(new_func="self.q_sym", removal_date="2025-08-01") - def getQSym(self): - return self.q_sym - - -@deprecated(removal_date='2025-01-01') -def getFriedelPair(tth0, eta0, *ome0, **kwargs): - """ - Get the diffractometer angular coordinates in degrees for - the Friedel pair of a given reflection (min angular distance). - - AUTHORS: - - J. V. Bernier -- 10 Nov 2009 - - USAGE: - - ome1, eta1 = getFriedelPair(tth0, eta0, *ome0, - display=False, - units='degrees', - convention='hexrd') - - INPUTS: - - 1) tth0 is a list (or ndarray) of 1 or n the bragg angles (2theta) for - the n reflections (tiled to match eta0 if only 1 is given). - - 2) eta0 is a list (or ndarray) of 1 or n azimuthal coordinates for the n - reflections (tiled to match tth0 if only 1 is given). - - 3) ome0 is a list (or ndarray) of 1 or n reference oscillation - angles for the n reflections (denoted omega in [1]). This argument - is optional. 
- - 4) Keyword arguments may be one of the following: - - Keyword Values|{default} Action - -------------- -------------- -------------- - 'display' True|{False} toggles display to cmd line - 'units' 'radians'|{'degrees'} sets units for input angles - 'convention' 'fable'|{'hexrd'} sets conventions defining - the angles (see below) - 'chiTilt' None the inclination (about Xlab) of - the oscillation axis - - OUTPUTS: - - 1) ome1 contains the oscialltion angle coordinates of the - Friedel pairs associated with the n input reflections, relative to ome0 - (i.e. ome1 = + ome0). Output is in DEGREES! - - 2) eta1 contains the azimuthal coordinates of the Friedel - pairs associated with the n input reflections. Output units are - controlled via the module variable 'outputDegrees' - - NOTES: - - !!!: The ouputs ome1, eta1 are written using the selected convention, but - the units are alway degrees. May change this to work with Nathan's - global... - - !!!: In the 'fable' convention [1], {XYZ} form a RHON basis where X is - downstream, Z is vertical, and eta is CCW with +Z defining eta = 0. - - !!!: In the 'hexrd' convention [2], {XYZ} form a RHON basis where Z is - upstream, Y is vertical, and eta is CCW with +X defining eta = 0. - - REFERENCES: - - [1] E. M. Lauridsen, S. Schmidt, R. M. Suter, and H. F. Poulsen, - ``Tracking: a method for structural characterization of grains in - powders or polycrystals''. J. Appl. Cryst. (2001). 34, 744--750 - - [2] J. V. Bernier, M. P. Miller, J. -S. Park, and U. Lienert, - ``Quantitative Stress Analysis of Recrystallized OFHC Cu Subject - to Deformed In Situ'', J. Eng. Mater. Technol. (2008). 130. - DOI:10.1115/1.2870234 - """ - - dispFlag = False - fableFlag = False - chi = None - c1 = 1.0 - c2 = pi / 180.0 - - eta0 = np.atleast_1d(eta0) - tth0 = np.atleast_1d(tth0) - ome0 = np.atleast_1d(ome0) - - if eta0.ndim != 1: - raise RuntimeError('azimuthal input must be 1-D') - - npts = len(eta0) - - if tth0.ndim != 1: - raise RuntimeError('Bragg angle input must be not 1-D') - else: - if len(tth0) != npts: - if len(tth0) == 1: - tth0 *= np.ones(npts) - elif npts == 1: - npts = len(tth0) - eta0 *= np.ones(npts) - else: - raise RuntimeError( - 'the azimuthal and Bragg angle inputs are inconsistent' - ) - - if len(ome0) == 0: - ome0 = np.zeros(npts) # dummy ome0 - elif len(ome0) == 1 and npts > 1: - ome0 *= np.ones(npts) - else: - if len(ome0) != npts: - raise RuntimeError( - 'your oscialltion angle input is inconsistent; ' - + f'it has length {len(ome0)} while it should be {npts}' - ) - - # keyword args processing - kwarglen = len(kwargs) - if kwarglen > 0: - argkeys = list(kwargs.keys()) - for i in range(kwarglen): - if argkeys[i] == 'display': - dispFlag = kwargs[argkeys[i]] - elif argkeys[i] == 'convention': - if kwargs[argkeys[i]].lower() == 'fable': - fableFlag = True - elif argkeys[i] == 'units': - if kwargs[argkeys[i]] == 'radians': - c1 = 180.0 / pi - c2 = 1.0 - elif argkeys[i] == 'chiTilt': - if kwargs[argkeys[i]] is not None: - chi = kwargs[argkeys[i]] - - # a little talkback... 
- if dispFlag: - if fableFlag: - print('\nUsing Fable angle convention\n') - else: - print('\nUsing image-based angle convention\n') - - # mapped eta input - # - in DEGREES, thanks to c1 - eta0 = mapAngle(c1 * eta0, [-180, 180], units='degrees') - if fableFlag: - eta0 = 90 - eta0 - - # must put args into RADIANS - # - eta0 is in DEGREES, - # - the others are in whatever was entered, hence c2 - eta0 = d2r * eta0 - tht0 = c2 * tth0 / 2 - if chi is not None: - chi = c2 * chi - else: - chi = 0 - - """ - SYSTEM SOLVE - - - cos(chi)cos(eta)cos(theta)sin(x) - cos(chi)sin(theta)cos(x) \ - = sin(theta) - sin(chi)sin(eta)cos(theta) - - - Identity: a sin x + b cos x = sqrt(a**2 + b**2) sin (x + alpha) - - / - | atan(b/a) for a > 0 - alpha < - | pi + atan(b/a) for a < 0 - \ - - => sin (x + alpha) = c / sqrt(a**2 + b**2) - - must use both branches for sin(x) = n: - x = u (+ 2k*pi) | x = pi - u (+ 2k*pi) - """ - cchi = np.cos(chi) - schi = np.sin(chi) - ceta = np.cos(eta0) - seta = np.sin(eta0) - ctht = np.cos(tht0) - stht = np.sin(tht0) - - nchi = np.c_[0.0, cchi, schi].T - - gHat0_l = -np.vstack([ceta * ctht, seta * ctht, stht]) - - a = cchi * ceta * ctht - b = -cchi * stht - c = stht + schi * seta * ctht - - # form solution - abMag = np.sqrt(a * a + b * b) - assert np.all(abMag > 0), "Beam vector specification is infeasible!" - phaseAng = np.arctan2(b, a) - rhs = c / abMag - rhs[abs(rhs) > 1.0] = np.nan - rhsAng = np.arcsin(rhs) - - # write ome angle output arrays (NaNs persist here) - ome1 = rhsAng - phaseAng - ome2 = np.pi - rhsAng - phaseAng - - ome1 = mapAngle(ome1, [-np.pi, np.pi], units='radians') - ome2 = mapAngle(ome2, [-np.pi, np.pi], units='radians') - - ome_stack = np.vstack([ome1, ome2]) - - min_idx = np.argmin(abs(ome_stack), axis=0) - - ome_min = ome_stack[min_idx, list(range(len(ome1)))] - eta_min = np.nan * np.ones_like(ome_min) - - # mark feasible reflections - goodOnes = ~np.isnan(ome_min) - - numGood = np.sum(goodOnes) - tmp_eta = np.empty(numGood) - tmp_gvec = gHat0_l[:, goodOnes] - for i in range(numGood): - rchi = rotMatOfExpMap(np.tile(ome_min[goodOnes][i], (3, 1)) * nchi) - gHat_l = np.dot(rchi, tmp_gvec[:, i].reshape(3, 1)) - tmp_eta[i] = np.arctan2(gHat_l[1], gHat_l[0]) - eta_min[goodOnes] = tmp_eta - - # everybody back to DEGREES! - # - ome1 is in RADIANS here - # - convert and put into [-180, 180] - ome1 = mapAngle( - mapAngle(r2d * ome_min, [-180, 180], units='degrees') + c1 * ome0, - [-180, 180], - units='degrees', - ) - - # put eta1 in [-180, 180] - eta1 = mapAngle(r2d * eta_min, [-180, 180], units='degrees') - - if not outputDegrees: - ome1 *= d2r - eta1 *= d2r - - return ome1, eta1 - - -def getDparms( - lp: np.ndarray, lpTag: str, radians: Optional[bool] = True -) -> np.ndarray: - """ - Utility routine for getting dparms, that is the lattice parameters - without symmetry -- 'triclinic' - - Parameters - ---------- - lp : np.ndarray - Parsed lattice parameters - lpTag : str - Tag for the symmetry group of the lattice (from Laue group) - radians : bool, optional - Whether or not to use radians for angles, default is True - - Returns - ------- - np.ndarray - The lattice parameters without symmetry. - """ - latVecOps = latticeVectors(lp, tag=lpTag, radians=radians) - return latVecOps['dparms'] - - -def LoadFormFactorData(): - """ - Script to read in a csv file containing information relating the - magnitude of Q (sin(th)/lambda) to atomic form factor - Notes: - Atomic form factor data gathered from the International Tables of - Crystallography: - P. J. Brown, A. G. Fox, E. 
N. Maslen, M. A. O'Keefec and B. T. M. Willis, - "Chapter 6.1. Intensity of diffracted intensities", International Tables - for Crystallography (2006). Vol. C, ch. 6.1, pp. 554-595 - """ - - dir1 = os.path.split(valunits.__file__) - dataloc = os.path.join(dir1[0], 'data', 'FormFactorVsQ.csv') - - data = np.zeros((62, 99), float) - - # FIXME: marked broken by DP - jj = 0 - with open(dataloc, 'rU') as csvfile: - datareader = csv.reader(csvfile, dialect=csv.excel) - for row in datareader: - ii = 0 - for val in row: - data[jj, ii] = float(val) - ii += 1 - jj += 1 - - sinThOverLamdaList = data[:, 0] - ffDataList = data[:, 1:] - - return sinThOverLamdaList, ffDataList - - -def RetrieveAtomicFormFactor(elecNum, magG, sinThOverLamdaList, ffDataList): - """Interpolates between tabulated data to find the atomic form factor - for an atom with elecNum electrons for a given magnitude of Q - USAGE: - ff = RetrieveAtomicFormFactor(elecNum,magG,sinThOverLamdaList,ffDataList) - INPUTS: - 1) elecNum, (1 x 1 float) number of electrons for atom of interest - 2) magG (1 x 1 float) magnitude of G - 3) sinThOverLamdaList (n x 1 float ndarray) form factor data is tabulated - in terms of sin(theta)/lambda (A^-1). - 3) ffDataList (n x m float ndarray) form factor data is tabulated in terms - of sin(theta)/lambda (A^-1). Each column corresponds to a different - number of electrons - OUTPUTS: - 1) ff (n x 1 float) atomic form factor for atom and hkl of interest - NOTES: - Data should be calculated in terms of G at some point - """ - sinThOverLambda = 0.5 * magG - # lambda=2*d*sin(th) - # lambda=2*sin(th)/G - # 1/2*G=sin(th)/lambda - - ff = np.interp( - sinThOverLambda, sinThOverLamdaList, ffDataList[:, (elecNum - 1)] - ) - - return ff - - -def lorentz_factor(tth: np.ndarray) -> np.ndarray: - """ - 05/26/2022 SS adding lorentz factor computation - to the detector so that it can be compenstated for in the - intensity correction - - Parameters - ---------- - tth: np.ndarray - 2-theta of every pixel in radians - - Returns - ------- - np.ndarray - Lorentz factor for each pixel - """ - - theta = 0.5 * tth - - cth = np.cos(theta) - sth2 = np.sin(theta) ** 2 - - return 1.0 / (4.0 * cth * sth2) - - -def polarization_factor( - tth: np.ndarray, - unpolarized: Optional[bool] = True, - eta: Optional[np.ndarray] = None, - f_hor: Optional[float] = None, - f_vert: Optional[float] = None, -) -> np.ndarray: - """ - 06/14/2021 SS adding lorentz polarization factor computation - to the detector so that it can be compenstated for in the - intensity correction - - 05/26/2022 decoupling lorentz factor from polarization factor - - parameters: tth two theta of every pixel in radians - if unpolarized is True, all subsequent arguments are optional - eta azimuthal angle of every pixel - f_hor fraction of horizontal polarization - (~1 for XFELs) - f_vert fraction of vertical polarization - (~0 for XFELs) - notice f_hor + f_vert = 1 - - FIXME, called without parameters like eta, f_hor, f_vert, but they default - to none in the current implementation, which will throw an error. 
- """ - - ctth2 = np.cos(tth) ** 2 - - if unpolarized: - return (1 + ctth2) / 2 - - seta2 = np.sin(eta) ** 2 - ceta2 = np.cos(eta) ** 2 - return f_hor * (seta2 + ceta2 * ctth2) + f_vert * (ceta2 + seta2 * ctth2) diff --git a/tests/calibration/test_2xrs_calibration.py b/tests/calibration/test_2xrs_calibration.py index 4e93b5581..431a4c260 100644 --- a/tests/calibration/test_2xrs_calibration.py +++ b/tests/calibration/test_2xrs_calibration.py @@ -5,7 +5,7 @@ import pytest from hexrd.core.material.material import load_materials_hdf5 -from hexrd.hedm.instrument.hedm_instrument import HEDMInstrument +from hexrd.core.instrument.hedm_instrument import HEDMInstrument from hexrd.core.fitting.calibration import ( InstrumentCalibrator, diff --git a/tests/calibration/test_calibration.py b/tests/calibration/test_calibration.py index cfaf31549..ee2fee6da 100644 --- a/tests/calibration/test_calibration.py +++ b/tests/calibration/test_calibration.py @@ -7,7 +7,7 @@ import pytest from hexrd.core.material.material import load_materials_hdf5 -from hexrd.hedm.instrument.hedm_instrument import HEDMInstrument +from hexrd.core.instrument.hedm_instrument import HEDMInstrument from hexrd.core.fitting.calibration import ( InstrumentCalibrator, diff --git a/tests/calibration/test_laue_auto_pick.py b/tests/calibration/test_laue_auto_pick.py index 875d7da5a..0f51279d5 100644 --- a/tests/calibration/test_laue_auto_pick.py +++ b/tests/calibration/test_laue_auto_pick.py @@ -5,10 +5,9 @@ import pytest -from hexrd.fitting.calibration import LaueCalibrator -from hexrd.material.material import load_materials_hdf5, Material -from hexrd.instrument.hedm_instrument import HEDMInstrument -from collections import defaultdict +from hexrd.core.fitting.calibration import LaueCalibrator +from hexrd.core.material import load_materials_hdf5, Material +from hexrd.core.instrument.hedm_instrument import HEDMInstrument @pytest.fixture diff --git a/tests/calibration/test_powder_auto_pick.py b/tests/calibration/test_powder_auto_pick.py index b2ff063f9..d3a75015b 100644 --- a/tests/calibration/test_powder_auto_pick.py +++ b/tests/calibration/test_powder_auto_pick.py @@ -6,10 +6,10 @@ import pytest from hexrd import imageseries -from hexrd.fitting.calibration import PowderCalibrator -from hexrd.material.material import load_materials_hdf5, Material -from hexrd.imageseries.process import ProcessedImageSeries -from hexrd.instrument.hedm_instrument import HEDMInstrument +from hexrd.core.fitting.calibration import PowderCalibrator +from hexrd.core.material.material import load_materials_hdf5, Material +from hexrd.core.imageseries.process import ProcessedImageSeries +from hexrd.core.instrument.hedm_instrument import HEDMInstrument @pytest.fixture diff --git a/tests/config/test_material.py b/tests/config/test_material.py index 7d0bef5eb..c425e9f58 100644 --- a/tests/config/test_material.py +++ b/tests/config/test_material.py @@ -1,6 +1,6 @@ from .common import TestConfig, test_data from hexrd.core.config.material import TTHW_DFLT, DMIN_DFLT -from hexrd.hedm.config.utils import get_exclusion_parameters +from hexrd.core.config.utils import get_exclusion_parameters reference_data = """ diff --git a/tests/planedata/test_with_data.py b/tests/planedata/test_with_data.py index bbb3876ba..e9a45ea49 100644 --- a/tests/planedata/test_with_data.py +++ b/tests/planedata/test_with_data.py @@ -3,7 +3,7 @@ import pytest -from hexrd.laue.material.crystallography import ltypeOfLaueGroup +from hexrd.core.material.crystallography import ltypeOfLaueGroup from 
hexrd.core.material.material import Material
 from hexrd.core.rotations import rotMatOfQuat
 
diff --git a/tests/test_absorption_correction.py b/tests/test_absorption_correction.py
index 26a764f8c..1962ab9ff 100644
--- a/tests/test_absorption_correction.py
+++ b/tests/test_absorption_correction.py
@@ -2,8 +2,8 @@
 import pytest
 import yaml
 
-from hexrd.hedm.instrument.hedm_instrument import HEDMInstrument
-from hexrd.hedm.instrument.physics_package import HEDPhysicsPackage
+from hexrd.core.instrument.hedm_instrument import HEDMInstrument
+from hexrd.core.instrument.physics_package import HEDPhysicsPackage
 
 
 @pytest.fixture
diff --git a/tests/test_find_orientations.py b/tests/test_find_orientations.py
index 5580f9aa5..92db46458 100644
--- a/tests/test_find_orientations.py
+++ b/tests/test_find_orientations.py
@@ -15,7 +15,7 @@
 from hexrd.hedm import config
 
 # TODO: Check that this test is still sensible after PlaneData change.
-from hexrd.hedm.material.crystallography import PlaneData
+from hexrd.core.material.crystallography import PlaneData
 
 import find_orientations_testing as test_utils
 
From a51fc2a020f1d56e61bf04e654ad1cf71eb27b4d Mon Sep 17 00:00:00 2001
From: Zack Singer
Date: Thu, 28 Aug 2025 17:28:59 -0400
Subject: [PATCH 14/19] Add stray files to core migration

---
 hexrd/core/constants.py                       | 1 +
 hexrd/core/fitting/calibration/grain.py       | 2 +-
 hexrd/core/instrument/cylindrical_detector.py | 7 ++++---
 hexrd/hedm/fitting/grains.py                  | 2 +-
 tests/test_pixel_solid_angles.py              | 6 +++---
 5 files changed, 10 insertions(+), 8 deletions(-)

diff --git a/hexrd/core/constants.py b/hexrd/core/constants.py
index 0f8bb13fa..fab5b4561 100644
--- a/hexrd/core/constants.py
+++ b/hexrd/core/constants.py
@@ -3816,6 +3816,7 @@ def is_writable_file(path):
     'LiF': 2.64,
     'quartz': 2.65,
     'diamond': 3.5,
+    'C22H10N2O5': 1.42,  # kapton
 }
 
 '''
diff --git a/hexrd/core/fitting/calibration/grain.py b/hexrd/core/fitting/calibration/grain.py
index 68a961666..ed8fb4fcf 100644
--- a/hexrd/core/fitting/calibration/grain.py
+++ b/hexrd/core/fitting/calibration/grain.py
@@ -5,7 +5,7 @@
 from hexrd.core import matrixutil as mutil
 from hexrd.core.rotations import angularDifference
 from hexrd.core.transforms import xfcapi
-from hexrd.core import xrdutil
+from hexrd.hedm import xrdutil
 
 from .abstract_grain import AbstractGrainCalibrator
 from .lmfit_param_handling import (
diff --git a/hexrd/core/instrument/cylindrical_detector.py b/hexrd/core/instrument/cylindrical_detector.py
index 752aae520..2e448e24e 100644
--- a/hexrd/core/instrument/cylindrical_detector.py
+++ b/hexrd/core/instrument/cylindrical_detector.py
@@ -6,6 +6,7 @@
 # TODO: Resolve extra-core dependency
 from hexrd.hedm import xrdutil
+from hexrd.hedm.xrdutil.utils import _warp_to_cylinder
 from hexrd.core.utils.decorators import memoize
 
 from .detector import Detector
 
@@ -51,7 +52,7 @@ def cart_to_angles(
         if apply_distortion and self.distortion is not None:
             xy_data = self.distortion.apply(xy_data)
 
-        dvecs = xrdutil.utils._warp_to_cylinder(
+        dvecs = _warp_to_cylinder(
             xy_data,
             self.tvec,
             self.radius,
@@ -125,7 +126,7 @@ def cart_to_dvecs(
         rmat_s=ct.identity_3x3,
         tvec_c=ct.zeros_3x1,
     ):
-        return xrdutil.utils._warp_to_cylinder(
+        return _warp_to_cylinder(
             xy_data,
             self.tvec,
             self.radius,
@@ -364,7 +365,7 @@ def _pixel_angles(
     if distortion is not None:
         xy = distortion.apply(xy)
 
-    dvecs = xrdutil.utils._warp_to_cylinder(
+    dvecs = _warp_to_cylinder(
         xy, tvec_d - origin, radius, caxis, paxis, normalize=True
     )
 
diff --git a/hexrd/hedm/fitting/grains.py b/hexrd/hedm/fitting/grains.py
index 518ee5506..843f194dd 100644 --- a/hexrd/hedm/fitting/grains.py +++ b/hexrd/hedm/fitting/grains.py @@ -10,7 +10,7 @@ from hexrd.core import constants from hexrd.core import rotations -from hexrd.xrdutil import ( +from hexrd.hedm.xrdutil import ( apply_correction_to_wavelength, extract_detector_transformation, ) diff --git a/tests/test_pixel_solid_angles.py b/tests/test_pixel_solid_angles.py index 8e7545737..02030d729 100644 --- a/tests/test_pixel_solid_angles.py +++ b/tests/test_pixel_solid_angles.py @@ -6,13 +6,13 @@ import pytest -import hexrd.resources -from hexrd.instrument.hedm_instrument import HEDMInstrument +import hexrd.core.resources +from hexrd.core.instrument.hedm_instrument import HEDMInstrument @pytest.fixture def tardis_instrument() -> HEDMInstrument: - path = importlib.resources.files(hexrd.resources).joinpath( + path = importlib.resources.files(hexrd.core.resources).joinpath( 'tardis_reference_config.yml' ) with open(path, 'r') as rf: From 4d7531087904ba8a26fd4800ab2f026a232fc1d4 Mon Sep 17 00:00:00 2001 From: Zack Singer Date: Thu, 28 Aug 2025 18:15:39 -0400 Subject: [PATCH 15/19] Support Python 3.9 type unioning --- hexrd/module_map.py | 12 ++++++------ 1 file changed, 6 insertions(+), 6 deletions(-) diff --git a/hexrd/module_map.py b/hexrd/module_map.py index af6535586..362398abf 100644 --- a/hexrd/module_map.py +++ b/hexrd/module_map.py @@ -1,13 +1,14 @@ # The following dynamically generates aliases for the remapped modules based # on the file_map -import pickle +from collections import defaultdict import importlib import importlib.util import importlib.abc import importlib.machinery -import sys from pathlib import Path -from collections import defaultdict +import pickle +import sys +from typing import Union def path_to_module(path: Path) -> str: @@ -78,7 +79,7 @@ def __getattr__(self, name): ) -flattened_module_map: dict[str, ModuleAlias | str] = {} +flattened_module_map: dict[str, Union[ModuleAlias, str]] = {} for key, (mapped_module, _mapped_fp) in module_map.items(): parts = mapped_module.split(".") @@ -88,8 +89,7 @@ def __getattr__(self, name): flattened_module_map[module] = ModuleAlias(parts[:i]) flattened_module_map[key] = mapped_module - -def get(alias: str) -> ModuleAlias | str | None: +def get(alias: str) -> Union[ModuleAlias, str, None]: """ Returns the the module or an alias to it if it exists. """ From 1c6491e600c1e50b743f0071d5cd824c064427da Mon Sep 17 00:00:00 2001 From: Zack Singer Date: Wed, 1 Oct 2025 14:05:47 -0400 Subject: [PATCH 16/19] Allow module_map to import funcs from disparate files --- hexrd/module_map.py | 49 ++++++++++++++++++++++++++++++++++++++++----- 1 file changed, 44 insertions(+), 5 deletions(-) diff --git a/hexrd/module_map.py b/hexrd/module_map.py index 362398abf..524e0ec3f 100644 --- a/hexrd/module_map.py +++ b/hexrd/module_map.py @@ -2,11 +2,9 @@ # on the file_map from collections import defaultdict import importlib -import importlib.util import importlib.abc import importlib.machinery from pathlib import Path -import pickle import sys from typing import Union @@ -15,11 +13,9 @@ def path_to_module(path: Path) -> str: """ Convert a path to a module name. - e.g. 
* "package_remapper/remapper.py" -> "package_remapper.remapper" * "package_remapper/__init__.py" -> "package_remapper" - """ if path.suffix not in (".py", ""): raise ValueError(f"Expected a .py file, got {path}") @@ -137,7 +133,50 @@ def load_module(self, fullname): raise ImportError(f"Module {fullname} not found in module_map") mapped_module, _mapped_fp = module_map[fullname] - sys.modules[fullname] = importlib.import_module(mapped_module) + base_mod = importlib.import_module(mapped_module) + + extra_candidates: list[str] = [] + for old_path, new_paths in file_map.items(): + if path_to_module(old_path) == fullname: + for p in new_paths: + candidate = path_to_module(p) + if candidate != mapped_module: + extra_candidates.append(candidate) + break + + if extra_candidates: + for candidate in extra_candidates: + try: + cand_mod = importlib.import_module(candidate) + except Exception: + continue + + if hasattr(base_mod, "__path__") and hasattr(cand_mod, "__path__"): + try: + for p in list(cand_mod.__path__): + if p not in base_mod.__path__: + base_mod.__path__.append(p) + except Exception: + pass + + base_all = getattr(base_mod, "__all__", None) + cand_all = getattr(cand_mod, "__all__", None) + if cand_all: + if base_all is None: + base_mod.__all__ = list(cand_all) + else: + for name in cand_all: + if name not in base_all: + base_all.append(name) + base_mod.__all__ = base_all + + for name, val in cand_mod.__dict__.items(): + if name in ("__name__", "__file__", "__package__", "__path__", "__loader__", "__spec__"): + continue + if name not in base_mod.__dict__: + base_mod.__dict__[name] = val + + sys.modules[fullname] = base_mod return sys.modules[fullname] From d3fd89e99e870aadfbee2fe194eea05ebcb23736 Mon Sep 17 00:00:00 2001 From: Zack Singer Date: Wed, 1 Oct 2025 14:09:11 -0400 Subject: [PATCH 17/19] Only concern the module_map with py files --- hexrd/module_map.py | 2 ++ 1 file changed, 2 insertions(+) diff --git a/hexrd/module_map.py b/hexrd/module_map.py index 524e0ec3f..8484ef725 100644 --- a/hexrd/module_map.py +++ b/hexrd/module_map.py @@ -137,6 +137,8 @@ def load_module(self, fullname): extra_candidates: list[str] = [] for old_path, new_paths in file_map.items(): + if old_path.suffix not in ("", ".py") or not "hexrd" in old_path.parts: + continue if path_to_module(old_path) == fullname: for p in new_paths: candidate = path_to_module(p) From 2b8861c8474acebeb3735172fb2b208366320a12 Mon Sep 17 00:00:00 2001 From: Patrick Avery Date: Wed, 1 Oct 2025 13:25:53 -0500 Subject: [PATCH 18/19] Remove merge conflict issue These lines look like they were accidentally added. 
Signed-off-by: Patrick Avery --- hexrd/hedm/fitgrains.py | 5 ----- 1 file changed, 5 deletions(-) diff --git a/hexrd/hedm/fitgrains.py b/hexrd/hedm/fitgrains.py index 8ab919a55..19cc2afac 100644 --- a/hexrd/hedm/fitgrains.py +++ b/hexrd/hedm/fitgrains.py @@ -428,11 +428,6 @@ def fit_grains( logger.info("\tstarting fit on %d processes with chunksize %d", nproc, chunksize) start = timeit.default_timer() - pool = multiprocessing.Pool( - nproc, - chunksize, - ) - start = timeit.default_timer() pool = multiprocessing.Pool(nproc, fit_grain_FF_init, (params,)) async_result = pool.map_async( From e09f3013fd4be33c05c702353d633621cc6191a3 Mon Sep 17 00:00:00 2001 From: Zack Singer Date: Wed, 1 Oct 2025 14:41:27 -0400 Subject: [PATCH 19/19] Fix duplicated function identification in module_map --- hexrd/module_map.py | 16 +++++++++++++--- 1 file changed, 13 insertions(+), 3 deletions(-) diff --git a/hexrd/module_map.py b/hexrd/module_map.py index 8484ef725..1a1b43b88 100644 --- a/hexrd/module_map.py +++ b/hexrd/module_map.py @@ -139,15 +139,25 @@ def load_module(self, fullname): for old_path, new_paths in file_map.items(): if old_path.suffix not in ("", ".py") or not "hexrd" in old_path.parts: continue - if path_to_module(old_path) == fullname: + try: + old_mod = path_to_module(old_path) + except ValueError: + continue + + if old_mod == fullname or old_mod.startswith(fullname + "."): for p in new_paths: candidate = path_to_module(p) if candidate != mapped_module: extra_candidates.append(candidate) - break if extra_candidates: - for candidate in extra_candidates: + seen = set() + deduped: list[str] = [] + for c in extra_candidates: + if c not in seen: + seen.add(c) + deduped.append(c) + for candidate in deduped: try: cand_mod = importlib.import_module(candidate) except Exception:
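
A usage sketch of the module_map compatibility shim that patches 15 through 19
build up. This is illustrative only and is not part of the series: it assumes
hexrd registers a finder backed by this loader on sys.meta_path when the
package is imported, and the concrete module names are examples taken from the
rename map earlier in the series (hexrd/material/crystallography.py moving to
hexrd/core/material/crystallography.py).

    import sys
    from pathlib import Path

    from hexrd import module_map

    # path_to_module() turns a repo-relative path into a dotted module name,
    # per its docstring; load_module() uses it to compare old and new layouts.
    assert module_map.path_to_module(
        Path("hexrd/core/rotations.py")
    ) == "hexrd.core.rotations"

    # get() returns the remapped dotted path for a leaf module, a ModuleAlias
    # for an intermediate package, or None for names the map does not know.
    target = module_map.get("hexrd.material.crystallography")
    print(target)  # expected: "hexrd.core.material.crystallography"

    # With the loader installed, the legacy spelling and the new spelling
    # should resolve to the same module object, because load_module() stores
    # the remapped module in sys.modules under the old name.
    import hexrd.material.crystallography as legacy
    import hexrd.core.material.crystallography as current
    assert sys.modules["hexrd.material.crystallography"] is current
    assert legacy.PlaneData is current.PlaneData

Patches 16 and 19 extend load_module() for legacy modules whose contents were
split across several new files: each extra target module is imported, and any
of its names not already present on the base module (module bookkeeping
attributes aside) are copied over, so the old single-module import path keeps
exposing the full legacy API.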
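
The FIXME left on polarization_factor earlier in this series is also worth a
concrete call pattern: with unpolarized=False the function dereferences eta,
f_hor and f_vert, so callers must pass all three (and the two fractions should
sum to 1). A minimal sketch, assuming the shim keeps these helpers importable
from their pre-move path (hexrd.material.crystallography) wherever they land
after the refactor:

    import numpy as np

    from hexrd.material.crystallography import (
        lorentz_factor,
        polarization_factor,
    )

    tth = np.radians([10.0, 20.0, 30.0])  # 2-theta per pixel, in radians
    eta = np.radians([0.0, 45.0, 90.0])   # azimuth per pixel, in radians

    # Unpolarized beam: (1 + cos^2(2theta)) / 2; eta is not needed.
    lp = lorentz_factor(tth) * polarization_factor(tth)

    # Mostly horizontally polarized beam (e.g. an XFEL): f_hor + f_vert = 1.
    lp_xfel = lorentz_factor(tth) * polarization_factor(
        tth, unpolarized=False, eta=eta, f_hor=0.95, f_vert=0.05
    )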