diff --git a/conda.recipe/meta.yaml b/conda.recipe/meta.yaml index b8c64eedc..57f0945fb 100644 --- a/conda.recipe/meta.yaml +++ b/conda.recipe/meta.yaml @@ -9,7 +9,7 @@ build: number: {{ environ.get('GIT_DESCRIBE_NUMBER', 0) }} detect_binary_files_with_prefix: true entry_points: - - hexrd = hexrd.cli.main:main + - hexrd = hexrd.hedm.cli.main:main requirements: build: diff --git a/docs/source/conf.py b/docs/source/conf.py index 4d7928762..8d41c2073 100644 --- a/docs/source/conf.py +++ b/docs/source/conf.py @@ -13,7 +13,7 @@ import os import sys -from hexrd.constants import __version__ as version +from hexrd.core.constants import __version__ as version sys.path.insert(0, os.path.abspath('../..')) diff --git a/hexrd/__init__.py b/hexrd/__init__.py index 77ca9b19a..e8812182f 100644 --- a/hexrd/__init__.py +++ b/hexrd/__init__.py @@ -1,13 +1,15 @@ import importlib +import importlib.abc +import importlib.machinery import sys -from .material import crystallography -from .material import jcpds -from .material import mksupport -from .material import spacegroup -from .material import symbols -from .material import symmetry -from .material import unitcell +from .core.material import crystallography +from .core.material import jcpds +from .core.material import mksupport +from .core.material import spacegroup +from .core.material import symbols +from .core.material import symmetry +from .core.material import unitcell # These are aliases for import paths, so we don't break old HEXRD scripts. # We will verify the alias files *do not* exist, to avoid confusion. @@ -30,3 +32,16 @@ raise Exception(f'"{alias}" is an alias path and should not exist') sys.modules[alias] = module + + +from . import module_map + + +def __getattr__(name): + # __getattr__ is only called if the attribute doesn't exist + module = module_map.get("hexrd." 
+ name) + if module is not None: + if isinstance(module, str): + return importlib.import_module(module) + return module + raise AttributeError(f"Module `hexrd` has no attribute {name}") diff --git a/hexrd/copyright.py b/hexrd/copyright.py index b61b3640b..cdf8910d0 100644 --- a/hexrd/copyright.py +++ b/hexrd/copyright.py @@ -1,24 +1,24 @@ # ============================================================ -# Copyright (c) 2012, Lawrence Livermore National Security, LLC. -# Produced at the Lawrence Livermore National Laboratory. -# Written by Joel Bernier and others. -# LLNL-CODE-529294. +# Copyright (c) 2012, Lawrence Livermore National Security, LLC. +# Produced at the Lawrence Livermore National Laboratory. +# Written by Joel Bernier and others. +# LLNL-CODE-529294. # All rights reserved. -# +# # This file is part of HEXRD. For details on dowloading the source, # see the file COPYING. -# +# # Please also see the file LICENSE. -# +# # This program is free software; you can redistribute it and/or modify it under the # terms of the GNU Lesser General Public License (as published by the Free Software # Foundation) version 2.1 dated February 1999. -# +# # This program is distributed in the hope that it will be useful, but -# WITHOUT ANY WARRANTY; without even the IMPLIED WARRANTY OF MERCHANTABILITY -# or FITNESS FOR A PARTICULAR PURPOSE. See the terms and conditions of the +# WITHOUT ANY WARRANTY; without even the IMPLIED WARRANTY OF MERCHANTABILITY +# or FITNESS FOR A PARTICULAR PURPOSE. See the terms and conditions of the # GNU General Public License for more details. 
-# +# # You should have received a copy of the GNU Lesser General Public # License along with this program (see file LICENSE); if not, write to # the Free Software Foundation, Inc., 59 Temple Place, Suite 330, @@ -36,4 +36,4 @@ COPYRIGHT_FILE = 'COPYING' with open(os.path.join(os.path.dirname(__file__), COPYRIGHT_FILE), 'r') as f: - COPYRIGHT_TEXT = f.read() + COPYRIGHT_TEXT = f.read() diff --git a/hexrd/config/__init__.py b/hexrd/core/config/__init__.py similarity index 100% rename from hexrd/config/__init__.py rename to hexrd/core/config/__init__.py diff --git a/hexrd/config/beam.py b/hexrd/core/config/beam.py similarity index 85% rename from hexrd/config/beam.py rename to hexrd/core/config/beam.py index 820dc7d4b..572b25b52 100644 --- a/hexrd/config/beam.py +++ b/hexrd/core/config/beam.py @@ -3,7 +3,7 @@ import numpy as np from .config import Config -from hexrd import imageseries +from hexrd.core import imageseries class Beam(Config): diff --git a/hexrd/config/config.py b/hexrd/core/config/config.py similarity index 90% rename from hexrd/config/config.py rename to hexrd/core/config/config.py index 1b5a3683a..4645d5c5e 100644 --- a/hexrd/config/config.py +++ b/hexrd/core/config/config.py @@ -7,6 +7,7 @@ logger = logging.getLogger('hexrd.config') + class Config(object): """Access a level of the YAML configuration file @@ -15,6 +16,7 @@ class Config(object): cfg: Config instance or a (pyyaml) YAMLObject config representings a level of the YAML input """ + _dirty = False def __init__(self, cfg): @@ -41,14 +43,12 @@ def get(self, key, default=null): res = temp[item] except KeyError: if default is not null: - logger.info( - '%s not specified, defaulting to %s', key, default - ) + logger.info('%s not specified, defaulting to %s', key, default) res = temp.get(item, default) else: raise RuntimeError( '%s must be specified in configuration file' % key - ) + ) return res def set(self, key, val): @@ -80,12 +80,10 @@ def check_filename(fname, wdir): If fname is an absolute 
path, use that; otherwise take it as a path relative to the working directory. -""" + """ temp = fname if not os.path.isabs(fname): temp = os.path.join(wdir, temp) if os.path.exists(temp): return temp - raise IOError( - 'file: "%s" not found' % temp - ) + raise IOError('file: "%s" not found' % temp) diff --git a/hexrd/config/dumper.py b/hexrd/core/config/dumper.py similarity index 90% rename from hexrd/config/dumper.py rename to hexrd/core/config/dumper.py index 92d3596da..4067ed93c 100644 --- a/hexrd/config/dumper.py +++ b/hexrd/core/config/dumper.py @@ -8,7 +8,7 @@ def _dict_path_by_id(d, value, path=()): return path elif isinstance(d, dict): for k, v in d.items(): - p = _dict_path_by_id(v, value, path + (k, )) + p = _dict_path_by_id(v, value, path + (k,)) if p is not None: return p elif isinstance(d, list): @@ -32,6 +32,7 @@ class NumPyIncludeDumper(yaml.Dumper): The ndarray would be saved in foo/bar.npy. """ + def __init__(self, stream, **kwargs): super().__init__(stream, **kwargs) @@ -58,5 +59,6 @@ def represent(self, data): return super().represent(data) -NumPyIncludeDumper.add_representer(np.ndarray, - NumPyIncludeDumper.ndarray_representer) +NumPyIncludeDumper.add_representer( + np.ndarray, NumPyIncludeDumper.ndarray_representer +) diff --git a/hexrd/config/imageseries.py b/hexrd/core/config/imageseries.py similarity index 92% rename from hexrd/config/imageseries.py rename to hexrd/core/config/imageseries.py index 63bffff07..eadae8210 100644 --- a/hexrd/config/imageseries.py +++ b/hexrd/core/config/imageseries.py @@ -1,7 +1,7 @@ from .config import Config -from hexrd import imageseries +from hexrd.core import imageseries -from hexrd.constants import shared_ims_key +from hexrd.core.constants import shared_ims_key class ImageSeries(Config): @@ -34,7 +34,7 @@ def imageseries(self): panel = '_'.join(panel) elif panel is None: panel = shared_ims_key - except(KeyError): + except KeyError: panel = shared_ims_key self._image_dict[panel] = oms diff --git 
a/hexrd/config/instrument.py b/hexrd/core/config/instrument.py similarity index 95% rename from hexrd/config/instrument.py rename to hexrd/core/config/instrument.py index 8f0a87daa..f284641df 100644 --- a/hexrd/config/instrument.py +++ b/hexrd/core/config/instrument.py @@ -4,7 +4,7 @@ from .config import Config from .loader import NumPyIncludeLoader -from hexrd import instrument +from hexrd.core import instrument class Instrument(Config): @@ -31,7 +31,7 @@ def hedm(self): try: icfg = h5py.File(self.configuration, 'r') - except(OSError): + except OSError: with open(self.configuration, 'r') as f: icfg = yaml.load(f, Loader=NumPyIncludeLoader) @@ -47,7 +47,7 @@ def hedm(self, icfg_fname): """Set the HEDMInstrument class.""" try: icfg = h5py.File(icfg_fname, 'r') - except(OSError): + except OSError: with open(icfg_fname, 'r') as f: icfg = yaml.load(f, Loader=NumPyIncludeLoader) diff --git a/hexrd/config/loader.py b/hexrd/core/config/loader.py similarity index 100% rename from hexrd/config/loader.py rename to hexrd/core/config/loader.py diff --git a/hexrd/config/material.py b/hexrd/core/config/material.py similarity index 92% rename from hexrd/config/material.py rename to hexrd/core/config/material.py index c55a985c6..bc84bd724 100644 --- a/hexrd/config/material.py +++ b/hexrd/core/config/material.py @@ -2,16 +2,16 @@ import numpy as np -from hexrd import material -from hexrd.constants import keVToAngstrom -from hexrd.valunits import valWUnit +from hexrd.core import material +from hexrd.core.constants import keVToAngstrom +from hexrd.core.valunits import valWUnit from .config import Config from .utils import get_exclusion_parameters -DMIN_DFLT = 0.5 # angstrom -TTHW_DFLT = 0.25 # degrees +DMIN_DFLT = 0.5 # angstrom +TTHW_DFLT = 0.25 # degrees class MaterialConfig(Config): @@ -30,9 +30,7 @@ def definitions(self): temp = os.path.join(self._cfg.working_dir, temp) if os.path.exists(temp): return temp - raise IOError( - f'"material:definitions": "{temp}" does not exist' - ) 
+ raise IOError(f'"material:definitions": "{temp}" does not exist') @property def active(self): diff --git a/hexrd/config/root.py b/hexrd/core/config/root.py similarity index 88% rename from hexrd/config/root.py rename to hexrd/core/config/root.py index 0fca50ed7..ab75fb3b2 100644 --- a/hexrd/config/root.py +++ b/hexrd/core/config/root.py @@ -3,13 +3,15 @@ import logging import multiprocessing as mp -from hexrd.constants import shared_ims_key -from hexrd import imageseries +from hexrd.core.constants import shared_ims_key +from hexrd.core import imageseries from .config import Config from .instrument import Instrument -from .findorientations import FindOrientationsConfig -from .fitgrains import FitGrainsConfig + +# TODO: Resolve extra-core-dependency +from hexrd.hedm.config.findorientations import FindOrientationsConfig +from hexrd.hedm.config.fitgrains import FitGrainsConfig from .material import MaterialConfig logger = logging.getLogger('hexrd.config') @@ -67,8 +69,10 @@ def analysis_dir(self): @property def analysis_id(self): return '_'.join( - [self.analysis_name.strip().replace(' ', '-'), - self.material.active.strip().replace(' ', '-')] + [ + self.analysis_name.strip().replace(' ', '-'), + self.material.active.strip().replace(' ', '-'), + ] ) @property @@ -134,8 +138,9 @@ def multiprocessing(self): if multiproc > ncpus: logger.warning( 'Resuested %s processes, %d available', - multiproc, ncpus - ) + multiproc, + ncpus, + ) res = ncpus else: res = multiproc if multiproc else 1 @@ -144,17 +149,15 @@ def multiprocessing(self): if temp < 1: logger.warning( 'Cannot use less than 1 process, requested %d of %d', - temp, ncpus - ) + temp, + ncpus, + ) res = 1 else: res = temp else: temp = ncpus - 1 - logger.warning( - "Invalid value %s for multiprocessing", - multiproc - ) + logger.warning("Invalid value %s for multiprocessing", multiproc) res = temp return res @@ -163,13 +166,13 @@ def multiprocessing(self, val): isint = isinstance(val, int) if val in ('half', 'all', 
-1): self.set('multiprocessing', val) - elif (isint and val >= 0 and val <= mp.cpu_count()): + elif isint and val >= 0 and val <= mp.cpu_count(): self.set('multiprocessing', int(val)) else: raise RuntimeError( '"multiprocessing": must be 1:%d, got %s' % (mp.cpu_count(), val) - ) + ) @property def image_series(self): @@ -189,10 +192,10 @@ def image_series(self): panel = '_'.join(panel) elif panel is None: panel = shared_ims_key - except(KeyError): + except KeyError: try: panel = oms.metadata['panel'] - except(KeyError): + except KeyError: panel = shared_ims_key self._image_dict[panel] = oms diff --git a/hexrd/config/utils.py b/hexrd/core/config/utils.py similarity index 67% rename from hexrd/config/utils.py rename to hexrd/core/config/utils.py index e31322f1b..4733a6efe 100644 --- a/hexrd/config/utils.py +++ b/hexrd/core/config/utils.py @@ -4,12 +4,21 @@ ExclusionParameters = namedtuple( - 'ExclusionParameters', ["dmin", "dmax", "tthmin", "tthmax", - "sfacmin", "sfacmax", "pintmin", "pintmax"] + 'ExclusionParameters', + [ + "dmin", + "dmax", + "tthmin", + "tthmax", + "sfacmin", + "sfacmax", + "pintmin", + "pintmax", + ], ) -class Null(): +class Null: pass @@ -52,22 +61,22 @@ def get_exclusion_parameters(cfg, prefix): if sfmin_dflt is not None: warnings.warn( '"min_sfac_ratio" is deprecated, use "sfacmin" instead', - DeprecationWarning + DeprecationWarning, ) # Default for reset_exclusions is True so that old config files will # produce the same behavior. 
- reset_exclusions= cfg.get(yaml_key("reset_exclusions"), True) + reset_exclusions = cfg.get(yaml_key("reset_exclusions"), True) - return( + return ( reset_exclusions, ExclusionParameters( - dmin = cfg.get(yaml_key("dmin"), None), - dmax = cfg.get(yaml_key("dmax"), None), - tthmin = cfg.get(yaml_key("tthmin"), None), - tthmax = cfg.get(yaml_key("tthmax"), None), - sfacmin = cfg.get(yaml_key("sfacmin"), sfmin_dflt), - sfacmax = cfg.get(yaml_key("sfacmax"), None), - pintmin = cfg.get(yaml_key("pintmin"), None), - pintmax = cfg.get(yaml_key("pintmax"), None), - ) + dmin=cfg.get(yaml_key("dmin"), None), + dmax=cfg.get(yaml_key("dmax"), None), + tthmin=cfg.get(yaml_key("tthmin"), None), + tthmax=cfg.get(yaml_key("tthmax"), None), + sfacmin=cfg.get(yaml_key("sfacmin"), sfmin_dflt), + sfacmax=cfg.get(yaml_key("sfacmax"), None), + pintmin=cfg.get(yaml_key("pintmin"), None), + pintmax=cfg.get(yaml_key("pintmax"), None), + ), ) diff --git a/hexrd/core/constants.py b/hexrd/core/constants.py new file mode 100644 index 000000000..fab5b4561 --- /dev/null +++ b/hexrd/core/constants.py @@ -0,0 +1,4607 @@ +# -*- coding: utf-8 -*- +# ============================================================================= +# Copyright (c) 2012, Lawrence Livermore National Security, LLC. +# Produced at the Lawrence Livermore National Laboratory. +# Written by Joel Bernier and others. +# LLNL-CODE-529294. +# All rights reserved. +# +# This file is part of HEXRD. For details on dowloading the source, +# see the file COPYING. +# +# Please also see the file LICENSE. +# +# This program is free software; you can redistribute it and/or modify it under +# the terms of the GNU Lesser General Public License (as published by the Free +# Software Foundation) version 2.1 dated February 1999. +# +# This program is distributed in the hope that it will be useful, but +# WITHOUT ANY WARRANTY; without even the IMPLIED WARRANTY OF MERCHANTABILITY +# or FITNESS FOR A PARTICULAR PURPOSE. 
See the terms and conditions of the +# GNU General Public License for more details. +# +# You should have received a copy of the GNU Lesser General Public +# License along with this program (see file LICENSE); if not, write to +# the Free Software Foundation, Inc., 59 Temple Place, Suite 330, +# Boston, MA 02111-1307 USA or visit . +# ============================================================================= + +# fmt: off -- Don't try to format this, it has a lot of special formatting. +from importlib.metadata import version, PackageNotFoundError +import multiprocessing as mp +import os +import platform + +import numpy as np + +from scipy import constants as scipyc + +# !!! for stetting mp start method +if 'Windows' in platform.system(): + mp_context = mp.get_context("spawn") +elif 'Darwin' in platform.system(): + mp_context = mp.get_context("spawn") +else: + mp_context = mp.get_context("fork") + +# pi related +pi = np.pi +piby2 = 0.5 * pi +piby3 = pi / 3.0 +piby4 = 0.25 * pi +piby6 = pi / 6.0 + +# misc radicals +sqrt2 = np.sqrt(2.0) +sqrt3 = np.sqrt(3.0) +sqrt3by2 = 0.5 * sqrt3 + +# fwhm +sigma_to_fwhm = 2.0 * np.sqrt(2.0 * np.log(2.0)) +fwhm_to_sigma = 1.0 / sigma_to_fwhm + +# tolerancing +epsf = np.finfo(float).eps # ~2.2e-16 +ten_epsf = 10 * epsf # ~2.2e-15 +sqrt_epsf = np.sqrt(epsf) # ~1.5e-8 + +# for angles +period_dict = {'degrees': 360.0, 'radians': 2 * pi} +angular_units = 'radians' # module-level angle units +d2r = pi / 180.0 +r2d = 180.0 / pi + +# identity arrays +identity_3x3 = np.eye(3) # (3, 3) identity +identity_6x1 = np.r_[1.0, 1.0, 1.0, 0.0, 0.0, 0.0] + +# basis vectors +lab_x = np.r_[1.0, 0.0, 0.0] # X in the lab frame +lab_y = np.r_[0.0, 1.0, 0.0] # Y in the lab frame +lab_z = np.r_[0.0, 0.0, 1.0] # Z in the lab frame + +zeros_3 = np.zeros(3) +zeros_3x1 = np.zeros((3, 1)) +zeros_6x1 = np.zeros((6, 1)) + +'''reference beam direction and +eta=0 ref in LAB FRAME for standard geometry''' +beam_vec = -lab_z +eta_vec = lab_x + +# change of basis 
matrix for the Fable-style sample frame to hexrd's +# !!! the fable sample frame has +# +x along the beam (hexrd -z), and +# +z up (hexrd +y) +# +# !!! both rot mat and qpm ((4, 4) for quat matrix mult) act on the left +# +# !!!: if using Midas/Fable orientations, be aware that the crystal frame +# is different as well! See hexrd.crystallography.latticeVectors. +fable_to_hexrd_cob_rmat = np.array( + [[0.0, -1.0, 0.0], [0.0, 0.0, 1.0], [-1.0, 0.0, 0.0]] +) + +fable_to_hexrd_cob_qpm = np.array( + [ + [0.5, 0.5, -0.5, -0.5], + [-0.5, 0.5, -0.5, 0.5], + [0.5, 0.5, 0.5, 0.5], + [0.5, -0.5, -0.5, 0.5], + ] +) + +# shared key for imageseries shared by multiple detectors (ROI's) +shared_ims_key = 'SHARED-IMAGES' + +""" +>> @AUTHOR: Saransh Singh, Lawrence Livermore National Lab, + saransh1@llnl.gov +>> @DATE: 10/19/2021 SS 1.0 original +>> @DETAILS: some constants for calculation of complementary error + and exponential integral functions. everything based + on rational approximants of the integral + +coefficients from pg. 415 Y. 
Luke, The special functions and their +approximations, vol 2 (1969) Elsevier +""" + +c_erf = np.array( + [ + 0.254829592, + -0.284496736, + 1.421413741, + -1.453152027, + 1.061405429, + 0.3275911, + ] +).astype(np.float64) + +c_coeff_exp1exp = np.array( + [ + 0.999999584, + -0.249992399, + 0.055514994, + -0.010315766, + 0.001535370, + -0.000142164, + ] +).astype(np.complex128) + +cnum_exp1exp = np.array( + [ + 1.0, + 99.0, + 3952.0, + 82544.0, + 979524.0, + 6712860.0, + 25815840.0, + 51369120.0, + 44339040.0, + 10628640.0, + 0.0, + ] +).astype(np.complex128) + +cden_exp1exp = np.array( + [ + 1.0, + 100.0, + 4050.0, + 86400.0, + 1058400.0, + 7620480.0, + 31752000.0, + 72576000.0, + 81648000.0, + 36288000.0, + 3628800.0, + ] +).astype(np.complex128) + +""" +>> @AUTHOR: Saransh Singh, + Lawrence Livermore National Lab, + saransh1@llnl.gov +>> @DATE: 11/28/2022 SS 1.0 original +>> @DETAILS: constants for rodrigues FZ +""" +FZtypeArray = np.array( + [ + 0, + 0, + 1, + 1, + 1, + 2, + 2, + 2, + 1, + 1, + 1, + 2, + 2, + 2, + 2, + 1, + 1, + 2, + 2, + 2, + 1, + 1, + 1, + 2, + 2, + 2, + 2, + 3, + 3, + 4, + 3, + 4, + ] +) + +FZorderArray = np.array( + [ + 0, + 0, + 2, + 2, + 2, + 2, + 2, + 2, + 4, + 4, + 4, + 4, + 4, + 4, + 4, + 3, + 3, + 3, + 3, + 3, + 6, + 6, + 6, + 6, + 6, + 6, + 6, + 0, + 0, + 0, + 0, + 0, + ] +) + +''' +>> @AUTHOR: Saransh Singh, + Lawrence Livermore National Lab, + saransh1@llnl.gov +>> @DATE: 10/28/2020 SS 1.0 original +>> @DETAILS: constants for sphere sectors used for IPF coloring +''' +# radius of homochoric sphere +hoR = (np.pi * 3.0 / 4.0) ** (1.0 / 3.0) + +# radius of homochoric sphere squared +hoR2 = (np.pi * 3.0 / 4.0) ** (2.0 / 3.0) + +# edge of cubochoric cube +cuA = np.pi ** (2.0 / 3.0) + +# semi-edge of cubochoric cube +cuA_2 = 0.5 * np.pi ** (2.0 / 3.0) + +Avol = (np.pi ** (5.0 / 6.0)) / (6 ** (1.0 / 6.0)) + +sc = Avol / cuA + +prek = 1.6434564029725040 + +pref = np.sqrt(6.0 / np.pi) + +tfit = np.array( + [ + 0.9999999999999968e0, + 
-0.49999999999986866e0, + -0.025000000000632055e0, + -0.003928571496460683e0, + -0.0008164666077062752e0, + -0.00019411896443261646e0, + -0.00004985822229871769e0, + -0.000014164962366386031e0, + -1.9000248160936107e-6, + -5.72184549898506e-6, + 7.772149920658778e-6, + -0.00001053483452909705e0, + 9.528014229335313e-6, + -5.660288876265125e-6, + 1.2844901692764126e-6, + 1.1255185726258763e-6, + -1.3834391419956455e-6, + 7.513691751164847e-7, + -2.401996891720091e-7, + 4.386887017466388e-8, + -3.5917775353564864e-9, + ] +) + +BP = np.array( + [ + 0.0, + 1.0, + 0.577350269189626, + 0.414213562373095, + 0.0, + 0.267949192431123, + 0.0, + 0.198912367379658, + 0.0, + 0.158384440324536, + 0.0, + 0.131652497587396, + ] +) + +# this is a constant which defines the sign of the +# cross-product in the quaternion multiplication rule +# we will set it to 1 for the standard rule always +pjik = 1 + +# sqrt 2 - 1 +tp_8 = np.sqrt(2.0) - 1.0 + +# 2 - sqrt(3) +tp_12 = 2.0 - np.sqrt(3.0) + +# for energy/wavelength conversions + + +def keVToAngstrom(x): + return (1e7 * scipyc.c * scipyc.h / scipyc.e) / np.array(x, dtype=float) + + +def _readenv(name, ctor, default): + try: + res = os.environ[name] + except KeyError: + return default + else: + try: + return ctor(res) + except: + import warnings + + warnings.warn( + "environ %s defined but failed to parse '%s'" % (name, res), + RuntimeWarning, + ) + del warnings + return default + + +del _readenv + + +def set_numba_cache(): + """Set the numba cache only if the following are true: + + 1. We are using numba - assumed true now + 2. We are on Windows + 3. We don't have write access to this file + 4. The NUMBA_CACHE_DIR environment variable is not defined + + If all of these are true, then numba will try to write to a + directory where it doesn't have permission, + and cause the application to + freeze. Avoid that by setting the cache dir ourselves. 
+ """ + + if os.name != 'nt': + return + + def is_writable_file(path): + # Unfortunately, os.access(__file__, os.W_OK) doesn't work on Windows. + # It always returns True. + try: + with open(path, 'r+'): + return True + except Exception: + return False + + try: + if is_writable_file(__file__): + return + except NameError: + # Assume it's not writable + pass + + key = 'NUMBA_CACHE_DIR' + if key in os.environ: + return + + import appdirs + + value = appdirs.user_data_dir('HEXRD') + os.environ[key] = value + + # Must reload numba config + from numba.core.config import reload_config + + reload_config() + + +set_numba_cache() +del set_numba_cache + + +# some physical constants +cAvogadro = 6.02214076e23 # Avogadro's constant Na +cBoltzmann = 1.380649e-23 # Boltzmann's constant, K +cCharge = 1.602176634e-19 # charge of electron +cJ2eV = 1.602176565e-19 # joule to ev, JperkeV*1e-3 +cLight = ( + 299792458.0 # speed of light, same as c above but name is more descriptive +) +cMoment = 9.2740100707e-24 # magnetic moment of electron +cPermea = 1.2566370616e-6 # permeability of free space +cPermit = 8.8541878163e-12 # permittivity of free space +cPlanck = 6.62607015e-34 # same as h above but name is more descriptive +cRestmass = 9.1093837090e-31 # rest mass of electron +cClassicalelectronRad = 2.8179403e-6 # classical electron radius in nm +cRestmasskeV = 510.99895069 # rest mass of electron in keV + +''' +adding another parametrization of the +scattering factors. these are more recent +and more accurate. also used in Vesta +(copied from there). see: + +New Analytical coherent Scattering-Factor Functions for Free Atoms and Ions +BY D. WAASMAIER AND A. KIRFEL +Acta Cryst. (1995). 
A51,416-431 +''' +scatfac = { + 'H': [ + 0.413048, + 0.294953, + 0.187491, + 0.080701, + 0.023736, + 4.9e-05, + 15.569946, + 32.398468, + 5.711404, + 61.889874, + 1.334118, + ], + 'H1-': [ + 0.70226, + 0.763666, + 0.248678, + 0.261323, + 0.023017, + 0.000425, + 23.945604, + 74.897919, + 6.773289, + 233.58345, + 1.337531, + ], + 'He': [ + 0.732354, + 0.753896, + 0.283819, + 0.190003, + 0.039139, + 0.000487, + 11.553918, + 4.595831, + 1.546299, + 26.463964, + 0.377523, + ], + 'Li': [ + 0.974637, + 0.158472, + 0.811855, + 0.262416, + 0.790108, + 0.002542, + 4.334946, + 0.342451, + 97.102966, + 201.363831, + 1.409234, + ], + 'Li1+': [ + 0.432724, + 0.549257, + 0.376575, + -0.336481, + 0.97606, + 0.001764, + 0.260367, + 1.042836, + 7.885294, + 0.260368, + 3.042539, + ], + 'Be': [ + 1.533712, + 0.638283, + 0.601052, + 0.106139, + 1.118414, + 0.002511, + 42.662079, + 0.59542, + 99.106499, + 0.15134, + 1.843093, + ], + 'Be2+': [ + 3.05543, + -2.372617, + 1.044914, + 0.544233, + 0.381737, + -0.653773, + 0.001226, + 0.001227, + 1.542106, + 0.456279, + 4.047479, + ], + 'B': [ + 2.085185, + 1.06458, + 1.062788, + 0.140515, + 0.641784, + 0.003823, + 23.494068, + 1.137894, + 61.238976, + 0.114886, + 0.399036, + ], + 'C': [ + 2.657506, + 1.078079, + 1.490909, + -4.24107, + 0.713791, + 4.297983, + 14.780758, + 0.776775, + 42.086842, + -0.000294, + 0.239535, + ], + 'Cval': [ + 1.258489, + 0.728215, + 1.119856, + 2.168133, + 0.705239, + 0.019722, + 10.683769, + 0.208177, + 0.836097, + 24.603704, + 58.954273, + ], + 'N': [ + 11.89378, + 3.277479, + 1.858092, + 0.858927, + 0.912985, + -11.804902, + 0.000158, + 10.232723, + 30.34469, + 0.656065, + 0.217287, + ], + 'O': [ + 2.960427, + 2.508818, + 0.637853, + 0.722838, + 1.142756, + 0.027014, + 14.182259, + 5.936858, + 0.112726, + 34.958481, + 0.39024, + ], + 'O1-': [ + 3.106934, + 3.235142, + 1.148886, + 0.783981, + 0.676953, + 0.046136, + 19.86808, + 6.960252, + 0.170043, + 65.693512, + 0.630757, + ], + 'O2-': [ + 3.990247, + 
2.300563, + 0.6072, + 1.907882, + 1.16708, + 0.025429, + 16.639956, + 5.636819, + 0.108493, + 47.299709, + 0.379984, + ], + 'F': [ + 3.511943, + 2.772244, + 0.678385, + 0.915159, + 1.089261, + 0.032557, + 10.687859, + 4.380466, + 0.093982, + 27.255203, + 0.313066, + ], + 'F1-': [ + 0.457649, + 3.841561, + 1.432771, + 0.801876, + 3.395041, + 0.069525, + 0.917243, + 5.507803, + 0.164955, + 51.076206, + 15.821679, + ], + 'Ne': [ + 4.183749, + 2.905726, + 0.520513, + 1.135641, + 1.228065, + 0.025576, + 8.175457, + 3.252536, + 0.063295, + 21.81391, + 0.224952, + ], + 'Na': [ + 4.910127, + 3.081783, + 1.262067, + 1.098938, + 0.560991, + 0.079712, + 3.281434, + 9.119178, + 0.102763, + 132.013947, + 0.405878, + ], + 'Na1+': [ + 3.14869, + 4.073989, + 0.767888, + 0.995612, + 0.968249, + 0.0453, + 2.594987, + 6.046925, + 0.070139, + 14.122657, + 0.217037, + ], + 'Mg': [ + 4.708971, + 1.194814, + 1.558157, + 1.170413, + 3.239403, + 0.126842, + 4.875207, + 108.506081, + 0.111516, + 48.292408, + 1.928171, + ], + 'Mg2+': [ + 3.062918, + 4.135106, + 0.853742, + 1.036792, + 0.85252, + 0.058851, + 2.015803, + 4.417941, + 0.065307, + 9.66971, + 0.187818, + ], + 'Al': [ + 4.730796, + 2.313951, + 1.54198, + 1.117564, + 3.154754, + 0.139509, + 3.628931, + 43.051167, + 0.09596, + 108.932388, + 1.555918, + ], + 'Al3+': [ + 4.132015, + 0.912049, + 1.102425, + 0.614876, + 3.219136, + 0.019397, + 3.528641, + 7.378344, + 0.133708, + 0.039065, + 1.644728, + ], + 'Si': [ + 5.275329, + 3.191038, + 1.511514, + 1.356849, + 2.519114, + 0.145073, + 2.631338, + 33.730728, + 0.081119, + 86.288643, + 1.170087, + ], + 'Siva': [ + 2.879033, + 3.07296, + 1.515981, + 1.39003, + 4.995051, + 0.14603, + 1.239713, + 38.706276, + 0.081481, + 93.616333, + 2.770293, + ], + 'Si4+': [ + 3.676722, + 3.828496, + 1.258033, + 0.419024, + 0.720421, + 0.097266, + 1.446851, + 3.013144, + 0.064397, + 0.206254, + 5.970222, + ], + 'P': [ + 1.950541, + 4.14693, + 1.49456, + 1.522042, + 5.729711, + 0.155233, + 0.908139, + 
27.044952, + 0.07128, + 67.520187, + 1.981173, + ], + 'S': [ + 6.372157, + 5.154568, + 1.473732, + 1.635073, + 1.209372, + 0.154722, + 1.514347, + 22.092527, + 0.061373, + 55.445175, + 0.646925, + ], + 'Cl': [ + 1.446071, + 6.870609, + 6.151801, + 1.750347, + 0.634168, + 0.146773, + 0.052357, + 1.193165, + 18.343416, + 46.398396, + 0.401005, + ], + 'Cl1-': [ + 1.061802, + 7.139886, + 6.524271, + 2.355626, + 35.829403, + -34.916603, + 0.144727, + 1.171795, + 19.467655, + 60.320301, + 0.000436, + ], + 'Ar': [ + 7.188004, + 6.638454, + 0.45418, + 1.929593, + 1.523654, + 0.265954, + 0.956221, + 15.339877, + 15.339862, + 39.043823, + 0.062409, + ], + 'K': [ + 8.163991, + 7.146945, + 1.07014, + 0.877316, + 1.486434, + 0.253614, + 12.816323, + 0.808945, + 210.327011, + 39.597652, + 0.052821, + ], + 'K1+': [ + -17.609339, + 1.494873, + 7.150305, + 10.899569, + 15.808228, + 0.257164, + 18.840979, + 0.053453, + 0.81294, + 22.264105, + 14.351593, + ], + 'Ca': [ + 8.593655, + 1.477324, + 1.436254, + 1.182839, + 7.113258, + 0.196255, + 10.460644, + 0.041891, + 81.390381, + 169.847839, + 0.688098, + ], + 'Ca2+': [ + 8.501441, + 12.880483, + 9.765095, + 7.156669, + 0.71116, + -21.013187, + 10.525848, + -0.004033, + 0.010692, + 0.684443, + 27.231771, + ], + 'Sc': [ + 1.476566, + 1.487278, + 1.600187, + 9.177463, + 7.09975, + 0.157765, + 53.131023, + 0.035325, + 137.319489, + 9.098031, + 0.602102, + ], + 'Sc3+': [ + 7.104348, + 1.511488, + -53.669773, + 38.404816, + 24.53224, + 0.118642, + 0.601957, + 0.033386, + 12.572138, + 10.859736, + 14.12523, + ], + 'Ti': [ + 9.818524, + 1.522646, + 1.703101, + 1.768774, + 7.082555, + 0.102473, + 8.001879, + 0.029763, + 39.885422, + 120.157997, + 0.532405, + ], + 'Ti2+': [ + 7.040119, + 1.496285, + 9.657304, + 0.006534, + 1.649561, + 0.150362, + 0.537072, + 0.031914, + 8.009958, + 201.800293, + 24.039482, + ], + 'Ti3+': [ + 36.587933, + 7.230255, + -9.086077, + 2.084594, + 17.294008, + -35.111282, + 0.000681, + 0.522262, + 5.262317, + 
15.881716, + 6.149805, + ], + 'Ti4+': [ + 45.355537, + 7.0929, + 7.483858, + -43.498817, + 1.678915, + -0.110628, + 9.252186, + 0.523046, + 13.082852, + 10.193876, + 0.023064, + ], + 'V': [ + 10.473575, + 1.547881, + 1.986381, + 1.865616, + 7.05625, + 0.067744, + 7.08194, + 0.02604, + 31.909672, + 108.022842, + 0.474882, + ], + 'V2+': [ + 7.754356, + 2.0641, + 2.576998, + 2.011404, + 7.126177, + -0.533379, + 7.066315, + 0.014993, + 7.066308, + 22.055786, + 0.467568, + ], + 'V3+': [ + 9.95848, + 1.59635, + 1.483442, + -10.846044, + 17.332867, + 0.474921, + 6.763041, + 0.056895, + 17.750029, + 0.328826, + 0.388013, + ], + 'V5+': [ + 15.575018, + 8.448095, + 1.61204, + -9.721855, + 1.534029, + 0.552676, + 0.682708, + 5.56664, + 10.527077, + 0.907961, + 0.066667, + ], + 'Cr': [ + 11.007069, + 1.555477, + 2.985293, + 1.347855, + 7.034779, + 0.06551, + 6.366281, + 0.023987, + 23.244839, + 105.774498, + 0.429369, + ], + 'Cr2+': [ + 10.598877, + 1.565858, + 2.72828, + 0.098064, + 6.959321, + 0.04987, + 6.151846, + 0.023519, + 17.432816, + 54.002388, + 0.426301, + ], + 'Cr3+': [ + 7.98931, + 1.765079, + 2.627125, + 1.82938, + 6.980908, + -0.192123, + 6.068867, + 0.018342, + 6.068887, + 16.309284, + 0.420864, + ], + 'Mn': [ + 11.709542, + 1.733414, + 2.673141, + 2.023368, + 7.00318, + -0.147293, + 5.59712, + 0.0178, + 21.78842, + 89.517914, + 0.383054, + ], + 'Mn2+': [ + 11.287712, + 26.042414, + 3.058096, + 0.090258, + 7.088306, + -24.566132, + 5.506225, + 0.000774, + 16.158575, + 54.766354, + 0.37558, + ], + 'Mn3+': [ + 6.926972, + 2.081342, + 11.128379, + 2.375107, + -0.419287, + -0.093713, + 0.378315, + 0.015054, + 5.379957, + 14.429586, + 0.004939, + ], + 'Mn4+': [ + 12.409131, + 7.466993, + 1.809947, + -12.138477, + 10.780248, + 0.672146, + 0.3004, + 0.112814, + 12.520756, + 0.168653, + 5.173237, + ], + 'Fe': [ + 12.311098, + 1.876623, + 3.066177, + 2.070451, + 6.975185, + -0.304931, + 5.009415, + 0.014461, + 18.74304, + 82.767876, + 0.346506, + ], + 'Fe2+': [ + 
11.776765, + 11.165097, + 3.533495, + 0.165345, + 7.036932, + -9.676919, + 4.912232, + 0.001748, + 14.166556, + 42.381958, + 0.341324, + ], + 'Fe3+': [ + 9.721638, + 63.403847, + 2.141347, + 2.629274, + 7.033846, + -61.930725, + 4.869297, + 0.000293, + 4.867602, + 13.539076, + 0.33852, + ], + 'Co': [ + 12.91451, + 2.481908, + 3.466894, + 2.106351, + 6.960892, + -0.936572, + 4.507138, + 0.009126, + 16.438129, + 76.98732, + 0.314418, + ], + 'Co2+': [ + 6.99384, + 26.285812, + 12.254289, + 0.246114, + 4.017407, + -24.796852, + 0.310779, + 0.000684, + 4.400528, + 35.741447, + 12.536393, + ], + 'Co3+': [ + 6.861739, + 2.67857, + 12.281889, + 3.501741, + -0.179384, + -1.147345, + 0.309794, + 0.008142, + 4.331703, + 11.914167, + 11.914167, + ], + 'Ni': [ + 13.521865, + 6.947285, + 3.866028, + 2.1359, + 4.284731, + -2.762697, + 4.077277, + 0.286763, + 14.622634, + 71.96608, + 0.004437, + ], + 'Ni2+': [ + 12.519017, + 37.832058, + 4.387257, + 0.661552, + 6.949072, + -36.344471, + 3.933053, + 0.000442, + 10.449184, + 23.860998, + 0.283723, + ], + 'Ni3+': [ + 13.579366, + 1.902844, + 12.859268, + 3.811005, + -6.838595, + -0.317618, + 0.31314, + 0.012621, + 3.906407, + 10.894311, + 0.344379, + ], + 'Cu': [ + 14.014192, + 4.784577, + 5.056806, + 1.457971, + 6.932996, + -3.254477, + 3.73828, + 0.003744, + 13.034982, + 72.554794, + 0.265666, + ], + 'Cu1+': [ + 12.960763, + 16.34215, + 1.110102, + 5.520682, + 6.915452, + -14.84932, + 3.57601, + 0.000975, + 29.523218, + 10.114283, + 0.261326, + ], + 'Cu2+': [ + 11.895569, + 16.344978, + 5.799817, + 1.048804, + 6.789088, + -14.878383, + 3.378519, + 0.000924, + 8.133653, + 20.526524, + 0.254741, + ], + 'Zn': [ + 14.741002, + 6.907748, + 4.642337, + 2.191766, + 38.424042, + -36.915829, + 3.388232, + 0.243315, + 11.903689, + 63.31213, + 0.000397, + ], + 'Zn2+': [ + 13.340772, + 10.428857, + 5.544489, + 0.762295, + 6.869172, + -8.945248, + 3.215913, + 0.001413, + 8.54268, + 21.891756, + 0.239215, + ], + 'Ga': [ + 15.758946, + 6.841123, 
+ 4.121016, + 2.714681, + 2.395246, + -0.847395, + 3.121754, + 0.226057, + 12.482196, + 66.203621, + 0.007238, + ], + 'Ga3+': [ + 13.123875, + 35.288189, + 6.126979, + 0.611551, + 6.724807, + -33.875122, + 2.80996, + 0.000323, + 6.831534, + 16.784311, + 0.212002, + ], + 'Ge': [ + 16.540613, + 1.5679, + 3.727829, + 3.345098, + 6.785079, + 0.018726, + 2.866618, + 0.012198, + 13.432163, + 58.866047, + 0.210974, + ], + 'Ge4+': [ + 6.876636, + 6.779091, + 9.969591, + 3.135857, + 0.152389, + 1.086542, + 2.025174, + 0.17665, + 3.573822, + 7.685848, + 16.677574, + ], + 'As': [ + 17.025642, + 4.503441, + 3.715904, + 3.9372, + 6.790175, + -2.984117, + 2.597739, + 0.003012, + 14.272119, + 50.437996, + 0.193015, + ], + 'Se': [ + 17.354071, + 4.653248, + 4.259489, + 4.136455, + 6.749163, + -3.160982, + 2.349787, + 0.00255, + 15.57946, + 45.181202, + 0.177432, + ], + 'Br': [ + 17.55057, + 5.411882, + 3.93718, + 3.880645, + 6.707793, + -2.492088, + 2.119226, + 16.557184, + 0.002481, + 42.164009, + 0.162121, + ], + 'Br1-': [ + 17.71431, + 6.466926, + 6.947385, + 4.402674, + -0.697279, + 1.152674, + 2.122554, + 19.050768, + 0.152708, + 58.690361, + 58.690372, + ], + 'Kr': [ + 17.655279, + 6.848105, + 4.171004, + 3.44676, + 6.6852, + -2.810592, + 1.908231, + 16.606236, + 0.001598, + 39.917473, + 0.146896, + ], + 'Rb': [ + 8.123134, + 2.138042, + 6.761702, + 1.156051, + 17.679546, + 1.139548, + 15.142385, + 33.542667, + 0.129372, + 224.132507, + 1.713368, + ], + 'Rb1+': [ + 17.68432, + 7.761588, + 6.680874, + 2.668883, + 0.070974, + 1.133263, + 1.710209, + 14.919863, + 0.128542, + 31.654478, + 0.128543, + ], + 'Sr': [ + 17.730219, + 9.795867, + 6.099763, + 2.620025, + 0.600053, + 1.140251, + 1.56306, + 14.310868, + 0.120574, + 135.771317, + 0.120574, + ], + 'Sr2+': [ + 17.694973, + 1.275762, + 6.154252, + 9.234786, + 0.515995, + 1.125309, + 1.550888, + 30.133041, + 0.118774, + 13.821799, + 0.118774, + ], + 'Y': [ + 17.79204, + 10.253252, + 5.714949, + 3.170516, + 0.918251, + 
1.131787, + 1.429691, + 13.132816, + 0.112173, + 108.197029, + 0.112173, + ], + 'Zr': [ + 17.859772, + 10.911038, + 5.821115, + 3.512513, + 0.746965, + 1.124859, + 1.310692, + 12.319285, + 0.104353, + 91.777542, + 0.104353, + ], + 'Zr4+': [ + 6.802956, + 17.699253, + 10.650647, + -0.248108, + 0.250338, + 0.827902, + 0.096228, + 1.296127, + 11.240715, + -0.219259, + -0.219021, + ], + 'Nb': [ + 17.958399, + 12.063054, + 5.007015, + 3.287667, + 1.531019, + 1.123452, + 1.21159, + 12.246687, + 0.098615, + 75.011948, + 0.098615, + ], + 'Nb3+': [ + 17.714323, + 1.675213, + 7.483963, + 8.322464, + 11.143573, + -8.339573, + 1.172419, + 30.102791, + 0.080255, + -0.002983, + 10.456687, + ], + 'Nb5+': [ + 17.580206, + 7.633277, + 10.793497, + 0.180884, + 67.837921, + -68.02478, + 1.165852, + 0.078558, + 9.507652, + 31.621656, + -0.000438, + ], + 'Mo': [ + 6.236218, + 17.987711, + 12.973127, + 3.451426, + 0.210899, + 1.10877, + 0.09078, + 1.10831, + 11.46872, + 66.684151, + 0.09078, + ], + 'Mo3+': [ + 7.44705, + 17.778122, + 11.886068, + 1.997905, + 1.789626, + -1.898764, + 0.072, + 1.073145, + 9.83472, + 28.221746, + -0.011674, + ], + 'Mo5+': [ + 7.929879, + 17.667669, + 11.515987, + 0.500402, + 77.444084, + -78.056595, + 0.068856, + 1.068064, + 9.046229, + 26.558945, + -0.000473, + ], + 'Mo6+': [ + 34.757683, + 9.653037, + 6.584769, + -18.628115, + 2.490594, + 1.141916, + 1.30177, + 7.123843, + 0.094097, + 1.617443, + 12.335434, + ], + 'Tc': [ + 17.840963, + 3.428236, + 1.373012, + 12.947364, + 6.335469, + 1.074784, + 1.005729, + 41.901382, + 119.320541, + 9.781542, + 0.083391, + ], + 'Ru': [ + 6.271624, + 17.906738, + 14.123269, + 3.746008, + 0.908235, + 1.043992, + 0.07704, + 0.928222, + 9.555345, + 35.86068, + 123.552246, + ], + 'Ru3+': [ + 17.894758, + 13.579529, + 10.729251, + 2.474095, + 48.227997, + -51.905243, + 0.902827, + 8.740579, + 0.045125, + 24.764954, + -0.001699, + ], + 'Ru4+': [ + 17.845776, + 13.455084, + 10.229087, + 1.653524, + 14.059795, + -17.241762, + 
0.90107, + 8.482392, + 0.045972, + 23.015272, + -0.004889, + ], + 'Rh': [ + 6.216648, + 17.919739, + 3.854252, + 0.840326, + 15.173498, + 0.995452, + 0.070789, + 0.856121, + 33.889484, + 121.686691, + 9.029517, + ], + 'Rh3+': [ + 17.758621, + 14.569813, + 5.29832, + 2.533579, + 0.879753, + 0.960843, + 0.841779, + 8.319533, + 0.06905, + 23.709131, + 0.06905, + ], + 'Rh4+': [ + 17.716188, + 14.446654, + 5.185801, + 1.703448, + 0.989992, + 0.959941, + 0.840572, + 8.100647, + 0.068995, + 22.357307, + 0.068995, + ], + 'Pd': [ + 6.121511, + 4.784063, + 16.631683, + 4.318258, + 13.246773, + 0.883099, + 0.062549, + 0.784031, + 8.751391, + 34.489983, + 0.784031, + ], + 'Pd2+': [ + 6.122282, + 15.651012, + 3.513508, + 9.06079, + 8.771199, + 0.879336, + 0.062424, + 8.018296, + 24.784275, + 0.776457, + 0.776457, + ], + 'Pd4+': [ + 6.152421, + -96.069023, + 31.622141, + 81.578255, + 17.801403, + 0.915874, + 0.063951, + 11.090354, + 13.466152, + 9.758302, + 0.783014, + ], + 'Ag': [ + 6.073874, + 17.155437, + 4.173344, + 0.852238, + 17.988686, + 0.756603, + 0.055333, + 7.896512, + 28.443739, + 110.376106, + 0.716809, + ], + 'Ag1+': [ + 6.091192, + 4.019526, + 16.948174, + 4.258638, + 13.889437, + 0.785127, + 0.056305, + 0.71934, + 7.758938, + 27.368349, + 0.71934, + ], + 'Ag2+': [ + 6.401808, + 48.699802, + 4.799859, + -32.332523, + 16.35671, + 1.068247, + 0.068167, + 0.94227, + 20.639496, + 1.100365, + 6.883131, + ], + 'Cd': [ + 6.080986, + 18.019468, + 4.018197, + 1.30351, + 17.974669, + 0.603504, + 0.04899, + 7.273646, + 29.119284, + 95.831207, + 0.661231, + ], + 'Cd2+': [ + 6.093711, + 43.909691, + 17.041306, + -39.675117, + 17.958918, + 0.664795, + 0.050624, + 8.654143, + 15.621396, + 11.082067, + 0.667591, + ], + 'In': [ + 6.196477, + 18.816183, + 4.050479, + 1.638929, + 17.962912, + 0.333097, + 0.042072, + 6.695665, + 31.00979, + 103.284348, + 0.610714, + ], + 'In3+': [ + 6.206277, + 18.497746, + 3.078131, + 10.524613, + 7.401234, + 0.293677, + 0.041357, + 6.605563, + 
18.79225, + 0.608082, + 0.608082, + ], + 'Sn': [ + 19.325171, + 6.281571, + 4.498866, + 1.856934, + 17.917318, + 0.119024, + 6.118104, + 0.036915, + 32.529045, + 95.037186, + 0.565651, + ], + 'Sn2+': [ + 6.353672, + 4.770377, + 14.672025, + 4.235959, + 18.002131, + -0.042519, + 0.03472, + 6.167891, + 6.167879, + 29.006456, + 0.561774, + ], + 'Sn4+': [ + 15.445732, + 6.420892, + 4.56298, + 1.713385, + 18.033537, + -0.172219, + 6.280898, + 0.033144, + 6.280899, + 17.983601, + 0.55798, + ], + 'Sb': [ + 5.394956, + 6.54957, + 19.650681, + 1.82782, + 17.867832, + -0.290506, + 33.326523, + 0.030974, + 5.564929, + 87.130966, + 0.523992, + ], + 'Sb3+': [ + 10.189171, + 57.461918, + 19.356573, + 4.862206, + -45.394096, + 1.516108, + 0.089485, + 0.375256, + 5.357987, + 22.153736, + 0.297768, + ], + 'Sb5+': [ + 17.920622, + 6.647932, + 12.724075, + 1.555545, + 7.600591, + -0.445371, + 0.522315, + 0.029487, + 5.71821, + 16.433775, + 5.718204, + ], + 'Te': [ + 6.660302, + 6.940756, + 19.847015, + 1.557175, + 17.802427, + -0.806668, + 33.031654, + 0.02575, + 5.065547, + 84.101616, + 0.48766, + ], + 'I': [ + 19.884502, + 6.736593, + 8.110516, + 1.170953, + 17.548716, + -0.448811, + 4.628591, + 0.027754, + 31.849096, + 84.406387, + 0.46355, + ], + 'I1-': [ + 20.01033, + 17.835524, + 8.10413, + 2.231118, + 9.158548, + -3.341004, + 4.565931, + 0.444266, + 32.430672, + 95.14904, + 0.014906, + ], + 'Xe': [ + 19.97892, + 11.774945, + 9.332182, + 1.244749, + 17.737501, + -6.065902, + 4.143356, + 0.010142, + 28.7962, + 75.280685, + 0.413616, + ], + 'Cs': [ + 17.418674, + 8.314444, + 10.323193, + 1.383834, + 19.876251, + -2.322802, + 0.399828, + 0.016872, + 25.605827, + 233.339676, + 3.826915, + ], + 'Cs1+': [ + 19.939056, + 24.967621, + 10.375884, + 0.454243, + 17.660248, + -19.394306, + 3.770511, + 0.00404, + 25.311275, + 76.537766, + 0.38473, + ], + 'Ba': [ + 19.747343, + 17.368477, + 10.465718, + 2.592602, + 11.003653, + -5.183497, + 3.481823, + 0.371224, + 21.226641, + 173.834274, + 
0.010719, + ], + 'Ba2+': [ + 19.7502, + 17.513683, + 10.884892, + 0.321585, + 65.149834, + -59.618172, + 3.430748, + 0.36159, + 21.358307, + 70.309402, + 0.001418, + ], + 'La': [ + 19.966019, + 27.329655, + 11.018425, + 3.086696, + 17.335455, + -21.745489, + 3.197408, + 0.003446, + 19.955492, + 141.381973, + 0.341817, + ], + 'La3+': [ + 19.688887, + 17.345703, + 11.356296, + 0.099418, + 82.358124, + -76.846909, + 3.146211, + 0.339586, + 18.753832, + 90.345459, + 0.001072, + ], + 'Ce': [ + 17.355122, + 43.988499, + 20.54665, + 3.13067, + 11.353665, + -38.386017, + 0.328369, + 0.002047, + 3.088196, + 134.907654, + 18.83296, + ], + 'Ce3+': [ + 26.593231, + 85.866432, + -6.677695, + 12.111847, + 17.401903, + -80.313423, + 3.280381, + 0.001012, + 4.313575, + 17.868504, + 0.326962, + ], + 'Ce4+': [ + 17.457533, + 25.659941, + 11.691037, + 19.695251, + -16.994749, + -3.515096, + 0.311812, + -0.003793, + 16.568687, + 2.886395, + -0.008931, + ], + 'Pr': [ + 21.551311, + 17.16173, + 11.903859, + 2.679103, + 9.564197, + -3.871068, + 2.995675, + 0.312491, + 17.716705, + 152.192825, + 0.010468, + ], + 'Pr3+': [ + 20.879841, + 36.035797, + 12.135341, + 0.283103, + 17.167803, + -30.500784, + 2.870897, + 0.002364, + 16.615236, + 53.909359, + 0.306993, + ], + 'Pr4+': [ + 17.496082, + 21.538509, + 20.403114, + 12.062211, + -7.492043, + -9.016722, + 0.294457, + -0.002742, + 2.772886, + 15.804613, + -0.013556, + ], + 'Nd': [ + 17.331244, + 62.783924, + 12.160097, + 2.663483, + 22.23995, + -57.189842, + 0.300269, + 0.00132, + 17.026001, + 148.748993, + 2.910268, + ], + 'Nd3+': [ + 17.120077, + 56.038139, + 21.468307, + 10.000671, + 2.905866, + -50.541992, + 0.291295, + 0.001421, + 2.743681, + 14.581367, + 22.485098, + ], + 'Pm': [ + 17.286388, + 51.560162, + 12.478557, + 2.675515, + 22.960947, + -45.973682, + 0.28662, + 0.00155, + 16.223755, + 143.984512, + 2.79648, + ], + 'Pm3+': [ + 22.221066, + 17.068142, + 12.805423, + 0.435687, + 52.23877, + -46.767181, + 2.635767, + 0.277039, + 
14.927315, + 45.768017, + 0.001455, + ], + 'Sm': [ + 23.700363, + 23.072214, + 12.777782, + 2.684217, + 17.204367, + -17.452166, + 2.689539, + 0.003491, + 15.495437, + 139.862473, + 0.274536, + ], + 'Sm3+': [ + 15.618565, + 19.538092, + 13.398946, + -4.358811, + 24.490461, + -9.714854, + 0.006001, + 0.306379, + 14.979594, + 0.748825, + 2.454492, + ], + 'Eu': [ + 17.186195, + 37.156837, + 13.103387, + 2.707246, + 24.419271, + -31.586687, + 0.261678, + 0.001995, + 14.78736, + 134.816299, + 2.581883, + ], + 'Eu2+': [ + 23.899035, + 31.657497, + 12.955752, + 1.700576, + 16.992199, + -26.204315, + 2.467332, + 0.00223, + 13.625002, + 35.089481, + 0.253136, + ], + 'Eu3+': [ + 17.758327, + 33.498665, + 24.067188, + 13.436883, + -9.019134, + -19.768026, + 0.244474, + -0.003901, + 2.487526, + 14.568011, + -0.015628, + ], + 'Gd': [ + 24.898117, + 17.104952, + 13.222581, + 3.266152, + 48.995213, + -43.505684, + 2.435028, + 0.246961, + 13.996325, + 110.863091, + 0.001383, + ], + 'Gd3+': [ + 24.344999, + 16.945311, + 13.866931, + 0.481674, + 93.506378, + -88.147179, + 2.333971, + 0.239215, + 12.982995, + 43.876347, + 0.000673, + ], + 'Tb': [ + 25.910013, + 32.344139, + 13.765117, + 2.751404, + 17.064405, + -26.851971, + 2.373912, + 0.002034, + 13.481969, + 125.83651, + 0.236916, + ], + 'Tb3+': [ + 24.878252, + 16.856016, + 13.663937, + 1.279671, + 39.271294, + -33.950317, + 2.223301, + 0.22729, + 11.812528, + 29.910065, + 0.001527, + ], + 'Dy': [ + 26.671785, + 88.687576, + 14.065445, + 2.768497, + 17.067781, + -83.279831, + 2.282593, + 0.000665, + 12.92023, + 121.937187, + 0.225531, + ], + 'Dy3+': [ + 16.864344, + 90.383461, + 13.675473, + 1.687078, + 25.540651, + -85.15065, + 0.216275, + 0.000593, + 11.121207, + 26.250975, + 2.13593, + ], + 'Ho': [ + 27.15019, + 16.999819, + 14.059334, + 3.386979, + 46.546471, + -41.165253, + 2.16966, + 0.215414, + 12.213148, + 100.506783, + 0.001211, + ], + 'Ho3+': [ + 16.837524, + 63.221336, + 13.703766, + 2.061602, + 26.202621, + 
-58.026505, + 0.206873, + 0.000796, + 10.500283, + 24.031883, + 2.05506, + ], + 'Er': [ + 28.174887, + 82.493271, + 14.624002, + 2.802756, + 17.018515, + -77.135223, + 2.120995, + 0.00064, + 11.915256, + 114.529938, + 0.207519, + ], + 'Er3+': [ + 16.810127, + 22.681061, + 13.864114, + 2.294506, + 26.864477, + -17.51346, + 0.198293, + 0.002126, + 9.973341, + 22.836388, + 1.979442, + ], + 'Tm': [ + 28.925894, + 76.173798, + 14.904704, + 2.814812, + 16.998117, + -70.839813, + 2.046203, + 0.000656, + 11.465375, + 111.41198, + 0.199376, + ], + 'Tm3+': [ + 16.7875, + 15.350905, + 14.182357, + 2.299111, + 27.573771, + -10.192087, + 0.190852, + 0.003036, + 9.602934, + 22.52688, + 1.912862, + ], + 'Yb': [ + 29.67676, + 65.624069, + 15.160854, + 2.830288, + 16.99785, + -60.313812, + 1.97763, + 0.00072, + 11.044622, + 108.139153, + 0.19211, + ], + 'Yb2+': [ + 28.443794, + 16.849527, + 14.165081, + 3.445311, + 28.308853, + -23.214935, + 1.863896, + 0.183811, + 9.225469, + 23.691355, + 0.001463, + ], + 'Yb3+': [ + 28.191629, + 16.828087, + 14.167848, + 2.744962, + 23.171774, + -18.103676, + 1.842889, + 0.182788, + 9.045957, + 20.799847, + 0.001759, + ], + 'Lu': [ + 30.122866, + 15.099346, + 56.314899, + 3.54098, + 16.943729, + -51.049416, + 1.88309, + 10.342764, + 0.00078, + 89.55925, + 0.183849, + ], + 'Lu3+': [ + 28.828693, + 16.823227, + 14.247617, + 3.079559, + 25.647667, + -20.626528, + 1.776641, + 0.17556, + 8.575531, + 19.693701, + 0.001453, + ], + 'Hf': [ + 30.617033, + 15.145351, + 54.933548, + 4.096253, + 16.896156, + -49.719837, + 1.795613, + 9.934469, + 0.000739, + 76.189705, + 0.175914, + ], + 'Hf4+': [ + 29.267378, + 16.792543, + 14.78531, + 2.184128, + 23.791996, + -18.820383, + 1.697911, + 0.168313, + 8.190025, + 18.277578, + 0.001431, + ], + 'Ta': [ + 31.066359, + 15.341823, + 49.278297, + 4.577665, + 16.828321, + -44.119026, + 1.708732, + 9.618455, + 0.00076, + 66.346199, + 0.168002, + ], + 'Ta5+': [ + 29.539469, + 16.741854, + 15.18207, + 1.642916, + 
16.437447, + -11.542459, + 1.612934, + 0.16046, + 7.654408, + 17.070732, + 0.001858, + ], + 'W': [ + 31.5079, + 15.682498, + 37.960129, + 4.885509, + 16.792112, + -32.864574, + 1.629485, + 9.446448, + 0.000898, + 59.980675, + 0.160798, + ], + 'W6+': [ + 29.729357, + 17.247808, + 15.184488, + 1.154652, + 0.739335, + 3.945157, + 1.501648, + 0.140803, + 6.880573, + 14.299601, + 14.299618, + ], + 'Re': [ + 31.888456, + 16.117104, + 42.390297, + 5.211669, + 16.767591, + -37.412682, + 1.549238, + 9.233474, + 0.000689, + 54.516373, + 0.152815, + ], + 'Os': [ + 32.210297, + 16.67844, + 48.559906, + 5.455839, + 16.735533, + -43.677956, + 1.473531, + 9.049695, + 0.000519, + 50.210201, + 0.145771, + ], + 'Os4+': [ + 17.113485, + 15.79237, + 23.342392, + 4.090271, + 7.671292, + 3.98839, + 0.13185, + 7.288542, + 1.389307, + 19.629425, + 1.389307, + ], + 'Ir': [ + 32.004436, + 1.975454, + 17.070105, + 15.939454, + 5.990003, + 4.018893, + 1.353767, + 81.014175, + 0.128093, + 7.661196, + 26.659403, + ], + 'Ir3+': [ + 31.537575, + 16.363338, + 15.597141, + 5.051404, + 1.436935, + 4.009459, + 1.334144, + 7.451918, + 0.127514, + 21.705648, + 0.127515, + ], + 'Ir4+': [ + 30.391249, + 16.146996, + 17.019068, + 4.458904, + 0.975372, + 4.006865, + 1.328519, + 7.181766, + 0.127337, + 19.060146, + 1.328519, + ], + 'Pt': [ + 31.273891, + 18.44544, + 17.063745, + 5.555933, + 1.57527, + 4.050394, + 1.316992, + 8.797154, + 0.124741, + 40.177994, + 1.316997, + ], + 'Pt2+': [ + 31.986849, + 17.249048, + 15.269374, + 5.760234, + 1.694079, + 4.032512, + 1.281143, + 7.625512, + 0.123571, + 24.190826, + 0.123571, + ], + 'Pt4+': [ + 41.932713, + 16.339224, + 17.653894, + 6.01242, + -12.036877, + 4.094551, + 1.111409, + 6.466086, + 0.128917, + 16.954155, + 0.778721, + ], + 'Au': [ + 16.77739, + 19.317156, + 32.979683, + 5.595453, + 10.576854, + -6.279078, + 0.122737, + 8.62157, + 1.256902, + 38.00882, + 0.000601, + ], + 'Au1+': [ + 32.124306, + 16.716476, + 16.8141, + 7.311565, + 0.993064, + 4.040792, 
+ 1.216073, + 7.165378, + 0.118715, + 20.442486, + 53.095985, + ], + 'Au3+': [ + 31.704271, + 17.545767, + 16.819551, + 5.52264, + 0.361725, + 4.042679, + 1.215561, + 7.220506, + 0.118812, + 20.05097, + 1.215562, + ], + 'Hg': [ + 16.83989, + 20.023823, + 28.428564, + 5.881564, + 4.714706, + 4.076478, + 0.115905, + 8.256927, + 1.19525, + 39.247227, + 1.19525, + ], + 'Hg1+': [ + 28.866837, + 19.27754, + 16.776051, + 6.281459, + 3.710289, + 4.06843, + 1.173967, + 7.583842, + 0.115351, + 29.055994, + 1.173968, + ], + 'Hg2+': [ + 32.411079, + 18.690371, + 16.711773, + 9.974835, + -3.847611, + 4.052869, + 1.16298, + 7.329806, + 0.114518, + 22.009489, + 22.009493, + ], + 'Tl': [ + 16.630795, + 19.386616, + 32.808571, + 1.747191, + 6.356862, + 4.066939, + 0.110704, + 7.181401, + 1.11973, + 90.660263, + 26.014978, + ], + 'Tl1+': [ + 32.295044, + 16.570049, + 17.991013, + 1.535355, + 7.554591, + 4.05403, + 1.101544, + 0.11002, + 6.528559, + 52.495068, + 20.338634, + ], + 'Tl3+': [ + 32.525639, + 19.139185, + 17.100321, + 5.891115, + 12.599463, + -9.256075, + 1.094966, + 6.900992, + 0.103667, + 18.489614, + -0.001401, + ], + 'Pb': [ + 16.419567, + 32.73859, + 6.530247, + 2.342742, + 19.916475, + 4.049824, + 0.105499, + 1.055049, + 25.02589, + 80.906593, + 6.664449, + ], + 'Pb2+': [ + 27.392647, + 16.496822, + 19.984501, + 6.813923, + 5.23391, + 4.065623, + 1.058874, + 0.106305, + 6.708123, + 24.395554, + 1.058874, + ], + 'Pb4+': [ + 32.505657, + 20.01424, + 14.645661, + 5.029499, + 1.760138, + 4.044678, + 1.047035, + 6.670321, + 0.105279, + 16.52504, + 0.105279, + ], + 'Bi': [ + 16.282274, + 32.725136, + 6.678302, + 2.69475, + 20.576559, + 4.040914, + 0.10118, + 1.002287, + 25.714146, + 77.057549, + 6.291882, + ], + 'Bi3+': [ + 32.461437, + 19.438683, + 16.302486, + 7.322662, + 0.431704, + 4.043703, + 0.99793, + 6.038867, + 0.101338, + 18.371586, + 46.361046, + ], + 'Bi5+': [ + 16.734028, + 20.580494, + 9.452623, + 61.155834, + -34.041023, + 4.113663, + 0.105076, + 4.773282, 
+ 11.762162, + 1.211775, + 1.619408, + ], + 'Po': [ + 16.289164, + 32.807171, + 21.095163, + 2.505901, + 7.254589, + 4.046556, + 0.098121, + 0.966265, + 6.046622, + 76.598068, + 28.096128, + ], + 'At': [ + 16.011461, + 32.615547, + 8.113899, + 2.884082, + 21.377867, + 3.995684, + 0.092639, + 0.904416, + 26.543257, + 68.372963, + 5.499512, + ], + 'Rn': [ + 16.070229, + 32.641106, + 21.489658, + 2.299218, + 9.480184, + 4.020977, + 0.090437, + 0.876409, + 5.239687, + 69.188477, + 27.632641, + ], + 'Fr': [ + 16.007385, + 32.66383, + 21.594351, + 1.598497, + 11.121192, + 4.003472, + 0.087031, + 0.840187, + 4.954467, + 199.805801, + 26.905106, + ], + 'Ra': [ + 32.56369, + 21.396671, + 11.298093, + 2.834688, + 15.914965, + 3.981773, + 0.80198, + 4.590666, + 22.758972, + 160.404388, + 0.083544, + ], + 'Ra2+': [ + 4.986228, + 32.474945, + 21.947443, + 11.800013, + 10.807292, + 3.956572, + 0.082597, + 0.791468, + 4.608034, + 24.792431, + 0.082597, + ], + 'Ac': [ + 15.914053, + 32.535042, + 21.553976, + 11.433394, + 3.612409, + 3.939212, + 0.080511, + 0.770669, + 4.352206, + 21.381622, + 130.500748, + ], + 'Ac3+': [ + 15.584983, + 32.022125, + 21.456327, + 0.757593, + 12.341252, + 3.838984, + 0.077438, + 0.739963, + 4.040735, + 47.525002, + 19.406845, + ], + 'Th': [ + 15.784024, + 32.454899, + 21.849222, + 4.239077, + 11.736191, + 3.922533, + 0.077067, + 0.735137, + 4.097976, + 109.464111, + 20.512138, + ], + 'Th4+': [ + 15.515445, + 32.090691, + 13.996399, + 12.918157, + 7.635514, + 3.831122, + 0.074499, + 0.711663, + 3.871044, + 18.596891, + 3.871044, + ], + 'Pa': [ + 32.740208, + 21.973675, + 12.957398, + 3.683832, + 15.744058, + 3.886066, + 0.709545, + 4.050881, + 19.231543, + 117.255005, + 0.07404, + ], + 'U': [ + 15.679275, + 32.824306, + 13.660459, + 3.687261, + 22.279434, + 3.854444, + 0.071206, + 0.681177, + 18.236156, + 112.500038, + 3.930325, + ], + 'U3+': [ + 15.360309, + 32.395657, + 21.96129, + 1.325894, + 14.251453, + 3.706622, + 0.067815, + 0.654643, + 
3.643409, + 39.604965, + 16.33057, + ], + 'U4+': [ + 15.355091, + 32.235306, + 0.557745, + 14.396367, + 21.751173, + 3.705863, + 0.067789, + 0.652613, + 42.354237, + 15.908239, + 3.553231, + ], + 'U6+': [ + 15.333844, + 31.770849, + 21.274414, + 13.872636, + 0.048519, + 3.700591, + 0.067644, + 0.646384, + 3.317894, + 14.65025, + 75.339699, + ], + 'Np': [ + 32.999901, + 22.638077, + 14.219973, + 3.67295, + 15.683245, + 3.769391, + 0.657086, + 3.854918, + 17.435474, + 109.464485, + 0.068033, + ], + 'Np3+': [ + 15.378152, + 32.572132, + 22.206125, + 1.413295, + 14.828381, + 3.60337, + 0.064613, + 0.63142, + 3.561936, + 37.875511, + 15.546129, + ], + 'Np4+': [ + 15.373926, + 32.423019, + 21.969994, + 0.662078, + 14.96935, + 3.603039, + 0.064597, + 0.629658, + 3.476389, + 39.438942, + 15.135764, + ], + 'Np6+': [ + 15.359986, + 31.992825, + 21.412458, + 0.066574, + 14.568174, + 3.600942, + 0.064528, + 0.624505, + 3.253441, + 67.658318, + 13.980832, + ], + 'Pu': [ + 33.281178, + 23.148544, + 15.153755, + 3.031492, + 15.704215, + 3.6642, + 0.634999, + 3.856168, + 16.849735, + 121.292038, + 0.064857, + ], + 'Pu3+': [ + 15.356004, + 32.769127, + 22.68021, + 1.351055, + 15.416232, + 3.428895, + 0.06059, + 0.604663, + 3.491509, + 37.260635, + 14.981921, + ], + 'Pu4+': [ + 15.416219, + 32.610569, + 22.256662, + 0.719495, + 15.518152, + 3.480408, + 0.061456, + 0.607938, + 3.411848, + 37.628792, + 14.46436, + ], + 'Pu6+': [ + 15.436506, + 32.289719, + 14.726737, + 15.012391, + 7.024677, + 3.502325, + 0.061815, + 0.606541, + 3.245363, + 13.616438, + 3.245364, + ], + 'Am': [ + 33.435162, + 23.657259, + 15.576339, + 3.027023, + 15.7461, + 3.54116, + 0.612785, + 3.792942, + 16.195778, + 117.757004, + 0.061755, + ], + 'Cm': [ + 15.804837, + 33.480801, + 24.150198, + 3.655563, + 15.499866, + 3.39084, + 0.058619, + 0.59016, + 3.67472, + 100.736191, + 15.408296, + ], + 'Bk': [ + 15.889072, + 33.625286, + 24.710381, + 3.707139, + 15.839268, + 3.213169, + 0.055503, + 0.569571, + 3.615472, 
+ 97.694786, + 14.754303, + ], + 'Cf': [ + 33.794075, + 25.467693, + 16.048487, + 3.657525, + 16.008982, + 3.005326, + 0.550447, + 3.581973, + 14.357388, + 96.064972, + 0.05245, + ], +} + +chargestate = { + 'H': ['0', '1-'], + 'He': ['0'], + 'Li': ['0', '1+'], + 'Be': ['0', '2+'], + 'B': ['0'], + 'C': ['0'], + 'N': ['0'], + 'O': ['0', '1-', '2-'], + 'F': ['0', '1-'], + 'Ne': ['0'], + 'Na': ['0', '1+'], + 'Mg': ['0', '2+'], + 'Al': ['0', '3+'], + 'Si': ['0', '4+'], + 'P': ['0'], + 'S': ['0'], + 'Cl': ['0', '1-'], + 'Ar': ['0'], + 'K': ['0', '1+'], + 'Ca': ['0', '2+'], + 'Sc': ['0', '3+'], + 'Ti': ['0', '2+', '3+', '4+'], + 'V': ['0', '2+', '3+', '5+'], + 'Cr': ['0', '2+', '3+'], + 'Mn': ['0', '2+', '3+', '4+'], + 'Fe': ['0', '2+', '3+'], + 'Co': ['0', '2+', '3+'], + 'Ni': ['0', '2+', '3+'], + 'Cu': ['0', '1+', '2+'], + 'Zn': ['0', '2+'], + 'Ga': ['0', '3+'], + 'Ge': ['0', '4+'], + 'As': ['0'], + 'Se': ['0'], + 'Br': ['0', '1-'], + 'Kr': ['0'], + 'Rb': ['0', '1+'], + 'Sr': ['0', '2+'], + 'Y': ['0'], + 'Zr': ['0', '4+'], + 'Nb': ['0', '3+', '5+'], + 'Mo': ['0', '3+', '5+', '6+'], + 'Tc': ['0'], + 'Ru': ['0', '3+', '4+'], + 'Rh': ['0', '3+', '4+'], + 'Pd': ['0', '2+', '4+'], + 'Ag': ['0', '1+', '2+'], + 'Cd': ['0', '2+'], + 'In': ['0', '3+'], + 'Sn': ['0', '2+', '4+'], + 'Sb': ['0', '3+', '5+'], + 'Te': ['0'], + 'I': ['0'], + 'Xe': ['0'], + 'Cs': ['0', '1+'], + 'Ba': ['0', '2+'], + 'La': ['0', '3+'], + 'Ce': ['0', '3+', '4+'], + 'Pr': ['0', '3+', '4+'], + 'Nd': ['0', '3+'], + 'Pm': ['0', '3+'], + 'Sm': ['0', '3+'], + 'Eu': ['0', '2+', '3+'], + 'Gd': ['0', '3+'], + 'Tb': ['0', '3+'], + 'Dy': ['0', '3+'], + 'Ho': ['0', '3+'], + 'Er': ['0', '3+'], + 'Tm': ['0', '3+'], + 'Yb': ['0', '2+', '3+'], + 'Lu': ['0', '3+'], + 'Hf': ['0', '4+'], + 'Ta': ['0', '5+'], + 'W': ['0'], + 'Re': ['0'], + 'Os': ['0', '4+'], + 'Ir': ['0', '3+', '4+'], + 'Pt': ['0', '2+', '4+'], + 'Au': ['0', '1+', '3+'], + 'Hg': ['0', '1+', '2+'], + 'Tl': ['0', '1+', '3+'], + 'Pb': ['0', '2+', '4+'], + 'Bi': 
['0', '3+', '5+'], + 'Po': ['0'], + 'At': ['0'], + 'Rn': ['0'], + 'Fr': ['0'], + 'Ra': ['0', '2+'], + 'Ac': ['0', '3+'], + 'Th': ['0', '4+'], + 'Pa': ['0'], + 'U': ['0', '3+', '4+', '6+'], + 'Np': ['0', '3+', '4+', '6+'], + 'Pu': ['0', '3+', '4+', '6+'], + 'Am': ['0'], + 'Cm': ['0'], + 'Bk': ['0'], + 'Cf': ['0'], +} +''' +this dictionary tabulates the small +nuclear Thomson term fNT for all +elements up to Z=92 +''' +fNT = { + 'H': -0.00054423, + 'He': -0.00054817, + 'Li': -0.00071131, + 'Be': -0.00097394, + 'B': -0.0012687, + 'C': -0.0016442, + 'N': -0.0019191, + 'O': -0.0021944, + 'F': -0.0023389, + 'Ne': -0.0027186, + 'Na': -0.0028873, + 'Mg': -0.0032502, + 'Al': -0.0034361, + 'Si': -0.0038284, + 'P': -0.003985, + 'S': -0.0043804, + 'Cl': -0.0044718, + 'Ar': -0.0044493, + 'K': -0.0050651, + 'Ca': -0.0054748, + 'Sc': -0.0053814, + 'Ti': -0.0055454, + 'V': -0.0056967, + 'Cr': -0.006077, + 'Mn': -0.0062409, + 'Fe': -0.0066403, + 'Co': -0.0067859, + 'Ni': -0.0073281, + 'Cu': -0.0072602, + 'Zn': -0.0075516, + 'Ga': -0.0075615, + 'Ge': -0.0077386, + 'As': -0.0079737, + 'Se': -0.0080314, + 'Br': -0.0084102, + 'Kr': -0.008484, + 'Rb': -0.008787, + 'Sr': -0.0090407, + 'Y': -0.0093851, + 'Zr': -0.0096221, + 'Nb': -0.0099257, + 'Mo': -0.010086, + 'Tc': -0.01035, + 'Ru': -0.010508, + 'Rh': -0.010795, + 'Pd': -0.010908, + 'Ag': -0.011234, + 'Cd': -0.011244, + 'In': -0.011471, + 'Sn': -0.011555, + 'Sb': -0.01172, + 'Te': -0.011625, + 'I': -0.012143, + 'Xe': -0.012184, + 'Cs': -0.012486, + 'Ba': -0.012527, + 'La': -0.012831, + 'Ce': -0.01317, + 'Pr': -0.013552, + 'Nd': -0.013692, + 'Pm': -0.014078, + 'Sm': -0.014025, + 'Eu': -0.014328, + 'Gd': -0.014289, + 'Tb': -0.014584, + 'Dy': -0.014705, + 'Ho': -0.014931, + 'Er': -0.015166, + 'Tm': -0.01546, + 'Yb': -0.015534, + 'Lu': -0.015805, + 'Hf': -0.015933, + 'Ta': -0.016156, + 'W': -0.01634, + 'Re': -0.016572, + 'Os': -0.016659, + 'Ir': -0.016921, + 'Pt': -0.017109, + 'Au': -0.017382, + 'Hg': -0.017503, + 'Tl': -0.01761, + 'Pb': 
-0.017802, + 'Bi': -0.018084, + 'Po': -0.01852, + 'At': -0.018874, + 'Rn': -0.018276, + 'Fr': -0.01862, + 'Ra': -0.018795, + 'Ac': -0.01914, + 'Th': -0.01915, + 'Pa': -0.019663, + 'U': -0.019507, +} + +''' +relativistic correction factor for in anomalous scattering for all elements upto Z=92 +''' +frel = { + 'H': 0.0, + 'He': 0.0, + 'Li': -0.0006, + 'Be': -0.0006, + 'B': -0.0012, + 'C': -0.0018, + 'N': -0.003, + 'O': -0.0042, + 'F': -0.0054, + 'Ne': -0.0066, + 'Na': -0.0084, + 'Mg': -0.0108, + 'Al': -0.0126, + 'Si': -0.0156, + 'P': -0.018, + 'S': -0.021, + 'Cl': -0.0246, + 'Ar': -0.0282, + 'K': -0.0318, + 'Ca': -0.036, + 'Sc': -0.0408, + 'Ti': -0.045, + 'V': -0.0504, + 'Cr': -0.0558, + 'Mn': -0.0612, + 'Fe': -0.0678, + 'Co': -0.0738, + 'Ni': -0.081, + 'Cu': -0.0876, + 'Zn': -0.0954, + 'Ga': -0.1032, + 'Ge': -0.1116, + 'As': -0.12, + 'Se': -0.129, + 'Br': -0.1386, + 'Kr': -0.1482, + 'Rb': -0.1584, + 'Sr': -0.1692, + 'Y': -0.18, + 'Zr': -0.1914, + 'Nb': -0.2028, + 'Mo': -0.2154, + 'Tc': -0.228, + 'Ru': -0.2406, + 'Rh': -0.2544, + 'Pd': -0.2682, + 'Ag': -0.2826, + 'Cd': -0.2976, + 'In': -0.3126, + 'Sn': -0.3282, + 'Sb': -0.345, + 'Te': -0.3612, + 'I': -0.3786, + 'Xe': -0.396, + 'Cs': -0.414, + 'Ba': -0.4326, + 'La': -0.4518, + 'Ce': -0.4716, + 'Pr': -0.4914, + 'Nd': -0.5124, + 'Pm': -0.5334, + 'Sm': -0.555, + 'Eu': -0.5772, + 'Gd': -0.6, + 'Tb': -0.6234, + 'Dy': -0.6474, + 'Ho': -0.6714, + 'Er': -0.6966, + 'Tm': -0.7224, + 'Yb': -0.7488, + 'Lu': -0.7758, + 'Hf': -0.8028, + 'Ta': -0.831, + 'W': -0.8598, + 'Re': -0.8892, + 'Os': -0.9192, + 'Ir': -0.9498, + 'Pt': -0.9816, + 'Au': -1.0134, + 'Hg': -1.0458, + 'Tl': -1.0794, + 'Pb': -1.1136, + 'Bi': -1.1484, + 'Po': -1.1838, + 'At': -1.2198, + 'Rn': -1.257, + 'Fr': -1.2942, + 'Ra': -1.3326, + 'Ac': -1.3722, + 'Th': -1.4118, + 'Pa': -1.4526, + 'U': -1.494, +} + + +''' +atomic weights for things like density computations +(from NIST elemental data base) +''' +atom_weights = np.array( + [ + 1.00794, + 4.002602, + 6.941, + 
9.012182, + 10.811, + 12.0107, + 14.0067, + 15.9994, + 18.9984032, + 20.1797, + 22.98976928, + 24.3050, + 26.9815386, + 28.0855, + 30.973762, + 32.065, + 35.453, + 39.948, + 39.0983, + 40.078, + 44.955912, + 47.867, + 50.9415, + 51.9961, + 54.938045, + 55.845, + 58.933195, + 58.6934, + 63.546, + 65.38, + 69.723, + 72.64, + 74.92160, + 78.96, + 79.904, + 83.798, + 85.4678, + 87.62, + 88.90585, + 91.224, + 92.90638, + 95.96, + 98.9062, + 101.07, + 102.90550, + 106.42, + 107.8682, + 112.411, + 114.818, + 118.710, + 121.760, + 127.60, + 126.90447, + 131.293, + 132.9054519, + 137.327, + 138.90547, + 140.116, + 140.90765, + 144.242, + 145.0, + 150.36, + 151.964, + 157.25, + 158.92535, + 162.500, + 164.93032, + 167.259, + 168.93421, + 173.054, + 174.9668, + 178.49, + 180.94788, + 183.84, + 186.207, + 190.23, + 192.217, + 195.084, + 196.966569, + 200.59, + 204.3833, + 207.2, + 208.98040, + 209.0, + 210.0, + 222.0, + 223.0, + 226.0, + 227.0, + 232.03806, + 231.03588, + 238.02891, + 237.0, + 244.0, + 243.0, + 247.0, + 247.0, + 251.0, + ] +) + +""" +dictionary of atomic weights +""" +ATOM_WEIGHTS_DICT = { + 'H': 1.00794, + 'He': 4.002602, + 'Li': 6.941, + 'Be': 9.012182, + 'B': 10.811, + 'C': 12.0107, + 'N': 14.0067, + 'O': 15.9994, + 'F': 18.9984032, + 'Ne': 20.1797, + 'Na': 22.98976928, + 'Mg': 24.305, + 'Al': 26.9815386, + 'Si': 28.0855, + 'P': 30.973762, + 'S': 32.065, + 'Cl': 35.453, + 'Ar': 39.948, + 'K': 39.0983, + 'Ca': 40.078, + 'Sc': 44.955912, + 'Ti': 47.867, + 'V': 50.9415, + 'Cr': 51.9961, + 'Mn': 54.938045, + 'Fe': 55.845, + 'Co': 58.933195, + 'Ni': 58.6934, + 'Cu': 63.546, + 'Zn': 65.38, + 'Ga': 69.723, + 'Ge': 72.64, + 'As': 74.9216, + 'Se': 78.96, + 'Br': 79.904, + 'Kr': 83.798, + 'Rb': 85.4678, + 'Sr': 87.62, + 'Y': 88.90585, + 'Zr': 91.224, + 'Nb': 92.90638, + 'Mo': 95.96, + 'Tc': 98.9062, + 'Ru': 101.07, + 'Rh': 102.9055, + 'Pd': 106.42, + 'Ag': 107.8682, + 'Cd': 112.411, + 'In': 114.818, + 'Sn': 118.71, + 'Sb': 121.76, + 'Te': 127.6, + 'I': 126.90447, + 
'Xe': 131.293, + 'Cs': 132.9054519, + 'Ba': 137.327, + 'La': 138.90547, + 'Ce': 140.116, + 'Pr': 140.90765, + 'Nd': 144.242, + 'Pm': 145.0, + 'Sm': 150.36, + 'Eu': 151.964, + 'Gd': 157.25, + 'Tb': 158.92535, + 'Dy': 162.5, + 'Ho': 164.93032, + 'Er': 167.259, + 'Tm': 168.93421, + 'Yb': 173.054, + 'Lu': 174.9668, + 'Hf': 178.49, + 'Ta': 180.94788, + 'W': 183.84, + 'Re': 186.207, + 'Os': 190.23, + 'Ir': 192.217, + 'Pt': 195.084, + 'Au': 196.966569, + 'Hg': 200.59, + 'Tl': 204.3833, + 'Pb': 207.2, + 'Bi': 208.9804, + 'Po': 209.0, + 'At': 210.0, + 'Rn': 222.0, + 'Fr': 223.0, + 'Ra': 226.0, + 'Ac': 227.0, + 'Th': 232.0377, + 'Pa': 231.03588, + 'U': 238.02891, + 'Np': 237.0, + 'Pu': 244.0, + 'Am': 243.0, + 'Cm': 247.0, + 'Bk': 247.0, + 'Cf': 251.0, +} + +""" +densities of elements in g/cc +""" +DENSITY = { + 'H': 8.99e-05, + 'He': 0.0001785, + 'Li': 0.535, + 'Be': 1.848, + 'B': 2.46, + 'C': 2.26, + 'N': 0.001251, + 'O': 0.001429, + 'F': 0.001696, + 'Ne': 0.0009, + 'Na': 0.968, + 'Mg': 1.738, + 'Al': 2.7, + 'Si': 2.33, + 'P': 1.823, + 'S': 1.96, + 'Cl': 0.003214, + 'Ar': 0.001784, + 'K': 0.856, + 'Ca': 1.55, + 'Sc': 2.985, + 'Ti': 4.507, + 'V': 6.11, + 'Cr': 7.14, + 'Mn': 7.47, + 'Fe': 7.874, + 'Co': 8.9, + 'Ni': 8.908, + 'Cu': 8.92, + 'Zn': 7.14, + 'Ga': 5.904, + 'Ge': 5.323, + 'As': 5.727, + 'Se': 4.819, + 'Br': 3.12, + 'Kr': 0.00375, + 'Rb': 1.532, + 'Sr': 2.63, + 'Y': 4.472, + 'Zr': 6.511, + 'Nb': 8.57, + 'Mo': 10.28, + 'Tc': 11.5, + 'Ru': 12.37, + 'Rh': 12.45, + 'Pd': 12.023, + 'Ag': 10.49, + 'Cd': 8.65, + 'In': 7.31, + 'Sn': 7.31, + 'Sb': 6.697, + 'Te': 6.24, + 'I': 4.94, + 'Xe': 0.0059, + 'Cs': 1.879, + 'Ba': 3.51, + 'La': 6.146, + 'Ce': 6.689, + 'Pr': 6.64, + 'Nd': 7.01, + 'Pm': 7.264, + 'Sm': 7.353, + 'Eu': 5.244, + 'Gd': 7.901, + 'Tb': 8.219, + 'Dy': 8.551, + 'Ho': 8.795, + 'Er': 9.066, + 'Tm': 9.321, + 'Yb': 6.57, + 'Lu': 9.841, + 'Hf': 13.31, + 'Ta': 16.65, + 'W': 19.25, + 'Re': 21.02, + 'Os': 22.59, + 'Ir': 22.56, + 'Pt': 21.09, + 'Au': 19.3, + 'Hg': 13.534, + 
'Tl': 11.85, + 'Pb': 11.34, + 'Bi': 9.78, + 'Po': 9.196, + 'At': None, + 'Rn': 0.00973, + 'Fr': None, + 'Ra': 5.0, + 'Ac': 10.07, + 'Th': 11.724, + 'Pa': 15.37, + 'U': 19.05, + 'Np': 20.45, + 'Pu': 19.816, + 'Am': 13.67, + 'Cm': 13.51, + 'Bk': 14.78, + 'Cf': 15.1, +} + +# some polymer densities commonly used in hexrd +DENSITY_COMPOUNDS = { + 'C10H8O4': 1.4, + 'Ba2263F2263Br1923I339C741H1730N247O494': 3.3, + 'LiF': 2.64, + 'quartz': 2.65, + 'diamond': 3.5, + 'C22H10N2O5': 1.42, # kapton +} + +''' +dictionary of atomic numbers with element symbol as keys +used in I/O from cif file +''' +ptable = { + 'H': 1, + 'He': 2, + 'Li': 3, + 'Be': 4, + 'B': 5, + 'C': 6, + 'N': 7, + 'O': 8, + 'F': 9, + 'Ne': 10, + 'Na': 11, + 'Mg': 12, + 'Al': 13, + 'Si': 14, + 'P': 15, + 'S': 16, + 'Cl': 17, + 'Ar': 18, + 'K': 19, + 'Ca': 20, + 'Sc': 21, + 'Ti': 22, + 'V': 23, + 'Cr': 24, + 'Mn': 25, + 'Fe': 26, + 'Co': 27, + 'Ni': 28, + 'Cu': 29, + 'Zn': 30, + 'Ga': 31, + 'Ge': 32, + 'As': 33, + 'Se': 34, + 'Br': 35, + 'Kr': 36, + 'Rb': 37, + 'Sr': 38, + 'Y': 39, + 'Zr': 40, + 'Nb': 41, + 'Mo': 42, + 'Tc': 43, + 'Ru': 44, + 'Rh': 45, + 'Pd': 46, + 'Ag': 47, + 'Cd': 48, + 'In': 49, + 'Sn': 50, + 'Sb': 51, + 'Te': 52, + 'I': 53, + 'Xe': 54, + 'Cs': 55, + 'Ba': 56, + 'La': 57, + 'Ce': 58, + 'Pr': 59, + 'Nd': 60, + 'Pm': 61, + 'Sm': 62, + 'Eu': 63, + 'Gd': 64, + 'Tb': 65, + 'Dy': 66, + 'Ho': 67, + 'Er': 68, + 'Tm': 69, + 'Yb': 70, + 'Lu': 71, + 'Hf': 72, + 'Ta': 73, + 'W': 74, + 'Re': 75, + 'Os': 76, + 'Ir': 77, + 'Pt': 78, + 'Au': 79, + 'Hg': 80, + 'Tl': 81, + 'Pb': 82, + 'Bi': 83, + 'Po': 84, + 'At': 85, + 'Rn': 86, + 'Fr': 87, + 'Ra': 88, + 'Ac': 89, + 'Th': 90, + 'Pa': 91, + 'U': 92, + 'Np': 93, + 'Pu': 94, + 'Am': 95, + 'Cm': 96, + 'Bk': 97, + 'Cf': 98, + 'Es': 99, + 'Fm': 100, + 'Md': 101, + 'No': 102, + 'Lr': 103, + 'Rf': 104, + 'Db': 105, + 'Sg': 106, + 'Bh': 107, + 'Hs': 108, + 'Mt': 109, +} + +ptableinverse = dict.fromkeys(ptable.values()) +for k, v in ptable.items(): + ptableinverse[v] 
= k + +''' +listing the symmorphic space groups +''' +sgnum_symmorphic = np.array( + [ + 1, + 2, + 3, + 5, + 6, + 8, + 10, + 12, + 16, + 21, + 22, + 23, + 25, + 35, + 38, + 42, + 44, + 47, + 65, + 69, + 71, + 75, + 79, + 81, + 82, + 83, + 87, + 89, + 97, + 99, + 107, + 111, + 115, + 119, + 121, + 123, + 139, + 143, + 146, + 147, + 148, + 149, + 150, + 155, + 156, + 157, + 160, + 162, + 164, + 166, + 168, + 174, + 175, + 177, + 183, + 187, + 189, + 191, + 195, + 196, + 197, + 200, + 202, + 204, + 207, + 209, + 211, + 215, + 216, + 217, + 221, + 225, + 229, + ] +) + +''' this variable encodes all the generators +(including translations) for all 230 space groups +will be used to compute the full space group symmetry +operators +''' +SYM_GL = [ + "000 ", + "100 ", + "01cOOO0 ", + "01cODO0 ", + "02aDDOcOOO0 ", + "01jOOO0 ", + "01jOOD0 ", + "02aDDOjOOO0 ", + "02aDDOjOOD0 ", + "11cOOO0 ", + "11cODO0 ", + "12aDDOcOOO0 ", + "11cOOD0 ", + "11cODD0 ", + "12aDDOcOOD0 ", + "02bOOOcOOO0 ", + "02bOODcOOD0 ", + "02bOOOcDDO0 ", + "02bDODcODD0 ", + "03aDDObOODcOOD0 ", + "03aDDObOOOcOOO0 ", + "04aODDaDODbOOOcOOO0 ", + "03aDDDbOOOcOOO0 ", + "03aDDDbDODcODD0 ", + "02bOOOjOOO0 ", + "02bOODjOOD0 ", + "02bOOOjOOD0 ", + "02bOOOjDOO0 ", + "02bOODjDOO0 ", + "02bOOOjODD0 ", + "02bDODjDOD0 ", + "02bOOOjDDO0 ", + "02bOODjDDO0 ", + "02bOOOjDDD0 ", + "03aDDObOOOjOOO0 ", + "03aDDObOODjOOD0 ", + "03aDDObOOOjOOD0 ", + "03aODDbOOOjOOO0 ", + "03aODDbOOOjODO0 ", + "03aODDbOOOjDOO0 ", + "03aODDbOOOjDDO0 ", + "04aODDaDODbOOOjOOO0 ", + "04aODDaDODbOOOjBBB0 ", + "03aDDDbOOOjOOO0 ", + "03aDDDbOOOjDDO0 ", + "03aDDDbOOOjDOO0 ", + "12bOOOcOOO0 ", + "03bOOOcOOOhDDD1BBB ", + "12bOOOcOOD0 ", + "03bOOOcOOOhDDO1BBO ", + "12bDOOcOOO0 ", + "12bDOOcDDD0 ", + "12bDODcDOD0 ", + "12bDOOcOOD0 ", + "12bOOOcDDO0 ", + "12bDDOcODD0 ", + "12bOODcODD0 ", + "12bOOOcDDD0 ", + "03bOOOcDDOhDDO1BBO ", + "12bDDDcOOD0 ", + "12bDODcODD0 ", + "12bDODcODO0 ", + "13aDDObOODcOOD0 ", + "13aDDObODDcODD0 ", + "13aDDObOOOcOOO0 ", + 
"13aDDObOOOcOOD0 ", + "13aDDObODOcODO0 ", + "04aDDObDDOcOOOhODD1OBB ", + "14aODDaDODbOOOcOOO0 ", + "05aODDaDODbOOOcOOOhBBB1ZZZ ", + "13aDDDbOOOcOOO0 ", + "13aDDDbOOOcDDO0 ", + "13aDDDbDODcODD0 ", + "13aDDDbODOcODO0 ", + "02bOOOgOOO0 ", + "02bOODgOOB0 ", + "02bOOOgOOD0 ", + "02bOODgOOF0 ", + "03aDDDbOOOgOOO0 ", + "03aDDDbDDDgODB0 ", + "02bOOOmOOO0 ", + "03aDDDbOOOmOOO0 ", + "12bOOOgOOO0 ", + "12bOOOgOOD0 ", + "03bOOOgDDOhDDO1YBO ", + "03bOOOgDDDhDDD1YYY ", + "13aDDDbOOOgOOO0 ", + "04aDDDbDDDgODBhODB1OYZ ", + "03bOOOgOOOcOOO0 ", + "03bOOOgDDOcDDO0 ", + "03bOODgOOBcOOO0 ", + "03bOODgDDBcDDB0 ", + "03bOOOgOODcOOO0 ", + "03bOOOgDDDcDDD0 ", + "03bOODgOOFcOOO0 ", + "03bOODgDDFcDDF0 ", + "04aDDDbOOOgOOOcOOO0 ", + "04aDDDbDDDgODBcDOF0 ", + "03bOOOgOOOjOOO0 ", + "03bOOOgOOOjDDO0 ", + "03bOOOgOODjOOD0 ", + "03bOOOgDDDjDDD0 ", + "03bOOOgOOOjOOD0 ", + "03bOOOgOOOjDDD0 ", + "03bOOOgOODjOOO0 ", + "03bOOOgOODjDDO0 ", + "04aDDDbOOOgOOOjOOO0 ", + "04aDDDbOOOgOOOjOOD0 ", + "04aDDDbDDDgODBjOOO0 ", + "04aDDDbDDDgODBjOOD0 ", + "03bOOOmOOOcOOO0 ", + "03bOOOmOOOcOOD0 ", + "03bOOOmOOOcDDO0 ", + "03bOOOmOOOcDDD0 ", + "03bOOOmOOOjOOO0 ", + "03bOOOmOOOjOOD0 ", + "03bOOOmOOOjDDO0 ", + "03bOOOmOOOjDDD0 ", + "04aDDDbOOOmOOOjOOO0 ", + "04aDDDbOOOmOOOjOOD0 ", + "04aDDDbOOOmOOOcOOO0 ", + "04aDDDbOOOmOOOcDOF0 ", + "13bOOOgOOOcOOO0 ", + "13bOOOgOOOcOOD0 ", + "04bOOOgOOOcOOOhDDO1YYO ", + "04bOOOgOOOcOOOhDDD1YYY ", + "13bOOOgOOOcDDO0 ", + "13bOOOgOOOcDDD0 ", + "04bOOOgDDOcDDOhDDO1YBO ", + "04bOOOgDDOcDDDhDDO1YBO ", + "13bOOOgOODcOOO0 ", + "13bOOOgOODcOOD0 ", + "04bOOOgDDDcOODhDDD1YBY ", + "04bOOOgDDDcOOOhDDD1YBY ", + "13bOOOgOODcDDO0 ", + "13bOOOgDDDcDDD0 ", + "04bOOOgDDDcDDDhDDD1YBY ", + "04bOOOgDDDcDDOhDDD1YBY ", + "14aDDDbOOOgOOOcOOO0 ", + "14aDDDbOOOgOOOcOOD0 ", + "05aDDDbDDDgODBcDOFhODB1OBZ ", + "05aDDDbDDDgODBcDOBhODB1OBZ ", + "01nOOO0 ", + "01nOOC0 ", + "01nOOE0 ", + "02aECCnOOO0 ", + "11nOOO0 ", + "12aECCnOOO0 ", + "02nOOOfOOO0 ", + "02nOOOeOOO0 ", + "02nOOCfOOE0 ", + "02nOOCeOOO0 ", + 
"02nOOEfOOC0 ", + "02nOOEeOOO0 ", + "03aECCnOOOeOOO0 ", + "02nOOOkOOO0 ", + "02nOOOlOOO0 ", + "02nOOOkOOD0 ", + "02nOOOlOOD0 ", + "03aECCnOOOkOOO0 ", + "03aECCnOOOkOOD0 ", + "12nOOOfOOO0 ", + "12nOOOfOOD0 ", + "12nOOOeOOO0 ", + "12nOOOeOOD0 ", + "13aECCnOOOeOOO0 ", + "13aECCnOOOeOOD0 ", + "02nOOObOOO0 ", + "02nOOCbOOD0 ", + "02nOOEbOOD0 ", + "02nOOEbOOO0 ", + "02nOOCbOOO0 ", + "02nOOObOOD0 ", + "02nOOOiOOO0 ", + "12nOOObOOO0 ", + "12nOOObOOD0 ", + "03nOOObOOOeOOO0 ", + "03nOOCbOODeOOC0 ", + "03nOOEbOODeOOE0 ", + "03nOOEbOOOeOOE0 ", + "03nOOCbOOOeOOC0 ", + "03nOOObOODeOOO0 ", + "03nOOObOOOkOOO0 ", + "03nOOObOOOkOOD0 ", + "03nOOObOODkOOD0 ", + "03nOOObOODkOOO0 ", + "03nOOOiOOOkOOO0 ", + "03nOOOiOODkOOD0 ", + "03nOOOiOOOeOOO0 ", + "03nOOOiOODeOOO0 ", + "13nOOObOOOeOOO0 ", + "13nOOObOOOeOOD0 ", + "13nOOObOODeOOD0 ", + "13nOOObOODeOOO0 ", + "03bOOOcOOOdOOO0 ", + "05aODDaDODbOOOcOOOdOOO0 ", + "04aDDDbOOOcOOOdOOO0 ", + "03bDODcODDdOOO0 ", + "04aDDDbDODcODDdOOO0 ", + "13bOOOcOOOdOOO0 ", + "04bOOOcOOOdOOOhDDD1YYY ", + "15aODDaDODbOOOcOOOdOOO0 ", + "06aODDaDODbOOOcOOOdOOOhBBB1ZZZ ", + "14aDDDbOOOcOOOdOOO0 ", + "13bDODcODDdOOO0 ", + "14aDDDbDODcODDdOOO0 ", + "04bOOOcOOOdOOOeOOO0 ", + "04bOOOcOOOdOOOeDDD0 ", + "06aODDaDODbOOOcOOOdOOOeOOO0 ", + "06aODDaDODbODDcDDOdOOOeFBF0 ", + "05aDDDbOOOcOOOdOOOeOOO0 ", + "04bDODcODDdOOOeBFF0 ", + "04bDODcODDdOOOeFBB0 ", + "05aDDDbDODcODDdOOOeFBB0 ", + "04bOOOcOOOdOOOlOOO0 ", + "06aODDaDODbOOOcOOOdOOOlOOO0 ", + "05aDDDbOOOcOOOdOOOlOOO0 ", + "04bOOOcOOOdOOOlDDD0 ", + "06aODDaDODbOOOcOOOdOOOlDDD0 ", + "05aDDDbDODcODDdOOOlBBB0 ", + "14bOOOcOOOdOOOeOOO0 ", + "05bOOOcOOOdOOOeOOOhDDD1YYY ", + "14bOOOcOOOdOOOeDDD0 ", + "05bOOOcOOOdOOOeDDDhDDD1YYY ", + "16aODDaDODbOOOcOOOdOOOeOOO0 ", + "16aODDaDODbOOOcOOOdOOOeDDD0 ", + "07aODDaDODbODDcDDOdOOOeFBFhBBB1ZZZ ", + "07aODDaDODbODDcDDOdOOOeFBFhFFF1XXX ", + "15aDDDbOOOcOOOdOOOeOOO0 ", + "15aDDDbDODcODDdOOOeFBB0 ", + "01dOOO0 ", + "11dOOO0 ", + "02dOOOfOOO0 ", + "02dOOOlOOO0 ", + "02dOOOlDDD0 ", + 
"12dOOOfOOO0 ", + "12dOOOfDDD0 ", +] + +''' +this table contains the screw axis and glide planes +which is used in calculating the systemtaic absences. +organized as follows: + +--> the key will be the space group number +--> first list has the glide plane in the + primary, secondary and tertiary direction +--> second list has screw axis in primary,secondary +and tertiary directions + +obv. this table only has the non-symmorphic groups +taken from international table of crystallography vol A +''' +SYS_AB = { + 4: [['', '', ''], ['', '2_1', '']], + 7: [['', 'c', ''], ['', '', '']], + 9: [['', 'c', ''], ['', '', '']], + 11: [['', '', ''], ['', '2_1', '']], + 13: [['', 'c', ''], ['', '', '']], + 14: [['', 'c', ''], ['', '2_1', '']], + 15: [['', 'c', ''], ['', '', '']], + 17: [['', '', ''], ['', '', '2_1']], + 18: [['', '', ''], ['2_1', '2_1', '']], + 19: [['', '', ''], ['2_1', '2_1', '2_1']], + 20: [['', '', ''], ['', '', '2_1']], + 24: [['', '', ''], ['2_1', '2_1', '2_1']], + 26: [['', 'c', ''], ['', '', '2_1']], + 27: [['c', 'c', ''], ['', '', '']], + 28: [['', 'a', ''], ['', '', '']], + 29: [['c', 'a', ''], ['', '', '2_1']], + 30: [['n', 'c', ''], ['', '', '']], + 31: [['', 'n', ''], ['', '', '2_1']], + 32: [['b', 'a', ''], ['', '', '']], + 33: [['n', 'a', ''], ['', '', '2_1']], + 34: [['n', 'n', ''], ['', '', '']], + 36: [['', 'c', ''], ['', '', '2_1']], + 37: [['c', 'c', ''], ['', '', '']], + 39: [['b', '', ''], ['', '', '']], + 40: [['', 'a', ''], ['', '', '']], + 41: [['b', 'a', ''], ['', '', '']], + 43: [['d', 'd', ''], ['', '', '']], + 45: [['b', 'a', ''], ['', '', '']], + 46: [['', 'a', ''], ['', '', '']], + 48: [['n', 'n', 'n'], ['', '', '']], + 49: [['c', 'c', ''], ['', '', '']], + 50: [['b', 'a', 'n'], ['', '', '']], + 51: [['', '', 'a'], ['2_1', '', '']], + 52: [['n', 'n', 'a'], ['', '2_1', '']], + 53: [['', 'n', 'a'], ['', '', '2_1']], + 54: [['c', 'c', 'a'], ['2_1', '', '']], + 55: [['b', 'a', ''], ['2_1', '2_1', '']], + 56: [['c', 'c', 'n'], ['2_1', 
'2_1', '']], + 57: [['b', 'c', ''], ['', '2_1', '2_1']], + 58: [['n', 'n', ''], ['2_1', '2_1', '']], + 59: [['', '', 'n'], ['2_1', '2_1', '']], + 60: [['b', 'c', 'n'], ['2_1', '', '2_1']], + 61: [['b', 'c', 'a'], ['2_1', '2_1', '2_1']], + 62: [['n', '', 'a'], ['2_1', '2_1', '2_1']], + 63: [['', 'c', ''], ['', '', '2_1']], + 64: [['', 'c', 'a'], ['', '', '2_1']], + 66: [['c', 'c', ''], ['', '', '']], + 67: [['', '', 'a'], ['', '', '']], + 68: [['c', 'c', 'a'], ['', '', '']], + 70: [['d', 'd', 'd'], ['', '', '']], + 72: [['b', 'a', ''], ['', '', '']], + 73: [['b', 'c', 'a'], ['2_1', '2_1', '2_1']], + 74: [['', '', 'a'], ['2_1', '2_1', '2_1']], + 76: [['', '', ''], ['4_1', '', '']], + 77: [['', '', ''], ['4_2', '', '']], + 78: [['', '', ''], ['4_3', '', '']], + 80: [['', '', ''], ['4_1', '', '']], + 84: [['', '', ''], ['4_2', '', '']], + 85: [['n', '', ''], ['', '', '']], + 86: [['n', '', ''], ['4_2', '', '']], + 88: [['a', '', ''], ['4_1', '', '']], + 90: [['', '', ''], ['', '2_1', '']], + 91: [['', '', ''], ['4_1', '', '']], + 92: [['', '', ''], ['4_1', '2_1', '']], + 93: [['', '', ''], ['4_2', '', '']], + 94: [['', '', ''], ['4_2', '2_1', '']], + 95: [['', '', ''], ['4_3', '', '']], + 96: [['', '', ''], ['4_3', '2_1', '']], + 98: [['', '', ''], ['4_1', '', '']], + 100: [['', 'b', ''], ['', '', '']], + 101: [['', 'c', ''], ['4_2', '', '']], + 102: [['', 'n', ''], ['4_2', '', '']], + 103: [['', 'c', 'c'], ['', '', '']], + 104: [['', 'n', 'c'], ['', '', '']], + 105: [['', '', 'c'], ['4_2', '', '']], + 106: [['', 'b', 'c'], ['4_2', '', '']], + 108: [['', 'c', ''], ['', '', '']], + 109: [['', '', 'd'], ['4_1', '', '']], + 110: [['', 'c', 'd'], ['4_1', '', '']], + 112: [['', '', 'c'], ['', '', '']], + 113: [['', '', ''], ['', '2_1', '']], + 114: [['', '', 'c'], ['', '2_1', '']], + 116: [['', 'c', ''], ['', '', '']], + 117: [['', 'b', ''], ['', '', '']], + 118: [['', 'n', ''], ['', '', '']], + 120: [['', 'c', ''], ['', '', '']], + 122: [['', '', 'd'], ['', '', '']], + 
124: [['', 'c', 'c'], ['', '', '']], + 125: [['n', 'b', ''], ['', '', '']], + 126: [['n', 'n', 'c'], ['', '', '']], + 127: [['', 'b', ''], ['', '2_1', '']], + 128: [['', 'n', 'c'], ['', '2_1', '']], + 129: [['n', '', ''], ['', '2_1', '']], + 130: [['n', 'c', 'c'], ['', '2_1', '']], + 131: [['', '', 'c'], ['4_2', '', '']], + 132: [['', 'c', ''], ['4_2', '', '']], + 133: [['n', 'b', 'c'], ['4_2', '', '']], + 134: [['n', 'n', ''], ['4_2', '', '']], + 135: [['', 'b', 'c'], ['4_2', '2_1', '']], + 136: [['', 'n', ''], ['4_2', '2_1', '']], + 137: [['n', '', 'c'], ['4_2', '2_1', '']], + 138: [['n', 'c', ''], ['4_2', '2_1', '']], + 140: [['', 'c', ''], ['', '', '']], + 141: [['a', '', 'd'], ['4_1', '', '']], + 142: [['a', 'c', 'd'], ['4_1', '', '']], + 144: [['', '', ''], ['3_1', '', '']], + 145: [['', '', ''], ['3_2', '', '']], + 151: [['', '', ''], ['3_1', '', '']], + 152: [['', '', ''], ['3_1', '', '']], + 153: [['', '', ''], ['3_2', '', '']], + 154: [['', '', ''], ['3_2', '', '']], + 158: [['', 'c', ''], ['', '', '']], + 159: [['', '', 'c'], ['', '', '']], + 161: [['', 'c', ''], ['', '', '']], + 163: [['', '', 'c'], ['', '', '']], + 165: [['', 'c', ''], ['', '', '']], + 167: [['', 'c', ''], ['', '', '']], + 169: [['', '', ''], ['6_1', '', '']], + 170: [['', '', ''], ['6_5', '', '']], + 171: [['', '', ''], ['6_2', '', '']], + 172: [['', '', ''], ['6_4', '', '']], + 173: [['', '', ''], ['6_3', '', '']], + 176: [['', '', ''], ['6_3', '', '']], + 178: [['', '', ''], ['6_1', '', '']], + 179: [['', '', ''], ['6_5', '', '']], + 180: [['', '', ''], ['6_2', '', '']], + 181: [['', '', ''], ['6_4', '', '']], + 182: [['', '', ''], ['6_3', '', '']], + 184: [['', 'c', 'c'], ['', '', '']], + 185: [['', 'c', ''], ['6_3', '', '']], + 186: [['', '', 'c'], ['6_3', '', '']], + 188: [['', 'c', ''], ['', '', '']], + 190: [['', '', 'c'], ['', '', '']], + 192: [['', 'c', 'c'], ['', '', '']], + 193: [['', 'c', ''], ['6_3', '', '']], + 194: [['', '', 'c'], ['6_3', '', '']], + 198: [['', '', ''], 
['2_1', '', '']], + 199: [['', '', ''], ['2_1', '', '']], + 201: [['n', '', ''], ['', '', '']], + 203: [['d', '', ''], ['', '', '']], + 205: [['a', '', ''], ['2_1', '', '']], + 206: [['a', '', ''], ['2_1', '', '']], + 208: [['', '', ''], ['4_2', '', '']], + 210: [['', '', ''], ['4_1', '', '']], + 212: [['', '', ''], ['4_3', '', '']], + 213: [['', '', ''], ['4_1', '', '']], + 214: [['', '', ''], ['4_1', '', '']], + 218: [['', '', 'n'], ['', '', '']], + 219: [['', '', 'c'], ['', '', '']], + 220: [['', '', 'd'], ['', '', '']], + 222: [['n', '', 'n'], ['', '', '']], + 223: [['', '', 'n'], ['4_2', '', '']], + 224: [['n', '', ''], ['4_2', '', '']], + 226: [['', '', 'c'], ['', '', '']], + 227: [['d', '', ''], ['4_1', '', '']], + 228: [['d', '', 'c'], ['4_1', '', '']], + 230: [['a', '', 'd'], ['4_1', '', '']], +} + +''' +this dictionary contains the generators encoded +in each letter of the generator string +the full symmetry is generated by the repeated +action of the generator matrix +''' + +''' rotational, inversions, mirrors etc. 
components +''' + +SYM_GENERATORS = {} + +# now start to fill them in +# identity +SYM_GENERATORS['a'] = np.zeros([3, 3]) +SYM_GENERATORS['a'] = np.eye(3) + +# 180@c +SYM_GENERATORS['b'] = np.zeros([3, 3]) +SYM_GENERATORS['b'][0, 0] = -1.0 +SYM_GENERATORS['b'][1, 1] = -1.0 +SYM_GENERATORS['b'][2, 2] = 1.0 + +# 180@b +SYM_GENERATORS['c'] = np.zeros([3, 3]) +SYM_GENERATORS['c'][0, 0] = -1.0 +SYM_GENERATORS['c'][1, 1] = 1.0 +SYM_GENERATORS['c'][2, 2] = -1.0 + +# 120@[111] +SYM_GENERATORS['d'] = np.zeros([3, 3]) +SYM_GENERATORS['d'][0, 2] = 1.0 +SYM_GENERATORS['d'][1, 0] = 1.0 +SYM_GENERATORS['d'][2, 1] = 1.0 + +# 180@[110] +SYM_GENERATORS['e'] = np.zeros([3, 3]) +SYM_GENERATORS['e'][0, 1] = 1.0 +SYM_GENERATORS['e'][1, 0] = 1.0 +SYM_GENERATORS['e'][2, 2] = -1.0 + +# +SYM_GENERATORS['f'] = np.zeros([3, 3]) +SYM_GENERATORS['f'][0, 1] = -1.0 +SYM_GENERATORS['f'][1, 0] = -1.0 +SYM_GENERATORS['f'][2, 2] = -1.0 + +# +SYM_GENERATORS['g'] = np.zeros([3, 3]) +SYM_GENERATORS['g'][0, 1] = -1.0 +SYM_GENERATORS['g'][1, 0] = 1.0 +SYM_GENERATORS['g'][2, 2] = 1.0 + +# inversion +SYM_GENERATORS['h'] = -np.eye(3) + +# c-mirror +SYM_GENERATORS['i'] = np.zeros([3, 3]) +SYM_GENERATORS['i'][0, 0] = 1.0 +SYM_GENERATORS['i'][1, 1] = 1.0 +SYM_GENERATORS['i'][2, 2] = -1.0 + +# b-mirror +SYM_GENERATORS['j'] = np.zeros([3, 3]) +SYM_GENERATORS['j'][0, 0] = 1.0 +SYM_GENERATORS['j'][1, 1] = -1.0 +SYM_GENERATORS['j'][2, 2] = 1.0 + +# 90@[001] +SYM_GENERATORS['k'] = np.zeros([3, 3]) +SYM_GENERATORS['k'][0, 1] = -1.0 +SYM_GENERATORS['k'][1, 0] = -1.0 +SYM_GENERATORS['k'][2, 2] = 1.0 + +# +SYM_GENERATORS['l'] = np.zeros([3, 3]) +SYM_GENERATORS['l'][0, 1] = 1.0 +SYM_GENERATORS['l'][1, 0] = 1.0 +SYM_GENERATORS['l'][2, 2] = 1.0 + +# +SYM_GENERATORS['m'] = np.zeros([3, 3]) +SYM_GENERATORS['m'][0, 1] = 1.0 +SYM_GENERATORS['m'][1, 0] = -1.0 +SYM_GENERATORS['m'][2, 2] = -1.0 + +# +SYM_GENERATORS['n'] = np.zeros([3, 3]) +SYM_GENERATORS['n'][0, 1] = -1.0 +SYM_GENERATORS['n'][1, 0] = 1.0 +SYM_GENERATORS['n'][1, 
1] = -1.0 +SYM_GENERATORS['n'][2, 2] = 1.0 + +''' translation components +''' +SYM_GENERATORS['A'] = 1.0 / 6.0 +SYM_GENERATORS['B'] = 1.0 / 4.0 +SYM_GENERATORS['C'] = 1.0 / 3.0 +SYM_GENERATORS['D'] = 1.0 / 2.0 +SYM_GENERATORS['E'] = 2.0 / 3.0 +SYM_GENERATORS['F'] = 3.0 / 4.0 +SYM_GENERATORS['G'] = 5.0 / 6.0 +SYM_GENERATORS['O'] = 0.0 +SYM_GENERATORS['X'] = -3.0 / 8.0 +SYM_GENERATORS['Y'] = -1.0 / 4.0 +SYM_GENERATORS['Z'] = -1.0 / 8.0 + +''' + @AUTHOR Saransh Singh, + Lawrence Livermore National Lab, + saransh1@llnl.gov + @DATE 11/23/2020 SS 1.0 original + @DETAIL. this list of symbols will help us to genrate + the point group symmetries in the cartesian + space for any point group. this is needed for + the supergroup symmetry usd in the coloring + scheme used in the package. this needs to be a + separate set of routines because the supergroup + can be a point group which is not the laue group + of the crystal (e.g. m-3 --> m-3m) the notation + used will be the same as the one used for the + space group without any translations. 
+''' +SYM_GL_PG = { + 'c1': '1a', # only identity rotation + 'ci': '1h', # only inversion operation + 'c2': '1c', # 2-fold rotation about z + 'cs': '1j', + 'c2h': '2ch', + 'd2': '2bc', + 'c2v': '2bj', + 'd2h': '3bch', + 'c4': '1g', + 's4': '1m', + 'c4h': '2gh', + 'd4': '2cg', + 'c4v': '2gj', + 'd2d': '2cm', + 'd4h': '3cgh', + 'c3': '1n', + 's6': '2hn', + 'd3': '2en', + 'c3v': '2kn ', + 'd3d': '3fhn', + 'c6': '2bn', + 'c3h': '2in', + 'c6h': '3bhn', + 'd6': '3ben', + 'c6v': '3bkn', + 'd3h': '3ikn', + 'd6h': '4benh', + 't': '2cd', + 'th': '3cdh', + 'o': '2dg', + 'td': '2dm', + 'oh': '3dgh', +} +# The above dict must be in the correct order for this to work +SYM_PG_to_PGNUM = {pg: i + 1 for i, pg in enumerate(SYM_GL_PG)} +SYM_PGNUM_to_PG = {v: k for k, v in SYM_PG_to_PGNUM.items()} + +# Set the __version__ variable +try: + __version__ = version('hexrd') +except PackageNotFoundError: + __version__ = None diff --git a/hexrd/convolution/__init__.py b/hexrd/core/convolution/__init__.py similarity index 99% rename from hexrd/convolution/__init__.py rename to hexrd/core/convolution/__init__.py index 617a37458..f07eaedf6 100644 --- a/hexrd/convolution/__init__.py +++ b/hexrd/core/convolution/__init__.py @@ -3,4 +3,3 @@ from .convolve import convolve - diff --git a/hexrd/convolution/convolve.py b/hexrd/core/convolution/convolve.py similarity index 68% rename from hexrd/convolution/convolve.py rename to hexrd/core/convolution/convolve.py index ebc0b3e35..e33716e9c 100644 --- a/hexrd/convolution/convolve.py +++ b/hexrd/core/convolution/convolve.py @@ -17,7 +17,9 @@ try: _convolve = load_library("_convolve", LIBRARY_PATH) except Exception: - raise ImportError("Convolution C extension is missing. Try re-building astropy.") + raise ImportError( + "Convolution C extension is missing. Try re-building astropy." 
+ ) # The GIL is automatically released by default when calling functions imported # from libraries loaded by ctypes.cdll.LoadLibrary() @@ -26,19 +28,23 @@ # Boundary None _convolveNd_c = _convolve.convolveNd_c _convolveNd_c.restype = None -_convolveNd_c.argtypes = [ndpointer(ctypes.c_double, flags={"C_CONTIGUOUS", "WRITEABLE"}), # return array - ndpointer(ctypes.c_double, flags="C_CONTIGUOUS"), # input array - ctypes.c_uint, # N dim - # size array for input and result unless - # embed_result_within_padded_region is False, - # in which case the result array is assumed to be - # input.shape - 2*(kernel.shape//2). Note: integer division. - ndpointer(ctypes.c_size_t, flags="C_CONTIGUOUS"), - ndpointer(ctypes.c_double, flags="C_CONTIGUOUS"), # kernel array - ndpointer(ctypes.c_size_t, flags="C_CONTIGUOUS"), # size array for kernel - ctypes.c_bool, # nan_interpolate - ctypes.c_bool, # embed_result_within_padded_region - ctypes.c_uint] # n_threads +_convolveNd_c.argtypes = [ + ndpointer( + ctypes.c_double, flags={"C_CONTIGUOUS", "WRITEABLE"} + ), # return array + ndpointer(ctypes.c_double, flags="C_CONTIGUOUS"), # input array + ctypes.c_uint, # N dim + # size array for input and result unless + # embed_result_within_padded_region is False, + # in which case the result array is assumed to be + # input.shape - 2*(kernel.shape//2). Note: integer division. 
+ ndpointer(ctypes.c_size_t, flags="C_CONTIGUOUS"), + ndpointer(ctypes.c_double, flags="C_CONTIGUOUS"), # kernel array + ndpointer(ctypes.c_size_t, flags="C_CONTIGUOUS"), # size array for kernel + ctypes.c_bool, # nan_interpolate + ctypes.c_bool, # embed_result_within_padded_region + ctypes.c_uint, +] # n_threads # Disabling all doctests in this module until a better way of handling warnings # in doctests can be determined @@ -48,8 +54,15 @@ MAX_NORMALIZATION = 100 -def _copy_input_if_needed(input, dtype=float, order='C', nan_treatment=None, - mask=None, fill_value=None): + +def _copy_input_if_needed( + input, + dtype=float, + order='C', + nan_treatment=None, + mask=None, + fill_value=None, +): # strip quantity attributes if hasattr(input, 'unit'): input = input.value @@ -59,7 +72,11 @@ def _copy_input_if_needed(input, dtype=float, order='C', nan_treatment=None, # Anything that's masked must be turned into NaNs for the interpolation. # This requires copying. A copy is also needed for nan_treatment == 'fill' # A copy prevents possible function side-effects of the input array. - if nan_treatment == 'fill' or np.ma.is_masked(input) or mask is not None: + if ( + nan_treatment == 'fill' + or np.ma.is_masked(input) + or mask is not None + ): if np.ma.is_masked(input): # ``np.ma.maskedarray.filled()`` returns a copy, however there # is no way to specify the return type or order etc. In addition @@ -68,12 +85,16 @@ def _copy_input_if_needed(input, dtype=float, order='C', nan_treatment=None, # ``float`` masked arrays. ``subok=True`` is needed to retain # ``np.ma.maskedarray.filled()``. ``copy=False`` allows the fill # to act as the copy if type and order are already correct. 
- output = np.array(input, dtype=dtype, copy=False, order=order, subok=True) + output = np.array( + input, dtype=dtype, copy=False, order=order, subok=True + ) output = output.filled(fill_value) else: # Since we're making a copy, we might as well use `subok=False` to save, # what is probably, a negligible amount of memory. - output = np.array(input, dtype=dtype, copy=True, order=order, subok=False) + output = np.array( + input, dtype=dtype, copy=True, order=order, subok=False + ) if mask is not None: # mask != 0 yields a bool mask for all ints/floats/bool @@ -83,16 +104,29 @@ def _copy_input_if_needed(input, dtype=float, order='C', nan_treatment=None, # The advantage of `subok=True` is that it won't copy when array is an ndarray subclass. If it # is and `subok=False` (default), then it will copy even if `copy=False`. This uses less memory # when ndarray subclasses are passed in. - output = np.array(input, dtype=dtype, copy=False, order=order, subok=True) + output = np.array( + input, dtype=dtype, copy=False, order=order, subok=True + ) except (TypeError, ValueError) as e: - raise TypeError('input should be a Numpy array or something ' - 'convertible into a float array', e) + raise TypeError( + 'input should be a Numpy array or something ' + 'convertible into a float array', + e, + ) return output -def convolve(array, kernel, boundary='fill', fill_value=0., - nan_treatment='interpolate', normalize_kernel=True, mask=None, - preserve_nan=False, normalization_zero_tol=1e-8): +def convolve( + array, + kernel, + boundary='fill', + fill_value=0.0, + nan_treatment='interpolate', + normalize_kernel=True, + mask=None, + preserve_nan=False, + normalization_zero_tol=1e-8, +): """ Convolve an array with a kernel. 
@@ -167,8 +201,11 @@ def convolve(array, kernel, boundary='fill', fill_value=0., """ if boundary not in BOUNDARY_OPTIONS: - raise ValueError("Invalid boundary option: must be one of {}" - .format(BOUNDARY_OPTIONS)) + raise ValueError( + "Invalid boundary option: must be one of {}".format( + BOUNDARY_OPTIONS + ) + ) if nan_treatment not in ('interpolate', 'fill'): raise ValueError("nan_treatment must be one of 'interpolate','fill'") @@ -195,14 +232,24 @@ def convolve(array, kernel, boundary='fill', fill_value=0., # Convert kernel to ndarray if not already # Copy or alias array to array_internal - array_internal = _copy_input_if_needed(passed_array, dtype=float, order='C', - nan_treatment=nan_treatment, mask=mask, - fill_value=np.nan) + array_internal = _copy_input_if_needed( + passed_array, + dtype=float, + order='C', + nan_treatment=nan_treatment, + mask=mask, + fill_value=np.nan, + ) array_dtype = getattr(passed_array, 'dtype', array_internal.dtype) # Copy or alias kernel to kernel_internal - kernel_internal = _copy_input_if_needed(passed_kernel, dtype=float, order='C', - nan_treatment=None, mask=None, - fill_value=fill_value) + kernel_internal = _copy_input_if_needed( + passed_kernel, + dtype=float, + order='C', + nan_treatment=None, + mask=None, + fill_value=fill_value, + ) # Make sure kernel has all odd axes if has_even_axis(kernel_internal): @@ -219,15 +266,18 @@ def convolve(array, kernel, boundary='fill', fill_value=0., if array_internal.ndim == 0: raise Exception("cannot convolve 0-dimensional arrays") elif array_internal.ndim > 3: - raise NotImplementedError('convolve only supports 1, 2, and 3-dimensional ' - 'arrays at this time') + raise NotImplementedError( + 'convolve only supports 1, 2, and 3-dimensional ' + 'arrays at this time' + ) elif array_internal.ndim != kernel_internal.ndim: - raise Exception('array and kernel have differing number of ' - 'dimensions.') + raise Exception( + 'array and kernel have differing number of ' 'dimensions.' 
+ ) array_shape = np.array(array_internal.shape) kernel_shape = np.array(kernel_internal.shape) - pad_width = kernel_shape//2 + pad_width = kernel_shape // 2 # For boundary=None only the center space is convolved. All array indices within a # distance kernel.shape//2 from the edge are completely ignored (zeroed). @@ -239,25 +289,34 @@ def convolve(array, kernel, boundary='fill', fill_value=0., # For odd kernels it is: # array_shape >= kernel_shape OR array_shape > kernel_shape-1 OR array_shape > 2*(kernel_shape//2). # Since the latter is equal to the former two for even lengths, the latter condition is complete. - if boundary is None and not np.all(array_shape > 2*pad_width): - raise KernelSizeError("for boundary=None all kernel axes must be smaller than array's - " - "use boundary in ['fill', 'extend', 'wrap'] instead.") + if boundary is None and not np.all(array_shape > 2 * pad_width): + raise KernelSizeError( + "for boundary=None all kernel axes must be smaller than array's - " + "use boundary in ['fill', 'extend', 'wrap'] instead." + ) # NaN interpolation significantly slows down the C convolution # computation. Since nan_treatment = 'interpolate', is the default # check whether it is even needed, if not, don't interpolate. # NB: np.isnan(array_internal.sum()) is faster than np.isnan(array_internal).any() - nan_interpolate = (nan_treatment == 'interpolate') and np.isnan(array_internal.sum()) + nan_interpolate = (nan_treatment == 'interpolate') and np.isnan( + array_internal.sum() + ) # Check if kernel is normalizable if normalize_kernel or nan_interpolate: kernel_sum = kernel_internal.sum() - kernel_sums_to_zero = np.isclose(kernel_sum, 0, atol=normalization_zero_tol) - - if kernel_sum < 1. / MAX_NORMALIZATION or kernel_sums_to_zero: - raise ValueError("The kernel can't be normalized, because its sum is " - "close to zero. The sum of the given kernel is < {}" - .format(1. 
/ MAX_NORMALIZATION)) + kernel_sums_to_zero = np.isclose( + kernel_sum, 0, atol=normalization_zero_tol + ) + + if kernel_sum < 1.0 / MAX_NORMALIZATION or kernel_sums_to_zero: + raise ValueError( + "The kernel can't be normalized, because its sum is " + "close to zero. The sum of the given kernel is < {}".format( + 1.0 / MAX_NORMALIZATION + ) + ) # Mark the NaN values so we can replace them later if interpolate_nan is # not set @@ -276,20 +335,35 @@ def convolve(array, kernel, boundary='fill', fill_value=0., embed_result_within_padded_region = False if boundary == 'fill': # This method is faster than using numpy.pad(..., mode='constant') - array_to_convolve = np.full(array_shape + 2*pad_width, fill_value=fill_value, dtype=float, order='C') + array_to_convolve = np.full( + array_shape + 2 * pad_width, + fill_value=fill_value, + dtype=float, + order='C', + ) # Use bounds [pad_width[0]:array_shape[0]+pad_width[0]] instead of [pad_width[0]:-pad_width[0]] # to account for when the kernel has size of 1 making pad_width = 0. 
if array_internal.ndim == 1: - array_to_convolve[pad_width[0]:array_shape[0]+pad_width[0]] = array_internal + array_to_convolve[ + pad_width[0] : array_shape[0] + pad_width[0] + ] = array_internal elif array_internal.ndim == 2: - array_to_convolve[pad_width[0]:array_shape[0]+pad_width[0], - pad_width[1]:array_shape[1]+pad_width[1]] = array_internal + array_to_convolve[ + pad_width[0] : array_shape[0] + pad_width[0], + pad_width[1] : array_shape[1] + pad_width[1], + ] = array_internal else: - array_to_convolve[pad_width[0]:array_shape[0]+pad_width[0], - pad_width[1]:array_shape[1]+pad_width[1], - pad_width[2]:array_shape[2]+pad_width[2]] = array_internal + array_to_convolve[ + pad_width[0] : array_shape[0] + pad_width[0], + pad_width[1] : array_shape[1] + pad_width[1], + pad_width[2] : array_shape[2] + pad_width[2], + ] = array_internal else: - np_pad_mode_dict = {'fill': 'constant', 'extend': 'edge', 'wrap': 'wrap'} + np_pad_mode_dict = { + 'fill': 'constant', + 'extend': 'edge', + 'wrap': 'wrap', + } np_pad_mode = np_pad_mode_dict[boundary] pad_width = kernel_shape // 2 @@ -298,18 +372,27 @@ def convolve(array, kernel, boundary='fill', fill_value=0., elif array_internal.ndim == 2: np_pad_width = ((pad_width[0],), (pad_width[1],)) else: - np_pad_width = ((pad_width[0],), (pad_width[1],), (pad_width[2],)) - - array_to_convolve = np.pad(array_internal, pad_width=np_pad_width, - mode=np_pad_mode) - - _convolveNd_c(result, array_to_convolve, - array_to_convolve.ndim, - np.array(array_to_convolve.shape, dtype=ctypes.c_size_t, order='C'), - kernel_internal, - np.array(kernel_shape, dtype=ctypes.c_size_t, order='C'), - nan_interpolate, embed_result_within_padded_region, - n_threads) + np_pad_width = ( + (pad_width[0],), + (pad_width[1],), + (pad_width[2],), + ) + + array_to_convolve = np.pad( + array_internal, pad_width=np_pad_width, mode=np_pad_mode + ) + + _convolveNd_c( + result, + array_to_convolve, + array_to_convolve.ndim, + np.array(array_to_convolve.shape, 
dtype=ctypes.c_size_t, order='C'), + kernel_internal, + np.array(kernel_shape, dtype=ctypes.c_size_t, order='C'), + nan_interpolate, + embed_result_within_padded_region, + n_threads, + ) # So far, normalization has only occured for nan_treatment == 'interpolate' # because this had to happen within the C extension so as to ignore @@ -321,10 +404,12 @@ def convolve(array, kernel, boundary='fill', fill_value=0., result *= kernel_sum if nan_interpolate and not preserve_nan and np.isnan(result.sum()): - warnings.warn("nan_treatment='interpolate', however, NaN values detected " - "post convolution. A contiguous region of NaN values, larger " - "than the kernel size, are present in the input array. " - "Increase the kernel size to avoid this.") + warnings.warn( + "nan_treatment='interpolate', however, NaN values detected " + "post convolution. A contiguous region of NaN values, larger " + "than the kernel size, are present in the input array. " + "Increase the kernel size to avoid this." + ) if preserve_nan: result[initially_nan] = np.nan @@ -339,6 +424,3 @@ def convolve(array, kernel, boundary='fill', fill_value=0., return result.astype(array_dtype) else: return result - - - diff --git a/hexrd/convolution/src/convolve.c b/hexrd/core/convolution/src/convolve.c similarity index 100% rename from hexrd/convolution/src/convolve.c rename to hexrd/core/convolution/src/convolve.c diff --git a/hexrd/convolution/src/convolve.h b/hexrd/core/convolution/src/convolve.h similarity index 100% rename from hexrd/convolution/src/convolve.h rename to hexrd/core/convolution/src/convolve.h diff --git a/hexrd/convolution/utils.py b/hexrd/core/convolution/utils.py similarity index 99% rename from hexrd/convolution/utils.py rename to hexrd/core/convolution/utils.py index 4283c3985..485ad80df 100644 --- a/hexrd/convolution/utils.py +++ b/hexrd/core/convolution/utils.py @@ -3,6 +3,7 @@ import ctypes import numpy as np + class DiscretizationError(Exception): """ Called when discretization of 
models goes wrong. @@ -23,4 +24,4 @@ def has_even_axis(array): def raise_even_kernel_exception(): - raise KernelSizeError("Kernel size must be odd in all axes.") \ No newline at end of file + raise KernelSizeError("Kernel size must be odd in all axes.") diff --git a/hexrd/deprecation.py b/hexrd/core/deprecation.py similarity index 99% rename from hexrd/deprecation.py rename to hexrd/core/deprecation.py index 0ac51b271..ed9f9fc94 100644 --- a/hexrd/deprecation.py +++ b/hexrd/core/deprecation.py @@ -4,6 +4,7 @@ class DeprecatedFunctionError(Exception): """Custom exception for deprecated functions.""" + pass diff --git a/hexrd/distortion/__init__.py b/hexrd/core/distortion/__init__.py similarity index 99% rename from hexrd/distortion/__init__.py rename to hexrd/core/distortion/__init__.py index 5931e0e7e..d01fff19f 100644 --- a/hexrd/distortion/__init__.py +++ b/hexrd/core/distortion/__init__.py @@ -1,4 +1,5 @@ """Distortion package (python 3)""" + import abc import pkgutil from importlib import import_module diff --git a/hexrd/distortion/dexela_2923.py b/hexrd/core/distortion/dexela_2923.py similarity index 88% rename from hexrd/distortion/dexela_2923.py rename to hexrd/core/distortion/dexela_2923.py index bff2a0cd3..a26407c5d 100644 --- a/hexrd/distortion/dexela_2923.py +++ b/hexrd/core/distortion/dexela_2923.py @@ -7,7 +7,7 @@ import numpy as np import numba -from hexrd import constants +from hexrd.core import constants from .distortionabc import DistortionABC from .registry import _RegisterDistortionClass @@ -38,9 +38,7 @@ def apply(self, xy_in): return xy_in else: xy_out = np.empty_like(xy_in) - _dexela_2923_distortion( - xy_out, xy_in, np.asarray(self.params) - ) + _dexela_2923_distortion(xy_out, xy_in, np.asarray(self.params)) return xy_out def apply_inverse(self, xy_in): @@ -56,9 +54,9 @@ def apply_inverse(self, xy_in): def _find_quadrant(xy_in): quad_label = np.zeros(len(xy_in), dtype=int) - in_2_or_3 = xy_in[:, 0] < 0. 
+ in_2_or_3 = xy_in[:, 0] < 0.0 in_1_or_4 = ~in_2_or_3 - in_3_or_4 = xy_in[:, 1] < 0. + in_3_or_4 = xy_in[:, 1] < 0.0 in_1_or_2 = ~in_3_or_4 quad_label[np.logical_and(in_1_or_4, in_1_or_2)] = 1 quad_label[np.logical_and(in_2_or_3, in_1_or_2)] = 2 @@ -71,15 +69,15 @@ def _find_quadrant(xy_in): def _dexela_2923_distortion(out_, in_, params): for el in range(len(in_)): xi, yi = in_[el, :] - if xi < 0.: - if yi < 0.: + if xi < 0.0: + if yi < 0.0: # 3rd quadrant out_[el, :] = in_[el, :] + params[4:6] else: # 2nd quadrant out_[el, :] = in_[el, :] + params[2:4] else: - if yi < 0.: + if yi < 0.0: # 4th quadrant out_[el, :] = in_[el, :] + params[6:8] else: @@ -91,18 +89,17 @@ def _dexela_2923_distortion(out_, in_, params): def _dexela_2923_inverse_distortion(out_, in_, params): for el in range(len(in_)): xi, yi = in_[el, :] - if xi < 0.: - if yi < 0.: + if xi < 0.0: + if yi < 0.0: # 3rd quadrant out_[el, :] = in_[el, :] - params[4:6] else: # 2nd quadrant out_[el, :] = in_[el, :] - params[2:4] else: - if yi < 0.: + if yi < 0.0: # 4th quadrant out_[el, :] = in_[el, :] - params[6:8] else: # 1st quadrant out_[el, :] = in_[el, :] - params[0:2] - diff --git a/hexrd/distortion/dexela_2923_quad.py b/hexrd/core/distortion/dexela_2923_quad.py similarity index 98% rename from hexrd/distortion/dexela_2923_quad.py rename to hexrd/core/distortion/dexela_2923_quad.py index 6f61f3961..2c9ae55e1 100644 --- a/hexrd/distortion/dexela_2923_quad.py +++ b/hexrd/core/distortion/dexela_2923_quad.py @@ -1,6 +1,5 @@ import numpy as np import numba -from hexrd import constants from .distortionabc import DistortionABC from .registry import _RegisterDistortionClass diff --git a/hexrd/distortion/distortionabc.py b/hexrd/core/distortion/distortionabc.py similarity index 100% rename from hexrd/distortion/distortionabc.py rename to hexrd/core/distortion/distortionabc.py diff --git a/hexrd/distortion/ge_41rt.py b/hexrd/core/distortion/ge_41rt.py similarity index 97% rename from hexrd/distortion/ge_41rt.py 
rename to hexrd/core/distortion/ge_41rt.py index 8c52aee95..681d2ac66 100644 --- a/hexrd/distortion/ge_41rt.py +++ b/hexrd/core/distortion/ge_41rt.py @@ -1,4 +1,5 @@ """GE41RT Detector Distortion""" + from typing import List import numpy as np @@ -8,8 +9,8 @@ from .registry import _RegisterDistortionClass from .utils import newton -from hexrd import constants as cnst -from hexrd.extensions import inverse_distortion +from hexrd.core import constants as cnst +from hexrd.core.extensions import inverse_distortion RHO_MAX = 204.8 # max radius in mm for ge detector diff --git a/hexrd/distortion/identity.py b/hexrd/core/distortion/identity.py similarity index 99% rename from hexrd/distortion/identity.py rename to hexrd/core/distortion/identity.py index 801387b82..c6bcc8c13 100644 --- a/hexrd/distortion/identity.py +++ b/hexrd/core/distortion/identity.py @@ -2,6 +2,7 @@ Simple class that returns it's input. """ + from .distortionabc import DistortionABC from .registry import _RegisterDistortionClass diff --git a/hexrd/distortion/nyi.py b/hexrd/core/distortion/nyi.py similarity index 99% rename from hexrd/distortion/nyi.py rename to hexrd/core/distortion/nyi.py index bc1421217..4105852cc 100644 --- a/hexrd/distortion/nyi.py +++ b/hexrd/core/distortion/nyi.py @@ -2,6 +2,7 @@ To illustrate error when abstract method is not implemented """ + from .distortionabc import DistortionABC from .registry import _RegisterDistortionClass diff --git a/hexrd/distortion/registry.py b/hexrd/core/distortion/registry.py similarity index 99% rename from hexrd/distortion/registry.py rename to hexrd/core/distortion/registry.py index a36f2f7d0..28dc3a49e 100644 --- a/hexrd/distortion/registry.py +++ b/hexrd/core/distortion/registry.py @@ -1,4 +1,5 @@ """Distortion package (python 3)""" + import abc __all__ = ['maptypes', 'get_mapping'] @@ -13,6 +14,7 @@ def __init__(cls, name, bases, attrs): class Registry(object): """Registry for imageseries adapters""" + distortion_registry = dict() 
@classmethod diff --git a/hexrd/distortion/utils.py b/hexrd/core/distortion/utils.py old mode 100755 new mode 100644 similarity index 100% rename from hexrd/distortion/utils.py rename to hexrd/core/distortion/utils.py diff --git a/hexrd/core/extensions/__init__.py b/hexrd/core/extensions/__init__.py new file mode 100644 index 000000000..424934712 --- /dev/null +++ b/hexrd/core/extensions/__init__.py @@ -0,0 +1,3 @@ +from . import _new_transforms_capi +from . import _transforms_CAPI +from . import inverse_distortion diff --git a/hexrd/fitting/__init__.py b/hexrd/core/fitting/__init__.py similarity index 94% rename from hexrd/fitting/__init__.py rename to hexrd/core/fitting/__init__.py index 139c20cbd..cb35de12f 100644 --- a/hexrd/fitting/__init__.py +++ b/hexrd/core/fitting/__init__.py @@ -27,7 +27,8 @@ """ Functions for peak fitting """ -from . import grains +# TODO: Resolve extra-workflow dependency +from hexrd.hedm.fitting import grains fitGrain = grains.fitGrain objFuncFitGrain = grains.objFuncFitGrain diff --git a/hexrd/fitting/calibration/__init__.py b/hexrd/core/fitting/calibration/__init__.py similarity index 100% rename from hexrd/fitting/calibration/__init__.py rename to hexrd/core/fitting/calibration/__init__.py index f9a099541..7ad5cd25f 100644 --- a/hexrd/fitting/calibration/__init__.py +++ b/hexrd/core/fitting/calibration/__init__.py @@ -1,9 +1,9 @@ -from .grain import GrainCalibrator from .instrument import InstrumentCalibrator from .laue import LaueCalibrator from .lmfit_param_handling import fix_detector_y from .powder import PowderCalibrator from .structureless import StructurelessCalibrator +from .grain import GrainCalibrator # For backward-compatibility, since it used to be named this: StructureLessCalibrator = StructurelessCalibrator diff --git a/hexrd/fitting/calibration/abstract_grain.py b/hexrd/core/fitting/calibration/abstract_grain.py similarity index 90% rename from hexrd/fitting/calibration/abstract_grain.py rename to 
hexrd/core/fitting/calibration/abstract_grain.py index dfc5e0262..5ee41ed1b 100644 --- a/hexrd/fitting/calibration/abstract_grain.py +++ b/hexrd/core/fitting/calibration/abstract_grain.py @@ -4,13 +4,10 @@ import lmfit import numpy as np -import hexrd.constants as cnst -from hexrd.rotations import ( - angleAxisOfRotMat, - RotMatEuler, -) -from hexrd.transforms import xfcapi -from hexrd.utils.hkl import hkl_to_str, str_to_hkl +import hexrd.core.constants as cnst +from hexrd.core.rotations import angleAxisOfRotMat, RotMatEuler +from hexrd.core.transforms import xfcapi +from hexrd.core.utils.hkl import hkl_to_str, str_to_hkl from .calibrator import Calibrator from .lmfit_param_handling import ( @@ -23,9 +20,15 @@ class AbstractGrainCalibrator(Calibrator): - def __init__(self, instr, material, grain_params, - default_refinements=None, calibration_picks=None, - euler_convention=DEFAULT_EULER_CONVENTION): + def __init__( + self, + instr, + material, + grain_params, + default_refinements=None, + calibration_picks=None, + euler_convention=DEFAULT_EULER_CONVENTION, + ): self.instr = instr self.material = material self.grain_params = grain_params @@ -113,7 +116,12 @@ def grain_params_euler(self, v): # Grain parameters with orientation set using Euler angle convention grain_params = v.copy() if self.euler_convention is not None: - rme = RotMatEuler(np.zeros(3,), **self.euler_convention) + rme = RotMatEuler( + np.zeros( + 3, + ), + **self.euler_convention + ) rme.angles = np.radians(grain_params[:3]) phi, n = angleAxisOfRotMat(rme.rmat) grain_params[:3] = phi * n.flatten() diff --git a/hexrd/fitting/calibration/calibrator.py b/hexrd/core/fitting/calibration/calibrator.py similarity index 100% rename from hexrd/fitting/calibration/calibrator.py rename to hexrd/core/fitting/calibration/calibrator.py diff --git a/hexrd/fitting/calibration/grain.py b/hexrd/core/fitting/calibration/grain.py similarity index 77% rename from hexrd/fitting/calibration/grain.py rename to 
hexrd/core/fitting/calibration/grain.py index de3f99107..ed8fb4fcf 100644 --- a/hexrd/fitting/calibration/grain.py +++ b/hexrd/core/fitting/calibration/grain.py @@ -2,13 +2,15 @@ import numpy as np -from hexrd import matrixutil as mutil -from hexrd.rotations import angularDifference -from hexrd.transforms import xfcapi -from hexrd import xrdutil +from hexrd.core import matrixutil as mutil +from hexrd.core.rotations import angularDifference +from hexrd.core.transforms import xfcapi +from hexrd.hedm import xrdutil from .abstract_grain import AbstractGrainCalibrator -from .lmfit_param_handling import DEFAULT_EULER_CONVENTION +from .lmfit_param_handling import ( + DEFAULT_EULER_CONVENTION, +) from .. import grains as grainutil logger = logging.getLogger(__name__) @@ -16,14 +18,27 @@ class GrainCalibrator(AbstractGrainCalibrator): """This is for HEDM grain calibration""" + type = 'grain' - def __init__(self, instr, material, grain_params, ome_period, - index=0, default_refinements=None, calibration_picks=None, - euler_convention=DEFAULT_EULER_CONVENTION): + def __init__( + self, + instr, + material, + grain_params, + ome_period, + index=0, + default_refinements=None, + calibration_picks=None, + euler_convention=DEFAULT_EULER_CONVENTION, + ): super().__init__( - instr, material, grain_params, default_refinements, - calibration_picks, euler_convention, + instr, + material, + grain_params, + default_refinements, + calibration_picks, + euler_convention, ) self.ome_period = ome_period self.index = index @@ -59,22 +74,32 @@ def residual(self): pick_hkls_dict, pick_xys_dict = self._evaluate() return sxcal_obj_func( - [self.grain_params], self.instr, pick_xys_dict, pick_hkls_dict, - self.bmatx, self.ome_period + [self.grain_params], + self.instr, + pick_xys_dict, + pick_hkls_dict, + self.bmatx, + self.ome_period, ) def model(self): pick_hkls_dict, pick_xys_dict = self._evaluate() return sxcal_obj_func( - [self.grain_params], self.instr, pick_xys_dict, pick_hkls_dict, - 
self.bmatx, self.ome_period, sim_only=True + [self.grain_params], + self.instr, + pick_xys_dict, + pick_hkls_dict, + self.bmatx, + self.ome_period, + sim_only=True, ) # Objective function for multigrain fitting -def sxcal_obj_func(grain_params, instr, xyo_det, hkls_idx, - bmat, ome_period, sim_only=False): +def sxcal_obj_func( + grain_params, instr, xyo_det, hkls_idx, bmat, ome_period, sim_only=False +): ngrains = len(grain_params) # assign some useful params @@ -110,7 +135,7 @@ def sxcal_obj_func(grain_params, instr, xyo_det, hkls_idx, xy_unwarped[det_key].append(xyo[:, :2]) meas_omes[det_key].append(xyo[:, 2]) - if panel.distortion is not None: # do unwarping + if panel.distortion is not None: # do unwarping xy_unwarped[det_key][ig] = panel.distortion.apply( xy_unwarped[det_key][ig] ) @@ -142,18 +167,20 @@ def sxcal_obj_func(grain_params, instr, xyo_det, hkls_idx, chi, rmat_c, bmat, corrected_wavelength, vInv=vinv_s, beamVec=bvec, - omePeriod=ome_period) + omePeriod=ome_period, + ) rmat_s_arr = xfcapi.make_sample_rmat( chi, np.ascontiguousarray(calc_omes_tmp) ) calc_xy_tmp = xfcapi.gvec_to_xy( - ghat_c.T, rmat_d, rmat_s_arr, rmat_c, - tvec_d, tvec_s, tvec_c + ghat_c.T, rmat_d, rmat_s_arr, rmat_c, tvec_d, tvec_s, tvec_c ) if np.any(np.isnan(calc_xy_tmp)): - logger.warning("infeasible parameters: may want to scale back " - "finite difference step size") + logger.warning( + "infeasible parameters: may want to scale back " + "finite difference step size" + ) calc_omes[det_key].append(calc_omes_tmp) calc_xy[det_key].append(calc_xy_tmp) @@ -188,7 +215,6 @@ def sxcal_obj_func(grain_params, instr, xyo_det, hkls_idx, diff_vecs_xy = calc_xy_all - meas_xy_all diff_ome = angularDifference(calc_omes_all, meas_omes_all) retval = np.hstack( - [diff_vecs_xy, - diff_ome.reshape(npts_tot, 1)] + [diff_vecs_xy, diff_ome.reshape(npts_tot, 1)] ).flatten() return retval diff --git a/hexrd/fitting/calibration/instrument.py b/hexrd/core/fitting/calibration/instrument.py similarity index 
86% rename from hexrd/fitting/calibration/instrument.py rename to hexrd/core/fitting/calibration/instrument.py index 470d5d50c..baf8b9bb8 100644 --- a/hexrd/fitting/calibration/instrument.py +++ b/hexrd/core/fitting/calibration/instrument.py @@ -26,9 +26,13 @@ def _normalized_ssqr(resd): class InstrumentCalibrator: - def __init__(self, *args, engineering_constraints=None, - euler_convention=DEFAULT_EULER_CONVENTION, - relative_constraints_type=RelativeConstraintsType.none): + def __init__( + self, + *args, + engineering_constraints=None, + euler_convention=DEFAULT_EULER_CONVENTION, + relative_constraints_type=RelativeConstraintsType.none, + ): """ Model for instrument calibration class as a function of @@ -48,17 +52,19 @@ def __init__(self, *args, engineering_constraints=None, assert len(args) > 0, "must have at least one calibrator" self.calibrators = args for calib in self.calibrators: - assert calib.instr is self.instr, \ - "all calibrators must refer to the same instrument" + assert ( + calib.instr is self.instr + ), "all calibrators must refer to the same instrument" self._engineering_constraints = engineering_constraints self._relative_constraints = create_relative_constraints( - relative_constraints_type, self.instr) + relative_constraints_type, self.instr + ) self.euler_convention = euler_convention self.params = self.make_lmfit_params() - self.fitter = lmfit.Minimizer(self.minimizer_function, - self.params, - nan_policy='omit') + self.fitter = lmfit.Minimizer( + self.minimizer_function, self.params, nan_policy='omit' + ) def make_lmfit_params(self): params = create_instr_params( @@ -125,10 +131,9 @@ def minimize(self, method='least_squares', odict=None): result = self.fitter.least_squares(self.params, **odict) else: - result = self.fitter.scalar_minimize(method=method, - params=self.params, - max_nfev=50000, - **odict) + result = self.fitter.scalar_minimize( + method=method, params=self.params, max_nfev=50000, **odict + ) return result @@ -168,7 +173,8 @@ 
def relative_constraints_type(self, v: Optional[RelativeConstraintsType]): current = getattr(self, '_relative_constraints', None) if current is None or current.type != v: self.relative_constraints = create_relative_constraints( - v, self.instr) + v, self.instr + ) @property def relative_constraints(self) -> RelativeConstraints: @@ -196,7 +202,7 @@ def run_calibration(self, odict): nrm_ssr_1 = _normalized_ssqr(resd1) - delta_r = 1. - nrm_ssr_1/nrm_ssr_0 + delta_r = 1.0 - nrm_ssr_1 / nrm_ssr_0 if delta_r > 0: logger.info('OPTIMIZATION SUCCESSFUL') diff --git a/hexrd/fitting/calibration/laue.py b/hexrd/core/fitting/calibration/laue.py similarity index 69% rename from hexrd/fitting/calibration/laue.py rename to hexrd/core/fitting/calibration/laue.py index 1603fa0bc..855c43727 100644 --- a/hexrd/fitting/calibration/laue.py +++ b/hexrd/core/fitting/calibration/laue.py @@ -8,10 +8,10 @@ from skimage import filters from skimage.feature import blob_log -from hexrd import xrdutil -from hexrd.constants import fwhm_to_sigma -from hexrd.instrument import switch_xray_source -from hexrd.transforms import xfcapi +from hexrd.hedm import xrdutil +from hexrd.core.constants import fwhm_to_sigma +from hexrd.core.instrument import switch_xray_source +from hexrd.core.transforms import xfcapi from .abstract_grain import AbstractGrainCalibrator from .lmfit_param_handling import DEFAULT_EULER_CONVENTION @@ -27,16 +27,29 @@ class LaueCalibrator(AbstractGrainCalibrator): varying energy range rather than a constant energy value. Also, we do not utilize any omega periods. 
""" + type = 'laue' - def __init__(self, instr, material, grain_params, default_refinements=None, - min_energy=5, max_energy=25, tth_distortion=None, - calibration_picks=None, - euler_convention=DEFAULT_EULER_CONVENTION, - xray_source: Optional[str] = None): + def __init__( + self, + instr, + material, + grain_params, + default_refinements=None, + min_energy=5, + max_energy=25, + tth_distortion=None, + calibration_picks=None, + euler_convention=DEFAULT_EULER_CONVENTION, + xray_source: Optional[str] = None, + ): super().__init__( - instr, material, grain_params, default_refinements, - calibration_picks, euler_convention, + instr, + material, + grain_params, + default_refinements, + calibration_picks, + euler_convention, ) self.energy_cutoffs = [min_energy, max_energy] self.xray_source = xray_source @@ -80,10 +93,20 @@ def energy_cutoffs(self, x): self.plane_data.wavelength = self.energy_cutoffs[-1] self.plane_data.exclusions = None - def autopick_points(self, raw_img_dict, tth_tol=5., eta_tol=5., - npdiv=2, do_smoothing=True, smoothing_sigma=2, - use_blob_detection=True, blob_threshold=0.25, - fit_peaks=True, min_peak_int=1., fit_tth_tol=0.1): + def autopick_points( + self, + raw_img_dict, + tth_tol=5.0, + eta_tol=5.0, + npdiv=2, + do_smoothing=True, + smoothing_sigma=2, + use_blob_detection=True, + blob_threshold=0.25, + fit_peaks=True, + min_peak_int=1.0, + fit_tth_tol=0.1, + ): """ Parameters ---------- @@ -127,13 +150,23 @@ def autopick_points(self, raw_img_dict, tth_tol=5., eta_tol=5., fit_tth_tol=fit_tth_tol, ) - def _autopick_points(self, raw_img_dict, tth_tol=5., eta_tol=5., - npdiv=2, do_smoothing=True, smoothing_sigma=2, - use_blob_detection=True, blob_threshold=0.25, - fit_peaks=True, min_peak_int=1., fit_tth_tol=0.1): + def _autopick_points( + self, + raw_img_dict, + tth_tol=5.0, + eta_tol=5.0, + npdiv=2, + do_smoothing=True, + smoothing_sigma=2, + use_blob_detection=True, + blob_threshold=0.25, + fit_peaks=True, + min_peak_int=1.0, + fit_tth_tol=0.1, + 
): labelStructure = ndimage.generate_binary_structure(2, 1) rmat_s = np.eye(3) # !!! forcing to identity - omega = 0. # !!! same ^^^ + omega = 0.0 # !!! same ^^^ rmat_c = xfcapi.make_rmat_of_expmap(self.grain_params[:3]) tvec_c = self.grain_params[3:6] @@ -145,7 +178,8 @@ def _autopick_points(self, raw_img_dict, tth_tol=5., eta_tol=5., self.plane_data, minEnergy=self.energy_cutoffs[0], maxEnergy=self.energy_cutoffs[1], - rmat_s=None, grain_params=np.atleast_2d(self.grain_params), + rmat_s=None, + grain_params=np.atleast_2d(self.grain_params), ) # loop over detectors for results @@ -154,7 +188,7 @@ def _autopick_points(self, raw_img_dict, tth_tol=5., eta_tol=5., det_config = det.config_dict( chi=self.instr.chi, tvec=self.instr.tvec, - beam_vector=self.instr.beam_vector + beam_vector=self.instr.beam_vector, ) xy_det, hkls, angles, dspacing, energy = laue_sim[det_key] @@ -177,57 +211,66 @@ def _autopick_points(self, raw_img_dict, tth_tol=5., eta_tol=5., # make patches refl_patches = xrdutil.make_reflection_patches( det_config, - valid_angs, det.angularPixelSize(valid_xy), - rmat_c=rmat_c, tvec_c=tvec_c, - tth_tol=tth_tol, eta_tol=eta_tol, - npdiv=npdiv, quiet=True) + valid_angs, + det.angularPixelSize(valid_xy), + rmat_c=rmat_c, + tvec_c=tvec_c, + tth_tol=tth_tol, + eta_tol=eta_tol, + npdiv=npdiv, + quiet=True, + ) reflInfoList = [] img = raw_img_dict[det_key] native_area = det.pixel_area num_patches = len(valid_angs) - meas_xy = np.nan*np.ones((num_patches, 2)) - meas_angs = np.nan*np.ones((num_patches, 2)) + meas_xy = np.nan * np.ones((num_patches, 2)) + meas_angs = np.nan * np.ones((num_patches, 2)) for iRefl, patch in enumerate(refl_patches): # check for overrun irow = patch[-1][0] jcol = patch[-1][1] - if np.any([irow < 0, irow >= det.rows, - jcol < 0, jcol >= det.cols]): + if np.any( + [irow < 0, irow >= det.rows, jcol < 0, jcol >= det.cols] + ): continue if not np.all( - det.clip_to_panel( - np.vstack([patch[1][0].flatten(), - patch[1][1].flatten()]).T - )[1] - 
): + det.clip_to_panel( + np.vstack( + [patch[1][0].flatten(), patch[1][1].flatten()] + ).T + )[1] + ): continue # use nearest interpolation spot_data = img[irow, jcol] * patch[3] * npdiv**2 / native_area spot_data -= np.amin(spot_data) patch_size = spot_data.shape - sigmax = 0.25*np.min(spot_data.shape) * fwhm_to_sigma + sigmax = 0.25 * np.min(spot_data.shape) * fwhm_to_sigma # optional gaussian smoothing if do_smoothing: spot_data = filters.gaussian(spot_data, smoothing_sigma) if use_blob_detection: - spot_data_scl = 2.*spot_data/np.max(spot_data) - 1. + spot_data_scl = 2.0 * spot_data / np.max(spot_data) - 1.0 # Compute radii in the 3rd column. - blobs_log = blob_log(spot_data_scl, - min_sigma=2, - max_sigma=min(sigmax, 20), - num_sigma=10, - threshold=blob_threshold, - overlap=0.1) + blobs_log = blob_log( + spot_data_scl, + min_sigma=2, + max_sigma=min(sigmax, 20), + num_sigma=10, + threshold=blob_threshold, + overlap=0.1, + ) numPeaks = len(blobs_log) else: labels, numPeaks = ndimage.label( spot_data > np.percentile(spot_data, 99), - structure=labelStructure + structure=labelStructure, ) slabels = np.arange(1, numPeaks + 1) tth_edges = patch[0][0][0, :] @@ -242,11 +285,11 @@ def _autopick_points(self, raw_img_dict, tth_tol=5., eta_tol=5., coms = np.array( ndimage.center_of_mass( spot_data, labels=labels, index=slabels - ) ) + ) if numPeaks > 1: # - center = np.r_[spot_data.shape]*0.5 + center = np.r_[spot_data.shape] * 0.5 com_diff = coms - np.tile(center, (numPeaks, 1)) closest_peak_idx = np.argmin( np.sum(com_diff**2, axis=1) @@ -258,20 +301,28 @@ def _autopick_points(self, raw_img_dict, tth_tol=5., eta_tol=5., coms = coms[closest_peak_idx] # if fit_peaks: - sigm = 0.2*np.min(spot_data.shape) + sigm = 0.2 * np.min(spot_data.shape) if use_blob_detection: sigm = min(blobs_log[closest_peak_idx, 2], sigm) y0, x0 = coms.flatten() ampl = float(spot_data[int(y0), int(x0)]) # y0, x0 = 0.5*np.array(spot_data.shape) # ampl = np.max(spot_data) - a_par = c_par = 
0.5/float(sigm**2) - b_par = 0. - bgx = bgy = 0. + a_par = c_par = 0.5 / float(sigm**2) + b_par = 0.0 + bgx = bgy = 0.0 bkg = np.min(spot_data) - params = [ampl, - a_par, b_par, c_par, - x0, y0, bgx, bgy, bkg] + params = [ + ampl, + a_par, + b_par, + c_par, + x0, + y0, + bgx, + bgy, + bkg, + ] # result = leastsq(gaussian_2d, params, args=(spot_data)) # @@ -286,24 +337,29 @@ def _autopick_points(self, raw_img_dict, tth_tol=5., eta_tol=5., row_cen = fit_tth_tol * patch_size[0] col_cen = fit_tth_tol * patch_size[1] if np.any( - [coms[0] < row_cen, - coms[0] >= patch_size[0] - row_cen, - coms[1] < col_cen, - coms[1] >= patch_size[1] - col_cen] + [ + coms[0] < row_cen, + coms[0] >= patch_size[0] - row_cen, + coms[1] < col_cen, + coms[1] >= patch_size[1] - col_cen, + ] ): continue - if (fit_par[0] < min_peak_int): + if fit_par[0] < min_peak_int: continue # intensities spot_intensity, int_err = nquad( gaussian_2d_int, - [[0., 2.*y0], [0., 2.*x0]], - args=fit_par) - com_angs = np.hstack([ - tth_edges[0] + (0.5 + coms[1])*delta_tth, - eta_edges[0] + (0.5 + coms[0])*delta_eta - ]) + [[0.0, 2.0 * y0], [0.0, 2.0 * x0]], + args=fit_par, + ) + com_angs = np.hstack( + [ + tth_edges[0] + (0.5 + coms[1]) * delta_tth, + eta_edges[0] + (0.5 + coms[0]) * delta_eta, + ] + ) # grab intensities if not fit_peaks: @@ -326,12 +382,18 @@ def _autopick_points(self, raw_img_dict, tth_tol=5., eta_tol=5., cmv, chi=self.instr.chi, rmat_c=rmat_c, - beam_vec=self.instr.beam_vector) + beam_vec=self.instr.beam_vector, + ) new_xy = xfcapi.gvec_to_xy( gvec_c, - det.rmat, rmat_s, rmat_c, - det.tvec, self.instr.tvec, tvec_c, - beam_vec=self.instr.beam_vector) + det.rmat, + rmat_s, + rmat_c, + det.tvec, + self.instr.tvec, + tvec_c, + beam_vec=self.instr.beam_vector, + ) meas_xy[iRefl, :] = new_xy if det.distortion is not None: meas_xy[iRefl, :] = det.distortion.apply_inverse( @@ -343,15 +405,20 @@ def _autopick_points(self, raw_img_dict, tth_tol=5., eta_tol=5., # spot_intensity = np.nan max_intensity = 
np.nan - reflInfoList.append([peakId, valid_hkls[:, iRefl], - (spot_intensity, max_intensity), - valid_energy[iRefl], - valid_angs[iRefl, :], - meas_angs[iRefl, :], - meas_xy[iRefl, :]]) + reflInfoList.append( + [ + peakId, + valid_hkls[:, iRefl], + (spot_intensity, max_intensity), + valid_energy[iRefl], + valid_angs[iRefl, :], + meas_angs[iRefl, :], + meas_xy[iRefl, :], + ] + ) reflInfo = np.array( - [tuple(i) for i in reflInfoList], - dtype=reflInfo_dtype) + [tuple(i) for i in reflInfoList], dtype=reflInfo_dtype + ) refl_dict[det_key] = reflInfo # Convert to our data_dict format @@ -402,8 +469,12 @@ def _residual(self): energy_cutoffs = np.r_[0.5, 1.5] * np.asarray(self.energy_cutoffs) return sxcal_obj_func( - [self.grain_params], self.instr, pick_xys_dict, pick_hkls_dict, - self.bmatx, energy_cutoffs + [self.grain_params], + self.instr, + pick_xys_dict, + pick_hkls_dict, + self.bmatx, + energy_cutoffs, ) def model(self): @@ -415,14 +486,26 @@ def _model(self): pick_hkls_dict, pick_xys_dict = self._evaluate() return sxcal_obj_func( - [self.grain_params], self.instr, pick_xys_dict, pick_hkls_dict, - self.bmatx, self.energy_cutoffs, sim_only=True + [self.grain_params], + self.instr, + pick_xys_dict, + pick_hkls_dict, + self.bmatx, + self.energy_cutoffs, + sim_only=True, ) # Objective function for Laue fitting -def sxcal_obj_func(grain_params, instr, meas_xy, hkls_idx, - bmat, energy_cutoffs, sim_only=False): +def sxcal_obj_func( + grain_params, + instr, + meas_xy, + hkls_idx, + bmat, + energy_cutoffs, + sim_only=False, +): """ Objective function for Laue-based fitting. 
@@ -439,9 +522,10 @@ def sxcal_obj_func(grain_params, instr, meas_xy, hkls_idx, # returns xy_det, hkls_in, angles, dspacing, energy sim_results = panel.simulate_laue_pattern( [hkls_idx[det_key], bmat], - minEnergy=energy_cutoffs[0], maxEnergy=energy_cutoffs[1], + minEnergy=energy_cutoffs[0], + maxEnergy=energy_cutoffs[1], grain_params=grain_params, - beam_vec=instr.beam_vector + beam_vec=instr.beam_vector, ) calc_xy_tmp = sim_results[0][0] @@ -469,20 +553,30 @@ def sxcal_obj_func(grain_params, instr, meas_xy, hkls_idx, def gaussian_2d(p, data): shape = data.shape x, y = np.meshgrid(range(shape[1]), range(shape[0])) - func = p[0]*np.exp( - -(p[1]*(x-p[4])*(x-p[4]) - + p[2]*(x-p[4])*(y-p[5]) - + p[3]*(y-p[5])*(y-p[5])) - ) + p[6]*(x-p[4]) + p[7]*(y-p[5]) + p[8] + func = ( + p[0] + * np.exp( + -( + p[1] * (x - p[4]) * (x - p[4]) + + p[2] * (x - p[4]) * (y - p[5]) + + p[3] * (y - p[5]) * (y - p[5]) + ) + ) + + p[6] * (x - p[4]) + + p[7] * (y - p[5]) + + p[8] + ) return func.flatten() - data.flatten() def gaussian_2d_int(y, x, *p): - func = p[0]*np.exp( - -(p[1]*(x-p[4])*(x-p[4]) - + p[2]*(x-p[4])*(y-p[5]) - + p[3]*(y-p[5])*(y-p[5])) + func = p[0] * np.exp( + -( + p[1] * (x - p[4]) * (x - p[4]) + + p[2] * (x - p[4]) * (y - p[5]) + + p[3] * (y - p[5]) * (y - p[5]) ) + ) return func.flatten() diff --git a/hexrd/fitting/calibration/lmfit_param_handling.py b/hexrd/core/fitting/calibration/lmfit_param_handling.py similarity index 86% rename from hexrd/fitting/calibration/lmfit_param_handling.py rename to hexrd/core/fitting/calibration/lmfit_param_handling.py index 77a660c03..3c5bab8d0 100644 --- a/hexrd/fitting/calibration/lmfit_param_handling.py +++ b/hexrd/core/fitting/calibration/lmfit_param_handling.py @@ -3,13 +3,13 @@ import lmfit import numpy as np -from hexrd.instrument import ( +from hexrd.core.instrument import ( calc_angles_from_beam_vec, calc_beam_vec, Detector, HEDMInstrument, ) -from hexrd.rotations import ( +from hexrd.core.rotations import ( 
angleAxisOfRotMat, expMapOfQuat, make_rmat_euler, @@ -17,8 +17,9 @@ RotMatEuler, rotMatOfExpMap, ) -from hexrd.material.unitcell import _lpname -from .relative_constraints import ( +from hexrd.core.material.unitcell import _lpname +from .relative_constraints import RelativeConstraints, RelativeConstraintsType +from hexrd.core.fitting.calibration.relative_constraints import ( RelativeConstraints, RelativeConstraintsType, ) @@ -30,8 +31,9 @@ EULER_CONVENTION_TYPES = dict | tuple | None -def create_instr_params(instr, euler_convention=DEFAULT_EULER_CONVENTION, - relative_constraints=None): +def create_instr_params( + instr, euler_convention=DEFAULT_EULER_CONVENTION, relative_constraints=None +): # add with tuples: (NAME VALUE VARY MIN MAX EXPR BRUTE_STEP) parms_list = [] @@ -66,16 +68,22 @@ def create_instr_params(instr, euler_convention=DEFAULT_EULER_CONVENTION, f'{base_name}_slope', slope, False, -np.inf, np.inf )) - parms_list.append(('instr_chi', np.degrees(instr.chi), - False, np.degrees(instr.chi)-1, - np.degrees(instr.chi)+1)) + parms_list.append( + ( + 'instr_chi', + np.degrees(instr.chi), + False, + np.degrees(instr.chi) - 1, + np.degrees(instr.chi) + 1, + ) + ) parms_list.append(('instr_tvec_x', instr.tvec[0], False, -np.inf, np.inf)) parms_list.append(('instr_tvec_y', instr.tvec[1], False, -np.inf, np.inf)) parms_list.append(('instr_tvec_z', instr.tvec[2], False, -np.inf, np.inf)) if ( - relative_constraints is None or - relative_constraints.type == RelativeConstraintsType.none + relative_constraints is None + or relative_constraints.type == RelativeConstraintsType.none ): add_unconstrained_detector_parameters( instr, @@ -110,35 +118,51 @@ def add_unconstrained_detector_parameters(instr, euler_convention, parms_list): angle_names = param_names_euler_convention(det, euler_convention) for name, angle in zip(angle_names, angles): - parms_list.append((name, - angle, - False, - angle - 2, - angle + 2)) - - parms_list.append((f'{det}_tvec_x', - panel.tvec[0], - 
True, - panel.tvec[0]-1, - panel.tvec[0]+1)) - parms_list.append((f'{det}_tvec_y', - panel.tvec[1], - True, - panel.tvec[1]-0.5, - panel.tvec[1]+0.5)) - parms_list.append((f'{det}_tvec_z', - panel.tvec[2], - True, - panel.tvec[2]-1, - panel.tvec[2]+1)) + parms_list.append((name, angle, False, angle - 2, angle + 2)) + + parms_list.append( + ( + f'{det}_tvec_x', + panel.tvec[0], + True, + panel.tvec[0] - 1, + panel.tvec[0] + 1, + ) + ) + parms_list.append( + ( + f'{det}_tvec_y', + panel.tvec[1], + True, + panel.tvec[1] - 0.5, + panel.tvec[1] + 0.5, + ) + ) + parms_list.append( + ( + f'{det}_tvec_z', + panel.tvec[2], + True, + panel.tvec[2] - 1, + panel.tvec[2] + 1, + ) + ) if panel.distortion is not None: p = panel.distortion.params for ii, pp in enumerate(p): - parms_list.append((f'{det}_distortion_param_{ii}', pp, - False, -np.inf, np.inf)) + parms_list.append( + ( + f'{det}_distortion_param_{ii}', + pp, + False, + -np.inf, + np.inf, + ) + ) if panel.detector_type.lower() == 'cylindrical': - parms_list.append((f'{det}_radius', panel.radius, False, - -np.inf, np.inf)) + parms_list.append( + (f'{det}_radius', panel.radius, False, -np.inf, np.inf) + ) def _add_constrained_detector_parameters( @@ -154,7 +178,9 @@ def _add_constrained_detector_parameters( # Convert the tilt to the specified Euler convention normalized = normalize_euler_convention(euler_convention) rme = RotMatEuler( - np.zeros(3,), + np.zeros( + 3, + ), axes_order=normalized[0], extrinsic=normalized[1], ) @@ -254,9 +280,11 @@ def fix_detector_y( def update_instrument_from_params( - instr, params, - euler_convention=DEFAULT_EULER_CONVENTION, - relative_constraints: Optional[RelativeConstraints] = None): + instr, + params, + euler_convention=DEFAULT_EULER_CONVENTION, + relative_constraints: Optional[RelativeConstraints] = None, +): """ this function updates the instrument from the lmfit parameter list. we don't have to keep track @@ -266,8 +294,9 @@ def update_instrument_from_params( implemented. 
""" if not isinstance(params, lmfit.Parameters): - msg = ('Only lmfit.Parameters is acceptable input. ' - f'Received: {params}') + msg = ( + 'Only lmfit.Parameters is acceptable input. ' f'Received: {params}' + ) raise NotImplementedError(msg) # This supports single XRS or multi XRS @@ -295,14 +324,16 @@ def update_instrument_from_params( chi = np.radians(params['instr_chi'].value) instr.chi = chi - instr_tvec = [params['instr_tvec_x'].value, - params['instr_tvec_y'].value, - params['instr_tvec_z'].value] + instr_tvec = [ + params['instr_tvec_x'].value, + params['instr_tvec_y'].value, + params['instr_tvec_z'].value, + ] instr.tvec = np.r_[instr_tvec] if ( - relative_constraints is None or - relative_constraints.type == RelativeConstraintsType.none + relative_constraints is None + or relative_constraints.type == RelativeConstraintsType.none ): update_unconstrained_detector_parameters( instr, @@ -332,9 +363,11 @@ def update_unconstrained_detector_parameters(instr, params, euler_convention): det = det_name.replace('-', '_') set_detector_angles_euler(detector, det, params, euler_convention) - tvec = np.r_[params[f'{det}_tvec_x'].value, - params[f'{det}_tvec_y'].value, - params[f'{det}_tvec_z'].value] + tvec = np.r_[ + params[f'{det}_tvec_x'].value, + params[f'{det}_tvec_y'].value, + params[f'{det}_tvec_z'].value, + ] detector.tvec = tvec if detector.detector_type.lower() == 'cylindrical': rad = params[f'{det}_radius'].value @@ -364,7 +397,6 @@ def _update_constrained_detector_parameters( euler_convention: EULER_CONVENTION_TYPES, prefix: str, constraint_params: dict, - ): tvec = constraint_params['translation'] tilt = constraint_params['tilt'] @@ -462,8 +494,9 @@ def update_group_constrained_detector_parameters( ) -def _tilt_to_rmat(tilt: np.ndarray, - euler_convention: dict | tuple) -> np.ndarray: +def _tilt_to_rmat( + tilt: np.ndarray, euler_convention: dict | tuple +) -> np.ndarray: # Convert the tilt to exponential map parameters, and then # to the rotation matrix, 
and return. if euler_convention is None: @@ -502,11 +535,9 @@ def create_tth_parameters( val = np.degrees(np.mean(np.hstack(ds_ang))) - parms_list.append((f'{prefix}{ii}', - val, - True, - val-5., - val+5.)) + parms_list.append( + (f'{prefix}{ii}', val, True, val - 5.0, val + 5.0) + ) return parms_list @@ -597,13 +628,15 @@ def create_grain_params(base_name, grain, refinements=None): parms_list = [] for i, name in enumerate(param_names): - parms_list.append(( - name, - grain[i], - refinements[i], - grain[i] - 2, - grain[i] + 2, - )) + parms_list.append( + ( + name, + grain[i], + refinements[i], + grain[i] - 2, + grain[i] + 2, + ) + ) return parms_list @@ -631,8 +664,9 @@ def add_engineering_constraints(params, engineering_constraints): if engineering_constraints == 'TARDIS': # Since these plates always have opposite signs in y, we can add # their absolute values to get the difference. - dist_plates = (np.abs(params['IMAGE_PLATE_2_tvec_y']) + - np.abs(params['IMAGE_PLATE_4_tvec_y'])) + dist_plates = np.abs(params['IMAGE_PLATE_2_tvec_y']) + np.abs( + params['IMAGE_PLATE_4_tvec_y'] + ) min_dist = 22.83 max_dist = 23.43 @@ -657,11 +691,13 @@ def add_engineering_constraints(params, engineering_constraints): params['IMAGE_PLATE_4_tvec_y'].value + 0.5 * delta ) - params.add('tardis_distance_between_plates', - value=dist_plates, - min=min_dist, - max=max_dist, - vary=True) + params.add( + 'tardis_distance_between_plates', + value=dist_plates, + min=min_dist, + max=max_dist, + vary=True, + ) expr = 'tardis_distance_between_plates - abs(IMAGE_PLATE_2_tvec_y)' params['IMAGE_PLATE_4_tvec_y'].expr = expr @@ -674,7 +710,7 @@ def validate_params_list(params_list): # Make sure there are no duplicate names duplicate_names = [] for i, x in enumerate(params_list): - for y in params_list[i + 1:]: + for y in params_list[i + 1 :]: if x[0] == y[0]: duplicate_names.append(x[0]) @@ -713,7 +749,7 @@ def detector_angles_euler(panel, euler_convention): normalized = 
normalize_euler_convention(euler_convention) rmat = panel.rmat rme = RotMatEuler( - np.zeros(3,), + np.zeros(3), axes_order=normalized[0], extrinsic=normalized[1], ) diff --git a/hexrd/fitting/calibration/powder.py b/hexrd/core/fitting/calibration/powder.py similarity index 84% rename from hexrd/fitting/calibration/powder.py rename to hexrd/core/fitting/calibration/powder.py index 0f09a25c0..bdb94f53e 100644 --- a/hexrd/fitting/calibration/powder.py +++ b/hexrd/core/fitting/calibration/powder.py @@ -3,9 +3,9 @@ import numpy as np -from hexrd import matrixutil as mutil -from hexrd.instrument import calc_angles_from_beam_vec, switch_xray_source -from hexrd.utils.hkl import hkl_to_str, str_to_hkl +from hexrd.core import matrixutil as mutil +from hexrd.core.instrument import calc_angles_from_beam_vec, switch_xray_source +from hexrd.core.utils.hkl import hkl_to_str, str_to_hkl from .calibrator import Calibrator from .lmfit_param_handling import ( @@ -19,14 +19,26 @@ class PowderCalibrator(Calibrator): type = 'powder' - def __init__(self, instr, material, img_dict, default_refinements=None, - tth_tol=None, eta_tol=0.25, - fwhm_estimate=None, min_pk_sep=1e-3, min_ampl=0., - pktype='pvoigt', bgtype='linear', - tth_distortion=None, calibration_picks=None, - xray_source: Optional[str] = None): - assert list(instr.detectors.keys()) == list(img_dict.keys()), \ - "instrument and image dict must have the same keys" + def __init__( + self, + instr, + material, + img_dict, + default_refinements=None, + tth_tol=None, + eta_tol=0.25, + fwhm_estimate=None, + min_pk_sep=1e-3, + min_ampl=0.0, + pktype='pvoigt', + bgtype='linear', + tth_distortion=None, + calibration_picks=None, + xray_source: Optional[str] = None, + ): + assert list(instr.detectors.keys()) == list( + img_dict.keys() + ), "instrument and image dict must have the same keys" self.instr = instr self.material = material @@ -82,8 +94,9 @@ def _update_tth_distortion_panels(self): def create_lmfit_params(self, current_params): 
# There shouldn't be more than one calibrator for a given material, so # just assume we have a unique name... - params = create_material_params(self.material, - self.default_refinements) + params = create_material_params( + self.material, self.default_refinements + ) # If multiple powder calibrators were used for the same material (such # as in 2XRS), then don't add params again. @@ -113,11 +126,13 @@ def tth_tol(self, x): @property def spectrum_kwargs(self): - return dict(pktype=self.pktype, - bgtype=self.bgtype, - fwhm_init=self.fwhm_estimate, - min_ampl=self.min_ampl, - min_pk_sep=self.min_pk_sep) + return dict( + pktype=self.pktype, + bgtype=self.bgtype, + fwhm_init=self.fwhm_estimate, + min_ampl=self.min_ampl, + min_pk_sep=self.min_pk_sep, + ) @property def calibration_picks(self): @@ -155,7 +170,7 @@ def calibration_picks(self, v): self.data_dict = data_dict - def autopick_points(self, fit_tth_tol=5., int_cutoff=1e-4): + def autopick_points(self, fit_tth_tol=5.0, int_cutoff=1e-4): """ return the RHS for the instrument DOF and image dict @@ -172,7 +187,7 @@ def autopick_points(self, fit_tth_tol=5., int_cutoff=1e-4): with switch_xray_source(self.instr, self.xray_source): return self._autopick_points(fit_tth_tol, int_cutoff) - def _autopick_points(self, fit_tth_tol=5., int_cutoff=1e-4): + def _autopick_points(self, fit_tth_tol=5.0, int_cutoff=1e-4): # ideal tth dsp_ideal = np.atleast_1d(self.plane_data.getPlaneSpacings()) hkls_ref = self.plane_data.hkls.T @@ -247,13 +262,15 @@ def _autopick_points(self, fit_tth_tol=5., int_cutoff=1e-4): ) # cat results - output = np.hstack([ - xy_meas, - tth_meas.reshape(npeaks, 1), - this_hkl, - this_dsp0.reshape(npeaks, 1), - eta_ref_tile.reshape(npeaks, 1), - ]) + output = np.hstack( + [ + xy_meas, + tth_meas.reshape(npeaks, 1), + this_hkl, + this_dsp0.reshape(npeaks, 1), + eta_ref_tile.reshape(npeaks, 1), + ] + ) ret.append(output) if not ret: @@ -314,18 +331,16 @@ def _evaluate(self, output='residual'): # to (tth, eta) 
meas_xy = pdata[:, :2] updated_angles, _ = panel.cart_to_angles( - meas_xy, - tvec_s=self.instr.tvec, - apply_distortion=True + meas_xy, tvec_s=self.instr.tvec, apply_distortion=True ) # derive ideal tth positions from additional ring point info hkls = pdata[:, 3:6] gvecs = np.dot(hkls, bmat.T) - dsp0 = 1./np.sqrt(np.sum(gvecs*gvecs, axis=1)) + dsp0 = 1.0 / np.sqrt(np.sum(gvecs * gvecs, axis=1)) # updated reference Bragg angles - tth0 = 2.*np.arcsin(0.5*wlen/dsp0) + tth0 = 2.0 * np.arcsin(0.5 * wlen / dsp0) # !!! get eta from mapped markers rather than ref # eta0 = pdata[:, -1] @@ -349,23 +364,16 @@ def _evaluate(self, output='residual'): # meas_xy.flatten() - calc_xy.flatten() # ) retval = np.append( - retval, - updated_angles[:, 0].flatten() - tth0.flatten() + retval, updated_angles[:, 0].flatten() - tth0.flatten() ) elif output == 'model': calc_xy = panel.angles_to_cart( - tth_eta, - tvec_s=self.instr.tvec, - apply_distortion=True - ) - retval = np.append( - retval, - calc_xy.flatten() + tth_eta, tvec_s=self.instr.tvec, apply_distortion=True ) + retval = np.append(retval, calc_xy.flatten()) else: raise RuntimeError( - "unrecognized output flag '%s'" - % output + "unrecognized output flag '%s'" % output ) return retval diff --git a/hexrd/fitting/calibration/relative_constraints.py b/hexrd/core/fitting/calibration/relative_constraints.py similarity index 96% rename from hexrd/fitting/calibration/relative_constraints.py rename to hexrd/core/fitting/calibration/relative_constraints.py index f094b3212..762719187 100644 --- a/hexrd/fitting/calibration/relative_constraints.py +++ b/hexrd/core/fitting/calibration/relative_constraints.py @@ -3,11 +3,12 @@ import numpy as np -from hexrd.instrument import HEDMInstrument +from hexrd.core.instrument import HEDMInstrument class RelativeConstraintsType(Enum): """These are relative constraints between the detectors""" + # 'none' means no relative constraints none = 'None' # 'group' means constrain tilts/translations within a 
group @@ -18,6 +19,7 @@ class RelativeConstraintsType(Enum): class RotationCenter(Enum): """These are different centers for relative constraint rotations""" + # Rotate about the mean center of all the detectors instrument_mean_center = 'InstrumentMeanCenter' @@ -164,8 +166,9 @@ def center_of_rotation(self, instr: HEDMInstrument) -> np.ndarray: raise NotImplementedError(self.rotation_center) -def create_relative_constraints(type: RelativeConstraintsType, - instr: HEDMInstrument): +def create_relative_constraints( + type: RelativeConstraintsType, instr: HEDMInstrument +): types = { 'None': RelativeConstraintsNone, 'Group': RelativeConstraintsGroup, diff --git a/hexrd/fitting/calibration/structureless.py b/hexrd/core/fitting/calibration/structureless.py similarity index 85% rename from hexrd/fitting/calibration/structureless.py rename to hexrd/core/fitting/calibration/structureless.py index 21a6c8d98..2fff3d0d3 100644 --- a/hexrd/fitting/calibration/structureless.py +++ b/hexrd/core/fitting/calibration/structureless.py @@ -4,7 +4,7 @@ import lmfit import numpy as np -from hexrd.instrument import switch_xray_source +from hexrd.core.instrument import switch_xray_source from .lmfit_param_handling import ( add_engineering_constraints, @@ -40,20 +40,24 @@ class StructurelessCalibrator: 22.83 mm <= |IMAGE-PLATE-2 tvec[1]| + |IMAGE-PLATE-2 tvec[1]| <= 23.43 mm """ - def __init__(self, - instr, - data, - tth_distortion=None, - engineering_constraints=None, - relative_constraints_type=RelativeConstraintsType.none, - euler_convention=DEFAULT_EULER_CONVENTION): + + def __init__( + self, + instr, + data, + tth_distortion=None, + engineering_constraints=None, + relative_constraints_type=RelativeConstraintsType.none, + euler_convention=DEFAULT_EULER_CONVENTION, + ): self._instr = instr self._data = data self._tth_distortion = tth_distortion self._engineering_constraints = engineering_constraints self._relative_constraints = create_relative_constraints( - relative_constraints_type, 
self.instr) + relative_constraints_type, self.instr + ) self.euler_convention = euler_convention self._update_tth_distortion_panels() self.make_lmfit_params() @@ -91,10 +95,9 @@ def calc_residual(self, params): prefixes = tth_parameter_prefixes(self.instr) for xray_source in self.data: prefix = prefixes[xray_source] - for ii, (rng, corr_rng) in enumerate(zip( - meas_angles[xray_source], - tth_correction[xray_source] - )): + for ii, (rng, corr_rng) in enumerate( + zip(meas_angles[xray_source], tth_correction[xray_source]) + ): for det_name, panel in self.instr.detectors.items(): if rng[det_name] is None or rng[det_name].size == 0: continue @@ -111,13 +114,11 @@ def calc_residual(self, params): return np.hstack(residual) def set_minimizer(self): - self.fitter = lmfit.Minimizer(self.calc_residual, - self.params, - nan_policy='omit') + self.fitter = lmfit.Minimizer( + self.calc_residual, self.params, nan_policy='omit' + ) - def run_calibration(self, - method='least_squares', - odict=None): + def run_calibration(self, method='least_squares', odict=None): """ odict is the options dictionary """ @@ -137,14 +138,12 @@ def run_calibration(self, } fdict.update(odict) - self.res = self.fitter.least_squares(self.params, - **fdict) + self.res = self.fitter.least_squares(self.params, **fdict) else: fdict = odict - self.res = self.fitter.scalar_minimize(method=method, - params=self.params, - max_nfev=50000, - **fdict) + self.res = self.fitter.scalar_minimize( + method=method, params=self.params, max_nfev=50000, **fdict + ) self.params = self.res.params # res = self.fitter.least_squares(**fdict) @@ -182,7 +181,8 @@ def relative_constraints_type(self, v: Optional[RelativeConstraintsType]): current = getattr(self, '_relative_constraints', None) if current is None or current.type != v: self.relative_constraints = create_relative_constraints( - v, self.instr) + v, self.instr + ) @property def relative_constraints(self) -> RelativeConstraints: @@ -258,9 +258,10 @@ def meas_angles(self) 
-> dict: panel = self.instr.detectors[det_name] angles, _ = panel.cart_to_angles( - meas_xy, - tvec_s=self.instr.tvec, - apply_distortion=True) + meas_xy, + tvec_s=self.instr.tvec, + apply_distortion=True, + ) ang_dict[det_name] = angles ang_list.append(ang_dict) diff --git a/hexrd/fitting/fitpeak.py b/hexrd/core/fitting/fitpeak.py similarity index 83% rename from hexrd/fitting/fitpeak.py rename to hexrd/core/fitting/fitpeak.py index ff641685a..3d175821b 100644 --- a/hexrd/fitting/fitpeak.py +++ b/hexrd/core/fitting/fitpeak.py @@ -26,15 +26,16 @@ # ============================================================ import numpy as np + # from numpy.polynomial import chebyshev from scipy import integrate from scipy import ndimage as imgproc from scipy import optimize -from hexrd import constants -from hexrd.imageutil import snip1d -from hexrd.fitting import peakfunctions as pkfuncs +from hexrd.core import constants +from hexrd.core.imageutil import snip1d +from hexrd.core.fitting import peakfunctions as pkfuncs import matplotlib.pyplot as plt @@ -51,11 +52,11 @@ # dcs param values # !!! 
converted from deg^-1 in Von Dreele's paper -alpha0, alpha1, beta0, beta1 = np.r_[14.4, 0., 3.016, -7.94] +alpha0, alpha1, beta0, beta1 = np.r_[14.4, 0.0, 3.016, -7.94] def cnst_fit_obj(x, b): - return np.ones_like(x)*b + return np.ones_like(x) * b def cnst_fit_jac(x, b): @@ -63,7 +64,7 @@ def cnst_fit_jac(x, b): def lin_fit_obj(x, m, b): - return m*np.asarray(x) + b + return m * np.asarray(x) + b def lin_fit_jac(x, m, b): @@ -72,19 +73,19 @@ def lin_fit_jac(x, m, b): def quad_fit_obj(x, a, b, c): x = np.asarray(x) - return a*x**2 + b*x + c + return a * x**2 + b * x + c def quad_fit_jac(x, a, b, c): x = np.asarray(x) - return a*x**2 + b*x + c + return a * x**2 + b * x + c return np.vstack([x**2, x, np.ones_like(x)]).T def _amplitude_guess(x, x0, y, fwhm): - pt_l = np.argmin(np.abs(x - (x0 - 0.5*fwhm))) - pt_h = np.argmin(np.abs(x - (x0 + 0.5*fwhm))) - return np.max(y[pt_l:pt_h + 1]) + pt_l = np.argmin(np.abs(x - (x0 - 0.5 * fwhm))) + pt_h = np.argmin(np.abs(x - (x0 + 0.5 * fwhm))) + return np.max(y[pt_l : pt_h + 1]) # ============================================================================= @@ -119,7 +120,9 @@ def estimate_pk_parms_1d(x, f, pktype='pvoigt'): # handle background # ??? make kernel width a kwarg? 
- bkg = snip1d(np.atleast_2d(f), w=int(2*npts/3.), max_workers=1).flatten() + bkg = snip1d( + np.atleast_2d(f), w=int(2 * npts / 3.0), max_workers=1 + ).flatten() # fit linear bg and grab params bp, _ = optimize.curve_fit(lin_fit_obj, x, bkg, jac=lin_fit_jac) @@ -134,23 +137,23 @@ def estimate_pk_parms_1d(x, f, pktype='pvoigt'): # fix center index if cen_index > 0 and cen_index < npts - 1: - left_hm = np.argmin(abs(pint[:cen_index] - 0.5*A)) - right_hm = np.argmin(abs(pint[cen_index:] - 0.5*A)) + left_hm = np.argmin(abs(pint[:cen_index] - 0.5 * A)) + right_hm = np.argmin(abs(pint[cen_index:] - 0.5 * A)) elif cen_index == 0: - right_hm = np.argmin(abs(pint[cen_index:] - 0.5*A)) + right_hm = np.argmin(abs(pint[cen_index:] - 0.5 * A)) left_hm = right_hm elif cen_index == npts - 1: - left_hm = np.argmin(abs(pint[:cen_index] - 0.5*A)) + left_hm = np.argmin(abs(pint[:cen_index] - 0.5 * A)) right_hm = left_hm # FWHM estimation try: FWHM = x[cen_index + right_hm] - x[left_hm] - except(IndexError): + except IndexError: FWHM = 0 - if FWHM <= 0 or FWHM > 0.75*npts: + if FWHM <= 0 or FWHM > 0.75 * npts: # something is weird, so punt... - FWHM = 0.25*(x[-1] - x[0]) + FWHM = 0.25 * (x[-1] - x[0]) # set params if pktype in ['gaussian', 'lorentzian']: @@ -195,62 +198,69 @@ def fit_pk_parms_1d(p0, x, f, pktype='pvoigt'): peak type """ - weight = np.max(f)*10. 
# hard coded should be changed + weight = np.max(f) * 10.0 # hard coded should be changed fitArgs = (x, f, pktype) if pktype == 'gaussian': p, outflag = optimize.leastsq( - fit_pk_obj_1d, p0, - args=fitArgs, Dfun=eval_pk_deriv_1d, - ftol=ftol, xtol=xtol + fit_pk_obj_1d, + p0, + args=fitArgs, + Dfun=eval_pk_deriv_1d, + ftol=ftol, + xtol=xtol, ) elif pktype == 'lorentzian': p, outflag = optimize.leastsq( - fit_pk_obj_1d, p0, - args=fitArgs, Dfun=eval_pk_deriv_1d, - ftol=ftol, xtol=xtol + fit_pk_obj_1d, + p0, + args=fitArgs, + Dfun=eval_pk_deriv_1d, + ftol=ftol, + xtol=xtol, ) elif pktype == 'pvoigt': - lb = [p0[0]*0.5, np.min(x), 0., 0., 0., None] - ub = [p0[0]*2.0, np.max(x), 4.*p0[2], 1., 2.*p0[4], None] + lb = [p0[0] * 0.5, np.min(x), 0.0, 0.0, 0.0, None] + ub = [p0[0] * 2.0, np.max(x), 4.0 * p0[2], 1.0, 2.0 * p0[4], None] fitArgs = (x, f, pktype, weight, lb, ub) p, outflag = optimize.leastsq( - fit_pk_obj_1d_bnded, p0, - args=fitArgs, - ftol=ftol, xtol=xtol + fit_pk_obj_1d_bnded, p0, args=fitArgs, ftol=ftol, xtol=xtol ) elif pktype == 'split_pvoigt': + # fmt: off lb = [p0[0]*0.5, np.min(x), 0., 0., 0., 0., 0., None] ub = [p0[0]*2.0, np.max(x), 4.*p0[2], 4.*p0[2], 1., 1., 2.*p0[4], None] + # fmt: on fitArgs = (x, f, pktype, weight, lb, ub) p, outflag = optimize.leastsq( - fit_pk_obj_1d_bnded, p0, - args=fitArgs, - ftol=ftol, xtol=xtol + fit_pk_obj_1d_bnded, p0, args=fitArgs, ftol=ftol, xtol=xtol ) elif pktype == 'tanh_stepdown': p, outflag = optimize.leastsq( - fit_pk_obj_1d, p0, - args=fitArgs, - ftol=ftol, xtol=xtol) + fit_pk_obj_1d, p0, args=fitArgs, ftol=ftol, xtol=xtol + ) elif pktype == 'dcs_pinkbeam': # !!!: for some reason the 'trf' method was not behaving well, # so switched to 'lm' + # fmt: off lb = np.array([0.0, x.min(), -100., -100., -100., -100., 0., 0., -np.inf, -np.inf, -np.inf]) ub = np.array([np.inf, x.max(), 100., 100., 100., 100., 10., 10., np.inf, np.inf, np.inf]) + # fmt: on res = optimize.least_squares( - fit_pk_obj_1d, p0, + fit_pk_obj_1d, 
+ p0, jac='2-point', # bounds=(), # (lb, ub), method='lm', args=fitArgs, ftol=ftol, - xtol=xtol) + xtol=xtol, + ) p = res['x'] # outflag = res['success'] else: @@ -264,10 +274,7 @@ def fit_pk_parms_1d(p0, x, f, pktype='pvoigt'): return p -def fit_mpk_parms_1d( - p0, x, f0, pktype, num_pks, - bgtype=None, bnds=None - ): +def fit_mpk_parms_1d(p0, x, f0, pktype, num_pks, bgtype=None, bnds=None): """ Fit MULTIPLE 1d analytic functions to diffraction data. @@ -303,23 +310,26 @@ def fit_mpk_parms_1d( if bnds is None: p = optimize.least_squares( - fit_mpk_obj_1d, p0, - args=fitArgs, ftol=ftol, xtol=xtol + fit_mpk_obj_1d, p0, args=fitArgs, ftol=ftol, xtol=xtol ) else: p = optimize.least_squares( - fit_mpk_obj_1d, p0, - bounds=bnds, args=fitArgs, ftol=ftol, xtol=xtol + fit_mpk_obj_1d, p0, bounds=bnds, args=fitArgs, ftol=ftol, xtol=xtol ) return p.x def estimate_mpk_parms_1d( - pk_pos_0, x, f, - pktype='pvoigt', bgtype='linear', - fwhm_guess=None, center_bnd=0.02, - amp_lim_mult=[0.1, 10.], fwhm_lim_mult=[0.5, 2.] - ): + pk_pos_0, + x, + f, + pktype='pvoigt', + bgtype='linear', + fwhm_guess=None, + center_bnd=0.02, + amp_lim_mult=[0.1, 10.0], + fwhm_lim_mult=[0.5, 2.0], +): """ Generate function-specific estimate for multi-peak parameters. 
@@ -360,21 +370,21 @@ def estimate_mpk_parms_1d( num_pks = len(pk_pos_0) center_bnd = np.atleast_1d(center_bnd) - if(len(center_bnd) < 2): - center_bnd = center_bnd*np.ones(num_pks) + if len(center_bnd) < 2: + center_bnd = center_bnd * np.ones(num_pks) if fwhm_guess is None: - fwhm_guess = (np.max(x) - np.min(x))/(20.*num_pks) + fwhm_guess = (np.max(x) - np.min(x)) / (20.0 * num_pks) fwhm_guess = np.atleast_1d(fwhm_guess) - if(len(fwhm_guess) < 2): - fwhm_guess = fwhm_guess*np.ones(num_pks) + if len(fwhm_guess) < 2: + fwhm_guess = fwhm_guess * np.ones(num_pks) min_val = np.min(f) # estimate background with SNIP1d bkg = snip1d( np.atleast_2d(f), - w=int(np.floor(0.25*len(f))), + w=int(np.floor(0.25 * len(f))), max_workers=1, ).flatten() @@ -414,20 +424,16 @@ def estimate_mpk_parms_1d( amp_guess = _amplitude_guess( x, pk_pos_0[ii], fsubtr, fwhm_guess[ii] ) - p0tmp[ii, :] = [ - amp_guess, - pk_pos_0[ii], - fwhm_guess[ii] - ] + p0tmp[ii, :] = [amp_guess, pk_pos_0[ii], fwhm_guess[ii]] p0tmp_lb[ii, :] = [ - amp_guess*amp_lim_mult[0], + amp_guess * amp_lim_mult[0], pk_pos_0[ii] - center_bnd[ii], - fwhm_guess[ii]*fwhm_lim_mult[0] + fwhm_guess[ii] * fwhm_lim_mult[0], ] p0tmp_ub[ii, :] = [ - amp_guess*amp_lim_mult[1], + amp_guess * amp_lim_mult[1], pk_pos_0[ii] + center_bnd[ii], - fwhm_guess[ii]*fwhm_lim_mult[1] + fwhm_guess[ii] * fwhm_lim_mult[1], ] elif pktype == 'pvoigt': # x is just 2theta values @@ -436,23 +442,18 @@ def estimate_mpk_parms_1d( amp_guess = _amplitude_guess( x, pk_pos_0[ii], fsubtr, fwhm_guess[ii] ) - p0tmp[ii, :] = [ - amp_guess, - pk_pos_0[ii], - fwhm_guess[ii], - 0.5 - ] + p0tmp[ii, :] = [amp_guess, pk_pos_0[ii], fwhm_guess[ii], 0.5] p0tmp_lb[ii, :] = [ - amp_guess*amp_lim_mult[0], + amp_guess * amp_lim_mult[0], pk_pos_0[ii] - center_bnd[ii], - fwhm_guess[ii]*fwhm_lim_mult[0], - 0.0 + fwhm_guess[ii] * fwhm_lim_mult[0], + 0.0, ] p0tmp_ub[ii, :] = [ - (amp_guess - min_val + 1.)*amp_lim_mult[1], + (amp_guess - min_val + 1.0) * amp_lim_mult[1], 
pk_pos_0[ii] + center_bnd[ii], - fwhm_guess[ii]*fwhm_lim_mult[1], - 1.0 + fwhm_guess[ii] * fwhm_lim_mult[1], + 1.0, ] elif pktype == 'split_pvoigt': # x is just 2theta values @@ -467,23 +468,23 @@ def estimate_mpk_parms_1d( fwhm_guess[ii], fwhm_guess[ii], 0.5, - 0.5 + 0.5, ] p0tmp_lb[ii, :] = [ - amp_guess*amp_lim_mult[0], + amp_guess * amp_lim_mult[0], pk_pos_0[ii] - center_bnd[ii], - fwhm_guess[ii]*fwhm_lim_mult[0], - fwhm_guess[ii]*fwhm_lim_mult[0], + fwhm_guess[ii] * fwhm_lim_mult[0], + fwhm_guess[ii] * fwhm_lim_mult[0], + 0.0, 0.0, - 0.0 ] p0tmp_ub[ii, :] = [ - amp_guess*amp_lim_mult[1], + amp_guess * amp_lim_mult[1], pk_pos_0[ii] + center_bnd[ii], - fwhm_guess[ii]*fwhm_lim_mult[1], - fwhm_guess[ii]*fwhm_lim_mult[1], + fwhm_guess[ii] * fwhm_lim_mult[1], + fwhm_guess[ii] * fwhm_lim_mult[1], + 1.0, 1.0, - 1.0 ] elif pktype == 'pink_beam_dcs': # x is just 2theta values @@ -503,24 +504,24 @@ def estimate_mpk_parms_1d( fwhm_guess[ii], ] p0tmp_lb[ii, :] = [ - amp_guess*amp_lim_mult[0], + amp_guess * amp_lim_mult[0], pk_pos_0[ii] - center_bnd[ii], -1e5, -1e5, -1e5, -1e5, - fwhm_guess[ii]*fwhm_lim_mult[0], - fwhm_guess[ii]*fwhm_lim_mult[0], + fwhm_guess[ii] * fwhm_lim_mult[0], + fwhm_guess[ii] * fwhm_lim_mult[0], ] p0tmp_ub[ii, :] = [ - amp_guess*amp_lim_mult[1], + amp_guess * amp_lim_mult[1], pk_pos_0[ii] + center_bnd[ii], 1e5, 1e5, 1e5, 1e5, - fwhm_guess[ii]*fwhm_lim_mult[1], - fwhm_guess[ii]*fwhm_lim_mult[1], + fwhm_guess[ii] * fwhm_lim_mult[1], + fwhm_guess[ii] * fwhm_lim_mult[1], ] num_pk_parms = len(p0tmp.ravel()) @@ -627,10 +628,10 @@ def fit_pk_obj_1d(p, x, f0, pktype): f = pkfuncs.tanh_stepdown_nobg(p, x) elif pktype == 'dcs_pinkbeam': f = pkfuncs.pink_beam_dcs(p, x) - ww = 1./np.sqrt(f0) + ww = 1.0 / np.sqrt(f0) ww[np.isnan(ww)] = 0.0 - resd = (f - f0)*ww + resd = (f - f0) * ww return resd @@ -645,7 +646,7 @@ def fit_pk_obj_1d_bnded(p, x, f0, pktype, weight, lb, ub): f = pkfuncs.split_pvoigt1d(p, x) elif pktype == 'dcs_pinkbeam': f = pkfuncs.pink_beam_dcs(p, 
x) - ww = 1./np.sqrt(f0) + ww = 1.0 / np.sqrt(f0) ww[np.isnan(ww)] = 0.0 num_data = len(f) @@ -656,8 +657,9 @@ resd[:num_data] = f - f0 for ii in range(num_parm): if lb[ii] is not None: - resd[num_data + ii] = \ - weight*np.max([-(p[ii] - lb[ii]), 0., (p[ii] - ub[ii])]) + resd[num_data + ii] = weight * np.max( + [-(p[ii] - lb[ii]), 0.0, (p[ii] - ub[ii])] + ) return resd @@ -700,14 +702,16 @@ """ bg0 = np.mean([f[0, 0], f[-1, 0], f[-1, -1], f[0, -1]]) - bg1x = (np.mean([f[-1, -1], f[0, -1]]) - np.mean([f[0, 0], f[-1, 0]])) \ - / (x[0, -1] - x[0, 0]) - bg1y = (np.mean([f[-1, -1], f[-1, 0]]) - np.mean([f[0, 0], f[0, -1]])) \ - / (y[-1, 0] - y[0, 0]) + bg1x = (np.mean([f[-1, -1], f[0, -1]]) - np.mean([f[0, 0], f[-1, 0]])) / ( + x[0, -1] - x[0, 0] + ) + bg1y = (np.mean([f[-1, -1], f[-1, 0]]) - np.mean([f[0, 0], f[0, -1]])) / ( + y[-1, 0] - y[0, 0] + ) fnobg = f - (bg0 + bg1x * x + bg1y * y) - labels, numlabels = imgproc.label(fnobg > 0.5*np.max(fnobg)) + labels, numlabels = imgproc.label(fnobg > 0.5 * np.max(fnobg)) # looks for the largest peak areas = np.zeros(numlabels) @@ -727,10 +731,12 @@ if pktype == 'gaussian': p = [A, x0, y0, FWHMx, FWHMy, bg0, bg1x, bg1y] elif pktype == 'gaussian_rot': - p = [A, x0, y0, FWHMx, FWHMy, 0., bg0, bg1x, bg1y] + p = [A, x0, y0, FWHMx, FWHMy, 0.0, bg0, bg1x, bg1y] elif pktype == 'split_pvoigt_rot': + # fmt: off p = [A, x0, y0, FWHMx, FWHMx, FWHMy, FWHMy, 0.5, 0.5, 0.5, 0.5, 0., bg0, bg1x, bg1y] + # fmt: on p = np.array(p) return p @@ -831,15 +837,15 @@ """ - R = np.sum((f - f0)**2) / np.sum(f0**2) - Rw = np.sum(np.abs(f0 * (f - f0)**2)) / np.sum(np.abs(f0**3)) + R = np.sum((f - f0) ** 2) / np.sum(f0**2) + Rw = np.sum(np.abs(f0 * (f - f0) ** 2)) / np.sum(np.abs(f0**3)) return R, Rw -def direct_pk_analysis(x, f, - remove_bg=True, low_int=1., - edge_pts=3, 
pts_per_meas=100): +def direct_pk_analysis( + x, f, remove_bg=True, low_int=1.0, edge_pts=3, pts_per_meas=100 +): """ Analyze a single peak that is not well matched to any analytic functions @@ -873,12 +879,12 @@ def direct_pk_analysis(x, f, # subtract background, assumed linear if remove_bg: - bg_data = np.hstack((f[:(edge_pts+1)], f[-edge_pts:])) - bg_pts = np.hstack((x[:(edge_pts+1)], x[-edge_pts:])) + bg_data = np.hstack((f[: (edge_pts + 1)], f[-edge_pts:])) + bg_pts = np.hstack((x[: (edge_pts + 1)], x[-edge_pts:])) bg_parm = np.polyfit(bg_pts, bg_data, 1) - f = f - (bg_parm[0]*x + bg_parm[1]) # pull out high background + f = f - (bg_parm[0] * x + bg_parm[1]) # pull out high background f = f - np.min(f) # set the minimum to 0 @@ -886,7 +892,7 @@ def direct_pk_analysis(x, f, plt.plot(x, f, 'r') # make a fine grid of points - spacing = np.diff(x)[0]/pts_per_meas + spacing = np.diff(x)[0] / pts_per_meas xfine = np.arange(np.min(x), np.max(x) + spacing, spacing) ffine = np.interp(xfine, x, f) @@ -901,16 +907,16 @@ def direct_pk_analysis(x, f, # center of mass calculation # !!! this cutoff value is arbitrary, maybe set higher? - if(total_int < low_int): + if total_int < low_int: com = float('NaN') FWHM = float('NaN') total_int = total_int print('Analysis Failed... Intensity too low') else: - com = np.sum(xfine*ffine)/np.sum(ffine) + com = np.sum(xfine * ffine) / np.sum(ffine) - a = np.abs(ffine[cen_index+1:]-A/2.) - b = np.abs(ffine[:cen_index]-A/2.) 
+ a = np.abs(ffine[cen_index + 1 :] - A / 2.0) + b = np.abs(ffine[:cen_index] - A / 2.0) # this is a check to see if the peak is falling out of the bnds if a.size == 0 or b.size == 0: @@ -962,32 +968,28 @@ def calc_pk_integrated_intensities(p, x, pktype, num_pks): ints = np.zeros(num_pks) if pktype == 'gaussian' or pktype == 'lorentzian': - p_fit = np.reshape(p[:3*num_pks], [num_pks, 3]) + p_fit = np.reshape(p[: 3 * num_pks], [num_pks, 3]) elif pktype == 'pvoigt': - p_fit = np.reshape(p[:4*num_pks], [num_pks, 4]) + p_fit = np.reshape(p[: 4 * num_pks], [num_pks, 4]) elif pktype == 'split_pvoigt': - p_fit = np.reshape(p[:6*num_pks], [num_pks, 6]) + p_fit = np.reshape(p[: 6 * num_pks], [num_pks, 6]) for ii in np.arange(num_pks): if pktype == 'gaussian': ints[ii] = integrate.simpson( - pkfuncs._gaussian1d_no_bg(p_fit[ii], x), - x + pkfuncs._gaussian1d_no_bg(p_fit[ii], x), x ) elif pktype == 'lorentzian': ints[ii] = integrate.simpson( - pkfuncs._lorentzian1d_no_bg(p_fit[ii], x), - x + pkfuncs._lorentzian1d_no_bg(p_fit[ii], x), x ) elif pktype == 'pvoigt': ints[ii] = integrate.simpson( - pkfuncs._pvoigt1d_no_bg(p_fit[ii], x), - x + pkfuncs._pvoigt1d_no_bg(p_fit[ii], x), x ) elif pktype == 'split_pvoigt': ints[ii] = integrate.simpson( - pkfuncs._split_pvoigt1d_no_bg(p_fit[ii], x), - x + pkfuncs._split_pvoigt1d_no_bg(p_fit[ii], x), x ) return ints diff --git a/hexrd/core/fitting/grains.py b/hexrd/core/fitting/grains.py new file mode 100644 index 000000000..034bfe8c0 --- /dev/null +++ b/hexrd/core/fitting/grains.py @@ -0,0 +1,407 @@ +"""Grain fitting functions""" + +import numpy as np + +from scipy import optimize + +from hexrd.core import matrixutil as mutil + +from hexrd.core.transforms import xfcapi +from hexrd.core import constants +from hexrd.core import rotations + +from hexrd.hedm.xrdutil import extract_detector_transformation + +return_value_flag = None + +epsf = np.finfo(float).eps # ~2.2e-16 +sqrt_epsf = np.sqrt(epsf) # ~1.5e-8 + +bVec_ref = constants.beam_vec 
+eta_ref = constants.eta_vec +vInv_ref = np.r_[1.0, 1.0, 1.0, 0.0, 0.0, 0.0] + + +# for grain parameters +gFlag_ref = np.ones(12, dtype=bool) +gScl_ref = np.ones(12, dtype=bool) + + +def fitGrain( + gFull, + instrument, + reflections_dict, + bMat, + wavelength, + gFlag=gFlag_ref, + gScl=gScl_ref, + omePeriod=None, + factor=0.1, + xtol=sqrt_epsf, + ftol=sqrt_epsf, +): + """ + Perform least-squares optimization of grain parameters. + + Parameters + ---------- + gFull : TYPE + DESCRIPTION. + instrument : TYPE + DESCRIPTION. + reflections_dict : TYPE + DESCRIPTION. + bMat : TYPE + DESCRIPTION. + wavelength : TYPE + DESCRIPTION. + gFlag : TYPE, optional + DESCRIPTION. The default is gFlag_ref. + gScl : TYPE, optional + DESCRIPTION. The default is gScl_ref. + omePeriod : TYPE, optional + DESCRIPTION. The default is None. + factor : TYPE, optional + DESCRIPTION. The default is 0.1. + xtol : TYPE, optional + DESCRIPTION. The default is sqrt_epsf. + ftol : TYPE, optional + DESCRIPTION. The default is sqrt_epsf. + + Raises + ------ + RuntimeError + DESCRIPTION. + + Returns + ------- + retval : TYPE + DESCRIPTION. + + """ + # FIXME: will currently fail if omePeriod is specifed + if omePeriod is not None: + # xyo_det[:, 2] = rotations.mapAngle(xyo_det[:, 2], omePeriod) + raise RuntimeError + + gFit = gFull[gFlag] + + fitArgs = ( + gFull, + gFlag, + instrument, + reflections_dict, + bMat, + wavelength, + omePeriod, + ) + results = optimize.leastsq( + objFuncFitGrain, + gFit, + args=fitArgs, + diag=1.0 / gScl[gFlag].flatten(), + factor=0.1, + xtol=xtol, + ftol=ftol, + ) + + gFit_opt = results[0] + + retval = gFull + retval[gFlag] = gFit_opt + return retval + + +def objFuncFitGrain( + gFit, + gFull, + gFlag, + instrument, + reflections_dict, + bMat, + wavelength, + omePeriod, + simOnly=False, + return_value_flag=return_value_flag, +): + """ + Calculate residual between measured and simulated ff-HEDM G-vectors. 
+ + gFull[0] = expMap_c[0] + gFull[1] = expMap_c[1] + gFull[2] = expMap_c[2] + gFull[3] = tVec_c[0] + gFull[4] = tVec_c[1] + gFull[5] = tVec_c[2] + gFull[6] = vInv_MV[0] + gFull[7] = vInv_MV[1] + gFull[8] = vInv_MV[2] + gFull[9] = vInv_MV[3] + gFull[10] = vInv_MV[4] + gFull[11] = vInv_MV[5] + + OLD CALL + objFuncFitGrain(gFit, gFull, gFlag, + detectorParams, + xyo_det, hkls_idx, bMat, wavelength, + bVec, eVec, + dFunc, dParams, + omePeriod, + simOnly=False, return_value_flag=return_value_flag) + + Parameters + ---------- + gFit : TYPE + DESCRIPTION. + gFull : TYPE + DESCRIPTION. + gFlag : TYPE + DESCRIPTION. + instrument : TYPE + DESCRIPTION. + reflections_dict : TYPE + DESCRIPTION. + bMat : TYPE + DESCRIPTION. + wavelength : TYPE + DESCRIPTION. + omePeriod : TYPE + DESCRIPTION. + simOnly : TYPE, optional + DESCRIPTION. The default is False. + return_value_flag : TYPE, optional + DESCRIPTION. The default is return_value_flag. + + Raises + ------ + RuntimeError + DESCRIPTION. + + Returns + ------- + retval : TYPE + DESCRIPTION. + + """ + bVec = instrument.beam_vector + eVec = instrument.eta_vector + + # fill out parameters + gFull[gFlag] = gFit + + # map parameters to functional arrays + rMat_c = xfcapi.make_rmat_of_expmap(gFull[:3]) + tVec_c = gFull[3:6].reshape(3, 1) + vInv_s = gFull[6:] + vMat_s = mutil.vecMVToSymm(vInv_s) # NOTE: Inverse of V from F = V * R + + # loop over instrument panels + # CAVEAT: keeping track of key ordering in the "detectors" attribute of + # instrument here because I am not sure if instatiating them using + # dict.fromkeys() preserves the same order if using iteration... 
+ # + calc_omes_dict = dict.fromkeys(instrument.detectors, []) + calc_xy_dict = dict.fromkeys(instrument.detectors) + meas_xyo_all = [] + det_keys_ordered = [] + for det_key, panel in instrument.detectors.items(): + det_keys_ordered.append(det_key) + + rMat_d, tVec_d, chi, tVec_s = extract_detector_transformation( + instrument.detector_parameters[det_key] + ) + + results = reflections_dict[det_key] + if len(results) == 0: + continue + + """ + extract data from results list fields: + refl_id, gvec_id, hkl, sum_int, max_int, pred_ang, meas_ang, meas_xy + + or array from spots tables: + 0:5 ID PID H K L + 5:7 sum(int) max(int) + 7:10 pred tth pred eta pred ome + 10:13 meas tth meas eta meas ome + 13:15 pred X pred Y + 15:17 meas X meas Y + """ + if isinstance(results, list): + # WARNING: hkls and derived vectors below must be columnwise; + # strictly necessary??? change affected APIs instead? + # + hkls = np.atleast_2d(np.vstack([x[2] for x in results])).T + + meas_xyo = np.atleast_2d( + np.vstack([np.r_[x[7], x[6][-1]] for x in results]) + ) + elif isinstance(results, np.ndarray): + hkls = np.atleast_2d(results[:, 2:5]).T + meas_xyo = np.atleast_2d(results[:, [15, 16, 12]]) + + # distortion handling + if panel.distortion is not None: + meas_omes = meas_xyo[:, 2] + xy_unwarped = panel.distortion.apply(meas_xyo[:, :2]) + meas_xyo = np.vstack([xy_unwarped.T, meas_omes]).T + + # append to meas_omes + meas_xyo_all.append(meas_xyo) + + # G-vectors: + # 1. calculate full g-vector components in CRYSTAL frame from B + # 2. rotate into SAMPLE frame and apply stretch + # 3. rotate back into CRYSTAL frame and normalize to unit magnitude + # IDEA: make a function for this sequence of operations with option for + # choosing ouput frame (i.e. 
CRYSTAL vs SAMPLE vs LAB) + gVec_c = np.dot(bMat, hkls) + gVec_s = np.dot(vMat_s, np.dot(rMat_c, gVec_c)) + gHat_c = mutil.unitVector(np.dot(rMat_c.T, gVec_s)) + + # !!!: check that this operates on UNWARPED xy + match_omes, calc_omes = matchOmegas( + meas_xyo, + hkls, + chi, + rMat_c, + bMat, + wavelength, + vInv=vInv_s, + beamVec=bVec, + etaVec=eVec, + omePeriod=omePeriod, + ) + + # append to omes dict + calc_omes_dict[det_key] = calc_omes + + # TODO: try Numba implementations + rMat_s = xfcapi.make_sample_rmat(chi, calc_omes) + calc_xy = xfcapi.gvec_to_xy( + gHat_c.T, + rMat_d, + rMat_s, + rMat_c, + tVec_d, + tVec_s, + tVec_c, + beam_vec=bVec, + ) + + # append to xy dict + calc_xy_dict[det_key] = calc_xy + + # stack results to concatenated arrays + calc_omes_all = np.hstack([calc_omes_dict[k] for k in det_keys_ordered]) + tmp = [] + for k in det_keys_ordered: + if calc_xy_dict[k] is not None: + tmp.append(calc_xy_dict[k]) + calc_xy_all = np.vstack(tmp) + meas_xyo_all = np.vstack(meas_xyo_all) + + npts = len(meas_xyo_all) + if np.any(np.isnan(calc_xy)): + raise RuntimeError( + "infeasible pFull: may want to scale" + + "back finite difference step size" + ) + + # return values + if simOnly: + # return simulated values + if return_value_flag in [None, 1]: + retval = np.hstack([calc_xy_all, calc_omes_all.reshape(npts, 1)]) + else: + rd = dict.fromkeys(det_keys_ordered) + for det_key in det_keys_ordered: + rd[det_key] = { + 'calc_xy': calc_xy_dict[det_key], + 'calc_omes': calc_omes_dict[det_key], + } + retval = rd + else: + # return residual vector + # IDEA: try angles instead of xys? 
+ diff_vecs_xy = calc_xy_all - meas_xyo_all[:, :2] + diff_ome = rotations.angularDifference( + calc_omes_all, meas_xyo_all[:, 2] + ) + retval = np.hstack([diff_vecs_xy, diff_ome.reshape(npts, 1)]).flatten() + if return_value_flag == 1: + # return scalar sum of squared residuals + retval = sum(abs(retval)) + elif return_value_flag == 2: + # return DOF-normalized chisq + # TODO: check this calculation + denom = 3 * npts - len(gFit) - 1.0 + if denom != 0: + nu_fac = 1.0 / denom + else: + nu_fac = 1.0 + retval = nu_fac * sum(retval**2) + return retval + + +def matchOmegas( + xyo_det, + hkls_idx, + chi, + rMat_c, + bMat, + wavelength, + vInv=vInv_ref, + beamVec=bVec_ref, + etaVec=eta_ref, + omePeriod=None, +): + """ + For a given list of (x, y, ome) points, outputs the index into the results + from oscillAnglesOfHKLs, including the calculated omega values. + """ + # get omegas for rMat_s calculation + if omePeriod is not None: + meas_omes = rotations.mapAngle(xyo_det[:, 2], omePeriod) + else: + meas_omes = xyo_det[:, 2] + + oangs0, oangs1 = xfcapi.oscill_angles_of_hkls( + hkls_idx.T, + chi, + rMat_c, + bMat, + wavelength, + v_inv=vInv, + beam_vec=beamVec, + eta_vec=etaVec, + ) + if np.any(np.isnan(oangs0)): + # debugging + # TODO: remove this + # import pdb + # pdb.set_trace() + nanIdx = np.where(np.isnan(oangs0[:, 0]))[0] + errorString = "Infeasible parameters for hkls:\n" + for i in range(len(nanIdx)): + errorString += "%d %d %d\n" % tuple(hkls_idx[:, nanIdx[i]]) + errorString += "you may need to deselect this hkl family." + raise RuntimeError(errorString) + else: + # CAPI version gives vstacked angles... 
must be (2, nhkls) + calc_omes = np.vstack([oangs0[:, 2], oangs1[:, 2]]) + if omePeriod is not None: + calc_omes = np.vstack( + [ + rotations.mapAngle(oangs0[:, 2], omePeriod), + rotations.mapAngle(oangs1[:, 2], omePeriod), + ] + ) + # do angular difference + diff_omes = rotations.angularDifference( + np.tile(meas_omes, (2, 1)), calc_omes + ) + match_omes = np.argsort(diff_omes, axis=0) == 0 + calc_omes = calc_omes.T.flatten()[match_omes.T.flatten()] + + return match_omes, calc_omes diff --git a/hexrd/fitting/peakfunctions.py b/hexrd/core/fitting/peakfunctions.py similarity index 81% rename from hexrd/fitting/peakfunctions.py rename to hexrd/core/fitting/peakfunctions.py index 40d74c3dd..08e657029 100644 --- a/hexrd/fitting/peakfunctions.py +++ b/hexrd/core/fitting/peakfunctions.py @@ -28,12 +28,16 @@ import numpy as np from numba import njit import copy -from hexrd import constants -from hexrd.constants import \ - c_erf, cnum_exp1exp, cden_exp1exp, c_coeff_exp1exp +from hexrd.core import constants +from hexrd.core.constants import ( + c_erf, + cnum_exp1exp, + cden_exp1exp, + c_coeff_exp1exp, +) gauss_width_fact = constants.sigma_to_fwhm -lorentz_width_fact = 2. +lorentz_width_fact = 2.0 # FIXME: we need this for the time being to be able to parse multipeak fitting # results; need to wrap all this up in a class in the future! @@ -42,7 +46,7 @@ 'lorentzian': 3, 'pvoigt': 4, 'split_pvoigt': 6, - 'pink_beam_dcs': 8 + 'pink_beam_dcs': 8, } """ @@ -66,10 +70,12 @@ def erfc(x): a1, a2, a3, a4, a5, p = c_erf # A&S formula 7.1.26 - t = 1.0/(1.0 + p*x) - y = 1. - (((((a5*t + a4)*t) + a3)*t + a2)*t + a1)*t*np.exp(-x*x) - erf = sign*y # erf(-x) = -erf(x) - return 1. 
- erf + t = 1.0 / (1.0 + p * x) + y = 1.0 - (((((a5 * t + a4) * t) + a3) * t + a2) * t + a1) * t * np.exp( + -x * x + ) + erf = sign * y # erf(-x) = -erf(x) + return 1.0 - erf """ @@ -84,10 +90,10 @@ def erfc(x): def exp1exp_under1(x): f = np.zeros(x.shape).astype(np.complex128) for i in range(6): - xx = x**(i+1) - f += c_coeff_exp1exp[i]*xx + xx = x ** (i + 1) + f += c_coeff_exp1exp[i] * xx - return (f - np.log(x) - np.euler_gamma)*np.exp(x) + return (f - np.log(x) - np.euler_gamma) * np.exp(x) """ @@ -105,21 +111,21 @@ def exp1exp_over1(x): den = np.zeros(x.shape).astype(np.complex128) for i in range(11): - p = 10-i + p = 10 - i if p != 0: xx = x**p - num += cnum_exp1exp[i]*xx - den += cden_exp1exp[i]*xx + num += cnum_exp1exp[i] * xx + den += cden_exp1exp[i] * xx else: num += cnum_exp1exp[i] den += cden_exp1exp[i] - return (num/den)*(1./x) + return (num / den) * (1.0 / x) @njit(cache=True, nogil=True) def exp1exp(x): - mask = np.sign(x.real)*np.abs(x) > 1. + mask = np.sign(x.real) * np.abs(x) > 1.0 f = np.zeros(x.shape).astype(np.complex128) f[mask] = exp1exp_over1(x[mask]) @@ -127,6 +133,7 @@ def exp1exp(x): return f + # ============================================================================= # 1-D Gaussian Functions # ============================================================================= @@ -145,9 +152,9 @@ def _unit_gaussian(p, x): x0 = p[0] FWHM = p[1] - sigma = FWHM/gauss_width_fact + sigma = FWHM / gauss_width_fact - f = np.exp(-(x - x0)**2/(2.*sigma**2.)) + f = np.exp(-((x - x0) ** 2) / (2.0 * sigma**2.0)) return f @@ -162,7 +169,7 @@ def _gaussian1d_no_bg(p, x): """ A = p[0] - f = A*_unit_gaussian(p[[1, 2]], x) + f = A * _unit_gaussian(p[[1, 2]], x) return f @@ -179,7 +186,7 @@ def gaussian1d(p, x): bg0 = p[3] bg1 = p[4] - f = _gaussian1d_no_bg(p[:3], x) + bg0 + bg1*x + f = _gaussian1d_no_bg(p[:3], x) + bg0 + bg1 * x return f @@ -197,12 +204,15 @@ def _gaussian1d_no_bg_deriv(p, x): x0 = p[1] FWHM = p[2] - sigma = FWHM/gauss_width_fact + sigma 
= FWHM / gauss_width_fact - dydx0 = _gaussian1d_no_bg(p, x)*((x - x0)/(sigma**2.)) + dydx0 = _gaussian1d_no_bg(p, x) * ((x - x0) / (sigma**2.0)) dydA = _unit_gaussian(p[[1, 2]], x) - dydFWHM = _gaussian1d_no_bg(p, x) \ - * ((x - x0)**2./(sigma**3.))/gauss_width_fact + dydFWHM = ( + _gaussian1d_no_bg(p, x) + * ((x - x0) ** 2.0 / (sigma**3.0)) + / gauss_width_fact + ) d_mat = np.zeros((len(p), len(x))) @@ -225,7 +235,7 @@ def gaussian1d_deriv(p, x): d_mat = np.zeros((len(p), len(x))) d_mat[0:3, :] = _gaussian1d_no_bg_deriv(p[0:3], x) - d_mat[3, :] = 1. + d_mat[3, :] = 1.0 d_mat[4, :] = x return d_mat @@ -247,9 +257,9 @@ def _unit_lorentzian(p, x): x0 = p[0] FWHM = p[1] - gamma = FWHM/lorentz_width_fact + gamma = FWHM / lorentz_width_fact - f = gamma**2 / ((x-x0)**2 + gamma**2) + f = gamma**2 / ((x - x0) ** 2 + gamma**2) return f @@ -264,7 +274,7 @@ def _lorentzian1d_no_bg(p, x): """ A = p[0] - f = A*_unit_lorentzian(p[[1, 2]], x) + f = A * _unit_lorentzian(p[[1, 2]], x) return f @@ -282,7 +292,7 @@ def lorentzian1d(p, x): bg0 = p[3] bg1 = p[4] - f = _lorentzian1d_no_bg(p[:3], x)+bg0+bg1*x + f = _lorentzian1d_no_bg(p[:3], x) + bg0 + bg1 * x return f @@ -300,12 +310,17 @@ def _lorentzian1d_no_bg_deriv(p, x): x0 = p[1] FWHM = p[2] - gamma = FWHM/lorentz_width_fact + gamma = FWHM / lorentz_width_fact - dydx0 = _lorentzian1d_no_bg(p, x)*((2.*(x-x0))/((x-x0)**2 + gamma**2)) + dydx0 = _lorentzian1d_no_bg(p, x) * ( + (2.0 * (x - x0)) / ((x - x0) ** 2 + gamma**2) + ) dydA = _unit_lorentzian(p[[1, 2]], x) - dydFWHM = _lorentzian1d_no_bg(p, x) \ - * ((2.*(x-x0)**2.)/(gamma*((x-x0)**2 + gamma**2)))/lorentz_width_fact + dydFWHM = ( + _lorentzian1d_no_bg(p, x) + * ((2.0 * (x - x0) ** 2.0) / (gamma * ((x - x0) ** 2 + gamma**2))) + / lorentz_width_fact + ) d_mat = np.zeros((len(p), len(x))) d_mat[0, :] = dydA @@ -327,7 +342,7 @@ def lorentzian1d_deriv(p, x): d_mat = np.zeros((len(p), len(x))) d_mat[0:3, :] = _lorentzian1d_no_bg_deriv(p[0:3], x) - d_mat[3, :] = 1. 
+ d_mat[3, :] = 1.0 d_mat[4, :] = x return d_mat @@ -337,6 +352,7 @@ def lorentzian1d_deriv(p, x): # 1-D Psuedo Voigt Functions # ============================================================================= + # Split the unit function so this can be called for 2d and 3d functions def _unit_pvoigt1d(p, x): """ @@ -350,7 +366,7 @@ def _unit_pvoigt1d(p, x): n = p[2] - f = (n*_unit_gaussian(p[:2], x)+(1.-n)*_unit_lorentzian(p[:2], x)) + f = n * _unit_gaussian(p[:2], x) + (1.0 - n) * _unit_lorentzian(p[:2], x) return f @@ -365,7 +381,7 @@ def _pvoigt1d_no_bg(p, x): """ A = p[0] - f = A*_unit_pvoigt1d(p[[1, 2, 3]], x) + f = A * _unit_pvoigt1d(p[[1, 2, 3]], x) return f @@ -382,7 +398,7 @@ def pvoigt1d(p, x): bg0 = p[4] bg1 = p[5] - f = _pvoigt1d_no_bg(p[:4], x) + bg0 + bg1*x + f = _pvoigt1d_no_bg(p[:4], x) + bg0 + bg1 * x return f @@ -391,6 +407,7 @@ def pvoigt1d(p, x): # 1-D Split Psuedo Voigt Functions # ============================================================================= + def _split_pvoigt1d_no_bg(p, x): """ Required Arguments: @@ -413,11 +430,11 @@ def _split_pvoigt1d_no_bg(p, x): # + right = np.where(xr)[0] - f[right] = A*_unit_pvoigt1d(p[[1, 3, 5]], x[right]) + f[right] = A * _unit_pvoigt1d(p[[1, 3, 5]], x[right]) # - left = np.where(xl)[0] - f[left] = A*_unit_pvoigt1d(p[[1, 2, 4]], x[left]) + f[left] = A * _unit_pvoigt1d(p[[1, 2, 4]], x[left]) return f @@ -435,7 +452,7 @@ def split_pvoigt1d(p, x): bg0 = p[6] bg1 = p[7] - f = _split_pvoigt1d_no_bg(p[:6], x) + bg0 + bg1*x + f = _split_pvoigt1d_no_bg(p[:6], x) + bg0 + bg1 * x return f @@ -460,13 +477,13 @@ def split_pvoigt1d(p, x): @njit(cache=True, nogil=True) def _calc_alpha(alpha, x0): a0, a1 = alpha - return (a0 + a1*np.tan(np.radians(0.5*x0))) + return a0 + a1 * np.tan(np.radians(0.5 * x0)) @njit(cache=True, nogil=True) def _calc_beta(beta, x0): b0, b1 = beta - return b0 + b1*np.tan(np.radians(0.5*x0)) + return b0 + b1 * np.tan(np.radians(0.5 * x0)) @njit(cache=True, nogil=True) @@ -481,20 +498,25 @@ 
def _mixing_factor_pv(fwhm_g, fwhm_l): @DETAILS: calculates the mixing factor eta to best approximate voight peak shapes """ - fwhm = fwhm_g**5 + 2.69269 * fwhm_g**4 * fwhm_l + \ - 2.42843 * fwhm_g**3 * fwhm_l**2 + \ - 4.47163 * fwhm_g**2 * fwhm_l**3 +\ - 0.07842 * fwhm_g * fwhm_l**4 +\ - fwhm_l**5 + fwhm = ( + fwhm_g**5 + + 2.69269 * fwhm_g**4 * fwhm_l + + 2.42843 * fwhm_g**3 * fwhm_l**2 + + 4.47163 * fwhm_g**2 * fwhm_l**3 + + 0.07842 * fwhm_g * fwhm_l**4 + + fwhm_l**5 + ) fwhm = fwhm**0.20 - eta = 1.36603 * (fwhm_l/fwhm) - \ - 0.47719 * (fwhm_l/fwhm)**2 + \ - 0.11116 * (fwhm_l/fwhm)**3 - if eta < 0.: - eta = 0. - elif eta > 1.: - eta = 1. + eta = ( + 1.36603 * (fwhm_l / fwhm) + - 0.47719 * (fwhm_l / fwhm) ** 2 + + 0.11116 * (fwhm_l / fwhm) ** 3 + ) + if eta < 0.0: + eta = 0.0 + elif eta > 1.0: + eta = 1.0 return eta, fwhm @@ -518,15 +540,15 @@ def _gaussian_pink_beam(p, x): del_tth = x - x0 sigsqr = fwhm_g**2 - f1 = alpha*sigsqr + 2.0*del_tth - f2 = beta*sigsqr - 2.0*del_tth - f3 = np.sqrt(2.0)*fwhm_g + f1 = alpha * sigsqr + 2.0 * del_tth + f2 = beta * sigsqr - 2.0 * del_tth + f3 = np.sqrt(2.0) * fwhm_g - u = 0.5*alpha*f1 - v = 0.5*beta*f2 + u = 0.5 * alpha * f1 + v = 0.5 * beta * f2 - y = (f1-del_tth)/f3 - z = (f2+del_tth)/f3 + y = (f1 - del_tth) / f3 + z = (f2 + del_tth) / f3 t1 = erfc(y) t2 = erfc(z) @@ -534,11 +556,11 @@ def _gaussian_pink_beam(p, x): g = np.zeros(x.shape) zmask = np.abs(del_tth) > 5.0 - g[~zmask] = \ - (0.5*(alpha*beta)/(alpha + beta)) * np.exp(u[~zmask])*t1[~zmask] \ - + np.exp(v[~zmask])*t2[~zmask] + g[~zmask] = (0.5 * (alpha * beta) / (alpha + beta)) * np.exp( + u[~zmask] + ) * t1[~zmask] + np.exp(v[~zmask]) * t2[~zmask] mask = np.isnan(g) - g[mask] = 0. 
+ g[mask] = 0.0 g *= A / g.max() return g @@ -562,19 +584,19 @@ def _lorentzian_pink_beam(p, x): del_tth = x - x0 - p = -alpha*del_tth + 1j*0.5*alpha*fwhm_l - q = -beta*del_tth + 1j*0.5*beta*fwhm_l + p = -alpha * del_tth + 1j * 0.5 * alpha * fwhm_l + q = -beta * del_tth + 1j * 0.5 * beta * fwhm_l y = np.zeros(x.shape) f1 = exp1exp(p) f2 = exp1exp(q) - y = -(alpha*beta)/(np.pi*(alpha + beta))*(f1 + f2).imag + y = -(alpha * beta) / (np.pi * (alpha + beta)) * (f1 + f2).imag mask = np.isnan(y) - y[mask] = 0. + y[mask] = 0.0 ymax = y.max() - y *= A/ymax + y *= A / ymax return y @@ -608,7 +630,7 @@ def _pink_beam_dcs_no_bg(p, x): G = _gaussian_pink_beam(p_g, x) L = _lorentzian_pink_beam(p_l, x) - return eta*L + (1. - eta)*G + return eta * L + (1.0 - eta) * G def pink_beam_dcs(p, x): @@ -622,11 +644,12 @@ def pink_beam_dcs(p, x): p has the following 10 parameters p = [A, x0, alpha0, alpha1, beta0, beta1, fwhm_g, fwhm_l, bkg_c0, bkg_c1] """ - return _pink_beam_dcs_no_bg(p[:-2], x) + p[-2] + p[-1]*x + return _pink_beam_dcs_no_bg(p[:-2], x) + p[-2] + p[-1] * x def pink_beam_dcs_lmfit( - x, A, x0, alpha0, alpha1, beta0, beta1, fwhm_g, fwhm_l): + x, A, x0, alpha0, alpha1, beta0, beta1, fwhm_g, fwhm_l +): """ @author Saransh Singh, Lawrence Livermore National Lab @date 10/18/2021 SS 1.0 original @@ -648,7 +671,7 @@ def pink_beam_dcs_lmfit( G = _gaussian_pink_beam(p_g, x) L = _lorentzian_pink_beam(p_l, x) - return eta*L + (1. 
- eta)*G + return eta * L + (1.0 - eta) * G """ @@ -676,7 +699,7 @@ def tanh_stepdown_nobg(p, x): x0 = p[1] w = p[2] - f = A*(0.5*(1.-np.tanh((x-x0)/w))) + f = A * (0.5 * (1.0 - np.tanh((x - x0) / w))) return f @@ -685,12 +708,13 @@ def tanh_stepdown_nobg(p, x): # 2-D Rotation Coordinate Transform # ============================================================================= + def _2d_coord_transform(theta, x0, y0, x, y): - xprime = np.cos(theta)*x+np.sin(theta)*y - yprime = -np.sin(theta)*x+np.cos(theta)*y + xprime = np.cos(theta) * x + np.sin(theta) * y + yprime = -np.sin(theta) * x + np.cos(theta) * y - x0prime = np.cos(theta)*x0+np.sin(theta)*y0 - y0prime = -np.sin(theta)*x0+np.cos(theta)*y0 + x0prime = np.cos(theta) * x0 + np.sin(theta) * y0 + y0prime = -np.sin(theta) * x0 + np.cos(theta) * y0 return x0prime, y0prime, xprime, yprime @@ -699,6 +723,7 @@ def _2d_coord_transform(theta, x0, y0, x, y): # 2-D Gaussian Function # ============================================================================= + def _gaussian2d_no_bg(p, x, y): """ Required Arguments: @@ -711,7 +736,7 @@ def _gaussian2d_no_bg(p, x, y): """ A = p[0] - f = A*_unit_gaussian(p[[1, 3]], x)*_unit_gaussian(p[[2, 4]], y) + f = A * _unit_gaussian(p[[1, 3]], x) * _unit_gaussian(p[[2, 4]], y) return f @@ -729,7 +754,8 @@ def _gaussian2d_rot_no_bg(p, x, y): theta = p[5] x0prime, y0prime, xprime, yprime = _2d_coord_transform( - theta, p[1], p[2], x, y) + theta, p[1], p[2], x, y + ) # this copy was needed so original parameters set isn't changed newp = copy.copy(p) @@ -757,7 +783,7 @@ def gaussian2d_rot(p, x, y): bg1x = p[7] bg1y = p[8] - f = _gaussian2d_rot_no_bg(p[:6], x, y)+(bg0+bg1x*x+bg1y*y) + f = _gaussian2d_rot_no_bg(p[:6], x, y) + (bg0 + bg1x * x + bg1y * y) return f @@ -776,7 +802,7 @@ def gaussian2d(p, x, y): bg1x = p[6] bg1y = p[7] - f = _gaussian2d_no_bg(p[:5], x, y)+(bg0+bg1x*x+bg1y*y) + f = _gaussian2d_no_bg(p[:5], x, y) + (bg0 + bg1x * x + bg1y * y) return f @@ -784,6 +810,7 @@ def 
gaussian2d(p, x, y): # 2-D Split Psuedo Voigt Function # ============================================================================= + def _split_pvoigt2d_no_bg(p, x, y): """ Required Arguments: @@ -809,23 +836,35 @@ def _split_pvoigt2d_no_bg(p, x, y): # ++ q1 = np.where(xr & yr) - f[q1] = A*_unit_pvoigt1d(p[[1, 4, 8]], x[q1]) * \ - _unit_pvoigt1d(p[[2, 6, 10]], y[q1]) + f[q1] = ( + A + * _unit_pvoigt1d(p[[1, 4, 8]], x[q1]) + * _unit_pvoigt1d(p[[2, 6, 10]], y[q1]) + ) # +- q2 = np.where(xr & yl) - f[q2] = A*_unit_pvoigt1d(p[[1, 4, 8]], x[q2]) * \ - _unit_pvoigt1d(p[[2, 5, 9]], y[q2]) + f[q2] = ( + A + * _unit_pvoigt1d(p[[1, 4, 8]], x[q2]) + * _unit_pvoigt1d(p[[2, 5, 9]], y[q2]) + ) # -+ q3 = np.where(xl & yr) - f[q3] = A*_unit_pvoigt1d(p[[1, 3, 7]], x[q3]) * \ - _unit_pvoigt1d(p[[2, 6, 10]], y[q3]) + f[q3] = ( + A + * _unit_pvoigt1d(p[[1, 3, 7]], x[q3]) + * _unit_pvoigt1d(p[[2, 6, 10]], y[q3]) + ) # -- q4 = np.where(xl & yl) - f[q4] = A*_unit_pvoigt1d(p[[1, 3, 7]], x[q4]) * \ - _unit_pvoigt1d(p[[2, 5, 9]], y[q4]) + f[q4] = ( + A + * _unit_pvoigt1d(p[[1, 3, 7]], x[q4]) + * _unit_pvoigt1d(p[[2, 5, 9]], y[q4]) + ) return f @@ -844,7 +883,8 @@ def _split_pvoigt2d_rot_no_bg(p, x, y): theta = p[11] x0prime, y0prime, xprime, yprime = _2d_coord_transform( - theta, p[1], p[2], x, y) + theta, p[1], p[2], x, y + ) # this copy was needed so original parameters set isn't changed newp = copy.copy(p) @@ -873,7 +913,7 @@ def split_pvoigt2d_rot(p, x, y): bg1x = p[13] bg1y = p[14] - f = _split_pvoigt2d_rot_no_bg(p[:12], x, y)+(bg0+bg1x*x+bg1y*y) + f = _split_pvoigt2d_rot_no_bg(p[:12], x, y) + (bg0 + bg1x * x + bg1y * y) return f @@ -882,6 +922,7 @@ def split_pvoigt2d_rot(p, x, y): # 3-D Gaussian Function # ============================================================================= + def _gaussian3d_no_bg(p, x, y, z): """ Required Arguments: @@ -895,9 +936,12 @@ def _gaussian3d_no_bg(p, x, y, z): """ A = p[0] - f = A * _unit_gaussian(p[[1, 4]], x) \ - * _unit_gaussian(p[[2, 5]], 
y) \ + f = ( + A + * _unit_gaussian(p[[1, 4]], x) + * _unit_gaussian(p[[2, 5]], y) * _unit_gaussian(p[[3, 6]], z) + ) return f @@ -918,7 +962,7 @@ def gaussian3d(p, x, y, z): bg1y = p[9] bg1z = p[10] - f = _gaussian3d_no_bg(p[:5], x, y)+(bg0+bg1x*x+bg1y*y+bg1z*z) + f = _gaussian3d_no_bg(p[:5], x, y) + (bg0 + bg1x * x + bg1y * y + bg1z * z) return f @@ -926,6 +970,7 @@ def gaussian3d(p, x, y, z): # Mutlipeak # ============================================================================= + def _mpeak_1d_no_bg(p, x, pktype, num_pks): """ Required Arguments: @@ -948,7 +993,7 @@ def _mpeak_1d_no_bg(p, x, pktype, num_pks): npp = mpeak_nparams_dict[pktype] - p_fit = np.reshape(p[:npp*num_pks], [num_pks, npp]) + p_fit = np.reshape(p[: npp * num_pks], [num_pks, npp]) for ii in np.arange(num_pks): if pktype == 'gaussian': @@ -987,10 +1032,12 @@ def mpeak_1d(p, x, pktype, num_pks, bgtype=None): f = _mpeak_1d_no_bg(p, x, pktype, num_pks) if bgtype == 'linear': - f = f+p[-2]+p[-1]*x # c0=p[-2], c1=p[-1] + f = f + p[-2] + p[-1] * x # c0=p[-2], c1=p[-1] elif bgtype == 'constant': - f = f+p[-1] # c0=p[-1] + f = f + p[-1] # c0=p[-1] elif bgtype == 'quadratic': - f = f+p[-3]+p[-2]*x+p[-1]*x**2 # c0=p[-3], c1=p[-2], c2=p[-1], + f = ( + f + p[-3] + p[-2] * x + p[-1] * x**2 + ) # c0=p[-3], c1=p[-2], c2=p[-1], return f diff --git a/hexrd/fitting/spectrum.py b/hexrd/core/fitting/spectrum.py similarity index 79% rename from hexrd/fitting/spectrum.py rename to hexrd/core/fitting/spectrum.py index 7e6d60f75..264f9f9e4 100644 --- a/hexrd/fitting/spectrum.py +++ b/hexrd/core/fitting/spectrum.py @@ -3,20 +3,23 @@ from lmfit import Model, Parameters -from hexrd.constants import fwhm_to_sigma -from hexrd.imageutil import snip1d - -from .utils import (_calc_alpha, _calc_beta, - _mixing_factor_pv, - _gaussian_pink_beam, - _lorentzian_pink_beam, - _parameter_arg_constructor, - _extract_parameters_by_name, - _set_bound_constraints, - _set_refinement_by_name, - _set_width_mixing_bounds, - 
_set_equality_constraints, - _set_peak_center_bounds) +from hexrd.core.constants import fwhm_to_sigma +from hexrd.core.imageutil import snip1d + +from .utils import ( + _calc_alpha, + _calc_beta, + _mixing_factor_pv, + _gaussian_pink_beam, + _lorentzian_pink_beam, + _parameter_arg_constructor, + _extract_parameters_by_name, + _set_bound_constraints, + _set_refinement_by_name, + _set_width_mixing_bounds, + _set_equality_constraints, + _set_peak_center_bounds, +) # ============================================================================= # PARAMETERS @@ -27,16 +30,22 @@ 'lorentzian': ['amp', 'cen', 'fwhm'], 'pvoigt': ['amp', 'cen', 'fwhm', 'mixing'], 'split_pvoigt': ['amp', 'cen', 'fwhm_l', 'fwhm_h', 'mixing_l', 'mixing_h'], - 'pink_beam_dcs': ['amp', 'cen', - 'alpha0', 'alpha1', - 'beta0', 'beta1', - 'fwhm_g', 'fwhm_l'], + 'pink_beam_dcs': [ + 'amp', + 'cen', + 'alpha0', + 'alpha1', + 'beta0', + 'beta1', + 'fwhm_g', + 'fwhm_l', + ], 'constant': ['c0'], 'linear': ['c0', 'c1'], 'quadratic': ['c0', 'c1', 'c2'], 'cubic': ['c0', 'c1', 'c2', 'c3'], 'quartic': ['c0', 'c1', 'c2', 'c3', 'c4'], - 'quintic': ['c0', 'c1', 'c2', 'c3', 'c4', 'c5'] + 'quintic': ['c0', 'c1', 'c2', 'c3', 'c4', 'c5'], } num_func_params = dict.fromkeys(_function_dict_1d) @@ -69,25 +78,19 @@ def constant_bkg(x, c0): def linear_bkg(x, c0, c1): # return c0 + c1*x - cheb_cls = chebyshev.Chebyshev( - [c0, c1], domain=(min(x), max(x)) - ) + cheb_cls = chebyshev.Chebyshev([c0, c1], domain=(min(x), max(x))) return cheb_cls(x) def quadratic_bkg(x, c0, c1, c2): # return c0 + c1*x + c2*x**2 - cheb_cls = chebyshev.Chebyshev( - [c0, c1, c2], domain=(min(x), max(x)) - ) + cheb_cls = chebyshev.Chebyshev([c0, c1, c2], domain=(min(x), max(x))) return cheb_cls(x) def cubic_bkg(x, c0, c1, c2, c3): # return c0 + c1*x + c2*x**2 + c3*x**3 - cheb_cls = chebyshev.Chebyshev( - [c0, c1, c2, c3], domain=(min(x), max(x)) - ) + cheb_cls = chebyshev.Chebyshev([c0, c1, c2, c3], domain=(min(x), max(x))) return cheb_cls(x) @@ 
-113,24 +116,27 @@ def chebyshev_bkg(x, *args): def gaussian_1d(x, amp, cen, fwhm): - return amp * np.exp(-(x - cen)**2 / (2*(fwhm_to_sigma*fwhm)**2)) + return amp * np.exp(-((x - cen) ** 2) / (2 * (fwhm_to_sigma * fwhm) ** 2)) def lorentzian_1d(x, amp, cen, fwhm): - return amp * (0.5*fwhm)**2 / ((x - cen)**2 + (0.5*fwhm)**2) + return amp * (0.5 * fwhm) ** 2 / ((x - cen) ** 2 + (0.5 * fwhm) ** 2) def pvoigt_1d(x, amp, cen, fwhm, mixing): - return mixing*gaussian_1d(x, amp, cen, fwhm) \ - + (1 - mixing)*lorentzian_1d(x, amp, cen, fwhm) + return mixing * gaussian_1d(x, amp, cen, fwhm) + ( + 1 - mixing + ) * lorentzian_1d(x, amp, cen, fwhm) def split_pvoigt_1d(x, amp, cen, fwhm_l, fwhm_h, mixing_l, mixing_h): idx_l = x <= cen idx_h = x > cen return np.concatenate( - [pvoigt_1d(x[idx_l], amp, cen, fwhm_l, mixing_l), - pvoigt_1d(x[idx_h], amp, cen, fwhm_h, mixing_h)] + [ + pvoigt_1d(x[idx_l], amp, cen, fwhm_l, mixing_l), + pvoigt_1d(x[idx_h], amp, cen, fwhm_h, mixing_h), + ] ) @@ -156,18 +162,24 @@ def pink_beam_dcs(x, amp, cen, alpha0, alpha1, beta0, beta1, fwhm_g, fwhm_l): G = _gaussian_pink_beam(p_g, x) L = _lorentzian_pink_beam(p_l, x) - return eta*L + (1. - eta)*G + return eta * L + (1.0 - eta) * G def _amplitude_guess(x, x0, y, fwhm): - pt_l = np.argmin(np.abs(x - (x0 - 0.5*fwhm))) - pt_h = np.argmin(np.abs(x - (x0 + 0.5*fwhm))) - return np.max(y[pt_l:pt_h + 1]) - - -def _initial_guess(peak_positions, x, f, - pktype='pvoigt', bgtype='linear', - fwhm_guess=None, min_ampl=0.): + pt_l = np.argmin(np.abs(x - (x0 - 0.5 * fwhm))) + pt_h = np.argmin(np.abs(x - (x0 + 0.5 * fwhm))) + return np.max(y[pt_l : pt_h + 1]) + + +def _initial_guess( + peak_positions, + x, + f, + pktype='pvoigt', + bgtype='linear', + fwhm_guess=None, + min_ampl=0.0, +): """ Generate function-specific estimate for multi-peak parameters. 
@@ -197,22 +209,20 @@ def _initial_guess(peak_positions, x, f, num_pks = len(peak_positions) if fwhm_guess is None: - fwhm_guess = (np.max(x) - np.min(x))/(20.*num_pks) + fwhm_guess = (np.max(x) - np.min(x)) / (20.0 * num_pks) fwhm_guess = np.atleast_1d(fwhm_guess) - if(len(fwhm_guess) < 2): - fwhm_guess = fwhm_guess*np.ones(num_pks) + if len(fwhm_guess) < 2: + fwhm_guess = fwhm_guess * np.ones(num_pks) # estimate background with snip1d # !!! using a window size based on abcissa bkg = snip1d( np.atleast_2d(f), - w=int(np.floor(len(f)/num_pks/2.)), + w=int(np.floor(len(f) / num_pks / 2.0)), max_workers=1, ).flatten() - bkg_mod = chebyshev.Chebyshev( - [0., 0.], domain=(min(x), max(x)) - ) + bkg_mod = chebyshev.Chebyshev([0.0, 0.0], domain=(min(x), max(x))) fit_bkg = bkg_mod.fit(x, bkg, 1) coeff = fit_bkg.coef @@ -237,7 +247,7 @@ def _initial_guess(peak_positions, x, f, pkparams[ii, :] = [ max(amp_guess, min_ampl), peak_positions[ii], - fwhm_guess[ii] + fwhm_guess[ii], ] elif pktype == 'pvoigt': # x is just 2theta values @@ -250,7 +260,7 @@ def _initial_guess(peak_positions, x, f, max(amp_guess, min_ampl), peak_positions[ii], fwhm_guess[ii], - 0.5 + 0.5, ] elif pktype == 'split_pvoigt': # x is just 2theta values @@ -265,7 +275,7 @@ def _initial_guess(peak_positions, x, f, fwhm_guess[ii], fwhm_guess[ii], 0.5, - 0.5 + 0.5, ] elif pktype == 'pink_beam_dcs': # x is just 2theta values @@ -292,6 +302,7 @@ def _initial_guess(peak_positions, x, f, return np.hstack([pkparams.flatten(), bgparams]) + # ============================================================================= # MODELS # ============================================================================= @@ -334,9 +345,16 @@ def _build_composite_model(npeaks=1, pktype='gaussian', bgtype='linear'): class SpectrumModel(object): - def __init__(self, data, peak_centers, - pktype='pvoigt', bgtype='linear', - fwhm_init=None, min_ampl=1e-4, min_pk_sep=pk_sep_min): + def __init__( + self, + data, + peak_centers, + 
pktype='pvoigt', + bgtype='linear', + fwhm_init=None, + min_ampl=1e-4, + min_pk_sep=pk_sep_min, + ): """ Instantiates spectrum model. @@ -362,10 +380,12 @@ def __init__(self, data, peak_centers, """ # peak and background spec - assert pktype in _function_dict_1d.keys(), \ + assert pktype in _function_dict_1d.keys(), ( "peak type '%s' not recognized" % pktype - assert bgtype in _function_dict_1d.keys(), \ + ) + assert bgtype in _function_dict_1d.keys(), ( "background type '%s' not recognized" % bgtype + ) self._pktype = pktype self._bgtype = bgtype @@ -374,10 +394,12 @@ def __init__(self, data, peak_centers, # spectrum data data = np.atleast_2d(data) - assert data.shape[1] == 2, \ - "data must be [[tth_0, int_0], ..., [tth_N, int_N]" - assert len(data > 10), \ - "check your input spectrum; you provided fewer than 10 points." + assert ( + data.shape[1] == 2 + ), "data must be [[tth_0, int_0], ..., [tth_N, int_N]" + assert len( + data > 10 + ), "check your input spectrum; you provided fewer than 10 points." 
self._data = data xdata, ydata = data.T @@ -388,7 +410,7 @@ def __init__(self, data, peak_centers, num_peaks = len(peak_centers) if fwhm_init is None: - fwhm_init = np.diff(window_range)/(20.*num_peaks) + fwhm_init = np.diff(window_range) / (20.0 * num_peaks) self._min_pk_sep = min_pk_sep @@ -399,9 +421,13 @@ def __init__(self, data, peak_centers, self._model = spectrum_model p0 = _initial_guess( - self._tth0, xdata, ydata, - pktype=self._pktype, bgtype=self._bgtype, - fwhm_guess=fwhm_init, min_ampl=min_ampl + self._tth0, + xdata, + ydata, + pktype=self._pktype, + bgtype=self._bgtype, + fwhm_guess=fwhm_init, + min_ampl=min_ampl, ) psplit = num_func_params[bgtype] p0_pks = np.reshape(p0[:-psplit], (num_peaks, num_func_params[pktype])) @@ -421,10 +447,10 @@ def __init__(self, data, peak_centers, _set_width_mixing_bounds( initial_params_pks, min_w=fwhm_min, - max_w=0.9*float(np.diff(window_range)) + max_w=0.9 * float(np.diff(window_range)), ) _set_bound_constraints( - initial_params_pks, 'amp', min_val=min_ampl, max_val=1.5*ymax + initial_params_pks, 'amp', min_val=min_ampl, max_val=1.5 * ymax ) _set_peak_center_bounds( initial_params_pks, window_range, min_sep=min_pk_sep @@ -436,8 +462,10 @@ def __init__(self, data, peak_centers, _set_refinement_by_name(initial_params_pks, 'beta', vary=False) _set_equality_constraints( initial_params_pks, - zip(_extract_parameters_by_name(initial_params_pks, 'fwhm_g'), - _extract_parameters_by_name(initial_params_pks, 'fwhm_l')) + zip( + _extract_parameters_by_name(initial_params_pks, 'fwhm_g'), + _extract_parameters_by_name(initial_params_pks, 'fwhm_l'), + ), ) elif pktype == 'split_pvoigt': mparams = _extract_parameters_by_name( @@ -445,22 +473,21 @@ def __init__(self, data, peak_centers, ) for mp in mparams[1:]: _set_equality_constraints( - initial_params_pks, ((mp, mparams[0]), ) + initial_params_pks, ((mp, mparams[0]),) ) mparams = _extract_parameters_by_name( initial_params_pks, 'mixing_h' ) for mp in mparams[1:]: 
_set_equality_constraints( - initial_params_pks, ((mp, mparams[0]), ) + initial_params_pks, ((mp, mparams[0]),) ) # background initial_params_bkg = Parameters() initial_params_bkg.add_many( *_parameter_arg_constructor( - dict(zip(master_keys_bkg, p0_bkg)), - param_hints_DFLT + dict(zip(master_keys_bkg, p0_bkg)), param_hints_DFLT ) ) @@ -523,28 +550,27 @@ def fit(self): _set_refinement_by_name(new_p, 'beta', vary=True) _set_equality_constraints(new_p, 'alpha') _set_equality_constraints(new_p, 'beta') - _set_bound_constraints( - new_p, 'alpha', min_val=-10, max_val=30 - ) - _set_bound_constraints( - new_p, 'beta', min_val=-10, max_val=30 - ) + _set_bound_constraints(new_p, 'alpha', min_val=-10, max_val=30) + _set_bound_constraints(new_p, 'beta', min_val=-10, max_val=30) _set_width_mixing_bounds( new_p, min_w=fwhm_min, - max_w=0.9*float(np.diff(window_range)) + max_w=0.9 * float(np.diff(window_range)), ) # !!! not sure on this, but it seems # to give more stable results with many peaks _set_equality_constraints( new_p, - zip(_extract_parameters_by_name(new_p, 'fwhm_g'), - _extract_parameters_by_name(new_p, 'fwhm_l')) + zip( + _extract_parameters_by_name(new_p, 'fwhm_g'), + _extract_parameters_by_name(new_p, 'fwhm_l'), + ), ) try: - _set_peak_center_bounds(new_p, window_range, - min_sep=self.min_pk_sep) - except(RuntimeError): + _set_peak_center_bounds( + new_p, window_range, min_sep=self.min_pk_sep + ) + except RuntimeError: return res0 # refit diff --git a/hexrd/fitting/utils.py b/hexrd/core/fitting/utils.py similarity index 73% rename from hexrd/fitting/utils.py rename to hexrd/core/fitting/utils.py index 47f72c953..52995a616 100644 --- a/hexrd/fitting/utils.py +++ b/hexrd/core/fitting/utils.py @@ -3,10 +3,13 @@ import numpy as np from numba import njit -from hexrd.constants import ( - c_erf, cnum_exp1exp, cden_exp1exp, c_coeff_exp1exp +from hexrd.core.constants import ( + c_erf, + cnum_exp1exp, + cden_exp1exp, + c_coeff_exp1exp, ) -from hexrd.matrixutil import 
uniqueVectors +from hexrd.core.matrixutil import uniqueVectors # ============================================================================= @@ -42,23 +45,27 @@ def _set_equality_constraints(params, pname_spec): raise RuntimeWarning("Only 1 parameter found; exiting") else: for name_pair in pname_spec: - assert len(name_pair) == 2, \ - "entries in name spec must be 2-tuples" + assert len(name_pair) == 2, "entries in name spec must be 2-tuples" params[name_pair[0]].expr = name_pair[1] -def _set_bound_constraints(params, pname_spec, - min_val=-np.inf, max_val=np.inf, - box=None, percentage=False): +def _set_bound_constraints( + params, + pname_spec, + min_val=-np.inf, + max_val=np.inf, + box=None, + percentage=False, +): target_pnames = _extract_parameters_by_name(params, pname_spec) for pname in target_pnames: if box is None: params[pname].min = min_val params[pname].max = max_val else: - hval = 0.5*box + hval = 0.5 * box if percentage: - hval = 0.5*abs(params[pname].value*(box/100.)) + hval = 0.5 * abs(params[pname].value * (box / 100.0)) params[pname].min = params[pname].value - hval params[pname].max = params[pname].value + hval @@ -69,8 +76,8 @@ def _set_width_mixing_bounds(params, min_w=0.01, max_w=np.inf): param.min = min_w param.max = max_w if 'mixing' in pname: - param.min = 0. - param.max = 1. 
+ param.min = 0.0 + param.max = 1.0 def _set_peak_center_bounds(params, window_range, min_sep=0.01): @@ -109,11 +116,13 @@ def _set_peak_center_bounds(params, window_range, min_sep=0.01): for ip, pname in enumerate(sorted_pnames[1:]): curr_peak = params[pname] new_pname = 'pksep%d' % ip - params.add(name=new_pname, - value=curr_peak.value - prev_peak.value, - min=min_sep, - max=window_range[1] - window_range[0], - vary=True) + params.add( + name=new_pname, + value=curr_peak.value - prev_peak.value, + min=min_sep, + max=window_range[1] - window_range[0], + vary=True, + ) curr_peak.expr = '+'.join([prev_peak.name, new_pname]) prev_peak = curr_peak else: @@ -148,10 +157,12 @@ def erfc(x): a1, a2, a3, a4, a5, p = c_erf # A&S formula 7.1.26 - t = 1.0/(1.0 + p*x) - y = 1. - (((((a5*t + a4)*t) + a3)*t + a2)*t + a1)*t*np.exp(-x*x) - erf = sign*y # erf(-x) = -erf(x) - return 1. - erf + t = 1.0 / (1.0 + p * x) + y = 1.0 - (((((a5 * t + a4) * t) + a3) * t + a2) * t + a1) * t * np.exp( + -x * x + ) + erf = sign * y # erf(-x) = -erf(x) + return 1.0 - erf """ @@ -166,10 +177,10 @@ def erfc(x): def exp1exp_under1(x): f = np.zeros(x.shape).astype(np.complex128) for i in range(6): - xx = x**(i+1) - f += c_coeff_exp1exp[i]*xx + xx = x ** (i + 1) + f += c_coeff_exp1exp[i] * xx - return (f - np.log(x) - np.euler_gamma)*np.exp(x) + return (f - np.log(x) - np.euler_gamma) * np.exp(x) """ @@ -187,21 +198,21 @@ def exp1exp_over1(x): den = np.zeros(x.shape).astype(np.complex128) for i in range(11): - p = 10-i + p = 10 - i if p != 0: xx = x**p - num += cnum_exp1exp[i]*xx - den += cden_exp1exp[i]*xx + num += cnum_exp1exp[i] * xx + den += cden_exp1exp[i] * xx else: num += cnum_exp1exp[i] den += cden_exp1exp[i] - return (num/den)*(1./x) + return (num / den) * (1.0 / x) @njit(cache=True, nogil=True) def exp1exp(x): - mask = np.sign(x.real)*np.abs(x) > 1. 
+ mask = np.sign(x.real) * np.abs(x) > 1.0 f = np.zeros(x.shape).astype(np.complex128) f[mask] = exp1exp_over1(x[mask]) @@ -213,13 +224,13 @@ def exp1exp(x): @njit(cache=True, nogil=True) def _calc_alpha(alpha, x0): a0, a1 = alpha - return (a0 + a1*np.tan(np.radians(0.5*x0))) + return a0 + a1 * np.tan(np.radians(0.5 * x0)) @njit(cache=True, nogil=True) def _calc_beta(beta, x0): b0, b1 = beta - return b0 + b1*np.tan(np.radians(0.5*x0)) + return b0 + b1 * np.tan(np.radians(0.5 * x0)) @njit(cache=True, nogil=True) @@ -234,20 +245,25 @@ def _mixing_factor_pv(fwhm_g, fwhm_l): @DETAILS: calculates the mixing factor eta to best approximate voight peak shapes """ - fwhm = fwhm_g**5 + 2.69269 * fwhm_g**4 * fwhm_l + \ - 2.42843 * fwhm_g**3 * fwhm_l**2 + \ - 4.47163 * fwhm_g**2 * fwhm_l**3 +\ - 0.07842 * fwhm_g * fwhm_l**4 +\ - fwhm_l**5 + fwhm = ( + fwhm_g**5 + + 2.69269 * fwhm_g**4 * fwhm_l + + 2.42843 * fwhm_g**3 * fwhm_l**2 + + 4.47163 * fwhm_g**2 * fwhm_l**3 + + 0.07842 * fwhm_g * fwhm_l**4 + + fwhm_l**5 + ) fwhm = fwhm**0.20 - eta = 1.36603 * (fwhm_l/fwhm) - \ - 0.47719 * (fwhm_l/fwhm)**2 + \ - 0.11116 * (fwhm_l/fwhm)**3 - if eta < 0.: - eta = 0. - elif eta > 1.: - eta = 1. 
+ eta = ( + 1.36603 * (fwhm_l / fwhm) + - 0.47719 * (fwhm_l / fwhm) ** 2 + + 0.11116 * (fwhm_l / fwhm) ** 3 + ) + if eta < 0.0: + eta = 0.0 + elif eta > 1.0: + eta = 1.0 return eta, fwhm @@ -271,15 +287,15 @@ def _gaussian_pink_beam(p, x): del_tth = x - x0 sigsqr = fwhm_g**2 - f1 = alpha*sigsqr + 2.0*del_tth - f2 = beta*sigsqr - 2.0*del_tth - f3 = np.sqrt(2.0)*fwhm_g + f1 = alpha * sigsqr + 2.0 * del_tth + f2 = beta * sigsqr - 2.0 * del_tth + f3 = np.sqrt(2.0) * fwhm_g - u = 0.5*alpha*f1 - v = 0.5*beta*f2 + u = 0.5 * alpha * f1 + v = 0.5 * beta * f2 - y = (f1-del_tth)/f3 - z = (f2+del_tth)/f3 + y = (f1 - del_tth) / f3 + z = (f2 + del_tth) / f3 t1 = erfc(y) t2 = erfc(z) @@ -287,12 +303,12 @@ def _gaussian_pink_beam(p, x): g = np.zeros(x.shape) zmask = np.abs(del_tth) > 5.0 - g[~zmask] = \ - (0.5*(alpha*beta)/(alpha + beta)) * np.exp(u[~zmask])*t1[~zmask] \ - + np.exp(v[~zmask])*t2[~zmask] + g[~zmask] = (0.5 * (alpha * beta) / (alpha + beta)) * np.exp( + u[~zmask] + ) * t1[~zmask] + np.exp(v[~zmask]) * t2[~zmask] mask = np.isnan(g) - g[mask] = 0. + g[mask] = 0.0 g *= A return g @@ -316,28 +332,30 @@ def _lorentzian_pink_beam(p, x): del_tth = x - x0 - p = -alpha*del_tth + 1j*0.5*alpha*fwhm_l - q = -beta*del_tth + 1j*0.5*beta*fwhm_l + p = -alpha * del_tth + 1j * 0.5 * alpha * fwhm_l + q = -beta * del_tth + 1j * 0.5 * beta * fwhm_l y = np.zeros(x.shape) f1 = exp1exp(p) f2 = exp1exp(q) - y = -(alpha*beta)/(np.pi*(alpha + beta))*(f1 + f2).imag + y = -(alpha * beta) / (np.pi * (alpha + beta)) * (f1 + f2).imag mask = np.isnan(y) - y[mask] = 0. 
+ y[mask] = 0.0 y *= A return y + # ============================================================================= # pseudo-Voigt # ============================================================================= -def fit_ring(tth_centers, lineout, tth_pred, spectrum_kwargs, - int_cutoff, fit_tth_tol): +def fit_ring( + tth_centers, lineout, tth_pred, spectrum_kwargs, int_cutoff, fit_tth_tol +): # tth_centers and tth_pred should be in degrees. # The returned tth_meas is in degrees as well. @@ -349,26 +367,26 @@ def fit_ring(tth_centers, lineout, tth_pred, spectrum_kwargs, npeaks = len(tth_pred) # spectrum fitting - sm = SpectrumModel( - spec_data, tth_pred, - **spectrum_kwargs - ) + sm = SpectrumModel(spec_data, tth_pred, **spectrum_kwargs) fit_results = sm.fit() if not fit_results.success: return - fit_params = np.vstack([ - (fit_results.best_values['pk%d_amp' % i], - fit_results.best_values['pk%d_cen' % i]) - for i in range(npeaks) - ]).T + fit_params = np.vstack( + [ + ( + fit_results.best_values['pk%d_amp' % i], + fit_results.best_values['pk%d_cen' % i], + ) + for i in range(npeaks) + ] + ).T pk_amp, tth_meas = fit_params # !!! this is where we can kick out bunk fits - center_err = 100*abs(tth_meas/tth_pred - 1.) 
+ center_err = 100 * abs(tth_meas / tth_pred - 1.0) failed_fit_heuristic = np.logical_or( - pk_amp < int_cutoff, - center_err > fit_tth_tol + pk_amp < int_cutoff, center_err > fit_tth_tol ) if np.any(failed_fit_heuristic): return diff --git a/hexrd/gridutil.py b/hexrd/core/gridutil.py similarity index 83% rename from hexrd/gridutil.py rename to hexrd/core/gridutil.py index e1b0303a7..c2d0c9d9f 100644 --- a/hexrd/gridutil.py +++ b/hexrd/core/gridutil.py @@ -29,8 +29,7 @@ from numpy.linalg import det import numba -from hexrd.constants import sqrt_epsf - +from hexrd.core.constants import sqrt_epsf def cellIndices(edges, points_1d): @@ -95,13 +94,13 @@ def cellIndices(edges, points_1d): def _fill_connectivity(out, m, n, p): i_con = 0 for k in range(p): - extra = k*(n+1)*(m+1) + extra = k * (n + 1) * (m + 1) for j in range(m): for i in range(n): - out[i_con, 0] = i + j*(n + 1) + 1 + extra - out[i_con, 1] = i + j*(n + 1) + extra - out[i_con, 2] = i + j + n*(j+1) + 1 + extra - out[i_con, 3] = i + j + n*(j+1) + 2 + extra + out[i_con, 0] = i + j * (n + 1) + 1 + extra + out[i_con, 1] = i + j * (n + 1) + extra + out[i_con, 2] = i + j + n * (j + 1) + 1 + extra + out[i_con, 3] = i + j + n * (j + 1) + 2 + extra i_con += 1 @@ -113,14 +112,14 @@ def cellConnectivity(m, n, p=1, origin='ul'): choice will affect handedness (cw or ccw) """ - nele = p*m*n + nele = p * m * n con = np.empty((nele, 4), dtype=int) _fill_connectivity(con, m, n, p) if p > 1: - nele = m*n*(p-1) - tmp_con3 = con.reshape((p, m*n, 4)) + nele = m * n * (p - 1) + tmp_con3 = con.reshape((p, m * n, 4)) hex_con = [] for layer in range(p - 1): hex_con.append(np.hstack([tmp_con3[layer], tmp_con3[layer + 1]])) @@ -135,7 +134,7 @@ def cellCentroids(crd, con): nele, conn_count = con.shape dim = crd.shape[1] out = np.empty((nele, dim)) - inv_conn = 1.0/conn_count + inv_conn = 1.0 / conn_count for i in range(nele): for j in range(dim): acc = 0.0 @@ -155,13 +154,13 @@ def compute_areas(xy_eval_vtx, conn): for i in 
range(len(conn)): vtx0x, vtx0y = xy_eval_vtx[conn[i, 0]] vtx1x, vtx1y = xy_eval_vtx[conn[i, 1]] - v0x, v0y = vtx1x-vtx0x, vtx1y-vtx0y + v0x, v0y = vtx1x - vtx0x, vtx1y - vtx0y acc = 0 for j in range(2, 4): vtx_x, vtx_y = xy_eval_vtx[conn[i, j]] v1x = vtx_x - vtx0x v1y = vtx_y - vtx0y - acc += v0x*v1y - v1x*v0y + acc += v0x * v1y - v1x * v0y areas[i] = 0.5 * acc return areas @@ -179,24 +178,32 @@ def computeArea(polygon): area = 0 for [s1, s2] in triv: tvp = np.diff( - np.hstack([polygon[s1, :], - polygon[s2, :]]), axis=0).flatten() + np.hstack([polygon[s1, :], polygon[s2, :]]), axis=0 + ).flatten() area += 0.5 * np.cross(tvp[:2], tvp[2:]) return area -def make_tolerance_grid(bin_width, window_width, num_subdivisions, - adjust_window=False, one_sided=False): +def make_tolerance_grid( + bin_width, + window_width, + num_subdivisions, + adjust_window=False, + one_sided=False, +): bin_width = min(bin_width, window_width) if adjust_window: - window_width = np.ceil(window_width/bin_width)*bin_width + window_width = np.ceil(window_width / bin_width) * bin_width if one_sided: - ndiv = abs(int(window_width/bin_width)) - grid = (np.arange(0, 2*ndiv+1) - ndiv)*bin_width + ndiv = abs(int(window_width / bin_width)) + grid = (np.arange(0, 2 * ndiv + 1) - ndiv) * bin_width ndiv *= 2 else: - ndiv = int(num_subdivisions*np.ceil(window_width/float(bin_width))) - grid = np.arange(0, ndiv+1)*window_width/float(ndiv) - 0.5*window_width + ndiv = int(num_subdivisions * np.ceil(window_width / float(bin_width))) + grid = ( + np.arange(0, ndiv + 1) * window_width / float(ndiv) + - 0.5 * window_width + ) return ndiv, grid @@ -221,15 +228,15 @@ def computeIntersection(line1, line2): [x3, y3] = line2[0] [x4, y4] = line2[1] - denom = (x1-x2)*(y3-y4) - (y1-y2)*(x3-x4) + denom = (x1 - x2) * (y3 - y4) - (y1 - y2) * (x3 - x4) if denom == 0: return [] - subterm1 = x1*y2 - y1*x2 - subterm2 = x3*y4 - y3*x4 + subterm1 = x1 * y2 - y1 * x2 + subterm2 = x3 * y4 - y3 * x4 - intersection[0] = 
(subterm1*(x3-x4) - subterm2*(x1-x2)) / denom - intersection[1] = (subterm1*(y3-y4) - subterm2*(y1-y2)) / denom + intersection[0] = (subterm1 * (x3 - x4) - subterm2 * (x1 - x2)) / denom + intersection[1] = (subterm1 * (y3 - y4) - subterm2 * (y1 - y2)) / denom return intersection @@ -237,8 +244,8 @@ def isinside(point, boundary, ccw=True): """ Assumes CCW boundary ordering """ - pointPositionVector = np.hstack([point - boundary[0, :], 0.]) - boundaryVector = np.hstack([boundary[1, :] - boundary[0, :], 0.]) + pointPositionVector = np.hstack([point - boundary[0, :], 0.0]) + boundaryVector = np.hstack([boundary[1, :] - boundary[0, :], 0.0]) crossVector = np.cross(pointPositionVector, boundaryVector) @@ -272,10 +279,7 @@ def sutherlandHodgman(subjectPolygon, clipPolygon): curr_clipVertex = clipPolygon[iClip, :] - clipBoundary = np.vstack( - [curr_clipVertex, - prev_clipVertex] - ) + clipBoundary = np.vstack([curr_clipVertex, prev_clipVertex]) inputList = np.array(outputList) if len(inputList) > 0: diff --git a/hexrd/imageseries/__init__.py b/hexrd/core/imageseries/__init__.py similarity index 99% rename from hexrd/imageseries/__init__.py rename to hexrd/core/imageseries/__init__.py index 68dede415..143945e75 100644 --- a/hexrd/imageseries/__init__.py +++ b/hexrd/core/imageseries/__init__.py @@ -4,6 +4,7 @@ and a function for loading. Adapters for particular data formats are managed in the "load" subpackage. """ + from .baseclass import ImageSeries from . import imageseriesabc from . import load @@ -12,6 +13,7 @@ from . import process from . 
import omega + def open(filename, format=None, **kwargs): # find the appropriate adapter based on format specified reg = load.Registry.adapter_registry @@ -21,4 +23,5 @@ def open(filename, format=None, **kwargs): raise RuntimeError("zero length imageseries") return ims + write = save.write diff --git a/hexrd/imageseries/baseclass.py b/hexrd/core/imageseries/baseclass.py similarity index 96% rename from hexrd/imageseries/baseclass.py rename to hexrd/core/imageseries/baseclass.py index 8d0181d5e..775dc655f 100644 --- a/hexrd/imageseries/baseclass.py +++ b/hexrd/core/imageseries/baseclass.py @@ -1,5 +1,5 @@ -"""Base class for imageseries -""" +"""Base class for imageseries""" + import numpy as np from .imageseriesabc import ImageSeriesABC, RegionType diff --git a/hexrd/imageseries/imageseriesabc.py b/hexrd/core/imageseries/imageseriesabc.py similarity index 99% rename from hexrd/imageseries/imageseriesabc.py rename to hexrd/core/imageseries/imageseriesabc.py index 65ef7771b..7864b3476 100644 --- a/hexrd/imageseries/imageseriesabc.py +++ b/hexrd/core/imageseries/imageseriesabc.py @@ -1,4 +1,5 @@ """Abstract Base Class""" + import collections.abc # Type for extracting regions diff --git a/hexrd/imageseries/imageseriesiter.py b/hexrd/core/imageseries/imageseriesiter.py similarity index 99% rename from hexrd/imageseries/imageseriesiter.py rename to hexrd/core/imageseries/imageseriesiter.py index 9d4855981..868daa90c 100644 --- a/hexrd/imageseries/imageseriesiter.py +++ b/hexrd/core/imageseries/imageseriesiter.py @@ -2,6 +2,7 @@ For use by adapter classes. 
""" + import collections.abc diff --git a/hexrd/imageseries/load/__init__.py b/hexrd/core/imageseries/load/__init__.py similarity index 71% rename from hexrd/imageseries/load/__init__.py rename to hexrd/core/imageseries/load/__init__.py index 3f891781e..d5d3d7721 100644 --- a/hexrd/imageseries/load/__init__.py +++ b/hexrd/core/imageseries/load/__init__.py @@ -7,27 +7,37 @@ # Metaclass for adapter registry + class _RegisterAdapterClass(abc.ABCMeta): def __init__(cls, name, bases, attrs): abc.ABCMeta.__init__(cls, name, bases, attrs) Registry.register(cls) + class ImageSeriesAdapter(ImageSeriesABC, metaclass=_RegisterAdapterClass): format = None def get_region(self, frame_idx: int, region: RegionType) -> np.ndarray: r = region - return self[frame_idx][r[0][0]:r[0][1], r[1][0]:r[1][1]] + return self[frame_idx][r[0][0] : r[0][1], r[1][0] : r[1][1]] def __getitem__(self, _): pass + # import all adapter modules from . import ( - array, framecache, function, hdf5, imagefiles, rawimage, metadata, trivial + array, + framecache, + function, + hdf5, + imagefiles, + rawimage, + metadata, + trivial, ) try: @@ -40,9 +50,9 @@ def __getitem__(self, _): from . 
import eiger_stream_v1 -#for loader, name, ispkg in pkgutil.iter_modules(__path__): +# for loader, name, ispkg in pkgutil.iter_modules(__path__): # if name is not 'registry': # __import__(name, globals=globals()) - # - # couldn't get the following line to work due to relative import issue: - # loader.find_module(name).load_module(name) +# +# couldn't get the following line to work due to relative import issue: +# loader.find_module(name).load_module(name) diff --git a/hexrd/imageseries/load/array.py b/hexrd/core/imageseries/load/array.py similarity index 88% rename from hexrd/imageseries/load/array.py rename to hexrd/core/imageseries/load/array.py index 2f199057d..213f365d0 100644 --- a/hexrd/imageseries/load/array.py +++ b/hexrd/core/imageseries/load/array.py @@ -1,5 +1,5 @@ -"""Adapter class for numpy array (3D) -""" +"""Adapter class for numpy array (3D)""" + from . import ImageSeriesAdapter from ..imageseriesiter import ImageSeriesIterator @@ -18,6 +18,7 @@ class ArrayImageSeriesAdapter(ImageSeriesAdapter): metadata: dict (optional) the metadata dictionary """ + format = 'array' def __init__(self, fname, **kwargs): @@ -28,9 +29,9 @@ def __init__(self, fname, **kwargs): self._data = data_arr else: raise RuntimeError( - 'input array must be 2-d or 3-d; you provided ndim=%d' - % data_arr.ndim - ) + 'input array must be 2-d or 3-d; you provided ndim=%d' + % data_arr.ndim + ) self._meta = kwargs.pop('meta', dict()) self._shape = self._data.shape diff --git a/hexrd/imageseries/load/eiger_stream_v1.py b/hexrd/core/imageseries/load/eiger_stream_v1.py similarity index 94% rename from hexrd/imageseries/load/eiger_stream_v1.py rename to hexrd/core/imageseries/load/eiger_stream_v1.py index 14ad28c5c..8b6c0e883 100644 --- a/hexrd/imageseries/load/eiger_stream_v1.py +++ b/hexrd/core/imageseries/load/eiger_stream_v1.py @@ -1,12 +1,12 @@ -"""HDF5 adapter class -""" +"""HDF5 adapter class""" + import warnings from dectris.compression import decompress import h5py import numpy 
as np -from hexrd.utils.hdf5 import unwrap_h5_to_dict +from hexrd.core.utils.hdf5 import unwrap_h5_to_dict from . import ImageSeriesAdapter from ..imageseriesiter import ImageSeriesIterator @@ -133,6 +133,7 @@ def _decompress_frame(d: dict) -> np.ndarray: if compression_type is None: return np.frombuffer(data, dtype=dtype).reshape(shape) - decompressed_bytes = decompress(data, compression_type, - elem_size=elem_size) + decompressed_bytes = decompress( + data, compression_type, elem_size=elem_size + ) return np.frombuffer(decompressed_bytes, dtype=dtype).reshape(shape) diff --git a/hexrd/imageseries/load/framecache.py b/hexrd/core/imageseries/load/framecache.py similarity index 93% rename from hexrd/imageseries/load/framecache.py rename to hexrd/core/imageseries/load/framecache.py index e4498467f..ff37ac6c9 100644 --- a/hexrd/imageseries/load/framecache.py +++ b/hexrd/core/imageseries/load/framecache.py @@ -12,8 +12,8 @@ from . import ImageSeriesAdapter, RegionType from ..imageseriesiter import ImageSeriesIterator from .metadata import yamlmeta -from hexrd.utils.hdf5 import unwrap_h5_to_dict -from hexrd.utils.compatibility import h5py_read_string +from hexrd.core.utils.hdf5 import unwrap_h5_to_dict +from hexrd.core.utils.compatibility import h5py_read_string import multiprocessing from concurrent.futures import ThreadPoolExecutor @@ -50,9 +50,11 @@ def __init__(self, fname, style='npz', **kwargs): self._from_yml = False self._load_cache() else: - raise TypeError(f"Unknown style format for loading data: {style}." - "Known style formats: 'npz', 'fch5' 'yml', ", - "'yaml', 'test'") + raise TypeError( + f"Unknown style format for loading data: {style}." 
+ "Known style formats: 'npz', 'fch5' 'yml', ", + "'yaml', 'test'", + ) def _load_yml(self): with open(self._fname, "r") as f: @@ -73,13 +75,16 @@ def _load_cache(self): def _load_cache_fch5(self): with h5py.File(self._fname, "r") as file: if 'HEXRD_FRAMECACHE_VERSION' not in file.attrs.keys(): - raise NotImplementedError("Unsupported file. " - "HEXRD_FRAMECACHE_VERSION " - "is missing!") + raise NotImplementedError( + "Unsupported file. " + "HEXRD_FRAMECACHE_VERSION " + "is missing!" + ) version = file.attrs.get('HEXRD_FRAMECACHE_VERSION', 0) if version != 1: - raise NotImplementedError("Framecache version is not " - f"supported: {version}") + raise NotImplementedError( + "Framecache version is not " f"supported: {version}" + ) self._shape = file["shape"][()] self._nframes = file["nframes"][()] @@ -165,12 +170,11 @@ def get_region(self, frame_idx: int, region: RegionType) -> np.ndarray: self._load_framelist_if_needed() csr_frame = self._framelist[frame_idx] r = region - return csr_frame[r[0][0]:r[0][1], r[1][0]:r[1][1]].toarray() + return csr_frame[r[0][0] : r[0][1], r[1][0] : r[1][1]].toarray() @property def metadata(self): - """(read-only) Image sequence metadata - """ + """(read-only) Image sequence metadata""" return self._meta def load_metadata(self, indict): diff --git a/hexrd/imageseries/load/function.py b/hexrd/core/imageseries/load/function.py similarity index 99% rename from hexrd/imageseries/load/function.py rename to hexrd/core/imageseries/load/function.py index d06f01b63..499c11190 100644 --- a/hexrd/imageseries/load/function.py +++ b/hexrd/core/imageseries/load/function.py @@ -1,6 +1,7 @@ """Adapter class for a custom function that takes an int as an argument and returns a 2D numpy array. """ + from . 
import ImageSeriesAdapter from ..imageseriesiter import ImageSeriesIterator @@ -26,6 +27,7 @@ class FunctionImageSeriesAdapter(ImageSeriesAdapter): metadata: dict (optional) the metadata dictionary """ + format = 'function' def __init__(self, fname, **kwargs): diff --git a/hexrd/imageseries/load/hdf5.py b/hexrd/core/imageseries/load/hdf5.py similarity index 95% rename from hexrd/imageseries/load/hdf5.py rename to hexrd/core/imageseries/load/hdf5.py index f92b19841..7a6cbd193 100644 --- a/hexrd/imageseries/load/hdf5.py +++ b/hexrd/core/imageseries/load/hdf5.py @@ -1,11 +1,11 @@ -"""HDF5 adapter class -""" +"""HDF5 adapter class""" + import h5py import warnings import numpy as np -from . import ImageSeriesAdapter,RegionType +from . import ImageSeriesAdapter, RegionType from ..imageseriesiter import ImageSeriesIterator @@ -60,7 +60,7 @@ def __del__(self): # an issue arises at some point try: self.close() - except(Exception): + except Exception: warnings.warn("HDF5ImageSeries could not close h5 file") def __getitem__(self, key): @@ -81,7 +81,9 @@ def __getitem__(self, key): def get_region(self, frame_idx: int, region: RegionType) -> np.ndarray: r = region - return self.__image_dataset[frame_idx][r[0][0]:r[0][1], r[1][0]:r[1][1]] + return self.__image_dataset[frame_idx][ + r[0][0] : r[0][1], r[1][0] : r[1][1] + ] def __iter__(self): return ImageSeriesIterator(self) diff --git a/hexrd/imageseries/load/imagefiles.py b/hexrd/core/imageseries/load/imagefiles.py similarity index 87% rename from hexrd/imageseries/load/imagefiles.py rename to hexrd/core/imageseries/load/imagefiles.py index 8a20c37d6..50c7303e3 100644 --- a/hexrd/imageseries/load/imagefiles.py +++ b/hexrd/core/imageseries/load/imagefiles.py @@ -1,9 +1,8 @@ -"""Adapter class for list of image files -""" - +"""Adapter class for list of image files""" # import sys import os + # import logging import glob @@ -63,7 +62,7 @@ def __getitem__(self, key): # !!! 
handled in self._process_files try: dinfo = np.iinfo(self._dtype) - except(ValueError): + except ValueError: dinfo = np.finfo(self._dtype) if np.max(data) > dinfo.max: raise RuntimeError("specified dtype will truncate image") @@ -82,8 +81,14 @@ def __str__(self): dtype: %s shape: %s single frames: %s - """ % (self.fabioclass, len(self._files), len(self), - self.dtype, self.shape, self.singleframes) + """ % ( + self.fabioclass, + len(self._files), + len(self), + self.dtype, + self.shape, + self.singleframes, + ) return s @property @@ -119,12 +124,15 @@ def _load_yml(self): self._files.sort() self.optsd = d['options'] if 'options' else None self._empty = self.optsd[EMPTY] if EMPTY in self.optsd else 0 - self._maxframes_tot = self.optsd[MAXTOTF] \ - if MAXTOTF in self.optsd else 0 - self._maxframes_file = self.optsd[MAXFILF] \ - if MAXFILF in self.optsd else 0 - self._dtype = np.dtype(self.optsd[DTYPE]) \ - if DTYPE in self.optsd else None + self._maxframes_tot = ( + self.optsd[MAXTOTF] if MAXTOTF in self.optsd else 0 + ) + self._maxframes_file = ( + self.optsd[MAXFILF] if MAXFILF in self.optsd else 0 + ) + self._dtype = ( + np.dtype(self.optsd[DTYPE]) if DTYPE in self.optsd else None + ) self._meta = yamlmeta(d['meta']) # , path=imgsd) @@ -139,17 +147,19 @@ def _process_files(self): for imgf in self._files: info = FileInfo(imgf, **kw) infolist.append(info) - shp = self._checkvalue(shp, info.shape, - "inconsistent image shapes") + shp = self._checkvalue( + shp, info.shape, "inconsistent image shapes" + ) if self._dtype is not None: dtp = self._dtype else: dtp = self._checkvalue( - dtp, info.dtype, - "inconsistent image dtypes") - fcl = self._checkvalue(fcl, info.fabioclass, - "inconsistent image types") + dtp, info.dtype, "inconsistent image dtypes" + ) + fcl = self._checkvalue( + fcl, info.fabioclass, "inconsistent image types" + ) nf += info.nframes if info.nframes > 1: self._singleframes = False @@ -266,8 +276,13 @@ def __str__(self): fabio class: %s frames: %s 
dtype: %s - shape: %s\n""" % (self.filename, self.fabioclass, - self.nframes, self.dtype, self.shape) + shape: %s\n""" % ( + self.filename, + self.fabioclass, + self.nframes, + self.dtype, + self.shape, + ) return s @@ -313,4 +328,4 @@ def _process_gel_data(array): """Convert a gel data array to regular image data""" # An inversion seems to be necessary for our examples array = np.invert(array) - return array.astype(np.float64)**2 * GEL_SCALE_FACTOR + return array.astype(np.float64) ** 2 * GEL_SCALE_FACTOR diff --git a/hexrd/imageseries/load/metadata.py b/hexrd/core/imageseries/load/metadata.py similarity index 55% rename from hexrd/imageseries/load/metadata.py rename to hexrd/core/imageseries/load/metadata.py index 882bebd95..edb0ca211 100644 --- a/hexrd/imageseries/load/metadata.py +++ b/hexrd/core/imageseries/load/metadata.py @@ -1,23 +1,25 @@ """metadata tools for imageseries""" + import os import yaml import numpy as np + def yamlmeta(meta, path=None): - """ Image sequence metadata + """Image sequence metadata - *path* is a full path or directory used to find the relative location - of files loaded via the trigger mechanism + *path* is a full path or directory used to find the relative location + of files loaded via the trigger mechanism -The usual yaml dictionary is returned with the exception that -if the first word of a multiword string is an exclamation mark ("!"), -it will trigger further processing determined by the rest of the string. -Currently only one trigger is used: + The usual yaml dictionary is returned with the exception that + if the first word of a multiword string is an exclamation mark ("!"), + it will trigger further processing determined by the rest of the string. + Currently only one trigger is used: -! load-numpy-object - the returned value will the numpy object read from the file -""" + ! 
load-numpy-object + the returned value will the numpy object read from the file + """ if path is not None: path = os.path.dirname(path) else: @@ -31,7 +33,7 @@ def yamlmeta(meta, path=None): words = v.split() istrigger = (words[0] == "!") and (len(words) > 1) - if v == '++np.array': # old way used in frame-cache (obsolescent) + if v == '++np.array': # old way used in frame-cache (obsolescent) newk = k + '-array' metad[k] = np.array(meta.pop(newk)) metad.pop(newk, None) diff --git a/hexrd/imageseries/load/rawimage.py b/hexrd/core/imageseries/load/rawimage.py similarity index 96% rename from hexrd/imageseries/load/rawimage.py rename to hexrd/core/imageseries/load/rawimage.py index aa4cfe668..1dd06c1cc 100644 --- a/hexrd/imageseries/load/rawimage.py +++ b/hexrd/core/imageseries/load/rawimage.py @@ -1,4 +1,5 @@ -""" Adapter class for raw image reader""" +"""Adapter class for raw image reader""" + import os import threading @@ -86,13 +87,13 @@ def typechars(numtype, bytes_=4, signed=False, little=True): 1: "b", 2: "h", 4: "i", - 8: "l" + 8: "l", } typechar = { "f": "f", "d": "d", - "b": "?" 
+ "b": "?", } if numtype == "i": @@ -102,7 +103,7 @@ def typechars(numtype, bytes_=4, signed=False, little=True): else: char = typechar[numtype] - return "<"+char if little else ">"+char + return "<" + char if little else ">" + char def __len__(self): return self._len diff --git a/hexrd/imageseries/load/registry.py b/hexrd/core/imageseries/load/registry.py similarity index 91% rename from hexrd/imageseries/load/registry.py rename to hexrd/core/imageseries/load/registry.py index f87315f45..070c426cd 100644 --- a/hexrd/imageseries/load/registry.py +++ b/hexrd/core/imageseries/load/registry.py @@ -1,7 +1,9 @@ -"""Adapter registry -""" +"""Adapter registry""" + + class Registry(object): """Registry for imageseries adapters""" + adapter_registry = dict() @classmethod diff --git a/hexrd/imageseries/load/trivial.py b/hexrd/core/imageseries/load/trivial.py similarity index 99% rename from hexrd/imageseries/load/trivial.py rename to hexrd/core/imageseries/load/trivial.py index c0d189c83..e1b3162fe 100644 --- a/hexrd/imageseries/load/trivial.py +++ b/hexrd/core/imageseries/load/trivial.py @@ -1,6 +1,8 @@ """Trivial adapter: just for testing""" + from . 
import ImageSeriesAdapter + class TrivialAdapter(ImageSeriesAdapter): def __init__(self, fname): diff --git a/hexrd/imageseries/omega.py b/hexrd/core/imageseries/omega.py similarity index 90% rename from hexrd/imageseries/omega.py rename to hexrd/core/imageseries/omega.py index f0a61ef2d..0db39d1c6 100644 --- a/hexrd/imageseries/omega.py +++ b/hexrd/core/imageseries/omega.py @@ -2,14 +2,17 @@ * OmegaWedges class specifies omega metadata in wedges """ + import numpy as np from .baseclass import ImageSeries OMEGA_KEY = 'omega' + class OmegaImageSeries(ImageSeries): """ImageSeries with omega metadata""" + DFLT_TOL = 1.0e-6 TAU = 360 @@ -40,8 +43,8 @@ def _make_wedges(self, tol=DFLT_TOL): if delta <= 0: raise OmegaSeriesError('omega array must be increasing') # check whether delta changes or ranges not contiguous - d = om[f,1] - om[f,0] - if (np.abs(d - delta) > tol) or (np.abs(om[f,0] - omlast) > tol): + d = om[f, 1] - om[f, 0] + if (np.abs(d - delta) > tol) or (np.abs(om[f, 0] - omlast) > tol): starts.append(f) delta = d omlast = om[f, 1] @@ -55,15 +58,15 @@ def _make_wedges(self, tol=DFLT_TOL): for s in range(nw): ostart = om[starts[s], 0] ostop = om[starts[s + 1] - 1, 1] - steps = starts[s+1] - starts[s] + steps = starts[s + 1] - starts[s] self._omegawedges.addwedge(ostart, ostop, steps) # - delta = (ostop - ostart)/steps + delta = (ostop - ostart) / steps self._wedge_om[s, :] = (ostart, ostop, delta) self._wedge_f[s, 0] = nf0 self._wedge_f[s, 1] = steps nf0 += steps - assert(nf0 == nf) + assert nf0 == nf @property def omega(self): @@ -83,7 +86,7 @@ def nwedges(self): def wedge(self, i): """return i'th wedge as a dictionary""" d = self.omegawedges.wedges[i] - delta = (d['ostop'] - d['ostart'])/d['nsteps'] + delta = (d['ostop'] - d['ostart']) / d['nsteps'] d.update(delta=delta) return d @@ -97,7 +100,9 @@ def omega_to_frame(self, om): omcheck = omin + np.mod(om - omin, self.TAU) if omcheck < omax: odel = self._wedge_om[i, 2] - f = self._wedge_f[i,0] + 
int(np.floor((omcheck - omin)/odel)) + f = self._wedge_f[i, 0] + int( + np.floor((omcheck - omin) / odel) + ) w = i break @@ -115,7 +120,7 @@ def omegarange_to_frames(self, omin, omax): # if same wedge, require frames be increasing if (w0 == w1) and (f1 > f0): - return list(range(f0, f1+1)) + return list(range(f0, f1 + 1)) # case: adjacent wedges with 2pi jump in omega w0max = self._wedge_om[w0, 1] @@ -137,9 +142,11 @@ class OmegaWedges(object): nframes: int number of frames in imageseries """ + def __init__(self, nframes): self.nframes = nframes self._wedges = [] + # # ============================== API # @@ -147,8 +154,10 @@ def __init__(self, nframes): def omegas(self): """n x 2 array of omega values, one per frame""" if self.nframes != self.wframes: - msg = "number of frames (%s) does not match "\ - "number of wedge frames (%s)" %(self.nframes, self.wframes) + msg = ( + "number of frames (%s) does not match " + "number of wedge frames (%s)" % (self.nframes, self.wframes) + ) raise OmegaSeriesError(msg) oa = np.zeros((self.nframes, 2)) @@ -217,5 +226,6 @@ def save_omegas(self, fname): class OmegaSeriesError(Exception): def __init__(self, value): self.value = value + def __str__(self): return repr(self.value) diff --git a/hexrd/imageseries/process.py b/hexrd/core/imageseries/process.py similarity index 97% rename from hexrd/imageseries/process.py rename to hexrd/core/imageseries/process.py index 4f9f807e2..d4425711d 100644 --- a/hexrd/imageseries/process.py +++ b/hexrd/core/imageseries/process.py @@ -1,4 +1,5 @@ """Class for processing individual frames""" + import copy import numpy as np @@ -20,6 +21,7 @@ class ProcessedImageSeries(ImageSeries): frame_list: list of ints or None, default = None specify subset of frames by list; if None, then all frames are used """ + FLIP = 'flip' DARK = 'dark' RECT = 'rectangle' @@ -32,7 +34,7 @@ def __init__(self, imser, oplist, **kwargs): self._meta = copy.deepcopy(imser.metadata) self._oplist = oplist self._frames = 
kwargs.pop('frame_list', None) - self._hasframelist = (self._frames is not None) + self._hasframelist = self._frames is not None if self._hasframelist: self._update_omega() self._opdict = {} @@ -71,7 +73,7 @@ def __iter__(self): return (self[i] for i in range(len(self))) def _process_frame(self, key): - # note: key refers to original imageseries + # note: key refers to original imageseries oplist = self.oplist # when rectangle is the first operation we can try to call the @@ -126,7 +128,7 @@ def _rectangle_optimized(self, img_key, r): def _rectangle(self, img, r): # restrict to rectangle - return img[r[0][0]:r[0][1], r[1][0]:r[1][1]] + return img[r[0][0] : r[0][1], r[1][0] : r[1][1]] def _flip(self, img, flip): if flip in ('y', 'v'): # about y-axis (vertical) @@ -160,6 +162,7 @@ def _update_omega(self): if "omega" in self.metadata: omega = self.metadata["omega"] self.metadata["omega"] = omega[self._frames] + # # ==================== API # diff --git a/hexrd/imageseries/save.py b/hexrd/core/imageseries/save.py similarity index 99% rename from hexrd/imageseries/save.py rename to hexrd/core/imageseries/save.py index 09e70dfc6..c35947e77 100644 --- a/hexrd/imageseries/save.py +++ b/hexrd/core/imageseries/save.py @@ -13,8 +13,8 @@ import hdf5plugin import yaml -from hexrd.matrixutil import extract_ijv -from hexrd.utils.hdf5 import unwrap_dict_to_h5 +from hexrd.core.matrixutil import extract_ijv +from hexrd.core.utils.hdf5 import unwrap_dict_to_h5 MAX_NZ_FRACTION = 0.1 # 10% sparsity trigger for frame-cache write diff --git a/hexrd/imageseries/stats.py b/hexrd/core/imageseries/stats.py similarity index 93% rename from hexrd/imageseries/stats.py rename to hexrd/core/imageseries/stats.py index 6f5f7eef3..d39ddab5e 100644 --- a/hexrd/imageseries/stats.py +++ b/hexrd/core/imageseries/stats.py @@ -23,13 +23,14 @@ * Perhaps we should rename min -> minimum and max -> maximum to avoid conflicting with the python built-ins """ + import numpy as np from psutil import 
virtual_memory # Default Buffer Size: half of available memory vmem = virtual_memory() -STATS_BUFFER = int(0.5*vmem.available) +STATS_BUFFER = int(0.5 * vmem.available) del vmem @@ -105,7 +106,7 @@ def average_iter(ims, nchunk, nframes=0): """average over frames Note: average returns a float even if images are uint -""" + """ nf = _nframes(ims, nframes) stops = _chunk_stops(nf, nchunk) s0, stop = 0, stops[0] @@ -135,7 +136,7 @@ def percentile(ims, pctl, nframes=0): return np.percentile(_toarray(ims, nf), pctl, axis=0).astype(np.float32) -def percentile_iter(ims, pctl, nchunks, nframes=0, use_buffer=True): +def percentile_iter(ims, pctl, nchunks, nframes=0, use_buffer=True): """iterator for percentile function""" nf = _nframes(ims, nframes) nr, nc = ims.shape @@ -146,8 +147,7 @@ def percentile_iter(ims, pctl, nchunks, nframes=0, use_buffer=True): for s in stops: r1 = s + 1 img[r0:r1] = np.percentile( - _toarray(ims, nf, rows=(r0, r1), buffer=buffer), - pctl, axis=0 + _toarray(ims, nf, rows=(r0, r1), buffer=buffer), pctl, axis=0 ) r0 = r1 yield img.astype(np.float32) @@ -176,12 +176,12 @@ def _chunk_stops(n, nchunks): n -- number of items to be chunked (e.g. 
frames/rows) nchunks -- number of chunks -""" + """ if nchunks > n: raise ValueError("number of chunks cannot exceed number of items") - csize = n//nchunks + csize = n // nchunks rem = n % nchunks - pieces = csize*np.ones(nchunks, dtype=int) + pieces = csize * np.ones(nchunks, dtype=int) pieces[:rem] += 1 pieces[0] += -1 @@ -231,8 +231,8 @@ def _toarray(ims, nframes, rows=None, buffer=None): def _alloc_buffer(ims, nf): """Allocate buffer to save as many full frames as possible""" shp, dt = ims.shape, ims.dtype - framesize = shp[0]*shp[1]*dt.itemsize - nf = np.minimum(nf, np.floor(STATS_BUFFER/framesize).astype(int)) + framesize = shp[0] * shp[1] * dt.itemsize + nf = np.minimum(nf, np.floor(STATS_BUFFER / framesize).astype(int)) bshp = (nf,) + shp return np.empty(bshp, dt) diff --git a/hexrd/imageutil.py b/hexrd/core/imageutil.py similarity index 81% rename from hexrd/imageutil.py rename to hexrd/core/imageutil.py index 24412f354..e4592fce5 100644 --- a/hexrd/imageutil.py +++ b/hexrd/core/imageutil.py @@ -8,14 +8,15 @@ from skimage.feature import blob_dog, blob_log from skimage.exposure import rescale_intensity -from hexrd import convolution -from hexrd.constants import fwhm_to_sigma +from hexrd.core import convolution +from hexrd.core.constants import fwhm_to_sigma # ============================================================================= # BACKGROUND REMOVAL # ============================================================================= + def _scale_image_snip(y, offset, invert=False): """ Log-Log scale image for snip @@ -40,14 +41,13 @@ def _scale_image_snip(y, offset, invert=False): """ if invert: - return (np.exp(np.exp(y) - 1.) - 1.)**2 + offset + return (np.exp(np.exp(y) - 1.0) - 1.0) ** 2 + offset else: - return np.log(np.log(np.sqrt(y - offset) + 1.) + 1.) 
+ return np.log(np.log(np.sqrt(y - offset) + 1.0) + 1.0) def fast_snip1d(y, w=4, numiter=2): - """ - """ + """ """ bkg = np.zeros_like(y) min_val = np.nanmin(y) zfull = _scale_image_snip(y, min_val, invert=False) @@ -55,7 +55,7 @@ def fast_snip1d(y, w=4, numiter=2): b = z for i in range(numiter): for p in range(w, 0, -1): - kernel = np.zeros(p*2 + 1) + kernel = np.zeros(p * 2 + 1) kernel[0] = 0.5 kernel[-1] = 0.5 b = np.minimum(b, signal.convolve(z, kernel, mode='same')) @@ -111,14 +111,18 @@ def _run_snip1d_row(task, numiter, w, min_val): b = z for i in range(numiter): for p in range(w, 0, -1): - kernel = np.zeros(p*2 + 1) - kernel[0] = kernel[-1] = 1./2. + kernel = np.zeros(p * 2 + 1) + kernel[0] = kernel[-1] = 1.0 / 2.0 b = np.minimum( b, convolution.convolve( - z, kernel, boundary='extend', mask=mask, - nan_treatment='interpolate', preserve_nan=True - ) + z, + kernel, + boundary='extend', + mask=mask, + nan_treatment='interpolate', + preserve_nan=True, + ), ) z = b return k, _scale_image_snip(b, min_val, invert=True) @@ -134,19 +138,21 @@ def snip1d_quad(y, w=4, numiter=2): N = p * 2 + 1 # linear kernel kern1 = np.zeros(N) - kern1[0] = kern1[-1] = 1./2. + kern1[0] = kern1[-1] = 1.0 / 2.0 # quadratic kernel kern2 = np.zeros(N) - kern2[0] = kern2[-1] = -1./6. - kern2[int(p/2.)] = kern2[int(3.*p/2.)] = 4./6. 
+ kern2[0] = kern2[-1] = -1.0 / 6.0 + kern2[int(p / 2.0)] = kern2[int(3.0 * p / 2.0)] = 4.0 / 6.0 kernels.append([kern1, kern2]) z = b = _scale_image_snip(y, min_val, invert=False) for i in range(numiter): - for (kern1, kern2) in kernels: - c = np.maximum(ndimage.convolve1d(z, kern1, mode='nearest'), - ndimage.convolve1d(z, kern2, mode='nearest')) + for kern1, kern2 in kernels: + c = np.maximum( + ndimage.convolve1d(z, kern1, mode='nearest'), + ndimage.convolve1d(z, kern2, mode='nearest'), + ) b = np.minimum(b, c) z = b @@ -194,16 +200,16 @@ def snip2d(y, w=4, numiter=2, order=1): # linear filter kernel kern1 = np.zeros((N, N)) # initialize a kernel with all zeros xx, yy = np.indices(kern1.shape) # x-y indices of kernel points - ij = np.round( - np.hypot(xx - p1, yy - p1) - ) == p1 # select circular shape + ij = ( + np.round(np.hypot(xx - p1, yy - p1)) == p1 + ) # select circular shape kern1[ij] = 1 / ij.sum() # normalize so sum of kernel elements is 1 kernels.append([kern1]) if order >= 2: # add quadratic filter kernel p2 = p1 // 2 kern2 = np.zeros_like(kern1) - radii, norms = (p2, 2 * p2), (4/3, -1/3) + radii, norms = (p2, 2 * p2), (4 / 3, -1 / 3) for radius, norm in zip(radii, norms): ij = np.round(np.hypot(xx - p1, yy - p1)) == radius kern2[ij] = norm / ij.sum() @@ -214,8 +220,10 @@ def snip2d(y, w=4, numiter=2, order=1): for i in range(numiter): for kk in kernels: if order > 1: - c = maximum(ndimage.convolve(z, kk[0], mode='nearest'), - ndimage.convolve(z, kk[1], mode='nearest')) + c = maximum( + ndimage.convolve(z, kk[0], mode='nearest'), + ndimage.convolve(z, kk[1], mode='nearest'), + ) else: c = ndimage.convolve(z, kk[0], mode='nearest') b = minimum(b, c) @@ -238,21 +246,16 @@ def find_peaks_2d(img, method, method_kwargs): filter_fwhm = method_kwargs['filter_radius'] if filter_fwhm: filt_stdev = fwhm_to_sigma * filter_fwhm - img = -ndimage.filters.gaussian_laplace( - img, filt_stdev - ) + img = -ndimage.filters.gaussian_laplace(img, filt_stdev) labels_t, 
numSpots_t = ndimage.label( - img > method_kwargs['threshold'], - structureNDI_label - ) + img > method_kwargs['threshold'], structureNDI_label + ) coms_t = np.atleast_2d( ndimage.center_of_mass( - img, - labels=labels_t, - index=np.arange(1, np.amax(labels_t) + 1) - ) + img, labels=labels_t, index=np.arange(1, np.amax(labels_t) + 1) ) + ) elif method in ['blob_log', 'blob_dog']: # must scale map # TODO: we should so a parameter study here @@ -265,13 +268,9 @@ def find_peaks_2d(img, method, method_kwargs): # for 'blob_dog': min_sigma=0.5, max_sigma=5, # sigma_ratio=1.6, threshold=0.01, overlap=0.1 if method == 'blob_log': - blobs = np.atleast_2d( - blob_log(scl_map, **method_kwargs) - ) + blobs = np.atleast_2d(blob_log(scl_map, **method_kwargs)) else: # blob_dog - blobs = np.atleast_2d( - blob_dog(scl_map, **method_kwargs) - ) + blobs = np.atleast_2d(blob_dog(scl_map, **method_kwargs)) numSpots_t = len(blobs) coms_t = blobs[:, :2] diff --git a/hexrd/instrument/__init__.py b/hexrd/core/instrument/__init__.py similarity index 100% rename from hexrd/instrument/__init__.py rename to hexrd/core/instrument/__init__.py diff --git a/hexrd/instrument/constants.py b/hexrd/core/instrument/constants.py similarity index 69% rename from hexrd/instrument/constants.py rename to hexrd/core/instrument/constants.py index c8cc84907..4809a2ef7 100644 --- a/hexrd/instrument/constants.py +++ b/hexrd/core/instrument/constants.py @@ -1,31 +1,34 @@ -from hexrd.constants import DENSITY, DENSITY_COMPOUNDS +from hexrd.core.constants import DENSITY, DENSITY_COMPOUNDS # default filter and coating materials class FILTER_DEFAULTS: TARDIS = { 'material': 'Ge', - 'density' : DENSITY['Ge'], - 'thickness' : 10 # microns + 'density': DENSITY['Ge'], + 'thickness': 10, # microns } PXRDIP = { 'material': 'Cu', - 'density' : DENSITY['Cu'], - 'thickness' : 10 # microns + 'density': DENSITY['Cu'], + 'thickness': 10, # microns } + COATING_DEFAULT = { 'material': 'C10H8O4', 'density': 
DENSITY_COMPOUNDS['C10H8O4'], - 'thickness': 9 # microns + 'thickness': 9, # microns } PHOSPHOR_DEFAULT = { 'material': 'Ba2263F2263Br1923I339C741H1730N247O494', - 'density': DENSITY_COMPOUNDS['Ba2263F2263Br1923I339C741H1730N247O494'], # g/cc - 'thickness': 115, # microns - 'readout_length': 222, #microns - 'pre_U0': 0.695 + 'density': DENSITY_COMPOUNDS[ + 'Ba2263F2263Br1923I339C741H1730N247O494' + ], # g/cc + 'thickness': 115, # microns + 'readout_length': 222, # microns + 'pre_U0': 0.695, } @@ -66,14 +69,14 @@ class PHYSICS_PACKAGE_DEFAULTS: # Default pinhole area correction parameters class PINHOLE_DEFAULTS: TARDIS = { - 'pinhole_material' : 'Ta', - 'pinhole_diameter' : 400, # in microns - 'pinhole_thickness' : 100, # in microns - 'pinhole_density' : 16.65, # g/cc + 'pinhole_material': 'Ta', + 'pinhole_diameter': 400, # in microns + 'pinhole_thickness': 100, # in microns + 'pinhole_density': 16.65, # g/cc } PXRDIP = { - 'pinhole_material' : 'Ta', - 'pinhole_diameter' : 130, # in microns - 'pinhole_thickness' : 70, # in microns - 'pinhole_density' : 16.65, # g/cc + 'pinhole_material': 'Ta', + 'pinhole_diameter': 130, # in microns + 'pinhole_thickness': 70, # in microns + 'pinhole_density': 16.65, # g/cc } diff --git a/hexrd/instrument/cylindrical_detector.py b/hexrd/core/instrument/cylindrical_detector.py similarity index 65% rename from hexrd/instrument/cylindrical_detector.py rename to hexrd/core/instrument/cylindrical_detector.py index 843b2abb3..2e448e24e 100644 --- a/hexrd/instrument/cylindrical_detector.py +++ b/hexrd/core/instrument/cylindrical_detector.py @@ -2,9 +2,12 @@ import numpy as np -from hexrd import constants as ct -from hexrd import xrdutil -from hexrd.utils.decorators import memoize +from hexrd.core import constants as ct + +# TODO: Resolve extra-core dependency +from hexrd.hedm import xrdutil +from hexrd.hed.xrdutil.utils import _warp_to_cylinder +from hexrd.core.utils.decorators import memoize from .detector import Detector @@ -15,11 +18,11 
@@ class CylindricalDetector(Detector): """2D cylindrical detector - A cylindrical detector is a simple rectangular - row-column detector which has been bent in the - shape of a cylinder. Inherting the PlanarDetector - class except for a few changes to account for the - cylinder ray intersection. + A cylindrical detector is a simple rectangular + row-column detector which has been bent in the + shape of a cylinder. Inherting the PlanarDetector + class except for a few changes to account for the + cylinder ray intersection. """ def __init__(self, radius=49.51, **detector_kwargs): @@ -30,10 +33,15 @@ def __init__(self, radius=49.51, **detector_kwargs): def detector_type(self): return 'cylindrical' - def cart_to_angles(self, xy_data, - rmat_s=None, - tvec_s=None, tvec_c=None, - apply_distortion=False, normalize=True): + def cart_to_angles( + self, + xy_data, + rmat_s=None, + tvec_s=None, + tvec_c=None, + apply_distortion=False, + normalize=True, + ): xy_data = np.asarray(xy_data) if rmat_s is None: rmat_s = ct.identity_3x3 @@ -44,23 +52,30 @@ def cart_to_angles(self, xy_data, if apply_distortion and self.distortion is not None: xy_data = self.distortion.apply(xy_data) - dvecs = xrdutil.utils._warp_to_cylinder(xy_data, - self.tvec, - self.radius, - self.caxis, - self.paxis, - tVec_s=tvec_s, - tVec_c=tvec_c, - rmat_s=rmat_s, - normalize=normalize) + dvecs = _warp_to_cylinder( + xy_data, + self.tvec, + self.radius, + self.caxis, + self.paxis, + tVec_s=tvec_s, + tVec_c=tvec_c, + rmat_s=rmat_s, + normalize=normalize, + ) tth, eta = xrdutil.utils._dvec_to_angs(dvecs, self.bvec, self.evec) tth_eta = np.vstack((tth, eta)).T return tth_eta, dvecs - def angles_to_cart(self, tth_eta, - rmat_s=None, tvec_s=None, - rmat_c=None, tvec_c=None, - apply_distortion=False): + def angles_to_cart( + self, + tth_eta, + rmat_s=None, + tvec_s=None, + rmat_c=None, + tvec_c=None, + apply_distortion=False, + ): if rmat_s is None: rmat_s = ct.identity_3x3 if tvec_s is None: @@ -78,15 +93,24 @@ 
def angles_to_cart(self, tth_eta, ome = np.arccos(rmat_s[0, 0]) angs = np.hstack([tth_eta, np.tile(ome, (len(tth_eta), 1))]) - kwargs = {'beamVec': self.bvec, - 'etaVec': self.evec, - 'tVec_s': tvec_s, - 'rmat_s': rmat_s, - 'tVec_c': tvec_c} - args = (angs, chi, self.tvec, - self.caxis, self.paxis, - self.radius, self.physical_size, - self.angle_extent, self.distortion) + kwargs = { + 'beamVec': self.bvec, + 'etaVec': self.evec, + 'tVec_s': tvec_s, + 'rmat_s': rmat_s, + 'tVec_c': tvec_c, + } + args = ( + angs, + chi, + self.tvec, + self.caxis, + self.paxis, + self.radius, + self.physical_size, + self.angle_extent, + self.distortion, + ) proj_func = xrdutil.utils._project_on_detector_cylinder valid_xy, rMat_ss, valid_mask = proj_func(*args, **kwargs) @@ -95,20 +119,24 @@ def angles_to_cart(self, tth_eta, xy_det[valid_mask, :] = valid_xy return xy_det - def cart_to_dvecs(self, - xy_data, - tvec_s=ct.zeros_3x1, - rmat_s=ct.identity_3x3, - tvec_c=ct.zeros_3x1): - return xrdutil.utils._warp_to_cylinder(xy_data, - self.tvec, - self.radius, - self.caxis, - self.paxis, - tVec_s=tvec_s, - rmat_s=rmat_s, - tVec_c=tvec_c, - normalize=False) + def cart_to_dvecs( + self, + xy_data, + tvec_s=ct.zeros_3x1, + rmat_s=ct.identity_3x3, + tvec_c=ct.zeros_3x1, + ): + return _warp_to_cylinder( + xy_data, + self.tvec, + self.radius, + self.caxis, + self.paxis, + tVec_s=tvec_s, + rmat_s=rmat_s, + tVec_c=tvec_c, + normalize=False, + ) def pixel_angles(self, origin=ct.zeros_3, bvec: np.ndarray | None = None): if bvec is None: @@ -131,7 +159,7 @@ def local_normal(self): num = x.shape[0] naxis = np.cross(self.paxis, self.caxis) - th = x/self.radius + th = x / self.radius xp = np.sin(th) xn = -np.cos(th) @@ -163,7 +191,7 @@ def calc_filter_coating_transmission(self, energy): t_f = self.filter.thickness t_c = self.coating.thickness t_p = self.phosphor.thickness - L = self.phosphor.readout_length + L = self.phosphor.readout_length pre_U0 = self.phosphor.pre_U0 det_normal = self.local_normal() @@ 
-171,19 +199,21 @@ def calc_filter_coating_transmission(self, energy): y, x = self.pixel_coords xy_data = np.vstack((x.flatten(), y.flatten())).T dvecs = self.cart_to_dvecs(xy_data) - dvecs = dvecs/np.tile(np.linalg.norm(dvecs, axis=1), [3, 1]).T + dvecs = dvecs / np.tile(np.linalg.norm(dvecs, axis=1), [3, 1]).T - secb = (1./np.sum(dvecs*det_normal, axis=1)).reshape(self.shape) + secb = (1.0 / np.sum(dvecs * det_normal, axis=1)).reshape(self.shape) - transmission_filter = self.calc_transmission_generic(secb, t_f, al_f) + transmission_filter = self.calc_transmission_generic(secb, t_f, al_f) transmission_coating = self.calc_transmission_generic(secb, t_c, al_c) - transmission_phosphor = ( - self.calc_transmission_phosphor(secb, t_p, al_p, L, energy, pre_U0)) + transmission_phosphor = self.calc_transmission_phosphor( + secb, t_p, al_p, L, energy, pre_U0 + ) - transmission_filter = transmission_filter.reshape(self.shape) + transmission_filter = transmission_filter.reshape(self.shape) transmission_coating = transmission_coating.reshape(self.shape) transmission_filter_coating = ( - transmission_filter * transmission_coating) + transmission_filter * transmission_coating + ) return transmission_filter_coating, transmission_phosphor @@ -251,8 +281,9 @@ def radius(self, r): def physical_size(self): # return physical size of detector # in mm after dewarped to rectangle - return np.array([self.rows*self.pixel_size_row, - self.cols*self.pixel_size_col]) + return np.array( + [self.rows * self.pixel_size_row, self.cols * self.pixel_size_col] + ) @property def beam_position(self): @@ -261,13 +292,24 @@ def beam_position(self): frame {Xd, Yd, Zd}. NaNs if no intersection. 
""" output = np.nan * np.ones(2) - args = (np.atleast_2d(self.bvec), self.caxis, self.paxis, - self.radius, self.tvec) + args = ( + np.atleast_2d(self.bvec), + self.caxis, + self.paxis, + self.radius, + self.tvec, + ) pt_on_cylinder = xrdutil.utils._unitvec_to_cylinder(*args) - args = (pt_on_cylinder, self.tvec, self.caxis, - self.paxis, self.radius, self.physical_size, - self.angle_extent) + args = ( + pt_on_cylinder, + self.tvec, + self.caxis, + self.paxis, + self.radius, + self.physical_size, + self.angle_extent, + ) pt_on_cylinder, _ = xrdutil.utils._clip_to_cylindrical_detector(*args) args = (pt_on_cylinder, self.tvec, self.caxis, self.paxis, self.radius) @@ -284,7 +326,9 @@ def angle_extent(self): def update_memoization_sizes(all_panels): Detector.update_memoization_sizes(all_panels) - num_matches = sum(isinstance(x, CylindricalDetector) for x in all_panels) + num_matches = sum( + isinstance(x, CylindricalDetector) for x in all_panels + ) funcs = [ _pixel_angles, _pixel_tth_gradient, @@ -300,35 +344,30 @@ def extra_config_kwargs(self): @memoize -def _pixel_angles(origin, - pixel_coords, - distortion, - caxis, - paxis, - tvec_d, - radius, - bvec, - evec, - rows, - cols): +def _pixel_angles( + origin, + pixel_coords, + distortion, + caxis, + paxis, + tvec_d, + radius, + bvec, + evec, + rows, + cols, +): assert len(origin) == 3, "origin must have 3 elements" pix_i, pix_j = pixel_coords - xy = np.ascontiguousarray( - np.vstack([ - pix_j.flatten(), pix_i.flatten() - ]).T - ) + xy = np.ascontiguousarray(np.vstack([pix_j.flatten(), pix_i.flatten()]).T) if distortion is not None: xy = distortion.apply(xy) - dvecs = xrdutil.utils._warp_to_cylinder(xy, - tvec_d-origin, - radius, - caxis, - paxis, - normalize=True) + dvecs = _warp_to_cylinder( + xy, tvec_d - origin, radius, caxis, paxis, normalize=True + ) angs = xrdutil.utils._dvec_to_angs(dvecs, bvec, evec) @@ -361,6 +400,5 @@ def _pixel_eta_gradient(origin, **pixel_angle_kwargs): def 
_fix_branch_cut_in_gradients(pgarray): return np.min( - np.abs(np.stack([pgarray - np.pi, pgarray, pgarray + np.pi])), - axis=0 + np.abs(np.stack([pgarray - np.pi, pgarray, pgarray + np.pi])), axis=0 ) diff --git a/hexrd/instrument/detector.py b/hexrd/core/instrument/detector.py similarity index 92% rename from hexrd/instrument/detector.py rename to hexrd/core/instrument/detector.py index dfcc0aca8..f16fa3ea9 100644 --- a/hexrd/instrument/detector.py +++ b/hexrd/core/instrument/detector.py @@ -3,23 +3,28 @@ import os from typing import Optional -from hexrd.instrument.constants import ( - COATING_DEFAULT, FILTER_DEFAULTS, PHOSPHOR_DEFAULT +from hexrd.core.instrument.constants import ( + COATING_DEFAULT, + FILTER_DEFAULTS, + PHOSPHOR_DEFAULT, ) -from hexrd.instrument.physics_package import AbstractPhysicsPackage +from hexrd.core.instrument.physics_package import AbstractPhysicsPackage import numba import numpy as np -from hexrd import constants as ct -from hexrd import distortion as distortion_pkg -from hexrd import matrixutil as mutil -from hexrd import xrdutil -from hexrd.rotations import mapAngle +from hexrd.core import constants as ct +from hexrd.core import distortion as distortion_pkg +from hexrd.core import matrixutil as mutil -from hexrd.material import crystallography -from hexrd.material.crystallography import PlaneData +# TODO: Resolve extra-core-dependency +from hexrd.hedm import xrdutil +from hexrd.hed.xrdutil import _project_on_detector_plane +from hexrd.core.rotations import mapAngle -from hexrd.transforms.xfcapi import ( +from hexrd.core.material import crystallography +from hexrd.core.material.crystallography import PlaneData + +from hexrd.core.transforms.xfcapi import ( xy_to_gvec, gvec_to_xy, make_beam_rmat, @@ -28,12 +33,13 @@ angles_to_dvec, ) -from hexrd.utils.decorators import memoize -from hexrd.gridutil import cellIndices -from hexrd.instrument import detector_coatings -from hexrd.material.utils import ( +from hexrd.core.utils.decorators 
import memoize +from hexrd.core.gridutil import cellIndices +from hexrd.core.instrument import detector_coatings +from hexrd.core.material.utils import ( calculate_linear_absorption_length, - calculate_incoherent_scattering) + calculate_incoherent_scattering, +) distortion_registry = distortion_pkg.Registry() @@ -290,7 +296,8 @@ def __init__( if detector_filter is None: detector_filter = detector_coatings.Filter( - **FILTER_DEFAULTS.TARDIS) + **FILTER_DEFAULTS.TARDIS + ) self.filter = detector_filter if detector_coating is None: @@ -555,8 +562,9 @@ def pixel_solid_angles(self) -> np.ndarray: # METHODS # ========================================================================= - def pixel_Q(self, energy: np.floating, - origin: np.ndarray = ct.zeros_3) -> np.ndarray: + def pixel_Q( + self, energy: np.floating, origin: np.ndarray = ct.zeros_3 + ) -> np.ndarray: '''get the equivalent momentum transfer for the angles. @@ -575,7 +583,7 @@ def pixel_Q(self, energy: np.floating, ''' lam = ct.keVToAngstrom(energy) tth, _ = self.pixel_angles(origin=origin) - return 4.*np.pi*np.sin(tth*0.5)/lam + return 4.0 * np.pi * np.sin(tth * 0.5) / lam def pixel_compton_energy_loss( self, @@ -602,9 +610,9 @@ def pixel_compton_energy_loss( ''' energy = np.asarray(energy) tth, _ = self.pixel_angles() - ang_fact = (1 - np.cos(tth)) - beta = energy/ct.cRestmasskeV - return energy/(1 + beta*ang_fact) + ang_fact = 1 - np.cos(tth) + beta = energy / ct.cRestmasskeV + return energy / (1 + beta * ang_fact) def pixel_compton_attenuation_length( self, @@ -653,8 +661,7 @@ def compute_compton_scattering_intensity( physics_package: AbstractPhysicsPackage, origin: np.array = ct.zeros_3, ) -> tuple[np.ndarray, np.ndarray, np.ndarray]: - - ''' compute the theoretical compton scattering + '''compute the theoretical compton scattering signal on the detector. 
this value is corrected for the transmission of compton scattered photons and normlaized before getting subtracting from the @@ -677,18 +684,20 @@ def compute_compton_scattering_intensity( q = self.pixel_Q(energy) inc_s = calculate_incoherent_scattering( - physics_package.sample_material, - q.flatten()).reshape(self.shape) + physics_package.sample_material, q.flatten() + ).reshape(self.shape) inc_w = calculate_incoherent_scattering( - physics_package.window_material, - q.flatten()).reshape(self.shape) + physics_package.window_material, q.flatten() + ).reshape(self.shape) t_s = self.calc_compton_physics_package_transmission( - energy, rMat_s, physics_package) + energy, rMat_s, physics_package + ) t_w = self.calc_compton_window_transmission( - energy, rMat_s, physics_package) + energy, rMat_s, physics_package + ) return inc_s * t_s + inc_w * t_w, t_s, t_w @@ -1599,7 +1608,7 @@ def simulate_rotation_series( allAngs[:, 2] = mapAngle(allAngs[:, 2], ome_period) # find points that fall on the panel - det_xy, rMat_s, on_plane = xrdutil._project_on_detector_plane( + det_xy, rMat_s, on_plane = _project_on_detector_plane( allAngs, self.rmat, rMat_c, @@ -1806,19 +1815,23 @@ def increase_memoization_sizes(funcs, min_size): if cache_info['maxsize'] < min_size: f.set_cache_maxsize(min_size) - def calc_physics_package_transmission(self, energy: np.floating, - rMat_s: np.array, - physics_package: AbstractPhysicsPackage) -> np.float64: + def calc_physics_package_transmission( + self, + energy: np.floating, + rMat_s: np.array, + physics_package: AbstractPhysicsPackage, + ) -> np.float64: """get the transmission from the physics package need to consider HED and HEDM samples separately """ bvec = self.bvec - sample_normal = np.dot(rMat_s, [0., 0., np.sign(bvec[2])]) - seca = 1./np.dot(bvec, sample_normal) + sample_normal = np.dot(rMat_s, [0.0, 0.0, np.sign(bvec[2])]) + seca = 1.0 / np.dot(bvec, sample_normal) tth, eta = self.pixel_angles() - angs = np.vstack((tth.flatten(), 
eta.flatten(), - np.zeros(tth.flatten().shape))).T + angs = np.vstack( + (tth.flatten(), eta.flatten(), np.zeros(tth.flatten().shape)) + ).T dvecs = angles_to_dvec(angs, beam_vec=bvec) @@ -1831,17 +1844,17 @@ def calc_physics_package_transmission(self, energy: np.floating, cosb < 0, np.isclose( cosb, - 0., - atol=5E-2, - ) + 0.0, + atol=5e-2, + ), ) cosb[mask] = np.nan - secb = 1./cosb.reshape(self.shape) + secb = 1.0 / cosb.reshape(self.shape) T_sample = self.calc_transmission_sample( - seca, secb, energy, physics_package) - T_window = self.calc_transmission_window( - secb, energy, physics_package) + seca, secb, energy, physics_package + ) + T_window = self.calc_transmission_window(secb, energy, physics_package) transmission_physics_package = T_sample * T_window return transmission_physics_package @@ -1858,12 +1871,13 @@ def calc_compton_physics_package_transmission( routine than elastically scattered absorption. ''' bvec = self.bvec - sample_normal = np.dot(rMat_s, [0., 0., np.sign(bvec[2])]) - seca = 1./np.dot(bvec, sample_normal) + sample_normal = np.dot(rMat_s, [0.0, 0.0, np.sign(bvec[2])]) + seca = 1.0 / np.dot(bvec, sample_normal) tth, eta = self.pixel_angles() - angs = np.vstack((tth.flatten(), eta.flatten(), - np.zeros(tth.flatten().shape))).T + angs = np.vstack( + (tth.flatten(), eta.flatten(), np.zeros(tth.flatten().shape)) + ).T dvecs = angles_to_dvec(angs, beam_vec=bvec) @@ -1876,18 +1890,19 @@ def calc_compton_physics_package_transmission( cosb < 0, np.isclose( cosb, - 0., - atol=5E-2, - ) + 0.0, + atol=5e-2, + ), ) cosb[mask] = np.nan - secb = 1./cosb.reshape(self.shape) + secb = 1.0 / cosb.reshape(self.shape) T_sample = self.calc_compton_transmission( - seca, secb, energy, - physics_package, 'sample') + seca, secb, energy, physics_package, 'sample' + ) T_window = self.calc_compton_transmission_window( - secb, energy, physics_package) + secb, energy, physics_package + ) return T_sample * T_window @@ -1904,12 +1919,13 @@ def 
calc_compton_window_transmission( elastically scattered absorption. ''' bvec = self.bvec - sample_normal = np.dot(rMat_s, [0., 0., np.sign(bvec[2])]) - seca = 1./np.dot(bvec, sample_normal) + sample_normal = np.dot(rMat_s, [0.0, 0.0, np.sign(bvec[2])]) + seca = 1.0 / np.dot(bvec, sample_normal) tth, eta = self.pixel_angles() - angs = np.vstack((tth.flatten(), eta.flatten(), - np.zeros(tth.flatten().shape))).T + angs = np.vstack( + (tth.flatten(), eta.flatten(), np.zeros(tth.flatten().shape)) + ).T dvecs = angles_to_dvec(angs, beam_vec=bvec) @@ -1922,45 +1938,54 @@ def calc_compton_window_transmission( cosb < 0, np.isclose( cosb, - 0., - atol=5E-2, - ) + 0.0, + atol=5e-2, + ), ) cosb[mask] = np.nan - secb = 1./cosb.reshape(self.shape) + secb = 1.0 / cosb.reshape(self.shape) T_window = self.calc_compton_transmission( - seca, secb, energy, - physics_package, 'window') + seca, secb, energy, physics_package, 'window' + ) T_sample = self.calc_compton_transmission_sample( - seca, energy, physics_package) + seca, energy, physics_package + ) return T_sample * T_window - def calc_transmission_sample(self, seca: np.array, - secb: np.array, energy: np.floating, - physics_package: AbstractPhysicsPackage) -> np.array: + def calc_transmission_sample( + self, + seca: np.array, + secb: np.array, + energy: np.floating, + physics_package: AbstractPhysicsPackage, + ) -> np.array: thickness_s = physics_package.sample_thickness # in microns if np.isclose(thickness_s, 0): return np.ones(self.shape) # in microns^-1 - mu_s = 1./physics_package.sample_absorption_length(energy) - x = (mu_s*thickness_s) - pre = 1./x/(secb - seca) - num = np.exp(-x*seca) - np.exp(-x*secb) + mu_s = 1.0 / physics_package.sample_absorption_length(energy) + x = mu_s * thickness_s + pre = 1.0 / x / (secb - seca) + num = np.exp(-x * seca) - np.exp(-x * secb) return pre * num - def calc_transmission_window(self, secb: np.array, energy: np.floating, - physics_package: AbstractPhysicsPackage) -> np.array: + def 
calc_transmission_window( + self, + secb: np.array, + energy: np.floating, + physics_package: AbstractPhysicsPackage, + ) -> np.array: material_w = physics_package.window_material thickness_w = physics_package.window_thickness # in microns if material_w is None or np.isclose(thickness_w, 0): return np.ones(self.shape) # in microns^-1 - mu_w = 1./physics_package.window_absorption_length(energy) - return np.exp(-thickness_w*mu_w*secb) + mu_w = 1.0 / physics_package.window_absorption_length(energy) + return np.exp(-thickness_w * mu_w * secb) def calc_compton_transmission( self, @@ -1975,9 +2000,11 @@ def calc_compton_transmission( formula = physics_package.sample_material density = physics_package.sample_density thickness = physics_package.sample_thickness - mu = 1./physics_package.sample_absorption_length(energy) - mu_prime = 1. / self.pixel_compton_attenuation_length( - energy, density, formula, + mu = 1.0 / physics_package.sample_absorption_length(energy) + mu_prime = 1.0 / self.pixel_compton_attenuation_length( + energy, + density, + formula, ) elif pp_layer == 'window': formula = physics_package.window_material @@ -1986,17 +2013,18 @@ def calc_compton_transmission( density = physics_package.window_density thickness = physics_package.window_thickness - mu = 1./physics_package.sample_absorption_length(energy) - mu_prime = 1./self.pixel_compton_attenuation_length( - energy, density, formula) + mu = 1.0 / physics_package.sample_absorption_length(energy) + mu_prime = 1.0 / self.pixel_compton_attenuation_length( + energy, density, formula + ) if thickness <= 0: return np.ones(self.shape) - x1 = mu*thickness*seca - x2 = mu_prime*thickness*secb - num = (np.exp(-x1) - np.exp(-x2)) - return -num/(x1 - x2) + x1 = mu * thickness * seca + x2 = mu_prime * thickness * secb + num = np.exp(-x1) - np.exp(-x2) + return -num / (x1 - x2) def calc_compton_transmission_sample( self, @@ -2006,9 +2034,8 @@ def calc_compton_transmission_sample( ) -> np.ndarray: thickness_s = 
physics_package.sample_thickness # in microns - mu_s = 1./physics_package.sample_absorption_length( - energy) - return np.exp(-mu_s*thickness_s*seca) + mu_s = 1.0 / physics_package.sample_absorption_length(energy) + return np.exp(-mu_s * thickness_s * seca) def calc_compton_transmission_window( self, @@ -2020,12 +2047,13 @@ def calc_compton_transmission_window( if formula is None: return np.ones(self.shape) - density = physics_package.window_density # in g/cc + density = physics_package.window_density # in g/cc thickness_w = physics_package.window_thickness # in microns - mu_w_prime = 1./self.pixel_compton_attenuation_length( - energy, density, formula) - return np.exp(-mu_w_prime*thickness_w*secb) + mu_w_prime = 1.0 / self.pixel_compton_attenuation_length( + energy, density, formula + ) + return np.exp(-mu_w_prime * thickness_w * secb) def calc_effective_pinhole_area(self, physics_package: AbstractPhysicsPackage) -> np.array: '''get the effective pinhole area correction @@ -2063,30 +2091,35 @@ def calc_effective_pinhole_area(self, physics_package: AbstractPhysicsPackage) - return 0.5*(f1 - f2) - def calc_transmission_generic(self, - secb: np.array, - thickness: np.floating, - absorption_length: np.floating) -> np.array: + def calc_transmission_generic( + self, + secb: np.array, + thickness: np.floating, + absorption_length: np.floating, + ) -> np.array: if np.isclose(thickness, 0): return np.ones(self.shape) - mu = 1./absorption_length # in microns^-1 - return np.exp(-thickness*mu*secb) + mu = 1.0 / absorption_length # in microns^-1 + return np.exp(-thickness * mu * secb) - def calc_transmission_phosphor(self, - secb: np.array, - thickness: np.floating, - readout_length: np.floating, - absorption_length: np.floating, - energy: np.floating, - pre_U0: np.floating) -> np.array: + def calc_transmission_phosphor( + self, + secb: np.array, + thickness: np.floating, + readout_length: np.floating, + absorption_length: np.floating, + energy: np.floating, + pre_U0: 
np.floating, + ) -> np.array: if np.isclose(thickness, 0): return np.ones(self.shape) - f1 = absorption_length*thickness - f2 = absorption_length*readout_length - arg = (secb + 1/f2) - return pre_U0 * energy*((1.0 - np.exp(-f1*arg))/arg) + f1 = absorption_length * thickness + f2 = absorption_length * readout_length + arg = secb + 1 / f2 + return pre_U0 * energy * ((1.0 - np.exp(-f1 * arg)) / arg) + # ============================================================================= # UTILITY METHODS diff --git a/hexrd/instrument/detector_coatings.py b/hexrd/core/instrument/detector_coatings.py similarity index 99% rename from hexrd/instrument/detector_coatings.py rename to hexrd/core/instrument/detector_coatings.py index 733677a6e..5fd40e955 100644 --- a/hexrd/instrument/detector_coatings.py +++ b/hexrd/core/instrument/detector_coatings.py @@ -1,7 +1,7 @@ import numpy as np -from hexrd.material.utils import ( - calculate_energy_absorption_length, +from hexrd.core.material.utils import ( calculate_linear_absorption_length, + calculate_energy_absorption_length, ) @@ -139,6 +139,7 @@ def deserialize(self, **kwargs): for key, value in kwargs.items(): setattr(self, key, value) + class Filter(AbstractLayer): def __init__(self, **abstractlayer_kwargs): diff --git a/hexrd/instrument/hedm_instrument.py b/hexrd/core/instrument/hedm_instrument.py similarity index 78% rename from hexrd/instrument/hedm_instrument.py rename to hexrd/core/instrument/hedm_instrument.py index 6d7442c9d..d2e09be4e 100644 --- a/hexrd/instrument/hedm_instrument.py +++ b/hexrd/core/instrument/hedm_instrument.py @@ -52,48 +52,49 @@ from scipy.linalg import logm from skimage.measure import regionprops -from hexrd import constants -from hexrd.imageseries import ImageSeries -from hexrd.imageseries.process import ProcessedImageSeries -from hexrd.imageseries.omega import OmegaImageSeries -from hexrd.fitting.utils import fit_ring -from hexrd.gridutil import make_tolerance_grid -from hexrd import matrixutil as 
mutil -from hexrd.transforms.xfcapi import ( +from hexrd.core import constants +from hexrd.core.imageseries import ImageSeries +from hexrd.core.imageseries.process import ProcessedImageSeries +from hexrd.core.imageseries.omega import OmegaImageSeries +from hexrd.core.fitting.utils import fit_ring +from hexrd.core.gridutil import make_tolerance_grid +from hexrd.core import matrixutil as mutil +from hexrd.core.transforms.xfcapi import ( angles_to_gvec, gvec_to_xy, make_sample_rmat, make_rmat_of_expmap, unit_vector, ) -from hexrd import xrdutil -from hexrd.material.crystallography import PlaneData -from hexrd import constants as ct -from hexrd.rotations import mapAngle -from hexrd import distortion as distortion_pkg -from hexrd.utils.concurrent import distribute_tasks -from hexrd.utils.hdf5 import unwrap_dict_to_h5, unwrap_h5_to_dict -from hexrd.utils.yaml import NumpyToNativeDumper -from hexrd.valunits import valWUnit -from hexrd.wppf import LeBail + +# TODO: Resolve extra-core-dependency +from hexrd.hedm import xrdutil +from hexrd.hed.xrdutil import _project_on_detector_plane +from hexrd.core.material.crystallography import PlaneData +from hexrd.core import constants as ct +from hexrd.core.rotations import mapAngle +from hexrd.core import distortion as distortion_pkg +from hexrd.core.utils.concurrent import distribute_tasks +from hexrd.core.utils.hdf5 import unwrap_dict_to_h5, unwrap_h5_to_dict +from hexrd.core.utils.yaml import NumpyToNativeDumper +from hexrd.core.valunits import valWUnit +from hexrd.powder.wppf import LeBail from .cylindrical_detector import CylindricalDetector -from .detector import ( - beam_energy_DFLT, - Detector, - max_workers_DFLT, -) +from .detector import beam_energy_DFLT, Detector, max_workers_DFLT from .planar_detector import PlanarDetector from skimage.draw import polygon from skimage.util import random_noise -from hexrd.wppf import wppfsupport +from hexrd.powder.wppf import wppfsupport try: from fast_histogram import histogram1d + 
fast_histogram = True except ImportError: from numpy import histogram as histogram1d + fast_histogram = False logger = logging.getLogger() @@ -116,9 +117,9 @@ pixel_size_DFLT = (0.2, 0.2) tilt_params_DFLT = np.zeros(3) -t_vec_d_DFLT = np.r_[0., 0., -1000.] +t_vec_d_DFLT = np.r_[0.0, 0.0, -1000.0] -chi_DFLT = 0. +chi_DFLT = 0.0 t_vec_s_DFLT = np.zeros(3) multi_ims_key = ct.shared_ims_key @@ -132,8 +133,9 @@ # ============================================================================= -def generate_chunks(nrows, ncols, base_nrows, base_ncols, - row_gap=0, col_gap=0): +def generate_chunks( + nrows, ncols, base_nrows, base_ncols, row_gap=0, col_gap=0 +): """ Generate chunking data for regularly tiled composite detectors. @@ -165,18 +167,15 @@ def generate_chunks(nrows, ncols, base_nrows, base_ncols, [[row_start, row_stop], [col_start, col_stop]] """ - row_starts = np.array([i*(base_nrows + row_gap) for i in range(nrows)]) - col_starts = np.array([i*(base_ncols + col_gap) for i in range(ncols)]) + row_starts = np.array([i * (base_nrows + row_gap) for i in range(nrows)]) + col_starts = np.array([i * (base_ncols + col_gap) for i in range(ncols)]) rr = np.vstack([row_starts, row_starts + base_nrows]) cc = np.vstack([col_starts, col_starts + base_ncols]) rects = [] labels = [] for i in range(nrows): for j in range(ncols): - this_rect = np.array( - [[rr[0, i], rr[1, i]], - [cc[0, j], cc[1, j]]] - ) + this_rect = np.array([[rr[0, i], rr[1, i]], [cc[0, j], cc[1, j]]]) rects.append(this_rect) labels.append('%d_%d' % (i, j)) return rects, labels @@ -202,9 +201,11 @@ def chunk_instrument(instr, rects, labels, use_roi=False): """ icfg_dict = instr.write_config() - new_icfg_dict = dict(beam=icfg_dict['beam'], - oscillation_stage=icfg_dict['oscillation_stage'], - detectors={}) + new_icfg_dict = dict( + beam=icfg_dict['beam'], + oscillation_stage=icfg_dict['oscillation_stage'], + detectors={}, + ) for panel_id, panel in instr.detectors.items(): pcfg_dict = 
panel.config_dict(instr.chi, instr.tvec)['detector'] @@ -214,7 +215,7 @@ def chunk_instrument(instr, rects, labels, use_roi=False): row_col_dim = np.diff(rect) # (2, 1) shape = tuple(row_col_dim.flatten()) - center = (rect[:, 0].reshape(2, 1) + 0.5*row_col_dim) + center = rect[:, 0].reshape(2, 1) + 0.5 * row_col_dim sp_tvec = np.concatenate( [panel.pixelToCart(center.T).flatten(), np.zeros(1)] @@ -239,7 +240,7 @@ def chunk_instrument(instr, rects, labels, use_roi=False): if panel.panel_buffer is not None: if panel.panel_buffer.ndim == 2: # have a mask array! submask = panel.panel_buffer[ - rect[0, 0]:rect[0, 1], rect[1, 0]:rect[1, 1] + rect[0, 0] : rect[0, 1], rect[1, 0] : rect[1, 1] ] new_icfg_dict['detectors'][panel_name]['buffer'] = submask return new_icfg_dict @@ -270,7 +271,7 @@ def _parse_imgser_dict(imgser_dict, det_key, roi=None): Returns ------- - ims : hexrd.imageseries + ims : hexrd.core.imageseries The desired imageseries object. """ @@ -283,9 +284,7 @@ def _parse_imgser_dict(imgser_dict, det_key, roi=None): images_in = imgser_dict[multi_ims_key] elif np.any(matched_det_keys): if sum(matched_det_keys) != 1: - raise RuntimeError( - f"multiple entries found for '{det_key}'" - ) + raise RuntimeError(f"multiple entries found for '{det_key}'") # use boolean array to index the proper key # !!! 
these should be in the same order img_keys = img_keys = np.asarray(list(imgser_dict.keys())) @@ -305,7 +304,12 @@ def _parse_imgser_dict(imgser_dict, det_key, roi=None): if isinstance(images_in, ims_classes): # input is an imageseries of some kind - ims = ProcessedImageSeries(images_in, [('rectangle', roi), ]) + ims = ProcessedImageSeries( + images_in, + [ + ('rectangle', roi), + ], + ) if isinstance(images_in, OmegaImageSeries): # if it was an OmegaImageSeries, must re-cast ims = OmegaImageSeries(ims) @@ -313,16 +317,16 @@ def _parse_imgser_dict(imgser_dict, det_key, roi=None): # 2- or 3-d array of images ndim = images_in.ndim if ndim == 2: - ims = images_in[roi[0][0]:roi[0][1], roi[1][0]:roi[1][1]] + ims = images_in[roi[0][0] : roi[0][1], roi[1][0] : roi[1][1]] elif ndim == 3: nrows = roi[0][1] - roi[0][0] ncols = roi[1][1] - roi[1][0] n_images = len(images_in) - ims = np.empty((n_images, nrows, ncols), - dtype=images_in.dtype) + ims = np.empty((n_images, nrows, ncols), dtype=images_in.dtype) for i, image in images_in: - ims[i, :, :] = \ - images_in[roi[0][0]:roi[0][1], roi[1][0]:roi[1][1]] + ims[i, :, :] = images_in[ + roi[0][0] : roi[0][1], roi[1][0] : roi[1][1] + ] else: raise RuntimeError( f"image input dim must be 2 or 3; you gave {ndim}" @@ -340,9 +344,8 @@ def calc_beam_vec(azim, pola): tht = np.radians(azim) phi = np.radians(pola) bv = np.r_[ - np.sin(phi)*np.cos(tht), - np.cos(phi), - np.sin(phi)*np.sin(tht)] + np.sin(phi) * np.cos(tht), np.cos(phi), np.sin(phi) * np.sin(tht) + ] return -bv @@ -353,9 +356,7 @@ def calc_angles_from_beam_vec(bvec): """ bvec = np.atleast_1d(bvec).flatten() nvec = unit_vector(-bvec) - azim = float( - np.degrees(np.arctan2(nvec[2], nvec[0])) - ) + azim = float(np.degrees(np.arctan2(nvec[2], nvec[0]))) pola = float(np.degrees(np.arccos(nvec[1]))) return azim, pola @@ -379,9 +380,9 @@ def angle_in_range(angle, ranges, ccw=True, units='degrees'): WARNING: always clockwise; assumes wedges are not overlapping """ - tau = 360. 
+ tau = 360.0 if units.lower() == 'radians': - tau = 2*np.pi + tau = 2 * np.pi w = np.nan for i, wedge in enumerate(ranges): amin = wedge[0] @@ -405,7 +406,7 @@ def max_tth(instr): Parameters ---------- - instr : hexrd.instrument.HEDMInstrument instance + instr : hexrd.hedm.instrument.HEDMInstrument instance the instrument class to evalutate. Returns @@ -413,7 +414,7 @@ def max_tth(instr): tth_max : float The maximum observable Bragg angle by the instrument in radians. """ - tth_max = 0. + tth_max = 0.0 for det in instr.detectors.values(): ptth, peta = det.pixel_angles() tth_max = max(np.max(ptth), tth_max) @@ -445,10 +446,9 @@ def pixel_resolution(instr): ang_ps_full = [] for panel in instr.detectors.values(): angps = panel.angularPixelSize( - np.stack( - panel.pixel_coords, - axis=0 - ).reshape(2, np.cumprod(panel.shape)[-1]).T + np.stack(panel.pixel_coords, axis=0) + .reshape(2, np.cumprod(panel.shape)[-1]) + .T ) ang_ps_full.append(angps) max_tth = min(max_tth, np.min(angps[:, 0])) @@ -480,10 +480,9 @@ def max_resolution(instr): max_eta = np.inf for panel in instr.detectors.values(): angps = panel.angularPixelSize( - np.stack( - panel.pixel_coords, - axis=0 - ).reshape(2, np.cumprod(panel.shape)[-1]).T + np.stack(panel.pixel_coords, axis=0) + .reshape(2, np.cumprod(panel.shape)[-1]) + .T ) mask = ~np.logical_or( np.isclose(angps[:,0], 0), @@ -494,16 +493,16 @@ def max_resolution(instr): def _gaussian_dist(x, cen, fwhm): - sigm = fwhm/(2*np.sqrt(2*np.log(2))) - return np.exp(-0.5*(x - cen)**2/sigm**2) + sigm = fwhm / (2 * np.sqrt(2 * np.log(2))) + return np.exp(-0.5 * (x - cen) ** 2 / sigm**2) def _sigma_to_fwhm(sigm): - return sigm*ct.sigma_to_fwhm + return sigm * ct.sigma_to_fwhm def _fwhm_to_sigma(fwhm): - return fwhm/ct.sigma_to_fwhm + return fwhm / ct.sigma_to_fwhm # ============================================================================= @@ -519,12 +518,17 @@ class HEDMInstrument(object): * where should reference eta be defined? 
currently set to default config """ - def __init__(self, instrument_config=None, - image_series=None, eta_vector=None, - instrument_name=None, tilt_calibration_mapping=None, - max_workers=max_workers_DFLT, - physics_package=None, - active_beam_name: Optional[str] = None): + def __init__( + self, + instrument_config=None, + image_series=None, + eta_vector=None, + instrument_name=None, + tilt_calibration_mapping=None, + max_workers=max_workers_DFLT, + physics_package=None, + active_beam_name: Optional[str] = None, + ): self._id = instrument_name_DFLT self._active_beam_name = active_beam_name @@ -549,7 +553,8 @@ def __init__(self, instrument_config=None, # FIXME: must add cylindrical self._detectors = dict( panel_id_DFLT=PlanarDetector( - rows=nrows_DFLT, cols=ncols_DFLT, + rows=nrows_DFLT, + cols=ncols_DFLT, pixel_size=pixel_size_DFLT, tvec=t_vec_d_DFLT, tilt=tilt_params_DFLT, @@ -557,9 +562,11 @@ def __init__(self, instrument_config=None, xrs_dist=self.source_distance, evec=self._eta_vector, distortion=None, - roi=None, group=None, - max_workers=self.max_workers), - ) + roi=None, + group=None, + max_workers=self.max_workers, + ), + ) self._tvec = t_vec_s_DFLT self._chi = chi_DFLT @@ -586,10 +593,7 @@ def __init__(self, instrument_config=None, self.physics_package = instrument_config['physics_package'] xrs_config = instrument_config['beam'] - is_single_beam = ( - 'energy' in xrs_config and - 'vector' in xrs_config - ) + is_single_beam = 'energy' in xrs_config and 'vector' in xrs_config if is_single_beam: # Assume single beam. 
Load the same way as multibeam self._create_default_beam() @@ -647,7 +651,7 @@ def __init__(self, instrument_config=None, elif isinstance(det_buffer, list): panel_buffer = np.asarray(det_buffer) elif np.isscalar(det_buffer): - panel_buffer = det_buffer*np.ones(2) + panel_buffer = det_buffer * np.ones(2) else: raise RuntimeError( "panel buffer spec invalid for %s" % det_id @@ -724,9 +728,9 @@ def mean_detector_center(self) -> np.ndarray: def mean_group_center(self, group: str) -> np.ndarray: """Return the mean center for detectors belonging to a group""" - centers = np.array([ - x.tvec for x in self.detectors_in_group(group).values() - ]) + centers = np.array( + [x.tvec for x in self.detectors_in_group(group).values()] + ) return centers.sum(axis=0) / len(centers) @property @@ -760,10 +764,11 @@ def detector_parameters(self): pdict = {} for key, panel in self.detectors.items(): pdict[key] = panel.config_dict( - self.chi, self.tvec, + self.chi, + self.tvec, beam_energy=self.beam_energy, beam_vector=self.beam_vector, - style='hdf5' + style='hdf5', ) return pdict @@ -868,8 +873,9 @@ def beam_vector(self) -> np.ndarray: def beam_vector(self, x: np.ndarray): x = np.array(x).flatten() if len(x) == 3: - assert sum(x*x) > 1-ct.sqrt_epsf, \ - 'input must have length = 3 and have unit magnitude' + assert ( + sum(x * x) > 1 - ct.sqrt_epsf + ), 'input must have length = 3 and have unit magnitude' bvec = x elif len(x) == 2: bvec = calc_beam_vec(*x) @@ -886,8 +892,9 @@ def source_distance(self): @source_distance.setter def source_distance(self, x): - assert np.isscalar(x), \ - f"'source_distance' must be a scalar; you input '{x}'" + assert np.isscalar( + x + ), f"'source_distance' must be a scalar; you input '{x}'" self.active_beam['distance'] = x self.beam_dict_modified() @@ -943,8 +950,9 @@ def eta_vector(self): @eta_vector.setter def eta_vector(self, x): x = np.array(x).flatten() - assert len(x) == 3 and sum(x*x) > 1-ct.sqrt_epsf, \ - 'input must have length = 3 and have unit 
magnitude' + assert ( + len(x) == 3 and sum(x * x) > 1 - ct.sqrt_epsf + ), 'input must have length = 3 and have unit magnitude' self._eta_vector = x # ...maybe change dictionary item behavior for 3.x compatibility? for detector_id in self.detectors: @@ -956,10 +964,11 @@ def eta_vector(self, x): # ========================================================================= def write_config(self, file=None, style='yaml', calibration_dict={}): - """ WRITE OUT YAML FILE """ + """WRITE OUT YAML FILE""" # initialize output dictionary - assert style.lower() in ['yaml', 'hdf5'], \ + assert style.lower() in ['yaml', 'hdf5'], ( "style must be either 'yaml', or 'hdf5'; you gave '%s'" % style + ) par_dict = {} @@ -993,10 +1002,7 @@ def write_config(self, file=None, style='yaml', calibration_dict={}): if calibration_dict: par_dict['calibration_crystal'] = calibration_dict - ostage = dict( - chi=self.chi, - translation=self.tvec.tolist() - ) + ostage = dict(chi=self.chi, translation=self.tvec.tolist()) par_dict['oscillation_stage'] = ostage det_dict = dict.fromkeys(self.detectors) @@ -1004,10 +1010,13 @@ def write_config(self, file=None, style='yaml', calibration_dict={}): # grab panel config # !!! don't need beam or tvec # !!! 
have vetted style - pdict = detector.config_dict(chi=self.chi, tvec=self.tvec, - beam_energy=self.beam_energy, - beam_vector=self.beam_vector, - style=style) + pdict = detector.config_dict( + chi=self.chi, + tvec=self.tvec, + beam_energy=self.beam_energy, + beam_vector=self.beam_vector, + style=style, + ) det_dict[det_name] = pdict['detector'] par_dict['detectors'] = det_dict @@ -1017,6 +1026,7 @@ def write_config(self, file=None, style='yaml', calibration_dict={}): with open(file, 'w') as f: yaml.dump(par_dict, stream=f, Dumper=NumpyToNativeDumper) else: + def _write_group(file): instr_grp = file.create_group('instrument') unwrap_dict_to_h5(instr_grp, par_dict, asattr=False) @@ -1032,9 +1042,15 @@ def _write_group(file): return par_dict - def extract_polar_maps(self, plane_data, imgser_dict, - active_hkls=None, threshold=None, - tth_tol=None, eta_tol=0.25): + def extract_polar_maps( + self, + plane_data, + imgser_dict, + active_hkls=None, + threshold=None, + tth_tol=None, + eta_tol=0.25, + ): """ Extract eta-omega maps from an imageseries. @@ -1058,23 +1074,25 @@ def extract_polar_maps(self, plane_data, imgser_dict, # detectors, so calculate it once # !!! 
grab first panel panel = next(iter(self.detectors.values())) - pow_angs, pow_xys, tth_ranges, eta_idx, eta_edges = \ + pow_angs, pow_xys, tth_ranges, eta_idx, eta_edges = ( panel.make_powder_rings( - plane_data, merge_hkls=False, - delta_eta=eta_tol, full_output=True + plane_data, + merge_hkls=False, + delta_eta=eta_tol, + full_output=True, ) + ) if active_hkls is not None: - assert hasattr(active_hkls, '__len__'), \ - "active_hkls must be an iterable with __len__" + assert hasattr( + active_hkls, '__len__' + ), "active_hkls must be an iterable with __len__" # need to re-cast for element-wise operations active_hkls = np.array(active_hkls) # these are all active reflection unique hklIDs - active_hklIDs = plane_data.getHKLID( - plane_data.hkls, master=True - ) + active_hklIDs = plane_data.getHKLID(plane_data.hkls, master=True) # find indices idx = np.zeros_like(active_hkls, dtype=int) @@ -1131,9 +1149,14 @@ def extract_polar_maps(self, plane_data, imgser_dict, # Divide up the images among processes tasks = distribute_tasks(len(ims), self.max_workers) - func = partial(_run_histograms, ims=ims, tth_ranges=tth_ranges, - ring_maps=ring_maps, ring_params=ring_params, - threshold=threshold) + func = partial( + _run_histograms, + ims=ims, + tth_ranges=tth_ranges, + ring_maps=ring_maps, + ring_params=ring_params, + threshold=threshold, + ) max_workers = self.max_workers if max_workers == 1 or len(tasks) == 1: @@ -1151,12 +1174,21 @@ def extract_polar_maps(self, plane_data, imgser_dict, return ring_maps_panel, eta_edges - def extract_line_positions(self, plane_data, imgser_dict, - tth_tol=None, eta_tol=1., npdiv=2, - eta_centers=None, - collapse_eta=True, collapse_tth=False, - do_interpolation=True, do_fitting=False, - tth_distortion=None, fitting_kwargs=None): + def extract_line_positions( + self, + plane_data, + imgser_dict, + tth_tol=None, + eta_tol=1.0, + npdiv=2, + eta_centers=None, + collapse_eta=True, + collapse_tth=False, + do_interpolation=True, + do_fitting=False, + 
tth_distortion=None, + fitting_kwargs=None, + ): """ Perform annular interpolation on diffraction images. @@ -1200,7 +1232,7 @@ def extract_line_positions(self, plane_data, imgser_dict, for special case of pinhole camera distortions. See classes in hexrd.xrdutil.phutil fitting_kwargs : dict, optional - kwargs passed to hexrd.fitting.utils.fit_ring if do_fitting is True + kwargs passed to hexrd.core.fitting.utils.fit_ring if do_fitting is True Raises ------ @@ -1231,8 +1263,12 @@ def extract_line_positions(self, plane_data, imgser_dict, # LOOP OVER DETECTORS # ===================================================================== logger.info("Interpolating ring data") - pbar_dets = partial(tqdm, total=self.num_panels, desc="Detector", - position=self.num_panels) + pbar_dets = partial( + tqdm, + total=self.num_panels, + desc="Detector", + position=self.num_panels, + ) # Split up the workers among the detectors max_workers_per_detector = max(1, self.max_workers // self.num_panels) @@ -1255,23 +1291,26 @@ def extract_line_positions(self, plane_data, imgser_dict, def make_instr_cfg(panel): return panel.config_dict( - chi=self.chi, tvec=self.tvec, + chi=self.chi, + tvec=self.tvec, beam_energy=self.beam_energy, beam_vector=self.beam_vector, - style='hdf5' + style='hdf5', ) images = [] for detector_id, panel in self.detectors.items(): - images.append(_parse_imgser_dict(imgser_dict, detector_id, - roi=panel.roi)) + images.append( + _parse_imgser_dict(imgser_dict, detector_id, roi=panel.roi) + ) panels = [self.detectors[k] for k in self.detectors] instr_cfgs = [make_instr_cfg(x) for x in panels] pbp_array = np.arange(self.num_panels) iter_args = zip(panels, instr_cfgs, images, pbp_array) - with ProcessPoolExecutor(mp_context=constants.mp_context, - max_workers=self.num_panels) as executor: + with ProcessPoolExecutor( + mp_context=constants.mp_context, max_workers=self.num_panels + ) as executor: results = list(pbar_dets(executor.map(func, iter_args))) panel_data = {} @@ 
-1280,12 +1319,9 @@ def make_instr_cfg(panel): return panel_data - def simulate_powder_pattern(self, - mat_list, - params=None, - bkgmethod=None, - origin=None, - noise=None): + def simulate_powder_pattern( + self, mat_list, params=None, bkgmethod=None, origin=None, noise=None + ): """ Generate powder diffraction iamges from specified materials. @@ -1324,8 +1360,7 @@ def simulate_powder_pattern(self, if origin is None: origin = self.tvec origin = np.asarray(origin).squeeze() - assert len(origin) == 3, \ - "origin must be a 3-element sequence" + assert len(origin) == 3, "origin must be a 3-element sequence" if bkgmethod is None: bkgmethod = {'chebyshev': 3} @@ -1365,7 +1400,7 @@ def simulate_powder_pattern(self, # find min and max tth over all panels tth_mi = np.inf - tth_ma = 0. + tth_ma = 0.0 ptth_dict = dict.fromkeys(self.detectors) for det_key, panel in self.detectors.items(): ptth, peta = panel.pixel_angles(origin=origin) @@ -1387,7 +1422,7 @@ def simulate_powder_pattern(self, ang_res = max_resolution(self) # !!! calc nsteps by oversampling - nsteps = int(np.ceil(2*(tth_ma - tth_mi)/np.degrees(ang_res[0]))) + nsteps = int(np.ceil(2 * (tth_ma - tth_mi) / np.degrees(ang_res[0]))) # evaulation vector for LeBail tth = np.linspace(tth_mi, tth_ma, nsteps) @@ -1396,7 +1431,7 @@ def simulate_powder_pattern(self, wavelength = [ valWUnit('lp', 'length', self.beam_wavelength, 'angstrom'), - 1. 
+ 1.0, ] ''' @@ -1409,23 +1444,25 @@ def simulate_powder_pattern(self, tth = mat.planeData.getTTh() - LP = (1 + np.cos(tth)**2) / \ - np.cos(0.5*tth)/np.sin(0.5*tth)**2 + LP = ( + (1 + np.cos(tth) ** 2) + / np.cos(0.5 * tth) + / np.sin(0.5 * tth) ** 2 + ) intensity[mat.name] = {} - intensity[mat.name]['synchrotron'] = \ + intensity[mat.name]['synchrotron'] = ( mat.planeData.structFact * LP * multiplicity + ) kwargs = { 'expt_spectrum': expt, 'params': params, 'phases': mat_list, - 'wavelength': { - 'synchrotron': wavelength - }, + 'wavelength': {'synchrotron': wavelength}, 'bkgmethod': bkgmethod, 'intensity_init': intensity, - 'peakshape': 'pvtch' + 'peakshape': 'pvtch', } self.WPPFclass = LeBail(**kwargs) @@ -1443,9 +1480,11 @@ def simulate_powder_pattern(self, for det_key, panel in self.detectors.items(): ptth = ptth_dict[det_key] - img = np.interp(np.degrees(ptth), - self.simulated_spectrum.x, - self.simulated_spectrum.y + self.background.y) + img = np.interp( + np.degrees(ptth), + self.simulated_spectrum.x, + self.simulated_spectrum.y + self.background.y, + ) if noise is None: img_dict[det_key] = img @@ -1456,13 +1495,11 @@ def simulate_powder_pattern(self, img /= prev_max if noise.lower() == 'poisson': - im_noise = random_noise(img, - mode='poisson', - clip=True) + im_noise = random_noise(img, mode='poisson', clip=True) mi = im_noise.min() ma = im_noise.max() if ma > mi: - im_noise = (im_noise - mi)/(ma - mi) + im_noise = (im_noise - mi) / (ma - mi) elif noise.lower() == 'gaussian': im_noise = random_noise(img, mode='gaussian', clip=True) @@ -1484,9 +1521,14 @@ def simulate_powder_pattern(self, return img_dict - def simulate_laue_pattern(self, crystal_data, - minEnergy=5., maxEnergy=35., - rmat_s=None, grain_params=None): + def simulate_laue_pattern( + self, + crystal_data, + minEnergy=5.0, + maxEnergy=35.0, + rmat_s=None, + grain_params=None, + ): """ Simulate Laue diffraction over the instrument. 
@@ -1516,17 +1558,28 @@ def simulate_laue_pattern(self, crystal_data, for det_key, panel in self.detectors.items(): results[det_key] = panel.simulate_laue_pattern( crystal_data, - minEnergy=minEnergy, maxEnergy=maxEnergy, - rmat_s=rmat_s, tvec_s=self.tvec, + minEnergy=minEnergy, + maxEnergy=maxEnergy, + rmat_s=rmat_s, + tvec_s=self.tvec, grain_params=grain_params, - beam_vec=self.beam_vector) + beam_vec=self.beam_vector, + ) return results - def simulate_rotation_series(self, plane_data, grain_param_list, - eta_ranges=[(-np.pi, np.pi), ], - ome_ranges=[(-np.pi, np.pi), ], - ome_period=(-np.pi, np.pi), - wavelength=None): + def simulate_rotation_series( + self, + plane_data, + grain_param_list, + eta_ranges=[ + (-np.pi, np.pi), + ], + ome_ranges=[ + (-np.pi, np.pi), + ], + ome_period=(-np.pi, np.pi), + wavelength=None, + ): """ Simulate a monochromatic rotation series over the instrument. @@ -1555,7 +1608,8 @@ def simulate_rotation_series(self, plane_data, grain_param_list, results = dict.fromkeys(self.detectors) for det_key, panel in self.detectors.items(): results[det_key] = panel.simulate_rotation_series( - plane_data, grain_param_list, + plane_data, + grain_param_list, eta_ranges=eta_ranges, ome_ranges=ome_ranges, ome_period=ome_period, @@ -1564,16 +1618,28 @@ def simulate_rotation_series(self, plane_data, grain_param_list, energy_correction=self.energy_correction) return results - def pull_spots(self, plane_data, grain_params, - imgser_dict, - tth_tol=0.25, eta_tol=1., ome_tol=1., - npdiv=2, threshold=10, - eta_ranges=[(-np.pi, np.pi), ], - ome_period=None, - dirname='results', filename=None, output_format='text', - return_spot_list=False, - quiet=True, check_only=False, - interp='nearest'): + def pull_spots( + self, + plane_data, + grain_params, + imgser_dict, + tth_tol=0.25, + eta_tol=1.0, + ome_tol=1.0, + npdiv=2, + threshold=10, + eta_ranges=[ + (-np.pi, np.pi), + ], + ome_period=None, + dirname='results', + filename=None, + output_format='text', + 
return_spot_list=False, + quiet=True, + check_only=False, + interp='nearest', + ): """ Exctract reflection info from a rotation series. @@ -1633,12 +1699,14 @@ def pull_spots(self, plane_data, grain_params, # WARNING: all imageseries AND all wedges within are assumed to have # the same omega values; put in a check that they are all the same??? oims0 = next(iter(imgser_dict.values())) - ome_ranges = [np.radians([i['ostart'], i['ostop']]) - for i in oims0.omegawedges.wedges] + ome_ranges = [ + np.radians([i['ostart'], i['ostop']]) + for i in oims0.omegawedges.wedges + ] if ome_period is None: ims = next(iter(imgser_dict.values())) ostart = ims.omega[0, 0] - ome_period = np.radians(ostart + np.r_[0., 360.]) + ome_period = np.radians(ostart + np.r_[0.0, 360.0]) # delta omega in DEGREES grabbed from first imageseries in the dict delta_ome = oims0.omega[0, 1] - oims0.omega[0, 0] @@ -1646,7 +1714,10 @@ def pull_spots(self, plane_data, grain_params, # make omega grid for frame expansion around reference frame # in DEGREES ndiv_ome, ome_del = make_tolerance_grid( - delta_ome, ome_tol, 1, adjust_window=True, + delta_ome, + ome_tol, + 1, + adjust_window=True, ) # generate structuring element for connected component labeling @@ -1657,24 +1728,37 @@ def pull_spots(self, plane_data, grain_params, # simulate rotation series sim_results = self.simulate_rotation_series( - plane_data, [grain_params, ], + plane_data, + [ + grain_params, + ], eta_ranges=eta_ranges, ome_ranges=ome_ranges, - ome_period=ome_period) + ome_period=ome_period, + ) # patch vertex generator (global for instrument) - tol_vec = 0.5*np.radians( - [-tth_tol, -eta_tol, - -tth_tol, eta_tol, - tth_tol, eta_tol, - tth_tol, -eta_tol]) + tol_vec = 0.5 * np.radians( + [ + -tth_tol, + -eta_tol, + -tth_tol, + eta_tol, + tth_tol, + eta_tol, + tth_tol, + -eta_tol, + ] + ) # prepare output if requested if filename is not None and output_format.lower() == 'hdf5': this_filename = os.path.join(dirname, filename) writer = 
GrainDataWriter_h5( os.path.join(dirname, filename), - self.write_config(), grain_params) + self.write_config(), + grain_params, + ) # ===================================================================== # LOOP OVER PANELS @@ -1686,28 +1770,25 @@ def pull_spots(self, plane_data, grain_params, for detector_id, panel in self.detectors.items(): # initialize text-based output writer if filename is not None and output_format.lower() == 'text': - output_dir = os.path.join( - dirname, detector_id - ) + output_dir = os.path.join(dirname, detector_id) os.makedirs(output_dir, exist_ok=True) - this_filename = os.path.join( - output_dir, filename - ) + this_filename = os.path.join(output_dir, filename) writer = PatchDataWriter(this_filename) # grab panel instr_cfg = panel.config_dict( - self.chi, self.tvec, + self.chi, + self.tvec, beam_energy=self.beam_energy, beam_vector=self.beam_vector, - style='hdf5' + style='hdf5', ) native_area = panel.pixel_area # pixel ref area # pull out the OmegaImageSeries for this panel from input dict - ome_imgser = _parse_imgser_dict(imgser_dict, - detector_id, - roi=panel.roi) + ome_imgser = _parse_imgser_dict( + imgser_dict, detector_id, roi=panel.roi + ) # extract simulation results sim_results_p = sim_results[detector_id] @@ -1723,19 +1804,24 @@ def pull_spots(self, plane_data, grain_params, # patch vertex array from sim nangs = len(ang_centers) patch_vertices = ( - np.tile(ang_centers[:, :2], (1, 4)) + - np.tile(tol_vec, (nangs, 1)) - ).reshape(4*nangs, 2) - ome_dupl = np.tile( - ang_centers[:, 2], (4, 1) - ).T.reshape(len(patch_vertices), 1) + np.tile(ang_centers[:, :2], (1, 4)) + + np.tile(tol_vec, (nangs, 1)) + ).reshape(4 * nangs, 2) + ome_dupl = np.tile(ang_centers[:, 2], (4, 1)).T.reshape( + len(patch_vertices), 1 + ) # find vertices that all fall on the panel - det_xy, rmats_s, on_plane = xrdutil._project_on_detector_plane( + det_xy, rmats_s, on_plane = _project_on_detector_plane( np.hstack([patch_vertices, ome_dupl]), - panel.rmat, 
rMat_c, self.chi, - panel.tvec, tVec_c, self.tvec, - panel.distortion) + panel.rmat, + rMat_c, + self.chi, + panel.tvec, + tVec_c, + self.tvec, + panel.distortion, + ) _, on_panel = panel.clip_to_panel(det_xy, buffer_edges=True) # all vertices must be on... @@ -1766,7 +1852,9 @@ def pull_spots(self, plane_data, grain_params, if not quiet: msg = """ window for (%d %d %d) falls outside omega range - """ % tuple(hkls_p[i_pt, :]) + """ % tuple( + hkls_p[i_pt, :] + ) print(msg) continue else: @@ -1784,11 +1872,16 @@ def pull_spots(self, plane_data, grain_params, # make the tth,eta patches for interpolation patches = xrdutil.make_reflection_patches( instr_cfg, - ang_centers[:, :2], ang_pixel_size, + ang_centers[:, :2], + ang_pixel_size, omega=ang_centers[:, 2], - tth_tol=tth_tol, eta_tol=eta_tol, - rmat_c=rMat_c, tvec_c=tVec_c, - npdiv=npdiv, quiet=True) + tth_tol=tth_tol, + eta_tol=eta_tol, + rmat_c=rMat_c, + tvec_c=tVec_c, + npdiv=npdiv, + quiet=True, + ) # GRAND LOOP over reflections for this panel patch_output = [] @@ -1798,7 +1891,7 @@ def pull_spots(self, plane_data, grain_params, vtx_angs, vtx_xy, conn, areas, xy_eval, ijs = patch prows, pcols = areas.shape - nrm_fac = areas/float(native_area) + nrm_fac = areas / float(native_area) nrm_fac = nrm_fac / np.min(nrm_fac) # grab hkl info @@ -1812,8 +1905,9 @@ def pull_spots(self, plane_data, grain_params, delta_eta = eta_edges[1] - eta_edges[0] # need to reshape eval pts for interpolation - xy_eval = np.vstack([xy_eval[0].flatten(), - xy_eval[1].flatten()]).T + xy_eval = np.vstack( + [xy_eval[0].flatten(), xy_eval[1].flatten()] + ).T # the evaluation omegas; # expand about the central value using tol vector @@ -1829,7 +1923,9 @@ def pull_spots(self, plane_data, grain_params, if not quiet: msg = """ window for (%d%d%d) falls outside omega range - """ % tuple(hkl) + """ % tuple( + hkl + ) print(msg) continue else: @@ -1838,8 +1934,8 @@ def pull_spots(self, plane_data, grain_params, peak_id = next_invalid_peak_id sum_int 
= np.nan max_int = np.nan - meas_angs = np.nan*np.ones(3) - meas_xy = np.nan*np.ones(2) + meas_angs = np.nan * np.ones(3) + meas_xy = np.nan * np.ones(2) # quick check for intensity contains_signal = False @@ -1857,19 +1953,23 @@ def pull_spots(self, plane_data, grain_params, # initialize patch data array for intensities if interp.lower() == 'bilinear': patch_data = np.zeros( - (len(frame_indices), prows, pcols)) + (len(frame_indices), prows, pcols) + ) for i, i_frame in enumerate(frame_indices): - patch_data[i] = \ - panel.interpolate_bilinear( - xy_eval, - ome_imgser[i_frame], - pad_with_nans=False - ).reshape(prows, pcols) # * nrm_fac + patch_data[i] = panel.interpolate_bilinear( + xy_eval, + ome_imgser[i_frame], + pad_with_nans=False, + ).reshape( + prows, pcols + ) # * nrm_fac elif interp.lower() == 'nearest': patch_data = patch_data_raw # * nrm_fac else: - msg = "interpolation option " + \ - "'%s' not understood" + msg = ( + "interpolation option " + + "'%s' not understood" + ) raise RuntimeError(msg % interp) # now have interpolated patch data... 
@@ -1882,9 +1982,10 @@ def pull_spots(self, plane_data, grain_params, peak_id = iRefl props = regionprops(labels, patch_data) coms = np.vstack( - [x.weighted_centroid for x in props]) + [x.weighted_centroid for x in props] + ) if num_peaks > 1: - center = np.r_[patch_data.shape]*0.5 + center = np.r_[patch_data.shape] * 0.5 center_t = np.tile(center, (num_peaks, 1)) com_diff = coms - center_t closest_peak_idx = np.argmin( @@ -1900,12 +2001,15 @@ def pull_spots(self, plane_data, grain_params, # meas_omes = \ # ome_eval[0] + coms[0]*delta_ome meas_angs = np.hstack( - [tth_edges[0] + (0.5 + coms[2])*delta_tth, - eta_edges[0] + (0.5 + coms[1])*delta_eta, - mapAngle( - np.radians(meas_omes), ome_period - ) - ] + [ + tth_edges[0] + + (0.5 + coms[2]) * delta_tth, + eta_edges[0] + + (0.5 + coms[1]) * delta_eta, + mapAngle( + np.radians(meas_omes), ome_period + ), + ] ) # intensities @@ -1932,15 +2036,21 @@ def pull_spots(self, plane_data, grain_params, meas_angs, chi=self.chi, rmat_c=rMat_c, - beam_vec=self.beam_vector) + beam_vec=self.beam_vector, + ) rMat_s = make_sample_rmat( self.chi, meas_angs[2] ) meas_xy = gvec_to_xy( gvec_c, - panel.rmat, rMat_s, rMat_c, - panel.tvec, self.tvec, tVec_c, - beam_vec=self.beam_vector) + panel.rmat, + rMat_s, + rMat_c, + panel.tvec, + self.tvec, + tVec_c, + beam_vec=self.beam_vector, + ) if panel.distortion is not None: meas_xy = panel.distortion.apply_inverse( np.atleast_2d(meas_xy) @@ -1959,19 +2069,38 @@ def pull_spots(self, plane_data, grain_params, if filename is not None: if output_format.lower() == 'text': writer.dump_patch( - peak_id, hkl_id, hkl, sum_int, max_int, - ang_centers[i_pt], meas_angs, - xy_centers[i_pt], meas_xy) + peak_id, + hkl_id, + hkl, + sum_int, + max_int, + ang_centers[i_pt], + meas_angs, + xy_centers[i_pt], + meas_xy, + ) elif output_format.lower() == 'hdf5': xyc_arr = xy_eval.reshape( prows, pcols, 2 ).transpose(2, 0, 1) writer.dump_patch( - detector_id, iRefl, peak_id, hkl_id, hkl, - tth_edges, eta_edges, 
np.radians(ome_eval), - xyc_arr, ijs, frame_indices, patch_data, - ang_centers[i_pt], xy_centers[i_pt], - meas_angs, meas_xy) + detector_id, + iRefl, + peak_id, + hkl_id, + hkl, + tth_edges, + eta_edges, + np.radians(ome_eval), + xyc_arr, + ijs, + frame_indices, + patch_data, + ang_centers[i_pt], + xy_centers[i_pt], + meas_angs, + meas_xy, + ) if return_spot_list: # Full output @@ -1979,17 +2108,34 @@ def pull_spots(self, plane_data, grain_params, prows, pcols, 2 ).transpose(2, 0, 1) _patch_output = [ - detector_id, iRefl, peak_id, hkl_id, hkl, - tth_edges, eta_edges, np.radians(ome_eval), - xyc_arr, ijs, frame_indices, patch_data, - ang_centers[i_pt], xy_centers[i_pt], - meas_angs, meas_xy + detector_id, + iRefl, + peak_id, + hkl_id, + hkl, + tth_edges, + eta_edges, + np.radians(ome_eval), + xyc_arr, + ijs, + frame_indices, + patch_data, + ang_centers[i_pt], + xy_centers[i_pt], + meas_angs, + meas_xy, ] else: # Trimmed output _patch_output = [ - peak_id, hkl_id, hkl, sum_int, max_int, - ang_centers[i_pt], meas_angs, meas_xy + peak_id, + hkl_id, + hkl, + sum_int, + max_int, + ang_centers[i_pt], + meas_angs, + meas_xy, ] patch_output.append(_patch_output) iRefl += 1 @@ -2007,7 +2153,9 @@ def update_memoization_sizes(self): PlanarDetector.update_memoization_sizes(all_panels) CylindricalDetector.update_memoization_sizes(all_panels) - def calc_transmission(self, rMat_s: np.ndarray = None) -> dict[str, np.ndarray]: + def calc_transmission( + self, rMat_s: np.ndarray = None + ) -> dict[str, np.ndarray]: """calculate the transmission from the filter and polymer coating. 
the inverse of this number is the intensity correction that needs @@ -2021,26 +2169,31 @@ def calc_transmission(self, rMat_s: np.ndarray = None) -> dict[str, np.ndarray]: transmissions = {} for det_name, det in self.detectors.items(): transmission_filter, transmission_phosphor = ( - det.calc_filter_coating_transmission(energy)) + det.calc_filter_coating_transmission(energy) + ) transmission = transmission_filter * transmission_phosphor if self.physics_package is not None: transmission_physics_package = ( det.calc_physics_package_transmission( - energy, rMat_s, self.physics_package)) + energy, rMat_s, self.physics_package + ) + ) effective_pinhole_area = det.calc_effective_pinhole_area( - self.physics_package) + self.physics_package + ) transmission = ( - transmission * - transmission_physics_package * - effective_pinhole_area + transmission + * transmission_physics_package + * effective_pinhole_area ) transmissions[det_name] = transmission return transmissions + # ============================================================================= # UTILITIES # ============================================================================= @@ -2051,6 +2204,7 @@ class PatchDataWriter(object): def __init__(self, filename): self._delim = ' ' + # fmt: off header_items = ( '# ID', 'PID', 'H', 'K', 'L', @@ -2065,6 +2219,7 @@ def __init__(self, filename): self._delim.join(np.tile('{:<12}', 2)).format(*header_items[5:7]), self._delim.join(np.tile('{:<23}', 10)).format(*header_items[7:17]) ]) + # fmt: on if isinstance(filename, IOBase): self.fid = filename else: @@ -2077,30 +2232,34 @@ def __del__(self): def close(self): self.fid.close() - def dump_patch(self, peak_id, hkl_id, - hkl, spot_int, max_int, - pangs, mangs, pxy, mxy): + def dump_patch( + self, peak_id, hkl_id, hkl, spot_int, max_int, pangs, mangs, pxy, mxy + ): """ !!! 
maybe need to check that last four inputs are arrays """ if mangs is None: spot_int = np.nan max_int = np.nan - mangs = np.nan*np.ones(3) - mxy = np.nan*np.ones(2) - - res = [int(peak_id), int(hkl_id)] \ - + np.array(hkl, dtype=int).tolist() \ - + [spot_int, max_int] \ - + pangs.tolist() \ - + mangs.tolist() \ - + pxy.tolist() \ + mangs = np.nan * np.ones(3) + mxy = np.nan * np.ones(2) + + res = ( + [int(peak_id), int(hkl_id)] + + np.array(hkl, dtype=int).tolist() + + [spot_int, max_int] + + pangs.tolist() + + mangs.tolist() + + pxy.tolist() + mxy.tolist() + ) output_str = self._delim.join( - [self._delim.join(np.tile('{:<6d}', 5)).format(*res[:5]), - self._delim.join(np.tile('{:<12e}', 2)).format(*res[5:7]), - self._delim.join(np.tile('{:<23.16e}', 10)).format(*res[7:])] + [ + self._delim.join(np.tile('{:<6d}', 5)).format(*res[:5]), + self._delim.join(np.tile('{:<12e}', 2)).format(*res[5:7]), + self._delim.join(np.tile('{:<23.16e}', 10)).format(*res[7:]), + ] ) print(output_str, file=self.fid) return output_str @@ -2116,20 +2275,23 @@ def __init__(self, filename=None, array=None): """ if filename is None and array is None: raise RuntimeError( - 'GrainDataWriter must be specified with filename or array') + 'GrainDataWriter must be specified with filename or array' + ) self.array = None self.fid = None # array supersedes filename if array is not None: - assert array.shape[1] == 21, \ - f'grain data table must have 21 columns not {array.shape[21]}' + assert ( + array.shape[1] == 21 + ), f'grain data table must have 21 columns not {array.shape[21]}' self.array = array self._array_row = 0 return self._delim = ' ' + # fmt: off header_items = ( '# grain ID', 'completeness', 'chi^2', 'exp_map_c[0]', 'exp_map_c[1]', 'exp_map_c[2]', @@ -2149,6 +2311,7 @@ def __init__(self, filename=None, array=None): np.tile('{:<23}', len(header_items) - 3) ).format(*header_items[3:])] ) + # fmt: on if isinstance(filename, IOBase): self.fid = filename else: @@ -2162,35 +2325,40 @@ def 
close(self): if self.fid is not None: self.fid.close() - def dump_grain(self, grain_id, completeness, chisq, - grain_params): - assert len(grain_params) == 12, \ - "len(grain_params) must be 12, not %d" % len(grain_params) + def dump_grain(self, grain_id, completeness, chisq, grain_params): + assert ( + len(grain_params) == 12 + ), "len(grain_params) must be 12, not %d" % len(grain_params) # extract strain emat = logm(np.linalg.inv(mutil.vecMVToSymm(grain_params[6:]))) evec = mutil.symmToVecMV(emat, scale=False) - res = [int(grain_id), completeness, chisq] \ - + grain_params.tolist() \ + res = ( + [int(grain_id), completeness, chisq] + + grain_params.tolist() + evec.tolist() + ) if self.array is not None: row = self._array_row - assert row < self.array.shape[0], \ - f'invalid row {row} in array table' + assert ( + row < self.array.shape[0] + ), f'invalid row {row} in array table' self.array[row] = res self._array_row += 1 return res # (else) format and write to file output_str = self._delim.join( - [self._delim.join( - ['{:<12d}', '{:<12f}', '{:<12e}'] - ).format(*res[:3]), - self._delim.join( - np.tile('{:<23.16e}', len(res) - 3) - ).format(*res[3:])] + [ + self._delim.join(['{:<12d}', '{:<12f}', '{:<12e}']).format( + *res[:3] + ), + self._delim.join(np.tile('{:<23.16e}', len(res) - 3)).format( + *res[3:] + ), + ] ) print(output_str, file=self.fid) return output_str @@ -2220,12 +2388,12 @@ def __init__(self, filename, instr_cfg, grain_params, use_attr=False): vinv_s = np.array(grain_params[6:]).flatten() vmat_s = np.linalg.inv(mutil.vecMVToSymm(vinv_s)) - if use_attr: # attribute version + if use_attr: # attribute version self.grain_grp.attrs.create('rmat_c', rmat_c) self.grain_grp.attrs.create('tvec_c', tvec_c) self.grain_grp.attrs.create('inv(V)_s', vinv_s) self.grain_grp.attrs.create('vmat_s', vmat_s) - else: # dataset version + else: # dataset version self.grain_grp.create_dataset('rmat_c', data=rmat_c) self.grain_grp.create_dataset('tvec_c', data=tvec_c) 
self.grain_grp.create_dataset('inv(V)_s', data=vinv_s) @@ -2244,11 +2412,26 @@ def __init__(self, filename, instr_cfg, grain_params, use_attr=False): def close(self): self.fid.close() - def dump_patch(self, panel_id, - i_refl, peak_id, hkl_id, hkl, - tth_edges, eta_edges, ome_centers, - xy_centers, ijs, frame_indices, - spot_data, pangs, pxy, mangs, mxy, gzip=1): + def dump_patch( + self, + panel_id, + i_refl, + peak_id, + hkl_id, + hkl, + tth_edges, + eta_edges, + ome_centers, + xy_centers, + ijs, + frame_indices, + spot_data, + pangs, + pxy, + mangs, + mxy, + gzip=1, + ): """ to be called inside loop over patches @@ -2264,10 +2447,10 @@ def dump_patch(self, panel_id, spot_grp.attrs.create('predicted_angles', pangs) spot_grp.attrs.create('predicted_xy', pxy) if mangs is None: - mangs = np.nan*np.ones(3) + mangs = np.nan * np.ones(3) spot_grp.attrs.create('measured_angles', mangs) if mxy is None: - mxy = np.nan*np.ones(3) + mxy = np.nan * np.ones(3) spot_grp.attrs.create('measured_xy', mxy) # get centers crds from edge arrays @@ -2286,27 +2469,55 @@ def dump_patch(self, panel_id, eta_crd = centers_of_edge_vec(eta_edges) shuffle_data = True # reduces size by 20% - spot_grp.create_dataset('tth_crd', data=tth_crd, - compression="gzip", compression_opts=gzip, - shuffle=shuffle_data) - spot_grp.create_dataset('eta_crd', data=eta_crd, - compression="gzip", compression_opts=gzip, - shuffle=shuffle_data) - spot_grp.create_dataset('ome_crd', data=ome_centers, - compression="gzip", compression_opts=gzip, - shuffle=shuffle_data) - spot_grp.create_dataset('xy_centers', data=xy_centers, - compression="gzip", compression_opts=gzip, - shuffle=shuffle_data) - spot_grp.create_dataset('ij_centers', data=ijs, - compression="gzip", compression_opts=gzip, - shuffle=shuffle_data) - spot_grp.create_dataset('frame_indices', data=fi, - compression="gzip", compression_opts=gzip, - shuffle=shuffle_data) - spot_grp.create_dataset('intensities', data=spot_data, - compression="gzip", 
compression_opts=gzip, - shuffle=shuffle_data) + spot_grp.create_dataset( + 'tth_crd', + data=tth_crd, + compression="gzip", + compression_opts=gzip, + shuffle=shuffle_data, + ) + spot_grp.create_dataset( + 'eta_crd', + data=eta_crd, + compression="gzip", + compression_opts=gzip, + shuffle=shuffle_data, + ) + spot_grp.create_dataset( + 'ome_crd', + data=ome_centers, + compression="gzip", + compression_opts=gzip, + shuffle=shuffle_data, + ) + spot_grp.create_dataset( + 'xy_centers', + data=xy_centers, + compression="gzip", + compression_opts=gzip, + shuffle=shuffle_data, + ) + spot_grp.create_dataset( + 'ij_centers', + data=ijs, + compression="gzip", + compression_opts=gzip, + shuffle=shuffle_data, + ) + spot_grp.create_dataset( + 'frame_indices', + data=fi, + compression="gzip", + compression_opts=gzip, + shuffle=shuffle_data, + ) + spot_grp.create_dataset( + 'intensities', + data=spot_data, + compression="gzip", + compression_opts=gzip, + shuffle=shuffle_data, + ) return @@ -2328,9 +2539,16 @@ class GenerateEtaOmeMaps(object): """ - def __init__(self, image_series_dict, instrument, plane_data, - active_hkls=None, eta_step=0.25, threshold=None, - ome_period=(0, 360)): + def __init__( + self, + image_series_dict, + instrument, + plane_data, + active_hkls=None, + eta_step=0.25, + threshold=None, + ome_period=(0, 360), + ): """ image_series must be OmegaImageSeries class instrument_params must be a dict (loaded from yaml spec) @@ -2344,13 +2562,12 @@ def __init__(self, image_series_dict, instrument, plane_data, # ???: change name of iHKLList? # ???: can we change the behavior of iHKLList? 
if active_hkls is None: - self._iHKLList = plane_data.getHKLID( - plane_data.hkls, master=True - ) + self._iHKLList = plane_data.getHKLID(plane_data.hkls, master=True) n_rings = len(self._iHKLList) else: - assert hasattr(active_hkls, '__len__'), \ - "active_hkls must be an iterable with __len__" + assert hasattr( + active_hkls, '__len__' + ), "active_hkls must be an iterable with __len__" self._iHKLList = active_hkls n_rings = len(active_hkls) @@ -2365,14 +2582,18 @@ def __init__(self, image_series_dict, instrument, plane_data, omegas_array = this_det_ims.metadata['omega'] # !!! DEGREES delta_ome = omegas_array[0][-1] - omegas_array[0][0] frame_mask = None - ome_period = omegas_array[0, 0] + np.r_[0., 360.] # !!! be careful + ome_period = omegas_array[0, 0] + np.r_[0.0, 360.0] # !!! be careful if this_det_ims.omegawedges.nwedges > 1: - delta_omes = [(i['ostop'] - i['ostart'])/i['nsteps'] - for i in this_det_ims.omegawedges.wedges] - check_wedges = mutil.uniqueVectors(np.atleast_2d(delta_omes), - tol=1e-6).squeeze() - assert check_wedges.size == 1, \ - "all wedges must have the same delta omega to 1e-6" + delta_omes = [ + (i['ostop'] - i['ostart']) / i['nsteps'] + for i in this_det_ims.omegawedges.wedges + ] + check_wedges = mutil.uniqueVectors( + np.atleast_2d(delta_omes), tol=1e-6 + ).squeeze() + assert ( + check_wedges.size == 1 + ), "all wedges must have the same delta omega to 1e-6" # grab representative delta ome # !!! 
assuming positive delta consistent with OmegaImageSeries delta_ome = delta_omes[0] @@ -2388,9 +2609,9 @@ def __init__(self, image_series_dict, instrument, plane_data, ) # compute total nsteps # FIXME: need check for roundoff badness - nsteps = int((ostop - ostart)/delta_ome) + nsteps = int((ostop - ostart) / delta_ome) ome_edges_full = np.linspace( - ostart, ostop, num=nsteps+1, endpoint=True + ostart, ostop, num=nsteps + 1, endpoint=True ) omegas_array = np.vstack( [ome_edges_full[:-1], ome_edges_full[1:]] @@ -2401,15 +2622,21 @@ def __init__(self, image_series_dict, instrument, plane_data, # !!! this array has -1 outside a wedge # !!! again assuming the valid frame order increases monotonically frame_mask = np.array( - [this_det_ims.omega_to_frame(ome)[0] != -1 - for ome in ome_centers] + [ + this_det_ims.omega_to_frame(ome)[0] != -1 + for ome in ome_centers + ] ) # ???: need to pass a threshold? eta_mapping, etas = instrument.extract_polar_maps( - plane_data, image_series_dict, - active_hkls=active_hkls, threshold=threshold, - tth_tol=None, eta_tol=eta_step) + plane_data, + image_series_dict, + active_hkls=active_hkls, + threshold=threshold, + tth_tol=None, + eta_tol=eta_step, + ) # for convenience grab map shape from first map_shape = next(iter(eta_mapping.values())).shape[1:] @@ -2436,7 +2663,7 @@ def __init__(self, image_series_dict, instrument, plane_data, if frame_mask is not None: # !!! 
must expand row dimension to include # skipped omegas - tmp = np.ones((len(frame_mask), map_shape[1]))*np.nan + tmp = np.ones((len(frame_mask), map_shape[1])) * np.nan tmp[frame_mask, :] = full_map full_map = tmp data_store.append(full_map) @@ -2445,11 +2672,11 @@ def __init__(self, image_series_dict, instrument, plane_data, # set required attributes self._omegas = mapAngle( np.radians(np.average(omegas_array, axis=1)), - np.radians(ome_period) + np.radians(ome_period), ) self._omeEdges = mapAngle( np.radians(np.r_[omegas_array[:, 0], omegas_array[-1, 1]]), - np.radians(ome_period) + np.radians(ome_period), ) # !!! must avoid the case where omeEdges[0] = omeEdges[-1] for the @@ -2463,7 +2690,7 @@ def __init__(self, image_series_dict, instrument, plane_data, # WARNING: unlinke the omegas in imageseries metadata, # these are in RADIANS and represent bin centers self._etaEdges = etas - self._etas = self._etaEdges[:-1] + 0.5*np.radians(eta_step) + self._etas = self._etaEdges[:-1] + 0.5 * np.radians(eta_step) @property def dataStore(self): @@ -2499,9 +2726,7 @@ def save(self, filename): def _generate_ring_params(tthr, ptth, peta, eta_edges, delta_eta): # mark pixels in the spec'd tth range - pixels_in_tthr = np.logical_and( - ptth >= tthr[0], ptth <= tthr[1] - ) + pixels_in_tthr = np.logical_and(ptth >= tthr[0], ptth <= tthr[1]) # catch case where ring isn't on detector if not np.any(pixels_in_tthr): @@ -2518,8 +2743,7 @@ def _generate_ring_params(tthr, ptth, peta, eta_edges, delta_eta): def run_fast_histogram(x, bins, weights=None): - return histogram1d(x, len(bins) - 1, (bins[0], bins[-1]), - weights=weights) + return histogram1d(x, len(bins) - 1, (bins[0], bins[-1]), weights=weights) def run_numpy_histogram(x, bins, weights=None): @@ -2537,7 +2761,7 @@ def _run_histograms(rows, ims, tth_ranges, ring_maps, ring_params, threshold): if threshold is not None: # !!! NaNs get preserved image = np.array(image) - image[image < threshold] = 0. 
+ image[image < threshold] = 0.0 for i_r, tthr in enumerate(tth_ranges): this_map = ring_maps[i_r] @@ -2554,12 +2778,21 @@ def _run_histograms(rows, ims, tth_ranges, ring_maps, ring_params, threshold): this_map[i_row, bins_on_detector] = result[bins_on_detector] -def _extract_detector_line_positions(iter_args, plane_data, tth_tol, - eta_tol, eta_centers, npdiv, - collapse_tth, collapse_eta, - do_interpolation, do_fitting, - fitting_kwargs, tth_distortion, - max_workers): +def _extract_detector_line_positions( + iter_args, + plane_data, + tth_tol, + eta_tol, + eta_centers, + npdiv, + collapse_tth, + collapse_eta, + do_interpolation, + do_fitting, + fitting_kwargs, + tth_distortion, + max_workers, +): panel, instr_cfg, images, pbp = iter_args if images.ndim == 2: @@ -2574,9 +2807,13 @@ def _extract_detector_line_positions(iter_args, plane_data, tth_tol, tth_distr_cls = tth_distortion[panel.name] pow_angs, pow_xys, tth_ranges = panel.make_powder_rings( - plane_data, merge_hkls=True, - delta_tth=tth_tol, delta_eta=eta_tol, - eta_list=eta_centers, tth_distortion=tth_distr_cls) + plane_data, + merge_hkls=True, + delta_tth=tth_tol, + delta_eta=eta_tol, + eta_list=eta_centers, + tth_distortion=tth_distr_cls, + ) tth_tols = np.degrees(np.hstack([i[1] - i[0] for i in tth_ranges])) @@ -2591,8 +2828,9 @@ def _extract_detector_line_positions(iter_args, plane_data, tth_tol, # ================================================================= # LOOP OVER RING SETS # ================================================================= - pbar_rings = partial(tqdm, total=len(pow_angs), desc="Ringset", - position=pbp) + pbar_rings = partial( + tqdm, total=len(pow_angs), desc="Ringset", position=pbp + ) kwargs = { 'instr_cfg': instr_cfg, @@ -2609,15 +2847,26 @@ def _extract_detector_line_positions(iter_args, plane_data, tth_tol, } func = partial(_extract_ring_line_positions, **kwargs) iter_arg = zip(pow_angs, pow_xys, tth_tols, tth0) - with 
ProcessPoolExecutor(mp_context=constants.mp_context, - max_workers=max_workers) as executor: + with ProcessPoolExecutor( + mp_context=constants.mp_context, max_workers=max_workers + ) as executor: return list(pbar_rings(executor.map(func, iter_arg))) -def _extract_ring_line_positions(iter_args, instr_cfg, panel, eta_tol, npdiv, - collapse_tth, collapse_eta, images, - do_interpolation, do_fitting, fitting_kwargs, - tth_distortion): +def _extract_ring_line_positions( + iter_args, + instr_cfg, + panel, + eta_tol, + npdiv, + collapse_tth, + collapse_eta, + images, + do_interpolation, + do_fitting, + fitting_kwargs, + tth_distortion, +): """ Extracts data for a single Debye-Scherrer ring . @@ -2665,16 +2914,22 @@ def _extract_ring_line_positions(iter_args, instr_cfg, panel, eta_tol, npdiv, nan_mask = ~np.logical_or(np.isnan(xys), np.isnan(angs)) nan_mask = np.logical_or.reduce(nan_mask, 1) if angs.ndim > 1 and xys.ndim > 1: - angs = angs[nan_mask,:] - xys = xys[nan_mask, :] + angs = angs[nan_mask, :] + xys = xys[nan_mask, :] n_images = len(images) native_area = panel.pixel_area # make the tth,eta patches for interpolation patches = xrdutil.make_reflection_patches( - instr_cfg, angs, panel.angularPixelSize(xys), - tth_tol=tth_tol, eta_tol=eta_tol, npdiv=npdiv, quiet=True) + instr_cfg, + angs, + panel.angularPixelSize(xys), + tth_tol=tth_tol, + eta_tol=eta_tol, + npdiv=npdiv, + quiet=True, + ) # loop over patches # FIXME: fix initialization @@ -2693,9 +2948,7 @@ def _extract_ring_line_positions(iter_args, instr_cfg, panel, eta_tol, npdiv, areas = np.abs(areas) # need to reshape eval pts for interpolation - xy_eval = np.vstack([ - xys_eval[0].flatten(), - xys_eval[1].flatten()]).T + xy_eval = np.vstack([xys_eval[0].flatten(), xys_eval[1].flatten()]).T _, on_panel = panel.clip_to_panel(xy_eval) @@ -2703,25 +2956,20 @@ def _extract_ring_line_positions(iter_args, instr_cfg, panel, eta_tol, npdiv, continue if collapse_tth: - ang_data = (vtx_angs[0][0, [0, -1]], - 
vtx_angs[1][[0, -1], 0]) + ang_data = (vtx_angs[0][0, [0, -1]], vtx_angs[1][[0, -1], 0]) elif collapse_eta: # !!! yield the tth bin centers tth_centers = np.average( - np.vstack( - [vtx_angs[0][0, :-1], vtx_angs[0][0, 1:]] - ), - axis=0 + np.vstack([vtx_angs[0][0, :-1], vtx_angs[0][0, 1:]]), axis=0 ) - ang_data = (tth_centers, - angs[i_p][-1]) + ang_data = (tth_centers, angs[i_p][-1]) if do_fitting: fit_data = [] else: ang_data = vtx_angs prows, pcols = areas.shape - area_fac = areas/float(native_area) + area_fac = areas / float(native_area) # interpolate if not collapse_tth: @@ -2730,19 +2978,22 @@ def _extract_ring_line_positions(iter_args, instr_cfg, panel, eta_tol, npdiv, # catch interpolation type image = images[j_p] if do_interpolation: - p_img = panel.interpolate_bilinear( + p_img = ( + panel.interpolate_bilinear( xy_eval, image, - ).reshape(prows, pcols)*area_fac + ).reshape(prows, pcols) + * area_fac + ) else: - p_img = image[ijs[0], ijs[1]]*area_fac + p_img = image[ijs[0], ijs[1]] * area_fac # catch flat spectrum data, which will cause # fitting to fail. # ???: best here, or make fitting handle it? mxval = np.max(p_img) mnval = np.min(p_img) - if mxval == 0 or (1. 
- mnval/mxval) < 0.01: + if mxval == 0 or (1.0 - mnval / mxval) < 0.01: continue # catch collapsing options @@ -2759,11 +3010,16 @@ def _extract_ring_line_positions(iter_args, instr_cfg, panel, eta_tol, npdiv, tmp = tth_distortion.apply( panel.angles_to_cart( np.vstack( - [np.radians(this_tth0), - np.tile(ang_data[-1], len(this_tth0))] + [ + np.radians(this_tth0), + np.tile( + ang_data[-1], len(this_tth0) + ), + ] ).T ), - return_nominal=True) + return_nominal=True, + ) pk_centers = np.degrees(tmp[:, 0]) else: pk_centers = this_tth0 diff --git a/hexrd/instrument/physics_package.py b/hexrd/core/instrument/physics_package.py similarity index 98% rename from hexrd/instrument/physics_package.py rename to hexrd/core/instrument/physics_package.py index a1451e4e9..f03d62b37 100644 --- a/hexrd/instrument/physics_package.py +++ b/hexrd/core/instrument/physics_package.py @@ -3,7 +3,7 @@ from functools import partial import numpy as np -from hexrd.material.utils import calculate_linear_absorption_length +from hexrd.core.material.utils import calculate_linear_absorption_length # Below are the possible layers @@ -47,6 +47,7 @@ class AbstractPhysicsPackage: Readout models for BaFBr0.85I0.15:Eu image plates Rev. Sci. Instrum. 
89, 063101 (2018 """ + # Abstract methods that must be redefined in derived classes @property @abstractmethod diff --git a/hexrd/instrument/planar_detector.py b/hexrd/core/instrument/planar_detector.py similarity index 65% rename from hexrd/instrument/planar_detector.py rename to hexrd/core/instrument/planar_detector.py index a2bc7dd16..4b5be4533 100644 --- a/hexrd/instrument/planar_detector.py +++ b/hexrd/core/instrument/planar_detector.py @@ -1,14 +1,14 @@ import numpy as np -from hexrd import constants as ct -from hexrd.transforms.xfcapi import ( +from hexrd.core import constants as ct +from hexrd.core.transforms.xfcapi import ( angles_to_gvec, xy_to_gvec, gvec_to_xy, make_beam_rmat, angles_to_dvec, ) -from hexrd.utils.decorators import memoize +from hexrd.core.utils.decorators import memoize from .detector import Detector @@ -23,10 +23,14 @@ def __init__(self, **detector_kwargs): def detector_type(self): return 'planar' - def cart_to_angles(self, xy_data, - rmat_s=None, - tvec_s=None, tvec_c=None, - apply_distortion=False): + def cart_to_angles( + self, + xy_data, + rmat_s=None, + tvec_s=None, + tvec_c=None, + apply_distortion=False, + ): if rmat_s is None: rmat_s = ct.identity_3x3 if tvec_s is None: @@ -37,16 +41,26 @@ def cart_to_angles(self, xy_data, xy_data = self.distortion.apply(xy_data) rmat_b = make_beam_rmat(self.bvec, self.evec) angs, g_vec = xy_to_gvec( - xy_data, self.rmat, rmat_s, - self.tvec, tvec_s, tvec_c, - rmat_b=rmat_b) + xy_data, + self.rmat, + rmat_s, + self.tvec, + tvec_s, + tvec_c, + rmat_b=rmat_b, + ) tth_eta = np.vstack([angs[0], angs[1]]).T return tth_eta, g_vec - def angles_to_cart(self, tth_eta, - rmat_s=None, tvec_s=None, - rmat_c=None, tvec_c=None, - apply_distortion=False): + def angles_to_cart( + self, + tth_eta, + rmat_s=None, + tvec_s=None, + rmat_c=None, + tvec_c=None, + apply_distortion=False, + ): if rmat_s is None: rmat_s = ct.identity_3x3 if tvec_s is None: @@ -64,13 +78,19 @@ def angles_to_cart(self, tth_eta, ome = 
np.arccos(rmat_s[0, 0]) angs = np.hstack([tth_eta, np.tile(ome, (len(tth_eta), 1))]) - gvec = angles_to_gvec(angs, beam_vec=self.bvec, eta_vec=self.evec, - chi=chi) + gvec = angles_to_gvec( + angs, beam_vec=self.bvec, eta_vec=self.evec, chi=chi + ) xy_det = gvec_to_xy( gvec, - self.rmat, rmat_s, rmat_c, - self.tvec, tvec_s, tvec_c, - beam_vec=self.bvec) + self.rmat, + rmat_s, + rmat_c, + self.tvec, + tvec_s, + tvec_c, + beam_vec=self.bvec, + ) if apply_distortion and self.distortion is not None: xy_det = self.distortion.apply_inverse(xy_det) return xy_det @@ -89,16 +109,34 @@ def pixel_angles(self, origin=ct.zeros_3, bvec: np.ndarray | None = None): self.rows, self.cols) def pixel_tth_gradient(self, origin=ct.zeros_3): - return _pixel_tth_gradient(origin, self.pixel_coords, self.distortion, - self.rmat, self.tvec, self.bvec, self.evec, - self.rows, self.cols) + return _pixel_tth_gradient( + origin, + self.pixel_coords, + self.distortion, + self.rmat, + self.tvec, + self.bvec, + self.evec, + self.rows, + self.cols, + ) def pixel_eta_gradient(self, origin=ct.zeros_3): - return _pixel_eta_gradient(origin, self.pixel_coords, self.distortion, - self.rmat, self.tvec, self.bvec, self.evec, - self.rows, self.cols) + return _pixel_eta_gradient( + origin, + self.pixel_coords, + self.distortion, + self.rmat, + self.tvec, + self.bvec, + self.evec, + self.rows, + self.cols, + ) - def calc_filter_coating_transmission(self, energy: np.floating) -> tuple[np.ndarray, np.ndarray]: + def calc_filter_coating_transmission( + self, energy: np.floating + ) -> tuple[np.ndarray, np.ndarray]: """ calculate thetrnasmission after x-ray beam interacts with the filter and the mylar polymer coating. 
@@ -121,26 +159,29 @@ def calc_filter_coating_transmission(self, energy: np.floating) -> tuple[np.ndar t_f = self.filter.thickness t_c = self.coating.thickness t_p = self.phosphor.thickness - L = self.phosphor.readout_length + L = self.phosphor.readout_length pre_U0 = self.phosphor.pre_U0 det_normal = -self.normal bvec = self.bvec tth, eta = self.pixel_angles() - angs = np.vstack((tth.flatten(), eta.flatten(), - np.zeros(tth.flatten().shape))).T + angs = np.vstack( + (tth.flatten(), eta.flatten(), np.zeros(tth.flatten().shape)) + ).T dvecs = angles_to_dvec(angs, beam_vec=bvec) - secb = 1./np.dot(dvecs, det_normal).reshape(self.shape) + secb = 1.0 / np.dot(dvecs, det_normal).reshape(self.shape) - transmission_filter = self.calc_transmission_generic(secb, t_f, al_f) + transmission_filter = self.calc_transmission_generic(secb, t_f, al_f) transmission_coating = self.calc_transmission_generic(secb, t_c, al_c) - transmission_phosphor = ( - self.calc_transmission_phosphor(secb, t_p, al_p, L, energy, pre_U0)) + transmission_phosphor = self.calc_transmission_phosphor( + secb, t_p, al_p, L, energy, pre_U0 + ) transmission_filter_coating = ( - transmission_filter * transmission_coating) + transmission_filter * transmission_coating + ) return transmission_filter_coating, transmission_phosphor @@ -148,12 +189,9 @@ def calc_filter_coating_transmission(self, energy: np.floating) -> tuple[np.ndar def beam_position(self): output = np.nan * np.ones(2) b_dot_n = np.dot(self.bvec, self.normal) - if np.logical_and( - abs(b_dot_n) > ct.sqrt_epsf, - np.sign(b_dot_n) == -1 - ): + if np.logical_and(abs(b_dot_n) > ct.sqrt_epsf, np.sign(b_dot_n) == -1): u = np.dot(self.normal, self.tvec) / b_dot_n - p2_l = u*self.bvec + p2_l = u * self.bvec p2_d = np.dot(self.rmat.T, p2_l - self.tvec) output = p2_d[:2] return output @@ -180,24 +218,20 @@ def update_memoization_sizes(all_panels): @memoize -def _pixel_angles(origin, pixel_coords, distortion, rmat, tvec, bvec, evec, - rows, cols): +def 
_pixel_angles( + origin, pixel_coords, distortion, rmat, tvec, bvec, evec, rows, cols +): assert len(origin) == 3, "origin must have 3 elements" pix_i, pix_j = pixel_coords - xy = np.ascontiguousarray( - np.vstack([ - pix_j.flatten(), pix_i.flatten() - ]).T - ) + xy = np.ascontiguousarray(np.vstack([pix_j.flatten(), pix_i.flatten()]).T) if distortion is not None: xy = distortion.apply(xy) rmat_b = make_beam_rmat(bvec, evec) angs, g_vec = xy_to_gvec( - xy, rmat, ct.identity_3x3, - tvec, ct.zeros_3, origin, - rmat_b=rmat_b) + xy, rmat, ct.identity_3x3, tvec, ct.zeros_3, origin, rmat_b=rmat_b + ) tth = angs[0].reshape(rows, cols) eta = angs[1].reshape(rows, cols) @@ -205,20 +239,24 @@ def _pixel_angles(origin, pixel_coords, distortion, rmat, tvec, bvec, evec, @memoize -def _pixel_tth_gradient(origin, pixel_coords, distortion, rmat, tvec, bvec, - evec, rows, cols): +def _pixel_tth_gradient( + origin, pixel_coords, distortion, rmat, tvec, bvec, evec, rows, cols +): assert len(origin) == 3, "origin must have 3 elements" - ptth, _ = _pixel_angles(origin, pixel_coords, distortion, rmat, tvec, - bvec, evec, rows, cols) + ptth, _ = _pixel_angles( + origin, pixel_coords, distortion, rmat, tvec, bvec, evec, rows, cols + ) return np.linalg.norm(np.stack(np.gradient(ptth)), axis=0) @memoize -def _pixel_eta_gradient(origin, pixel_coords, distortion, rmat, tvec, bvec, - evec, rows, cols): +def _pixel_eta_gradient( + origin, pixel_coords, distortion, rmat, tvec, bvec, evec, rows, cols +): assert len(origin) == 3, "origin must have 3 elemnts" - _, peta = _pixel_angles(origin, pixel_coords, distortion, rmat, tvec, - bvec, evec, rows, cols) + _, peta = _pixel_angles( + origin, pixel_coords, distortion, rmat, tvec, bvec, evec, rows, cols + ) peta_grad_row = np.gradient(peta, axis=0) peta_grad_col = np.gradient(peta, axis=1) @@ -232,6 +270,5 @@ def _pixel_eta_gradient(origin, pixel_coords, distortion, rmat, tvec, bvec, def _fix_branch_cut_in_gradients(pgarray): return np.min( - 
np.abs(np.stack([pgarray - np.pi, pgarray, pgarray + np.pi])), - axis=0 + np.abs(np.stack([pgarray - np.pi, pgarray, pgarray + np.pi])), axis=0 ) diff --git a/hexrd/material/__init__.py b/hexrd/core/material/__init__.py similarity index 100% rename from hexrd/material/__init__.py rename to hexrd/core/material/__init__.py diff --git a/hexrd/material/crystallography.py b/hexrd/core/material/crystallography.py similarity index 99% rename from hexrd/material/crystallography.py rename to hexrd/core/material/crystallography.py index 574225e67..29e621972 100644 --- a/hexrd/material/crystallography.py +++ b/hexrd/core/material/crystallography.py @@ -34,21 +34,21 @@ import numpy as np -from hexrd.material.unitcell import unitcell -from hexrd.deprecation import deprecated -from hexrd import constants -from hexrd.matrixutil import unitVector -from hexrd.rotations import ( +from hexrd.core.material.unitcell import unitcell +from hexrd.core.deprecation import deprecated +from hexrd.core import constants +from hexrd.core.matrixutil import unitVector +from hexrd.core.rotations import ( rotMatOfExpMap, mapAngle, applySym, ltypeOfLaueGroup, quatOfLaueGroup, ) -from hexrd.transforms import xfcapi -from hexrd import valunits -from hexrd.valunits import toFloat -from hexrd.constants import d2r, r2d, sqrt3by2, epsf, sqrt_epsf +from hexrd.core.transforms import xfcapi +from hexrd.core import valunits +from hexrd.core.valunits import toFloat +from hexrd.core.constants import d2r, r2d, sqrt3by2, epsf, sqrt_epsf """module vars""" @@ -165,6 +165,7 @@ def processWavelength(arg: Union[valunits.valWUnit, float]) -> float: 'wavelength', 'length', constants.keVToAngstrom(arg), 'angstrom' ).getVal(dUnit) + def latticeParameters(lvec): """ Generates direct and reciprocal lattice vector components in a @@ -192,6 +193,7 @@ def latticeParameters(lvec): return [a, b, c, alfa, beta, gama] + def latticePlanes( hkls: np.ndarray, lparms: np.ndarray, @@ -568,6 +570,7 @@ def latticeVectors( 'rparms': 
rparms, } + def hexagonalIndicesFromRhombohedral(hkl): """ converts rhombohedral hkl to hexagonal indices @@ -915,7 +918,7 @@ def exclusions(self, new_exclusions: Optional[np.ndarray]) -> None: elif len(exclusions.shape) == 2: # treat exclusions as ranges of indices for r in exclusions: - excl[self.tThSort[r[0]:r[1]]] = True + excl[self.tThSort[r[0] : r[1]]] = True else: raise RuntimeError( f'Unclear behavior for shape {exclusions.shape}' @@ -1850,8 +1853,10 @@ def get_exclusions(self): def set_exclusions(self, exclusions): self.exclusions = exclusions - @deprecated(new_func="rotations.ltypeOfLaueGroup(self.laueGroup)", - removal_date="2025-08-01") + @deprecated( + new_func="rotations.ltypeOfLaueGroup(self.laueGroup)", + removal_date="2025-08-01", + ) def getLatticeType(self): return ltypeOfLaueGroup(self.laueGroup) diff --git a/hexrd/material/jcpds.py b/hexrd/core/material/jcpds.py similarity index 86% rename from hexrd/material/jcpds.py rename to hexrd/core/material/jcpds.py index 0affaa8c2..202b29639 100644 --- a/hexrd/material/jcpds.py +++ b/hexrd/core/material/jcpds.py @@ -2,7 +2,7 @@ import numpy as np -class JCPDS_extend(): +class JCPDS_extend: def __init__(self, filename=None): self.a0 = 0 self.b0 = 0 @@ -40,15 +40,15 @@ def read_file(self, file): # Construct base name = file without path and without extension name = os.path.splitext(os.path.basename(self.file))[0] self.name = name -# line = '', nd=0 - version = 0. + # line = '', nd=0 + version = 0.0 self.comments = [] self.DiffLines = [] version_status = '' inp = open(file, 'r').readlines() -# my_list = [] # get all the text first and throw into my_list + # my_list = [] # get all the text first and throw into my_list if inp[0][0] in ('2', '3', '4'): version = int(inp[0]) # JCPDS version number @@ -86,44 +86,44 @@ def read_file(self, file): a = float(item[0]) b = a c = a - alpha = 90. - beta = 90. - gamma = 90. 
+ alpha = 90.0 + beta = 90.0 + gamma = 90.0 elif crystal_system == 7: # P, d-sp input a = float(item[0]) b = a c = a - alpha = 90. - beta = 90. - gamma = 90. + alpha = 90.0 + beta = 90.0 + gamma = 90.0 elif crystal_system == 2: # hexagonal a = float(item[0]) c = float(item[1]) b = a - alpha = 90. - beta = 90. - gamma = 120. + alpha = 90.0 + beta = 90.0 + gamma = 120.0 elif crystal_system == 3: # tetragonal a = float(item[0]) c = float(item[1]) b = a - alpha = 90. - beta = 90. - gamma = 90. + alpha = 90.0 + beta = 90.0 + gamma = 90.0 elif crystal_system == 4: # orthorhombic a = float(item[0]) b = float(item[1]) c = float(item[2]) - alpha = 90. - beta = 90. - gamma = 90. + alpha = 90.0 + beta = 90.0 + gamma = 90.0 elif crystal_system == 5: # monoclinic a = float(item[0]) b = float(item[1]) c = float(item[2]) beta = float(item[3]) - alpha = 90. - gamma = 90. + alpha = 90.0 + gamma = 90.0 elif crystal_system == 6: # triclinic a = float(item[0]) b = float(item[1]) @@ -142,7 +142,7 @@ def read_file(self, file): item = str.split(inp[4]) if self.version == 3: - alpha_t = 0. + alpha_t = 0.0 else: alpha_t = float(item[0]) self.alpha_t = alpha_t @@ -227,32 +227,32 @@ def read_file(self, file): if self.symmetry == 'cubic': self.b0 = self.a0 self.c0 = self.a0 - self.alpha0 = 90. - self.beta0 = 90. - self.gamma0 = 90. + self.alpha0 = 90.0 + self.beta0 = 90.0 + self.gamma0 = 90.0 elif self.symmetry == 'manual': self.b0 = self.a0 self.c0 = self.a0 - self.alpha0 = 90. - self.beta0 = 90. - self.gamma0 = 90. + self.alpha0 = 90.0 + self.beta0 = 90.0 + self.gamma0 = 90.0 elif self.symmetry == 'hexagonal' or self.symmetry == 'trigonal': self.b0 = self.a0 - self.alpha0 = 90. - self.beta0 = 90. - self.gamma0 = 120. + self.alpha0 = 90.0 + self.beta0 = 90.0 + self.gamma0 = 120.0 elif self.symmetry == 'tetragonal': self.b0 = self.a0 - self.alpha0 = 90. - self.beta0 = 90. - self.gamma0 = 90. 
+ self.alpha0 = 90.0 + self.beta0 = 90.0 + self.gamma0 = 90.0 elif self.symmetry == 'orthorhombic': - self.alpha0 = 90. - self.beta0 = 90. - self.gamma0 = 90. + self.alpha0 = 90.0 + self.beta0 = 90.0 + self.gamma0 = 90.0 elif self.symmetry == 'monoclinic': - self.alpha0 = 90. - self.gamma0 = 90. + self.alpha0 = 90.0 + self.gamma0 = 90.0 # elif self.symmetry == 'triclinic': jcpdsfile.close() @@ -317,10 +317,9 @@ def calc_volume_unitcell(self): cb = np.cos(np.radians(self.beta0)) cg = np.cos(np.radians(self.gamma0)) - v0 = self.a0*self.b0*self.c0 - f = np.sqrt(1 - ca**2 - cb**2 - cg**2 - + 2 * ca * cb * cg) - return v0*f + v0 = self.a0 * self.b0 * self.c0 + f = np.sqrt(1 - ca**2 - cb**2 - cg**2 + 2 * ca * cb * cg) + return v0 * f class SymmetryMismatch(Exception): diff --git a/hexrd/material/material.py b/hexrd/core/material/material.py similarity index 97% rename from hexrd/material/material.py rename to hexrd/core/material/material.py index 813687a60..b1c6c296d 100644 --- a/hexrd/material/material.py +++ b/hexrd/core/material/material.py @@ -34,27 +34,25 @@ from configparser import ConfigParser as Parser import numpy as np -from hexrd.material.crystallography import PlaneData as PData -from hexrd.material import symmetry, unitcell -from hexrd.material.symbols import two_origin_choice -from hexrd.valunits import valWUnit -from hexrd.constants import (ptable, - ptableinverse, - chargestate) +from hexrd.core.material.crystallography import PlaneData as PData +from hexrd.core.material import symmetry, unitcell +from hexrd.core.material.symbols import two_origin_choice +from hexrd.core.valunits import valWUnit +from hexrd.core.constants import ptable, ptableinverse, chargestate from os import path from pathlib import Path from CifFile import ReadCif import h5py from warnings import warn -from hexrd.material.mksupport import Write2H5File -from hexrd.material.symbols import ( +from hexrd.core.material.mksupport import Write2H5File +from hexrd.core.material.symbols import 
( xtal_sys_dict, Hall_to_sgnum, HM_to_sgnum, ) -from hexrd.utils.compatibility import h5py_read_string -from hexrd.fitting.peakfunctions import _unit_gaussian +from hexrd.core.utils.compatibility import h5py_read_string +from hexrd.core.fitting.peakfunctions import _unit_gaussian __all__ = ['Material', 'loadMaterialList'] @@ -86,6 +84,7 @@ def get_default_sgsetting(sgnum): else: return 0 + # # ---------------------------------------------------CLASS: Material # @@ -492,10 +491,12 @@ def pt_lp_factor(self): def lparms0(self): # Get the lattice parameters for 0 pressure and temperature (at v0) lparms = self.lparms - return np.array([ - *(lparms[:3] / self.pt_lp_factor), - *lparms[3:], - ]) + return np.array( + [ + *(lparms[:3] / self.pt_lp_factor), + *lparms[3:], + ] + ) def calc_pressure(self, volume=None, temperature=None): '''calculate the pressure given the volume @@ -909,13 +910,15 @@ def _readHDFxtal(self, fhdf=DFLT_NAME, xtal=DFLT_NAME): ''' self.pressure = 0 if 'pressure' in gid: - self.pressure = np.array(gid.get('pressure'), - dtype=np.float64).item() + self.pressure = np.array( + gid.get('pressure'), dtype=np.float64 + ).item() self.temperature = 298 if 'temperature' in gid: - self.temperature = np.array(gid.get('temperature'), - dtype=np.float64).item() + self.temperature = np.array( + gid.get('temperature'), dtype=np.float64 + ).item() self.k0 = 100.0 if 'k0' in gid: @@ -955,8 +958,9 @@ def _readHDFxtal(self, fhdf=DFLT_NAME, xtal=DFLT_NAME): if 'dalpha_t_dt' in gid: # this is the temperature derivation of # the pressure derivative of isotropic bulk modulus - dalpha_t_dt = np.array(gid.get('dalpha_t_dt'), - dtype=np.float64).item() + dalpha_t_dt = np.array( + gid.get('dalpha_t_dt'), dtype=np.float64 + ).item() self.dalpha_t_dt = dalpha_t_dt '''Finished with the BM EOS @@ -1104,11 +1108,13 @@ def lparms(self): def lparms(self, v): # Assume we are in `nm`, since that is what self.lparms returns. 
# Convert to angstroms and set with latticeParameters - self.latticeParameters = np.array([ - # Convert to angstroms - *(v[:3] * 10), - *v[3:], - ]) + self.latticeParameters = np.array( + [ + # Convert to angstroms + *(v[:3] * 10), + *v[3:], + ] + ) @property def latticeType(self): @@ -1166,8 +1172,7 @@ def sgsetting(self, val): if val in [0, 1]: self._sgsetting = val else: - msg = (f'space group setting must be either 0' - f' or 1.') + msg = f'space group setting must be either 0' f' or 1.' raise ValueError(msg) sgnum = property(_get_sgnum, _set_sgnum, None, "Space group number") @@ -1455,8 +1460,7 @@ def loadMaterialList(cfgFile): return matList -def load_materials_hdf5( - f, dmin=None, kev=None): +def load_materials_hdf5(f, dmin=None, kev=None): """Load materials from an HDF5 file The file uses the HDF5 file format. diff --git a/hexrd/material/mksupport.py b/hexrd/core/material/mksupport.py similarity index 77% rename from hexrd/material/mksupport.py rename to hexrd/core/material/mksupport.py index 73f718700..9d40712c9 100644 --- a/hexrd/material/mksupport.py +++ b/hexrd/core/material/mksupport.py @@ -1,15 +1,17 @@ -from hexrd.material.symbols import (pstr_Elements, - two_origin_choice, - PrintPossibleSG, - TRIG, - pstr_spacegroup, - pstr_mkxtal) +from hexrd.core.material.symbols import ( + pstr_Elements, + two_origin_choice, + PrintPossibleSG, + TRIG, + pstr_spacegroup, + pstr_mkxtal, +) import h5py import os import numpy as np import datetime import getpass -from hexrd.material.unitcell import _StiffnessDict, _pgDict +from hexrd.core.material.unitcell import _StiffnessDict, _pgDict def mk(filename, xtalname): @@ -26,9 +28,15 @@ def mk(filename, xtalname): space_group, iset = GetSpaceGroup(xtal_sys, bool_trigonal, bool_hexset) AtomInfo = GetAtomInfo() - AtomInfo.update({'file': filename, 'xtalname': xtalname, - 'xtal_sys': xtal_sys, 'SG': space_group, - 'SGsetting': iset}) + AtomInfo.update( + { + 'file': filename, + 'xtalname': xtalname, + 'xtal_sys': 
xtal_sys, + 'SG': space_group, + 'SGsetting': iset, + } + ) Write2H5File(AtomInfo, lat_param) @@ -36,41 +44,44 @@ def mk(filename, xtalname): def GetXtalSystem(): xtal_sys = input("Crystal System (1-7 use the legend above): ") - if(not xtal_sys.isdigit()): + if not xtal_sys.isdigit(): raise ValueError( "Invalid value. \ - Please enter valid number between 1-7 using the legend above.") + Please enter valid number between 1-7 using the legend above." + ) else: xtal_sys = int(xtal_sys) - if(xtal_sys < 1 or xtal_sys > 7): + if xtal_sys < 1 or xtal_sys > 7: raise ValueError( "Value outside range. \ - Please enter numbers between 1 and 7 using legend above") + Please enter numbers between 1 and 7 using legend above" + ) btrigonal = False bhexset = False - if(xtal_sys == 5): + if xtal_sys == 5: print(" 1. Hexagonal setting \n 2. Rhombohedral setting") hexset = input("(1/2)? : ") - if(not hexset.isdigit()): + if not hexset.isdigit(): raise ValueError("Invalid value.") else: hexset = int(hexset) - if(not hexset in [1, 2]): + if not hexset in [1, 2]: raise ValueError( - "Invalid value of integer. Only 1 or 2 is acceptable.") + "Invalid value of integer. Only 1 or 2 is acceptable." 
+ ) btrigonal = True - if(hexset == 1): + if hexset == 1: bhexset = True xtal_sys = 4 # only temporarily set to 4 so that the correct # lattice parameter can be queried next - elif(hexset == 2): + elif hexset == 2: bhexset = False return xtal_sys, btrigonal, bhexset @@ -79,7 +90,7 @@ def GetXtalSystem(): def GetLatticeParameters(xtal_sys, bool_trigonal): a = input("a [nm] : ") - if(not a.replace('.', '', 1).isdigit()): + if not a.replace('.', '', 1).isdigit(): raise ValueError("Invalid floating point value.") else: a = float(a) @@ -89,18 +100,24 @@ def GetLatticeParameters(xtal_sys, bool_trigonal): alpha = 90.0 beta = 90.0 gamma = 90.0 - lat_param = {'a': a, 'b': b, 'c': c, - 'alpha': alpha, 'beta': beta, 'gamma': gamma} + lat_param = { + 'a': a, + 'b': b, + 'c': c, + 'alpha': alpha, + 'beta': beta, + 'gamma': gamma, + } # cubic symmetry - if (xtal_sys == 1): + if xtal_sys == 1: pass # tetragonal symmetry - elif(xtal_sys == 2): + elif xtal_sys == 2: c = input("c [nm] : ") - if(not c.replace('.', '', 1).isdigit()): + if not c.replace('.', '', 1).isdigit(): raise ValueError("Invalid floating point value.") else: c = float(c) @@ -108,15 +125,15 @@ def GetLatticeParameters(xtal_sys, bool_trigonal): lat_param['c'] = c # orthorhombic symmetry - elif(xtal_sys == 3): + elif xtal_sys == 3: b = input("b [nm] : ") - if(not b.replace('.', '', 1).isdigit()): + if not b.replace('.', '', 1).isdigit(): raise ValueError("Invalid floating point value.") else: b = float(b) c = input("c [nm] : ") - if(not c.replace('.', '', 1).isdigit()): + if not c.replace('.', '', 1).isdigit(): raise ValueError("Invalid floating point value.") else: c = float(c) @@ -125,9 +142,9 @@ def GetLatticeParameters(xtal_sys, bool_trigonal): lat_param['c'] = c # hexagonal system - elif(xtal_sys == 4): + elif xtal_sys == 4: c = input("c [nm] : ") - if(not c.replace('.', '', 1).isdigit()): + if not c.replace('.', '', 1).isdigit(): raise ValueError("Invalid floating point value.") else: c = float(c) @@ -135,13 
+152,13 @@ def GetLatticeParameters(xtal_sys, bool_trigonal): lat_param['c'] = c lat_param['gamma'] = 120.0 - if(bool_trigonal): + if bool_trigonal: xtal_sys = 5 # rhombohedral system - elif(xtal_sys == 5): + elif xtal_sys == 5: alpha = input("alpha [deg] : ") - if(not alpha.replace('.', '', 1).isdigit()): + if not alpha.replace('.', '', 1).isdigit(): raise ValueError("Invalid floating point value.") else: alpha = float(alpha) @@ -151,21 +168,21 @@ def GetLatticeParameters(xtal_sys, bool_trigonal): lat_param['gamma'] = alpha # monoclinic system - elif(xtal_sys == 6): + elif xtal_sys == 6: b = input("b [nm] : ") - if(not b.replace('.', '', 1).isdigit()): + if not b.replace('.', '', 1).isdigit(): raise ValueError("Invalid floating point value.") else: b = float(b) c = input("c [nm] : ") - if(not c.replace('.', '', 1).isdigit()): + if not c.replace('.', '', 1).isdigit(): raise ValueError("Invalid floating point value.") else: c = float(c) beta = input("beta [deg] : ") - if(not beta.replace('.', '', 1).isdigit()): + if not beta.replace('.', '', 1).isdigit(): raise ValueError("Invalid floating point value.") else: beta = float(beta) @@ -175,33 +192,33 @@ def GetLatticeParameters(xtal_sys, bool_trigonal): lat_param['beta'] = beta # triclinic system - elif(xtal_sys == 7): + elif xtal_sys == 7: b = input("b [nm] : ") - if(not b.replace('.', '', 1).isdigit()): + if not b.replace('.', '', 1).isdigit(): raise ValueError("Invalid floating point value.") else: b = float(b) c = input("c [nm] : ") - if(not c.replace('.', '', 1).isdigit()): + if not c.replace('.', '', 1).isdigit(): raise ValueError("Invalid floating point value.") else: c = float(c) alpha = input("alpha [deg] : ") - if(not alpha.replace('.', '', 1).isdigit()): + if not alpha.replace('.', '', 1).isdigit(): raise ValueError("Invalid floating point value.") else: alpha = float(alpha) beta = input("beta [deg] : ") - if(not beta.replace('.', '', 1).isdigit()): + if not beta.replace('.', '', 1).isdigit(): raise 
ValueError("Invalid floating point value.") else: beta = float(beta) gamma = input("gamma [deg] : ") - if(not gamma.replace('.', '', 1).isdigit()): + if not gamma.replace('.', '', 1).isdigit(): raise ValueError("Invalid floating point value.") else: gamma = float(gamma) @@ -218,58 +235,63 @@ def GetLatticeParameters(xtal_sys, bool_trigonal): def GetSpaceGroup(xtal_sys, btrigonal, bhexset): - if(btrigonal): + if btrigonal: xtal_sys = 5 - if(btrigonal and (not bhexset)): + if btrigonal and (not bhexset): print("\n The space groups below correspond to the ") print("second (rhombohedral) setting.") print(" Please select one of these space groups.\n") for i in range(0, 7): pstr = str(TRIG[i]) + ":" + pstr_spacegroup[TRIG[i]] - if ((i + 1) % 4 == 0 or i == 6): + if (i + 1) % 4 == 0 or i == 6: print(pstr) else: print(pstr, end='') - print(50*"-"+"\n") + print(50 * "-" + "\n") else: sgmin, sgmax = PrintPossibleSG(xtal_sys) sg = input("Space group number (use legend above): ") - if(not sg.isdigit()): + if not sg.isdigit(): raise ValueError( "Invalid value. Please enter valid number between \ - 1 and 230 using the legend above.") + 1 and 230 using the legend above." + ) else: sg = int(sg) - if(btrigonal and (not bhexset)): - if(not sg in TRIG): + if btrigonal and (not bhexset): + if not sg in TRIG: raise ValueError( "Invalid space group entered. \ - Please use one of the space groups from the legend above") - if (sg == 146): + Please use one of the space groups from the legend above" + ) + if sg == 146: sg = 231 - if (sg == 148): + if sg == 148: sg = 232 - if (sg == 155): + if sg == 155: sg = 233 - if (sg == 160): + if sg == 160: sg = 234 - if (sg == 161): + if sg == 161: sg = 235 - if (sg == 166): + if sg == 166: sg = 236 - if (sg == 167): + if sg == 167: sg = 237 else: - if(sg < sgmin or sg > sgmax): + if sg < sgmin or sg > sgmax: raise ValueError( "Value outside range. 
Please enter numbers between \ - {} and {} using legend above".format(sgmin, sgmax)) + {} and {} using legend above".format( + sgmin, sgmax + ) + ) iset = SpaceGroupSetting(sg) @@ -279,25 +301,24 @@ def GetSpaceGroup(xtal_sys, btrigonal, bhexset): def SpaceGroupSetting(sgnum): iset = 1 - if(sgnum in two_origin_choice): + if sgnum in two_origin_choice: sitesym1 = two_origin_choice[sgnum][0] sitesym2 = two_origin_choice[sgnum][1] print(' ---------------------------------------------') print(' This space group has two origin settings.') - print(' The first setting has site symmetry : ' + - sitesym1) - print(' the second setting has site symmetry : ' + - sitesym2) + print(' The first setting has site symmetry : ' + sitesym1) + print(' the second setting has site symmetry : ' + sitesym2) iset = input(' Which setting do you wish to use (1/2) : ') - if(not iset.isdigit()): + if not iset.isdigit(): raise ValueError("Invalid integer value for atomic number.") else: iset = int(iset) print(iset) - if(not iset in [1, 2]): + if not iset in [1, 2]: raise ValueError(" Value entered for setting must be 1 or 2 !") - return iset-1 + return iset - 1 + def GetAtomInfo(): @@ -310,10 +331,10 @@ def GetAtomInfo(): stiffness = np.zeros([6, 6]) ques = 'y' - while(ques.strip().lower() == 'y' or ques.strip().lower() == 'yes'): + while ques.strip().lower() == 'y' or ques.strip().lower() == 'yes': tmp = input("Enter atomic number of species : ") - if(not tmp.isdigit()): + if not tmp.isdigit(): raise ValueError("Invalid integer value for atomic number.") else: tmp = int(tmp) @@ -331,32 +352,35 @@ def GetAsymmetricPositions(aniU): asym = input( "Enter asymmetric position of atom in unit cell \ - separated by comma (fractional coordinates) : ") + separated by comma (fractional coordinates) : " + ) asym = [x.strip() for x in asym.split(',')] for i, x in enumerate(asym): tmp = x.split('/') - if(len(tmp) == 2): - if(tmp[1].strip() != '0'): - asym[i] = str(float(tmp[0])/float(tmp[1])) + if len(tmp) 
== 2: + if tmp[1].strip() != '0': + asym[i] = str(float(tmp[0]) / float(tmp[1])) else: raise ValueError("Division by zero in fractional coordinates.") else: pass - if(len(asym) != 3): + if len(asym) != 3: raise ValueError("Need 3 coordinates in x,y,z fractional coordinates.") for i, x in enumerate(asym): - if(not x.replace('.', '', 1).isdigit()): + if not x.replace('.', '', 1).isdigit(): raise ValueError( - "Invalid floating point value in fractional coordinates.") + "Invalid floating point value in fractional coordinates." + ) else: asym[i] = float(x) - if(asym[i] < 0.0 or asym[i] >= 1.0): + if asym[i] < 0.0 or asym[i] >= 1.0: raise ValueError( " fractional coordinates only in the \ - range [0,1) i.e. 1 excluded") + range [0,1) i.e. 1 excluded" + ) occ, dw = GetOccDW(aniU=aniU) if isinstance(dw, float): @@ -371,73 +395,81 @@ def GetAsymmetricPositions(aniU): def GetOccDW(aniU=0): occ = input("Enter site occupation : ") - if(not occ.replace('.', '', 1).isdigit()): + if not occ.replace('.', '', 1).isdigit(): raise ValueError( - "Invalid floating point value in fractional coordinates.") + "Invalid floating point value in fractional coordinates." + ) else: occ = float(occ) - if(occ > 1.0 or occ <= 0.0): + if occ > 1.0 or occ <= 0.0: raise ValueError( - "site occupation can only in range (0,1.0] i.e. 0 excluded") + "site occupation can only in range (0,1.0] i.e. 0 excluded" + ) - if(aniU != 0): + if aniU != 0: ani = aniU else: ani = input( "Isotropic or anisotropic Debye-Waller factor? 
\n \ - 1 for isotropic, 2 for anisotropic : ") + 1 for isotropic, 2 for anisotropic : " + ) - if(not ani.isdigit()): + if not ani.isdigit(): raise ValueError("Invalid integer value for atomic number.") else: ani = int(ani) - if(ani == 1): + if ani == 1: dw = input("Enter isotropic Debye-Waller factor [nm^(-2)] : ") - if(not dw.replace('.', '', 1).isdigit()): + if not dw.replace('.', '', 1).isdigit(): raise ValueError( - "Invalid floating point value in fractional coordinates.") + "Invalid floating point value in fractional coordinates." + ) else: dw = float(dw) return [occ, dw] - elif(ani == 2): + elif ani == 2: - U = np.zeros([6, ]) + U = np.zeros( + [ + 6, + ] + ) U11 = input("Enter U11 [nm^2] : ") - if(not U11.replace('.', '', 1).isdigit()): + if not U11.replace('.', '', 1).isdigit(): raise ValueError("Invalid floating point value in U11.") else: U[0] = float(U11) U22 = input("Enter U22 [nm^2] : ") - if(not U22.replace('.', '', 1).isdigit()): + if not U22.replace('.', '', 1).isdigit(): raise ValueError("Invalid floating point value in U22.") else: U[1] = float(U22) U33 = input("Enter U33 [nm^2] : ") - if(not U33.replace('.', '', 1).isdigit()): + if not U33.replace('.', '', 1).isdigit(): raise ValueError("Invalid floating point value in U33.") else: U[2] = float(U33) U12 = input("Enter U12 [nm^2] : ") - if(not U12.replace('.', '', 1).isdigit()): + if not U12.replace('.', '', 1).isdigit(): raise ValueError("Invalid floating point value in U12.") else: U[3] = float(U12) U13 = input("Enter U13 [nm^2] : ") - if(not U13.replace('.', '', 1).isdigit()): + if not U13.replace('.', '', 1).isdigit(): raise ValueError("Invalid floating point value in U13.") else: U[4] = float(U13) U23 = input("Enter U23 [nm^2] : ") - if(not U23.replace('.', '', 1).isdigit()): + if not U23.replace('.', '', 1).isdigit(): raise ValueError("Invalid floating point value in U23.") else: U[5] = float(U23) @@ -447,6 +479,7 @@ def GetOccDW(aniU=0): else: raise ValueError("Invalid input. 
Only 1 or 2 acceptable.") + # write to H5 file @@ -458,10 +491,10 @@ def Write2H5File(AtomInfo, lat_param, path=None): # first check if file exists fexist = os.path.isfile(AtomInfo['file']) - if(fexist): + if fexist: fid = h5py.File(AtomInfo['file'], 'r+') else: - Warning('File doesn''t exist. creating it') + Warning('File doesn' 't exist. creating it') fid = h5py.File(AtomInfo['file'], 'x') close = True @@ -498,7 +531,8 @@ def WriteH5Data(fid, AtomInfo, lat_param, path=None): gid = fid.create_group(path) did = gid.create_dataset( - "Atomtypes", (len(AtomInfo['Z']), ), dtype=np.int32) + "Atomtypes", (len(AtomInfo['Z']),), dtype=np.int32 + ) did.write_direct(np.array(AtomInfo['Z'], dtype=np.int32)) did = gid.create_dataset("CrystalSystem", (1,), dtype=np.int32) @@ -552,9 +586,9 @@ def WriteH5Data(fid, AtomInfo, lat_param, path=None): if 'hkls' in AtomInfo: if AtomInfo['hkls'].shape != (0,): - did = gid.create_dataset("hkls", - AtomInfo['hkls'].shape, - dtype=np.int32) + did = gid.create_dataset( + "hkls", AtomInfo['hkls'].shape, dtype=np.int32 + ) did.write_direct(AtomInfo['hkls']) if 'dmin' in AtomInfo: @@ -566,7 +600,8 @@ def WriteH5Data(fid, AtomInfo, lat_param, path=None): did.write_direct(np.array(AtomInfo['kev'], dtype=np.float64)) did = gid.create_dataset( - "AtomData", (4, len(AtomInfo['Z'])), dtype=np.float64) + "AtomData", (4, len(AtomInfo['Z'])), dtype=np.float64 + ) # this is done for contiguous c-allocation arr = np.array(AtomInfo['APOS'], dtype=np.float32).transpose() arr2 = arr.copy() @@ -575,9 +610,9 @@ def WriteH5Data(fid, AtomInfo, lat_param, path=None): if 'charge' in AtomInfo: data = np.array(AtomInfo['charge'], dtype=object) dt = h5py.special_dtype(vlen=str) - did = gid.create_dataset("ChargeStates", - (len(AtomInfo['Z']),), - dtype=dt) + did = gid.create_dataset( + "ChargeStates", (len(AtomInfo['Z']),), dtype=dt + ) did.write_direct(data) ''' @@ -587,7 +622,8 @@ def WriteH5Data(fid, AtomInfo, lat_param, path=None): if not 
isinstance(AtomInfo['U'][0], np.floating): did = gid.create_dataset( - "U", (6, len(AtomInfo['Z'])), dtype=np.float64) + "U", (6, len(AtomInfo['Z'])), dtype=np.float64 + ) arr = np.array(AtomInfo['U'], dtype=np.float32).transpose() arr2 = arr.copy() did.write_direct(arr2) diff --git a/hexrd/material/spacegroup.py b/hexrd/core/material/spacegroup.py similarity index 64% rename from hexrd/material/spacegroup.py rename to hexrd/core/material/spacegroup.py index 0fd69894e..647f3777b 100644 --- a/hexrd/material/spacegroup.py +++ b/hexrd/core/material/spacegroup.py @@ -72,10 +72,11 @@ from collections import OrderedDict from math import sqrt, floor -from hexrd import constants -from hexrd.material import symbols, symmetry +from hexrd.core import constants +from hexrd.core.material import symbols, symmetry import numpy as np + # __all__ = ['SpaceGroup'] @@ -134,8 +135,7 @@ def _set_sgnum(self, v): self._pointGroup = pglg[0] self._laueGroup = pglg[1] - sgnum = property(_get_sgnum, _set_sgnum, None, - "Space group number") + sgnum = property(_get_sgnum, _set_sgnum, None, "Space group number") @property def laueGroup(self): @@ -161,7 +161,7 @@ def latticeType(self): Rhombohedral lattices are treated as trigonal using the "obverse" setting. 
-""" + """ return _ltDict[self.laueGroup] @property @@ -273,34 +273,38 @@ def _map_sg_info(hstr): laue_10 = 'th' laue_11 = 'oh' -_laue_international = {laue_1:"-1", -laue_2:"2/m", -laue_3:"mmm", -laue_4:"4/m", -laue_5:"4/mmm", -laue_6:"-3", -laue_7:"-3m", -laue_8:"6/m", -laue_9:"6/mmm", -laue_10:"m3", -laue_11:"m3m"} +_laue_international = { + laue_1: "-1", + laue_2: "2/m", + laue_3: "mmm", + laue_4: "4/m", + laue_5: "4/mmm", + laue_6: "-3", + laue_7: "-3m", + laue_8: "6/m", + laue_9: "6/mmm", + laue_10: "m3", + laue_11: "m3m", +} + -def _sgrange(min, max): return tuple(range(min, max + 1)) # inclusive range +def _sgrange(min, max): + return tuple(range(min, max + 1)) # inclusive range _pgDict = { - _sgrange(1, 1): ('c1', laue_1), # Triclinic - _sgrange(2, 2): ('ci', laue_1), # laue 1 - _sgrange(3, 5): ('c2', laue_2), # Monoclinic - _sgrange(6, 9): ('cs', laue_2), - _sgrange(10, 15): ('c2h', laue_2), # laue 2 - _sgrange(16, 24): ('d2', laue_3), # Orthorhombic - _sgrange(25, 46): ('c2v', laue_3), - _sgrange(47, 74): ('d2h', laue_3), # laue 3 - _sgrange(75, 80): ('c4', laue_4), # Tetragonal - _sgrange(81, 82): ('s4', laue_4), - _sgrange(83, 88): ('c4h', laue_4), # laue 4 - _sgrange(89, 98): ('d4', laue_5), + _sgrange(1, 1): ('c1', laue_1), # Triclinic + _sgrange(2, 2): ('ci', laue_1), # laue 1 + _sgrange(3, 5): ('c2', laue_2), # Monoclinic + _sgrange(6, 9): ('cs', laue_2), + _sgrange(10, 15): ('c2h', laue_2), # laue 2 + _sgrange(16, 24): ('d2', laue_3), # Orthorhombic + _sgrange(25, 46): ('c2v', laue_3), + _sgrange(47, 74): ('d2h', laue_3), # laue 3 + _sgrange(75, 80): ('c4', laue_4), # Tetragonal + _sgrange(81, 82): ('s4', laue_4), + _sgrange(83, 88): ('c4h', laue_4), # laue 4 + _sgrange(89, 98): ('d4', laue_5), _sgrange(99, 110): ('c4v', laue_5), _sgrange(111, 122): ('d2d', laue_5), _sgrange(123, 142): ('d4h', laue_5), # laue 5 @@ -316,9 +320,9 @@ def _sgrange(min, max): return tuple(range(min, max + 1)) # inclusive range _sgrange(183, 186): ('c6v', laue_9), 
_sgrange(187, 190): ('d3h', laue_9), _sgrange(191, 194): ('d6h', laue_9), # laue 9 - _sgrange(195, 199): ('t', laue_10), # Cubic + _sgrange(195, 199): ('t', laue_10), # Cubic _sgrange(200, 206): ('th', laue_10), # laue 10 - _sgrange(207, 214): ('o', laue_11), + _sgrange(207, 214): ('o', laue_11), _sgrange(215, 220): ('td', laue_11), _sgrange(221, 230): ('oh', laue_11), # laue 11 } @@ -347,7 +351,7 @@ def _sgrange(min, max): return tuple(range(min, max + 1)) # inclusive range laue_8: ltype_6, laue_9: ltype_6, laue_10: ltype_7, - laue_11: ltype_7 + laue_11: ltype_7, } @@ -361,11 +365,11 @@ def _sgrange(min, max): return tuple(range(min, max + 1)) # inclusive range ltype_1: (tuple(range(6)), lambda p: p), # all 6 # note beta ltype_2: ((0, 1, 2, 4), lambda p: (p[0], p[1], p[2], 90, p[3], 90)), - ltype_3: ((0, 1, 2), lambda p: (p[0], p[1], p[2], 90, 90, 90)), - ltype_4: ((0, 2), lambda p: (p[0], p[0], p[1], 90, 90, 90)), - ltype_5: ((0, 2), lambda p: (p[0], p[0], p[1], 90, 90, 120)), - ltype_6: ((0, 2), lambda p: (p[0], p[0], p[1], 90, 90, 120)), - ltype_7: ((0,), lambda p: (p[0], p[0], p[0], 90, 90, 90)), + ltype_3: ((0, 1, 2), lambda p: (p[0], p[1], p[2], 90, 90, 90)), + ltype_4: ((0, 2), lambda p: (p[0], p[0], p[1], 90, 90, 90)), + ltype_5: ((0, 2), lambda p: (p[0], p[0], p[1], 90, 90, 120)), + ltype_6: ((0, 2), lambda p: (p[0], p[0], p[1], 90, 90, 120)), + ltype_7: ((0,), lambda p: (p[0], p[0], p[0], 90, 90, 90)), } @@ -374,47 +378,53 @@ def Allowed_HKLs(sgnum, hkllist): this function checks if a particular g vector is allowed by lattice centering, screw axis or glide plane """ - sg_hmsymbol = symbols.pstr_spacegroup[sgnum-1].strip() + sg_hmsymbol = symbols.pstr_spacegroup[sgnum - 1].strip() symmorphic = False - if(sgnum in constants.sgnum_symmorphic): + if sgnum in constants.sgnum_symmorphic: symmorphic = True hkllist = np.atleast_2d(hkllist) centering = sg_hmsymbol[0] - if(centering == 'P'): + if centering == 'P': # all reflections are allowed - mask = 
np.ones([hkllist.shape[0], ], dtype=bool) - elif(centering == 'F'): + mask = np.ones( + [ + hkllist.shape[0], + ], + dtype=bool, + ) + elif centering == 'F': # same parity - seo = np.sum(np.mod(hkllist+100, 2), axis=1) + seo = np.sum(np.mod(hkllist + 100, 2), axis=1) mask = np.logical_not(np.logical_or(seo == 1, seo == 2)) - elif(centering == 'I'): + elif centering == 'I': # sum is even - seo = np.mod(np.sum(hkllist, axis=1)+100, 2) - mask = (seo == 0) - elif(centering == 'A'): + seo = np.mod(np.sum(hkllist, axis=1) + 100, 2) + mask = seo == 0 + elif centering == 'A': # k+l is even - seo = np.mod(np.sum(hkllist[:, 1:3], axis=1)+100, 2) + seo = np.mod(np.sum(hkllist[:, 1:3], axis=1) + 100, 2) mask = seo == 0 - elif(centering == 'B'): + elif centering == 'B': # h+l is even - seo = np.mod(hkllist[:, 0]+hkllist[:, 2]+100, 2) + seo = np.mod(hkllist[:, 0] + hkllist[:, 2] + 100, 2) mask = seo == 0 - elif(centering == 'C'): + elif centering == 'C': # h+k is even - seo = np.mod(hkllist[:, 0]+hkllist[:, 1]+100, 2) + seo = np.mod(hkllist[:, 0] + hkllist[:, 1] + 100, 2) mask = seo == 0 - elif(centering == 'R'): + elif centering == 'R': # -h+k+l is divisible by 3 - seo = np.mod(-hkllist[:, 0]+hkllist[:, 1]+hkllist[:, 2]+90, 3) + seo = np.mod(-hkllist[:, 0] + hkllist[:, 1] + hkllist[:, 2] + 90, 3) mask = seo == 0 else: raise RuntimeError( - 'IsGAllowed: unknown lattice centering encountered.') + 'IsGAllowed: unknown lattice centering encountered.' 
+ ) hkls = hkllist[mask, :] - if(not symmorphic): + if not symmorphic: hkls = NonSymmorphicAbsences(sgnum, hkls) return hkls.astype(np.int32) @@ -434,118 +444,123 @@ def omitscrewaxisabsences(sgnum, hkllist, ax, iax): """ latticeType = symmetry.latticeType(sgnum) - if(latticeType == 'triclinic'): + if latticeType == 'triclinic': """ - no systematic absences for the triclinic crystals + no systematic absences for the triclinic crystals """ pass - elif(latticeType == 'monoclinic'): - if(ax != '2_1'): + elif latticeType == 'monoclinic': + if ax != '2_1': raise RuntimeError( 'omitscrewaxisabsences: monoclinic systems\ - can only have 2_1 screw axis') + can only have 2_1 screw axis' + ) """ only unique b-axis will be encoded it is the users responsibility to input lattice parameters in the standard setting with b-axis having the 2-fold symmetry """ - if(iax == 1): + if iax == 1: mask1 = np.logical_and(hkllist[:, 0] == 0, hkllist[:, 2] == 0) - mask2 = np.mod(hkllist[:, 1]+100, 2) != 0 + mask2 = np.mod(hkllist[:, 1] + 100, 2) != 0 mask = np.logical_not(np.logical_and(mask1, mask2)) hkllist = hkllist[mask, :] else: raise RuntimeError( 'omitscrewaxisabsences: only b-axis\ - can have 2_1 screw axis') + can have 2_1 screw axis' + ) - elif(latticeType == 'orthorhombic'): - if(ax != '2_1'): + elif latticeType == 'orthorhombic': + if ax != '2_1': raise RuntimeError( 'omitscrewaxisabsences: orthorhombic systems\ - can only have 2_1 screw axis') + can only have 2_1 screw axis' + ) """ 2_1 screw on primary axis h00 ; h = 2n """ - if(iax == 0): + if iax == 0: mask1 = np.logical_and(hkllist[:, 1] == 0, hkllist[:, 2] == 0) - mask2 = np.mod(hkllist[:, 0]+100, 2) != 0 + mask2 = np.mod(hkllist[:, 0] + 100, 2) != 0 mask = np.logical_not(np.logical_and(mask1, mask2)) hkllist = hkllist[mask, :] - elif(iax == 1): + elif iax == 1: mask1 = np.logical_and(hkllist[:, 0] == 0, hkllist[:, 2] == 0) - mask2 = np.mod(hkllist[:, 1]+100, 2) != 0 + mask2 = np.mod(hkllist[:, 1] + 100, 2) != 0 mask = 
np.logical_not(np.logical_and(mask1, mask2)) hkllist = hkllist[mask, :] - elif(iax == 2): + elif iax == 2: mask1 = np.logical_and(hkllist[:, 0] == 0, hkllist[:, 1] == 0) - mask2 = np.mod(hkllist[:, 2]+100, 2) != 0 + mask2 = np.mod(hkllist[:, 2] + 100, 2) != 0 mask = np.logical_not(np.logical_and(mask1, mask2)) hkllist = hkllist[mask, :] - elif(latticeType == 'tetragonal'): - if(iax == 0): + elif latticeType == 'tetragonal': + if iax == 0: mask1 = np.logical_and(hkllist[:, 0] == 0, hkllist[:, 1] == 0) - if(ax == '4_2'): - mask2 = np.mod(hkllist[:, 2]+100, 2) != 0 - elif(ax in ['4_1', '4_3']): - mask2 = np.mod(hkllist[:, 2]+100, 4) != 0 + if ax == '4_2': + mask2 = np.mod(hkllist[:, 2] + 100, 2) != 0 + elif ax in ['4_1', '4_3']: + mask2 = np.mod(hkllist[:, 2] + 100, 4) != 0 mask = np.logical_not(np.logical_and(mask1, mask2)) hkllist = hkllist[mask, :] - elif(iax == 1): + elif iax == 1: mask1 = np.logical_and(hkllist[:, 1] == 0, hkllist[:, 2] == 0) mask2 = np.logical_and(hkllist[:, 0] == 0, hkllist[:, 2] == 0) - if(ax == '2_1'): - mask3 = np.mod(hkllist[:, 0]+100, 2) != 0 - mask4 = np.mod(hkllist[:, 1]+100, 2) != 0 + if ax == '2_1': + mask3 = np.mod(hkllist[:, 0] + 100, 2) != 0 + mask4 = np.mod(hkllist[:, 1] + 100, 2) != 0 mask1 = np.logical_not(np.logical_and(mask1, mask3)) mask2 = np.logical_not(np.logical_and(mask2, mask4)) mask = ~np.logical_or(~mask1, ~mask2) hkllist = hkllist[mask, :] - elif(latticeType == 'trigonal'): + elif latticeType == 'trigonal': mask1 = np.logical_and(hkllist[:, 0] == 0, hkllist[:, 1] == 0) - if(iax == 0): - if(ax in ['3_1', '3_2']): - mask2 = np.mod(hkllist[:, 2]+90, 3) != 0 + if iax == 0: + if ax in ['3_1', '3_2']: + mask2 = np.mod(hkllist[:, 2] + 90, 3) != 0 else: raise RuntimeError( 'omitscrewaxisabsences: trigonal \ - systems can only have screw axis') + systems can only have screw axis' + ) mask = np.logical_not(np.logical_and(mask1, mask2)) hkllist = hkllist[mask, :] - elif(latticeType == 'hexagonal'): + elif latticeType == 
'hexagonal': mask1 = np.logical_and(hkllist[:, 0] == 0, hkllist[:, 1] == 0) - if(iax == 0): - if(ax == '6_3'): - mask2 = np.mod(hkllist[:, 2]+100, 2) != 0 - elif(ax in ['3_1', '3_2', '6_2', '6_4']): - mask2 = np.mod(hkllist[:, 2]+90, 3) != 0 - elif(ax in ['6_1', '6_5']): - mask2 = np.mod(hkllist[:, 2]+120, 6) != 0 + if iax == 0: + if ax == '6_3': + mask2 = np.mod(hkllist[:, 2] + 100, 2) != 0 + elif ax in ['3_1', '3_2', '6_2', '6_4']: + mask2 = np.mod(hkllist[:, 2] + 90, 3) != 0 + elif ax in ['6_1', '6_5']: + mask2 = np.mod(hkllist[:, 2] + 120, 6) != 0 else: raise RuntimeError( 'omitscrewaxisabsences: hexagonal \ - systems can only have screw axis') + systems can only have screw axis' + ) mask = np.logical_not(np.logical_and(mask1, mask2)) hkllist = hkllist[mask, :] - elif(latticeType == 'cubic'): + elif latticeType == 'cubic': mask1 = np.logical_and(hkllist[:, 0] == 0, hkllist[:, 1] == 0) mask2 = np.logical_and(hkllist[:, 0] == 0, hkllist[:, 2] == 0) mask3 = np.logical_and(hkllist[:, 1] == 0, hkllist[:, 2] == 0) - if(ax in ['2_1', '4_2']): - mask4 = np.mod(hkllist[:, 2]+100, 2) != 0 - mask5 = np.mod(hkllist[:, 1]+100, 2) != 0 - mask6 = np.mod(hkllist[:, 0]+100, 2) != 0 - elif(ax in ['4_1', '4_3']): - mask4 = np.mod(hkllist[:, 2]+100, 4) != 0 - mask5 = np.mod(hkllist[:, 1]+100, 4) != 0 - mask6 = np.mod(hkllist[:, 0]+100, 4) != 0 + if ax in ['2_1', '4_2']: + mask4 = np.mod(hkllist[:, 2] + 100, 2) != 0 + mask5 = np.mod(hkllist[:, 1] + 100, 2) != 0 + mask6 = np.mod(hkllist[:, 0] + 100, 2) != 0 + elif ax in ['4_1', '4_3']: + mask4 = np.mod(hkllist[:, 2] + 100, 4) != 0 + mask5 = np.mod(hkllist[:, 1] + 100, 4) != 0 + mask6 = np.mod(hkllist[:, 0] + 100, 4) != 0 mask1 = np.logical_not(np.logical_and(mask1, mask4)) mask2 = np.logical_not(np.logical_and(mask2, mask5)) mask3 = np.logical_not(np.logical_and(mask3, mask6)) @@ -569,229 +584,239 @@ def omitglideplaneabsences(sgnum, hkllist, plane, ip): """ latticeType = symmetry.latticeType(sgnum) - if(latticeType == 'triclinic'): 
+ if latticeType == 'triclinic': pass - elif(latticeType == 'monoclinic'): - if(ip == 1): + elif latticeType == 'monoclinic': + if ip == 1: mask1 = hkllist[:, 1] == 0 - if(plane == 'c'): - mask2 = np.mod(hkllist[:, 2]+100, 2) != 0 - elif(plane == 'a'): - mask2 = np.mod(hkllist[:, 0]+100, 2) != 0 - elif(plane == 'n'): - mask2 = np.mod(hkllist[:, 0]+hkllist[:, 2]+100, 2) != 0 + if plane == 'c': + mask2 = np.mod(hkllist[:, 2] + 100, 2) != 0 + elif plane == 'a': + mask2 = np.mod(hkllist[:, 0] + 100, 2) != 0 + elif plane == 'n': + mask2 = np.mod(hkllist[:, 0] + hkllist[:, 2] + 100, 2) != 0 mask = np.logical_not(np.logical_and(mask1, mask2)) hkllist = hkllist[mask, :] - elif(latticeType == 'orthorhombic'): - if(ip == 0): + elif latticeType == 'orthorhombic': + if ip == 0: mask1 = hkllist[:, 0] == 0 - if(plane == 'b'): - mask2 = np.mod(hkllist[:, 1]+100, 2) != 0 - elif(plane == 'c'): - mask2 = np.mod(hkllist[:, 2]+100, 2) != 0 - elif(plane == 'n'): - mask2 = np.mod(hkllist[:, 1]+hkllist[:, 2]+100, 2) != 0 - elif(plane == 'd'): - mask2 = np.mod(hkllist[:, 1]+hkllist[:, 2]+100, 4) != 0 + if plane == 'b': + mask2 = np.mod(hkllist[:, 1] + 100, 2) != 0 + elif plane == 'c': + mask2 = np.mod(hkllist[:, 2] + 100, 2) != 0 + elif plane == 'n': + mask2 = np.mod(hkllist[:, 1] + hkllist[:, 2] + 100, 2) != 0 + elif plane == 'd': + mask2 = np.mod(hkllist[:, 1] + hkllist[:, 2] + 100, 4) != 0 mask = np.logical_not(np.logical_and(mask1, mask2)) hkllist = hkllist[mask, :] - elif(ip == 1): + elif ip == 1: mask1 = hkllist[:, 1] == 0 - if(plane == 'c'): - mask2 = np.mod(hkllist[:, 2]+100, 2) != 0 - elif(plane == 'a'): - mask2 = np.mod(hkllist[:, 0]+100, 2) != 0 - elif(plane == 'n'): - mask2 = np.mod(hkllist[:, 0]+hkllist[:, 2]+100, 2) != 0 - elif(plane == 'd'): - mask2 = np.mod(hkllist[:, 0]+hkllist[:, 2]+100, 4) != 0 + if plane == 'c': + mask2 = np.mod(hkllist[:, 2] + 100, 2) != 0 + elif plane == 'a': + mask2 = np.mod(hkllist[:, 0] + 100, 2) != 0 + elif plane == 'n': + mask2 = 
np.mod(hkllist[:, 0] + hkllist[:, 2] + 100, 2) != 0 + elif plane == 'd': + mask2 = np.mod(hkllist[:, 0] + hkllist[:, 2] + 100, 4) != 0 mask = np.logical_not(np.logical_and(mask1, mask2)) hkllist = hkllist[mask, :] - elif(ip == 2): + elif ip == 2: mask1 = hkllist[:, 2] == 0 - if(plane == 'a'): - mask2 = np.mod(hkllist[:, 0]+100, 2) != 0 - elif(plane == 'b'): - mask2 = np.mod(hkllist[:, 1]+100, 2) != 0 - elif(plane == 'n'): - mask2 = np.mod(hkllist[:, 0]+hkllist[:, 1]+100, 2) != 0 - elif(plane == 'd'): - mask2 = np.mod(hkllist[:, 0]+hkllist[:, 1]+100, 4) != 0 + if plane == 'a': + mask2 = np.mod(hkllist[:, 0] + 100, 2) != 0 + elif plane == 'b': + mask2 = np.mod(hkllist[:, 1] + 100, 2) != 0 + elif plane == 'n': + mask2 = np.mod(hkllist[:, 0] + hkllist[:, 1] + 100, 2) != 0 + elif plane == 'd': + mask2 = np.mod(hkllist[:, 0] + hkllist[:, 1] + 100, 4) != 0 mask = np.logical_not(np.logical_and(mask1, mask2)) hkllist = hkllist[mask, :] - elif(latticeType == 'tetragonal'): - if(ip == 0): + elif latticeType == 'tetragonal': + if ip == 0: mask1 = hkllist[:, 2] == 0 - if(plane == 'a'): - mask2 = np.mod(hkllist[:, 0]+100, 2) != 0 - elif(plane == 'b'): - mask2 = np.mod(hkllist[:, 1]+100, 2) != 0 - elif(plane == 'n'): - mask2 = np.mod(hkllist[:, 0]+hkllist[:, 1]+100, 2) != 0 - elif(plane == 'd'): - mask2 = np.mod(hkllist[:, 0]+hkllist[:, 1]+100, 4) != 0 + if plane == 'a': + mask2 = np.mod(hkllist[:, 0] + 100, 2) != 0 + elif plane == 'b': + mask2 = np.mod(hkllist[:, 1] + 100, 2) != 0 + elif plane == 'n': + mask2 = np.mod(hkllist[:, 0] + hkllist[:, 1] + 100, 2) != 0 + elif plane == 'd': + mask2 = np.mod(hkllist[:, 0] + hkllist[:, 1] + 100, 4) != 0 mask = np.logical_not(np.logical_and(mask1, mask2)) hkllist = hkllist[mask, :] - elif(ip == 1): + elif ip == 1: mask1 = hkllist[:, 0] == 0 mask2 = hkllist[:, 1] == 0 - if(plane in ['a', 'b']): - mask3 = np.mod(hkllist[:, 1]+100, 2) != 0 - mask4 = np.mod(hkllist[:, 0]+100, 2) != 0 - elif(plane == 'c'): - mask3 = np.mod(hkllist[:, 2]+100, 2) 
!= 0 + if plane in ['a', 'b']: + mask3 = np.mod(hkllist[:, 1] + 100, 2) != 0 + mask4 = np.mod(hkllist[:, 0] + 100, 2) != 0 + elif plane == 'c': + mask3 = np.mod(hkllist[:, 2] + 100, 2) != 0 mask4 = mask3 - elif(plane == 'n'): - mask3 = np.mod(hkllist[:, 1]+hkllist[:, 2]+100, 2) != 0 - mask4 = np.mod(hkllist[:, 0]+hkllist[:, 2]+100, 2) != 0 - elif(plane == 'd'): - mask3 = np.mod(hkllist[:, 1]+hkllist[:, 2]+100, 4) != 0 - mask4 = np.mod(hkllist[:, 0]+hkllist[:, 2]+100, 4) != 0 + elif plane == 'n': + mask3 = np.mod(hkllist[:, 1] + hkllist[:, 2] + 100, 2) != 0 + mask4 = np.mod(hkllist[:, 0] + hkllist[:, 2] + 100, 2) != 0 + elif plane == 'd': + mask3 = np.mod(hkllist[:, 1] + hkllist[:, 2] + 100, 4) != 0 + mask4 = np.mod(hkllist[:, 0] + hkllist[:, 2] + 100, 4) != 0 mask1 = np.logical_not(np.logical_and(mask1, mask3)) mask2 = np.logical_not(np.logical_and(mask2, mask4)) mask = ~np.logical_or(~mask1, ~mask2) hkllist = hkllist[mask, :] - elif(ip == 2): + elif ip == 2: mask1 = np.abs(hkllist[:, 0]) == np.abs(hkllist[:, 1]) - if(plane in ['c', 'n']): - mask2 = np.mod(hkllist[:, 2]+100, 2) != 0 - elif(plane == 'd'): - mask2 = np.mod(2*hkllist[:, 0]+hkllist[:, 2]+100, 4) != 0 + if plane in ['c', 'n']: + mask2 = np.mod(hkllist[:, 2] + 100, 2) != 0 + elif plane == 'd': + mask2 = np.mod(2 * hkllist[:, 0] + hkllist[:, 2] + 100, 4) != 0 mask = np.logical_not(np.logical_and(mask1, mask2)) hkllist = hkllist[mask, :] - elif(latticeType == 'trigonal'): - if(plane != 'c'): + elif latticeType == 'trigonal': + if plane != 'c': raise RuntimeError( 'omitglideplaneabsences: only c-glide \ - allowed for trigonal systems') - if(ip == 1): + allowed for trigonal systems' + ) + if ip == 1: mask1 = hkllist[:, 0] == 0 mask2 = hkllist[:, 1] == 0 mask3 = hkllist[:, 0] == -hkllist[:, 1] - if(plane == 'c'): - mask4 = np.mod(hkllist[:, 2]+100, 2) != 0 + if plane == 'c': + mask4 = np.mod(hkllist[:, 2] + 100, 2) != 0 else: raise RuntimeError( 'omitglideplaneabsences: only c-glide \ - allowed for trigonal 
systems') + allowed for trigonal systems' + ) - elif(ip == 2): + elif ip == 2: mask1 = hkllist[:, 1] == hkllist[:, 0] - mask2 = hkllist[:, 0] == -2*hkllist[:, 1] - mask3 = -2*hkllist[:, 0] == hkllist[:, 1] - if(plane == 'c'): - mask4 = np.mod(hkllist[:, 2]+100, 2) != 0 + mask2 = hkllist[:, 0] == -2 * hkllist[:, 1] + mask3 = -2 * hkllist[:, 0] == hkllist[:, 1] + if plane == 'c': + mask4 = np.mod(hkllist[:, 2] + 100, 2) != 0 else: raise RuntimeError( 'omitglideplaneabsences: only c-glide \ - allowed for trigonal systems') + allowed for trigonal systems' + ) mask1 = np.logical_and(mask1, mask4) mask2 = np.logical_and(mask2, mask4) mask3 = np.logical_and(mask3, mask4) - mask = np.logical_not(np.logical_or( - mask1, np.logical_or(mask2, mask3))) + mask = np.logical_not( + np.logical_or(mask1, np.logical_or(mask2, mask3)) + ) hkllist = hkllist[mask, :] - elif(latticeType == 'hexagonal'): - if(plane != 'c'): + elif latticeType == 'hexagonal': + if plane != 'c': raise RuntimeError( 'omitglideplaneabsences: only c-glide \ - allowed for hexagonal systems') - if(ip == 2): + allowed for hexagonal systems' + ) + if ip == 2: mask1 = hkllist[:, 0] == hkllist[:, 1] - mask2 = hkllist[:, 0] == -2*hkllist[:, 1] - mask3 = -2*hkllist[:, 0] == hkllist[:, 1] - mask4 = np.mod(hkllist[:, 2]+100, 2) != 0 + mask2 = hkllist[:, 0] == -2 * hkllist[:, 1] + mask3 = -2 * hkllist[:, 0] == hkllist[:, 1] + mask4 = np.mod(hkllist[:, 2] + 100, 2) != 0 mask1 = np.logical_and(mask1, mask4) mask2 = np.logical_and(mask2, mask4) mask3 = np.logical_and(mask3, mask4) - mask = np.logical_not(np.logical_or( - mask1, np.logical_or(mask2, mask3))) + mask = np.logical_not( + np.logical_or(mask1, np.logical_or(mask2, mask3)) + ) - elif(ip == 1): + elif ip == 1: mask1 = hkllist[:, 1] == 0 mask2 = hkllist[:, 0] == 0 mask3 = hkllist[:, 1] == -hkllist[:, 0] - mask4 = np.mod(hkllist[:, 2]+100, 2) != 0 + mask4 = np.mod(hkllist[:, 2] + 100, 2) != 0 mask1 = np.logical_and(mask1, mask4) mask2 = np.logical_and(mask2, mask4) 
mask3 = np.logical_and(mask3, mask4) - mask = np.logical_not(np.logical_or( - mask1, np.logical_or(mask2, mask3))) + mask = np.logical_not( + np.logical_or(mask1, np.logical_or(mask2, mask3)) + ) hkllist = hkllist[mask, :] - elif(latticeType == 'cubic'): - if(ip == 0): + elif latticeType == 'cubic': + if ip == 0: mask1 = hkllist[:, 0] == 0 mask2 = hkllist[:, 1] == 0 mask3 = hkllist[:, 2] == 0 - mask4 = np.mod(hkllist[:, 0]+100, 2) != 0 - mask5 = np.mod(hkllist[:, 1]+100, 2) != 0 - mask6 = np.mod(hkllist[:, 2]+100, 2) != 0 - if(plane == 'a'): - mask1 = np.logical_or(np.logical_and( - mask1, mask5), np.logical_and(mask1, mask6)) - mask2 = np.logical_or(np.logical_and( - mask2, mask4), np.logical_and(mask2, mask6)) + mask4 = np.mod(hkllist[:, 0] + 100, 2) != 0 + mask5 = np.mod(hkllist[:, 1] + 100, 2) != 0 + mask6 = np.mod(hkllist[:, 2] + 100, 2) != 0 + if plane == 'a': + mask1 = np.logical_or( + np.logical_and(mask1, mask5), np.logical_and(mask1, mask6) + ) + mask2 = np.logical_or( + np.logical_and(mask2, mask4), np.logical_and(mask2, mask6) + ) mask3 = np.logical_and(mask3, mask4) - mask = np.logical_not(np.logical_or( - mask1, np.logical_or(mask2, mask3))) - elif(plane == 'b'): + mask = np.logical_not( + np.logical_or(mask1, np.logical_or(mask2, mask3)) + ) + elif plane == 'b': mask1 = np.logical_and(mask1, mask5) mask3 = np.logical_and(mask3, mask5) mask = np.logical_not(np.logical_or(mask1, mask3)) - elif(plane == 'c'): + elif plane == 'c': mask1 = np.logical_and(mask1, mask6) mask2 = np.logical_and(mask2, mask6) mask = np.logical_not(np.logical_or(mask1, mask2)) - elif(plane == 'n'): - mask4 = np.mod(hkllist[:, 1]+hkllist[:, 2]+100, 2) != 0 - mask5 = np.mod(hkllist[:, 0]+hkllist[:, 2]+100, 2) != 0 - mask6 = np.mod(hkllist[:, 0]+hkllist[:, 1]+100, 2) != 0 + elif plane == 'n': + mask4 = np.mod(hkllist[:, 1] + hkllist[:, 2] + 100, 2) != 0 + mask5 = np.mod(hkllist[:, 0] + hkllist[:, 2] + 100, 2) != 0 + mask6 = np.mod(hkllist[:, 0] + hkllist[:, 1] + 100, 2) != 0 mask1 
= np.logical_not(np.logical_and(mask1, mask4)) mask2 = np.logical_not(np.logical_and(mask2, mask5)) mask3 = np.logical_not(np.logical_and(mask3, mask6)) - mask = ~np.logical_or( - ~mask1, np.logical_or(~mask2, ~mask3)) - elif(plane == 'd'): - mask4 = np.mod(hkllist[:, 1]+hkllist[:, 2]+100, 4) != 0 - mask5 = np.mod(hkllist[:, 0]+hkllist[:, 2]+100, 4) != 0 - mask6 = np.mod(hkllist[:, 0]+hkllist[:, 1]+100, 4) != 0 + mask = ~np.logical_or(~mask1, np.logical_or(~mask2, ~mask3)) + elif plane == 'd': + mask4 = np.mod(hkllist[:, 1] + hkllist[:, 2] + 100, 4) != 0 + mask5 = np.mod(hkllist[:, 0] + hkllist[:, 2] + 100, 4) != 0 + mask6 = np.mod(hkllist[:, 0] + hkllist[:, 1] + 100, 4) != 0 mask1 = np.logical_not(np.logical_and(mask1, mask4)) mask2 = np.logical_not(np.logical_and(mask2, mask5)) mask3 = np.logical_not(np.logical_and(mask3, mask6)) - mask = ~np.logical_or( - ~mask1, np.logical_or(~mask2, ~mask3)) + mask = ~np.logical_or(~mask1, np.logical_or(~mask2, ~mask3)) else: raise RuntimeError( 'omitglideplaneabsences: unknown glide \ - plane encountered.') + plane encountered.' 
+ ) hkllist = hkllist[mask, :] - if(ip == 2): + if ip == 2: mask1 = np.abs(hkllist[:, 0]) == np.abs(hkllist[:, 1]) mask2 = np.abs(hkllist[:, 1]) == np.abs(hkllist[:, 2]) mask3 = np.abs(hkllist[:, 0]) == np.abs(hkllist[:, 2]) - if(plane in ['a', 'b', 'c', 'n']): - mask4 = np.mod(hkllist[:, 2]+100, 2) != 0 - mask5 = np.mod(hkllist[:, 0]+100, 2) != 0 - mask6 = np.mod(hkllist[:, 1]+100, 2) != 0 - elif(plane == 'd'): - mask4 = np.mod(2*hkllist[:, 0]+hkllist[:, 2]+100, 4) != 0 - mask5 = np.mod(hkllist[:, 0]+2*hkllist[:, 1]+100, 4) != 0 - mask6 = np.mod(2*hkllist[:, 0]+hkllist[:, 1]+100, 4) != 0 + if plane in ['a', 'b', 'c', 'n']: + mask4 = np.mod(hkllist[:, 2] + 100, 2) != 0 + mask5 = np.mod(hkllist[:, 0] + 100, 2) != 0 + mask6 = np.mod(hkllist[:, 1] + 100, 2) != 0 + elif plane == 'd': + mask4 = np.mod(2 * hkllist[:, 0] + hkllist[:, 2] + 100, 4) != 0 + mask5 = np.mod(hkllist[:, 0] + 2 * hkllist[:, 1] + 100, 4) != 0 + mask6 = np.mod(2 * hkllist[:, 0] + hkllist[:, 1] + 100, 4) != 0 else: raise RuntimeError( 'omitglideplaneabsences: unknown glide \ - plane encountered.') + plane encountered.' + ) mask1 = np.logical_not(np.logical_and(mask1, mask4)) mask2 = np.logical_not(np.logical_and(mask2, mask5)) mask3 = np.logical_not(np.logical_and(mask3, mask6)) @@ -808,14 +833,15 @@ def NonSymmorphicAbsences(sgnum, hkllist): """ planes = constants.SYS_AB[sgnum][0] for ip, p in enumerate(planes): - if(p != ''): + if p != '': hkllist = omitglideplaneabsences(sgnum, hkllist, p, ip) axes = constants.SYS_AB[sgnum][1] for iax, ax in enumerate(axes): - if(ax != ''): + if ax != '': hkllist = omitscrewaxisabsences(sgnum, hkllist, ax, iax) return hkllist + # # ================================================== HKL Enumeration # @@ -826,28 +852,32 @@ def _getHKLsBySS(ss): ss - (int) sum of squares -""" + """ + # # NOTE: the loop below could be speeded up by requiring # h >= k > = l, and then applying all permutations # and sign changes. Could possibly save up to # a factor of 48. 
# - def pmrange(n): return list(range(n, -(n+1), -1)) # plus/minus range - def iroot(n): return int(floor(sqrt(n))) # integer square root + def pmrange(n): + return list(range(n, -(n + 1), -1)) # plus/minus range + + def iroot(n): + return int(floor(sqrt(n))) # integer square root hkls = [] hmax = iroot(ss) for h in pmrange(hmax): - ss2 = ss - h*h + ss2 = ss - h * h kmax = iroot(ss2) for k in pmrange(kmax): - rem = ss2 - k*k + rem = ss2 - k * k if rem == 0: hkls.append((h, k, 0)) else: l = iroot(rem) - if l*l == rem: + if l * l == rem: hkls += [(h, k, l), (h, k, -l)] return hkls @@ -868,10 +898,12 @@ def testHKLs(): print('==================== Titanium (194)') ssmax = 20 myHKLs = sg.getHKLs(ssmax) - print('Number of HKLs with sum of square %d or less: %d' - % (ssmax, len(myHKLs))) + print( + 'Number of HKLs with sum of square %d or less: %d' + % (ssmax, len(myHKLs)) + ) for hkl in myHKLs: - ss = hkl[0]**2 + hkl[1]**2 + hkl[2]**2 + ss = hkl[0] ** 2 + hkl[1] ** 2 + hkl[2] ** 2 print((hkl, ss)) # @@ -881,10 +913,12 @@ def testHKLs(): print('==================== Ruby (167)') ssmax = 10 myHKLs = sg.getHKLs(ssmax) - print('Number of HKLs with sum of square %d or less: %d' - % (ssmax, len(myHKLs))) + print( + 'Number of HKLs with sum of square %d or less: %d' + % (ssmax, len(myHKLs)) + ) for hkl in myHKLs: - ss = hkl[0]**2 + hkl[1]**2 + hkl[2]**2 + ss = hkl[0] ** 2 + hkl[1] ** 2 + hkl[2] ** 2 print((hkl, ss)) # # Test Generic HKLs @@ -899,6 +933,7 @@ def testHKLs(): if __name__ == '__main__': # import sys + # if 'testHKLs' in sys.argv: testHKLs() diff --git a/hexrd/material/symbols.py b/hexrd/core/material/symbols.py similarity index 94% rename from hexrd/material/symbols.py rename to hexrd/core/material/symbols.py index 637a5136a..45c55b292 100644 --- a/hexrd/material/symbols.py +++ b/hexrd/core/material/symbols.py @@ -1,4 +1,3 @@ - pstr_mkxtal = "\n\n This is a program to create a HDF5 file for storing crystallographic information.\n " pstr_mkxtal = pstr_mkxtal + " The 
following inputs are required:\n " pstr_mkxtal = pstr_mkxtal + " Crystal System:\n" @@ -10,19 +9,47 @@ pstr_mkxtal = pstr_mkxtal + " 6. Monoclinic\n" pstr_mkxtal = pstr_mkxtal + " 7. Triclinic\n\n" pstr_mkxtal = pstr_mkxtal + " Space group number\n" -pstr_mkxtal = pstr_mkxtal + " Atomic number (Z) for all species in unit cell\n" -pstr_mkxtal = pstr_mkxtal + " Asymmetric positions for all atoms in unit cell\n" -pstr_mkxtal = pstr_mkxtal + " Debye-Waller factors for all atoms in the unit cell\n" -pstr_mkxtal = pstr_mkxtal + " You'll be prompted for these values now\n\n" +pstr_mkxtal = ( + pstr_mkxtal + " Atomic number (Z) for all species in unit cell\n" +) +pstr_mkxtal = ( + pstr_mkxtal + " Asymmetric positions for all atoms in unit cell\n" +) +pstr_mkxtal = ( + pstr_mkxtal + + " Debye-Waller factors for all atoms in the unit cell\n" +) +pstr_mkxtal = ( + pstr_mkxtal + " You'll be prompted for these values now\n\n" +) pstr_mkxtal = pstr_mkxtal + "\n Note about the trigonal system:\n" pstr_mkxtal = pstr_mkxtal + " -------------------------------\n" -pstr_mkxtal = pstr_mkxtal + " Primitive trigonal crystals are defined with respect to a HEXAGONAL\n" -pstr_mkxtal = pstr_mkxtal + " reference frame. Rhombohedral crystals can be referenced with\n" -pstr_mkxtal = pstr_mkxtal + " respect to a HEXAGONAL basis (first setting), or with respect to\n" -pstr_mkxtal = pstr_mkxtal + " a RHOMBOHEDRAL basis (second setting). The default setting for\n" -pstr_mkxtal = pstr_mkxtal + " trigonal symmetry is the hexagonal setting. When you select\n" -pstr_mkxtal = pstr_mkxtal + " crystal system 5 above, you will be prompted for the setting. \n" +pstr_mkxtal = ( + pstr_mkxtal + + " Primitive trigonal crystals are defined with respect to a HEXAGONAL\n" +) +pstr_mkxtal = ( + pstr_mkxtal + + " reference frame. 
Rhombohedral crystals can be referenced with\n" +) +pstr_mkxtal = ( + pstr_mkxtal + + " respect to a HEXAGONAL basis (first setting), or with respect to\n" +) +pstr_mkxtal = ( + pstr_mkxtal + + " a RHOMBOHEDRAL basis (second setting). The default setting for\n" +) +pstr_mkxtal = ( + pstr_mkxtal + + " trigonal symmetry is the hexagonal setting. When you select\n" +) +pstr_mkxtal = ( + pstr_mkxtal + + " crystal system 5 above, you will be prompted for the setting. \n" +) +# fmt: off pstr_spacegroup = [ " P 1 ", " P -1 ", \ # MONOCLINIC SPACE GROUPS @@ -93,18 +120,30 @@ # TRIGONAL GROUPS RHOMBOHEDRAL SETTING " R 3 |146", " R -3 |148", " R 3 2 |155", " R 3 m |160", \ " R 3 c |161", " R -3 m|166", " R -3 c|167"] +# fmt: on - -xtal_dict = {1: 'cubic', 2: 'tetragonal', 3: 'orthorhombic', - 4: 'hexagonal', 5: 'trigonal', 6: 'monoclinic', - 7: 'triclinic'} -xtal_sys_dict = {'cubic': 1, 'tetragonal': 2, 'orthorhombic': 3, - 'hexagonal': 4, 'trigonal': 5, 'monoclinic': 6, - 'triclinic': 7} - +xtal_dict = { + 1: 'cubic', + 2: 'tetragonal', + 3: 'orthorhombic', + 4: 'hexagonal', + 5: 'trigonal', + 6: 'monoclinic', + 7: 'triclinic', +} +xtal_sys_dict = { + 'cubic': 1, + 'tetragonal': 2, + 'orthorhombic': 3, + 'hexagonal': 4, + 'trigonal': 5, + 'monoclinic': 6, + 'triclinic': 7, +} +# fmt: off pstr_pointgroup = [ ' 1', ' -1', ' 2', ' m', ' 2/m', ' 222', ' mm2', ' mmm', ' 4', ' -4', ' 4/m', ' 422', @@ -112,12 +151,13 @@ ' 3m', ' -3m', ' 6', ' -6', ' 6/m', ' 622', ' 6mm', ' -6m2', '6/mmm', ' 23', ' m3', ' 432', ' -43m', ' m-3m', ' 532', ' 822', ' 1022', ' 1222'] - +# fmt: on TRIG = [146, 148, 155, 160, 161, 166, 167] +# fmt: off # symbols and Z for all elements pstr_Elements = ' ------------------------------------ Periodic Table of the Elements --------------------------------------' + "\n" \ '1:H 2:He' + "\n" \ @@ -135,7 +175,7 @@ which have two origin choices the two values are the site symmetries of the origin. 
There are 24 such space groups''' - +# fmt: on two_origin_choice = { 48: ['222', '-1'], @@ -166,32 +206,32 @@ def PrintPossibleSG(xtal_sys): - if(xtal_sys == 1): + if xtal_sys == 1: sgmax = 230 sgmin = 195 - elif(xtal_sys == 2): + elif xtal_sys == 2: sgmax = 142 sgmin = 75 - elif(xtal_sys == 3): + elif xtal_sys == 3: sgmax = 74 sgmin = 16 - elif(xtal_sys == 4): + elif xtal_sys == 4: sgmax = 194 sgmin = 168 - elif(xtal_sys == 5): + elif xtal_sys == 5: sgmax = 167 sgmin = 143 - elif(xtal_sys == 6): + elif xtal_sys == 6: sgmax = 15 sgmin = 3 - elif(xtal_sys == 7): + elif xtal_sys == 7: sgmax = 2 sgmin = 1 - for i in range(sgmin, sgmax+1): + for i in range(sgmin, sgmax + 1): j = i - sgmin + 1 - pstr = str(i) + ":" + pstr_spacegroup[i-1] + "\t" - if(j % 4 == 0 or j == sgmax): + pstr = str(i) + ":" + pstr_spacegroup[i - 1] + "\t" + if j % 4 == 0 or j == sgmax: print(pstr) else: print(pstr, end='') @@ -1299,4 +1339,4 @@ def _buildDict(hstr): lookupHall, Hall_to_sgnum = _buildDict(HALL_STR) -lookupHM, HM_to_sgnum = _buildDict(HM_STR) +lookupHM, HM_to_sgnum = _buildDict(HM_STR) diff --git a/hexrd/material/symmetry.py b/hexrd/core/material/symmetry.py similarity index 81% rename from hexrd/material/symmetry.py rename to hexrd/core/material/symmetry.py index 0d614f7ff..ad312ea9f 100644 --- a/hexrd/material/symmetry.py +++ b/hexrd/core/material/symmetry.py @@ -31,19 +31,19 @@ import numpy as np from numba import njit -from numpy import (array, sqrt, pi, - vstack, c_, dot, - argmax) +from numpy import array, sqrt, pi, vstack, c_, dot, argmax -# from hexrd.rotations import quatOfAngleAxis, quatProductMatrix, fixQuat -from hexrd import rotations as rot -from hexrd import constants -from hexrd.utils.decorators import memoize +# from hexrd.core.rotations import quatOfAngleAxis, quatProductMatrix, fixQuat +from hexrd.core import rotations as rot +from hexrd.core import constants +from hexrd.core.utils.decorators import memoize # Imports in case others are importing from here -from 
hexrd.rotations import (toFundamentalRegion, - ltypeOfLaueGroup, - quatOfLaueGroup) +from hexrd.core.rotations import ( + toFundamentalRegion, + ltypeOfLaueGroup, + quatOfLaueGroup, +) # ============================================================================= @@ -51,11 +51,11 @@ # ============================================================================= eps = constants.sqrt_epsf -sq3by2 = sqrt(3.)/2. -piby2 = pi/2. -piby3 = pi/3. -piby4 = pi/4. -piby6 = pi/6. +sq3by2 = sqrt(3.0) / 2.0 +piby2 = pi / 2.0 +piby3 = pi / 3.0 +piby4 = pi / 4.0 +piby6 = pi / 6.0 # ============================================================================= @@ -72,7 +72,7 @@ def GeneratorString(sgnum): ... and so on ''' - sg = sgnum-1 + sg = sgnum - 1 # sgdict = {146:231, 148:232, 155:233, 160:234, 161:235, 166:236, 167:237} # if(sgnum in sgdict): # sg = sgdict[sgnum]-1 @@ -90,17 +90,17 @@ def MakeGenerators(genstr, setting): centrosymmetric = False # check if space group has inversion symmetry - if(genstr[0] == '1'): + if genstr[0] == '1': t = 'hOOO' mat = SYM_fillgen(t) genmat = np.concatenate((genmat, mat)) centrosymmetric = True n = int(genstr[1]) - if(n > 0): + if n > 0: for i in range(n): istart = 2 + i * 4 - istop = 2 + (i+1) * 4 + istop = 2 + (i + 1) * 4 t = genstr[istart:istop] @@ -112,20 +112,20 @@ def MakeGenerators(genstr, setting): if there is an alternate setting for this space group check if the alternate setting needs to be used ''' - if(genstr[istop] != '0'): - if(setting != 0): - t = genstr[istop+1:istop+4] + if genstr[istop] != '0': + if setting != 0: + t = genstr[istop + 1 : istop + 4] t = 'a' + t # get the translation without any rotation sym = np.squeeze(SYM_fillgen(t, sgn=-1)) sym2 = np.squeeze(SYM_fillgen(t)) for i in range(1, genmat.shape[0]): - generator = np.dot(sym2, np.dot( - np.squeeze(genmat[i, :, :]), - sym)) + generator = np.dot( + sym2, np.dot(np.squeeze(genmat[i, :, :]), sym) + ) frac = np.modf(generator[0:3, 3])[0] - frac[frac < 0.] += 1. 
- frac[np.abs(frac) < 1E-5] = 0.0 - frac[np.abs(frac-1.0) < 1E-5] = 0.0 + frac[frac < 0.0] += 1.0 + frac[np.abs(frac) < 1e-5] = 0.0 + frac[np.abs(frac - 1.0) < 1e-5] = 0.0 generator[0:3, 3] = frac genmat[i, :, :] = generator @@ -134,13 +134,16 @@ def MakeGenerators(genstr, setting): def SYM_fillgen(t, sgn=1): mat = np.zeros([4, 4]) - mat[3, 3] = 1. + mat[3, 3] = 1.0 mat[0:3, 0:3] = constants.SYM_GENERATORS[t[0]] - mat[0:3, 3] = sgn*np.array([constants.SYM_GENERATORS[t[1]], - constants.SYM_GENERATORS[t[2]], - constants.SYM_GENERATORS[t[3]] - ]) + mat[0:3, 3] = sgn * np.array( + [ + constants.SYM_GENERATORS[t[1]], + constants.SYM_GENERATORS[t[2]], + constants.SYM_GENERATORS[t[3]], + ] + ) mat = np.broadcast_to(mat, [1, 4, 4]) return mat @@ -155,7 +158,7 @@ def GenerateSGSym(sgnum, setting=0): genstr = GeneratorString(sgnum) genmat, centrosymmetric = MakeGenerators(genstr, setting) symmorphic = False - if(sgnum in constants.sgnum_symmorphic): + if sgnum in constants.sgnum_symmorphic: symmorphic = True ''' use the generator string to get the rest of the @@ -183,17 +186,17 @@ def GenerateSGSym(sgnum, setting=0): # only fractional parts frac = np.modf(gnew[0:3, 3])[0] - frac[frac < 0.] += 1. 
- frac[np.abs(frac) < 1E-5] = 0.0 - frac[np.abs(frac-1.0) < 1E-5] = 0.0 + frac[frac < 0.0] += 1.0 + frac[np.abs(frac) < 1e-5] = 0.0 + frac[np.abs(frac - 1.0) < 1e-5] = 0.0 gnew[0:3, 3] = frac - if(isnew(gnew, SYM_SG)): + if isnew(gnew, SYM_SG): gnew = np.broadcast_to(gnew, [1, 4, 4]) SYM_SG = np.concatenate((SYM_SG, gnew)) nsym += 1 - if (nsym >= 192): + if nsym >= 192: k2 = nsym k1 = nsym @@ -204,7 +207,7 @@ def GenerateSGSym(sgnum, setting=0): SYM_PG_d_laue = GeneratePGSym_Laue(SYM_PG_d) for s in SYM_PG_d: - if(np.allclose(-np.eye(3), s)): + if np.allclose(-np.eye(3), s): centrosymmetric = True return SYM_SG, SYM_PG_d, SYM_PG_d_laue, centrosymmetric, symmorphic @@ -230,7 +233,7 @@ def GeneratePGSym(SYM_SG): g = SYM_SG[i, :, :] t = g[0:3, 3] g = g[0:3, 0:3] - if(isnew(g, SYM_PG_d)): + if isnew(g, SYM_PG_d): g = np.broadcast_to(g, [1, 3, 3]) SYM_PG_d = np.concatenate((SYM_PG_d, g)) @@ -250,7 +253,7 @@ def GeneratePGSym_Laue(SYM_PG_d): first check if the group already has the inversion symmetry ''' for s in SYM_PG_d: - if(np.allclose(s, -np.eye(3))): + if np.allclose(s, -np.eye(3)): return SYM_PG_d ''' @@ -274,12 +277,12 @@ def GeneratePGSym_Laue(SYM_PG_d): g2 = np.squeeze(SYM_PG_d_laue[k2, :, :]) gnew = np.dot(g1, g2) - if(isnew(gnew, SYM_PG_d_laue)): + if isnew(gnew, SYM_PG_d_laue): gnew = np.broadcast_to(gnew, [1, 3, 3]) SYM_PG_d_laue = np.concatenate((SYM_PG_d_laue, gnew)) nsym += 1 - if (nsym >= 48): + if nsym >= 48: k2 = nsym k1 = nsym @@ -300,19 +303,19 @@ def isnew(mat, sym_mats): def latticeType(sgnum): - if(sgnum <= 2): + if sgnum <= 2: return 'triclinic' - elif(sgnum > 2 and sgnum <= 15): + elif sgnum > 2 and sgnum <= 15: return 'monoclinic' - elif(sgnum > 15 and sgnum <= 74): + elif sgnum > 15 and sgnum <= 74: return 'orthorhombic' - elif(sgnum > 74 and sgnum <= 142): + elif sgnum > 74 and sgnum <= 142: return 'tetragonal' - elif(sgnum > 142 and sgnum <= 167): + elif sgnum > 142 and sgnum <= 167: return 'trigonal' - elif(sgnum > 167 and sgnum <= 194): + 
elif sgnum > 167 and sgnum <= 194: return 'hexagonal' - elif(sgnum > 194 and sgnum <= 230): + elif sgnum > 194 and sgnum <= 230: return 'cubic' else: raise RuntimeError('symmetry.latticeType: unknown space group number') @@ -329,7 +332,7 @@ def MakeGenerators_PGSYM(pggenstr): SYM_GEN_PG = np.zeros([ngen, 3, 3]) for i in range(ngen): - s = pggenstr[i+1] + s = pggenstr[i + 1] SYM_GEN_PG[i, :, :] = constants.SYM_GENERATORS[s] return SYM_GEN_PG @@ -362,18 +365,18 @@ def GeneratePGSYM(pgsym): g2 = np.squeeze(SYM_GEN_PG[k2, :, :]) gnew = np.dot(g1, g2) - if(isnew(gnew, SYM_GEN_PG)): + if isnew(gnew, SYM_GEN_PG): gnew = np.broadcast_to(gnew, [1, 3, 3]) SYM_GEN_PG = np.concatenate((SYM_GEN_PG, gnew)) nsym += 1 - if (nsym >= 48): + if nsym >= 48: k2 = nsym k1 = nsym k2 += 1 k1 += 1 - SYM_GEN_PG[np.abs(SYM_GEN_PG) < eps] = 0. + SYM_GEN_PG[np.abs(SYM_GEN_PG) < eps] = 0.0 return SYM_GEN_PG diff --git a/hexrd/material/unitcell.py b/hexrd/core/material/unitcell.py similarity index 80% rename from hexrd/material/unitcell.py rename to hexrd/core/material/unitcell.py index 576f65fde..74bffed30 100644 --- a/hexrd/material/unitcell.py +++ b/hexrd/core/material/unitcell.py @@ -1,11 +1,13 @@ import importlib.resources import numpy as np from numba import njit -from hexrd import constants -from hexrd.material import spacegroup, symbols, symmetry -from hexrd.ipfcolor import sphere_sector, colorspace -from hexrd.valunits import valWUnit -import hexrd.resources +from hexrd.core import constants +from hexrd.core.material import spacegroup, symbols, symmetry + +# TODO: Resolve extra-core-dependency +from hexrd.hedm.ipfcolor import sphere_sector, colorspace +from hexrd.core.valunits import valWUnit +import hexrd.core.resources import warnings import h5py from pathlib import Path @@ -39,7 +41,7 @@ def _calcstar(v, sym, mat): for vec in vsym: vv = vp - vec dist = _calclength(vv, mat) - if dist < 1E-3: + if dist < 1e-3: isnew = False break if isnew: @@ -50,7 +52,6 @@ def _calcstar(v, sym, mat): 
class unitcell: - ''' >> @AUTHOR: Saransh Singh, Lawrence Livermore National Lab, saransh1@llnl.gov >> @DATE: 10/09/2018 SS 1.0 original @@ -61,11 +62,18 @@ class unitcell: # initialize the unitcell class # need lattice parameters and space group data from HDF5 file - def __init__(self, lp, sgnum, - atomtypes, charge, - atominfo, - U, dmin, beamenergy, - sgsetting=0): + def __init__( + self, + lp, + sgnum, + atomtypes, + charge, + atominfo, + U, + dmin, + beamenergy, + sgsetting=0, + ): self._tstart = time.time() self.pref = 0.4178214 @@ -115,10 +123,12 @@ def GetPgLg(self): def CalcWavelength(self): # wavelength in nm - self.wavelength = constants.cPlanck * \ - constants.cLight / \ - constants.cCharge / \ - self.voltage + self.wavelength = ( + constants.cPlanck + * constants.cLight + / constants.cCharge + / self.voltage + ) self.wavelength *= 1e9 def calcBetaij(self): @@ -126,11 +136,11 @@ def calcBetaij(self): self.betaij = np.zeros([3, 3, self.atom_ntype]) for i in range(self.U.shape[0]): U = self.U[i, :] - self.betaij[:, :, i] = np.array([[U[0], U[3], U[4]], - [U[3], U[1], U[5]], - [U[4], U[5], U[2]]]) + self.betaij[:, :, i] = np.array( + [[U[0], U[3], U[4]], [U[3], U[1], U[5]], [U[4], U[5], U[2]]] + ) - self.betaij[:, :, i] *= 2. 
* np.pi**2 * self._aij + self.betaij[:, :, i] *= 2.0 * np.pi**2 * self._aij def calcmatrices(self): @@ -153,9 +163,13 @@ def calcmatrices(self): ''' direct metric tensor ''' - self._dmt = np.array([[a**2, a*b*cg, a*c*cb], - [a*b*cg, b**2, b*c*ca], - [a*c*cb, b*c*ca, c**2]]) + self._dmt = np.array( + [ + [a**2, a * b * cg, a * c * cb], + [a * b * cg, b**2, b * c * ca], + [a * c * cb, b * c * ca, c**2], + ] + ) self._vol = np.sqrt(np.linalg.det(self.dmt)) if self.vol < 1e-5: @@ -169,30 +183,44 @@ def calcmatrices(self): ''' direct structure matrix ''' - self._dsm = np.array([[a, b*cg, c*cb], - [0., b*sg, -c*(cb*cg - ca)/sg], - [0., 0., self.vol/(a*b*sg)]]) + self._dsm = np.array( + [ + [a, b * cg, c * cb], + [0.0, b * sg, -c * (cb * cg - ca) / sg], + [0.0, 0.0, self.vol / (a * b * sg)], + ] + ) - self._dsm[np.abs(self._dsm) < eps] = 0. + self._dsm[np.abs(self._dsm) < eps] = 0.0 ''' reciprocal structure matrix ''' - self._rsm = np.array([[1./a, 0., 0.], - [-1./(a*tg), 1./(b*sg), 0.], - [b*c*(cg*ca - cb)/(self.vol*sg), - a*c*(cb*cg - ca)/(self.vol*sg), - a*b*sg/self.vol]]) + self._rsm = np.array( + [ + [1.0 / a, 0.0, 0.0], + [-1.0 / (a * tg), 1.0 / (b * sg), 0.0], + [ + b * c * (cg * ca - cb) / (self.vol * sg), + a * c * (cb * cg - ca) / (self.vol * sg), + a * b * sg / self.vol, + ], + ] + ) - self._rsm[np.abs(self._rsm) < eps] = 0. + self._rsm[np.abs(self._rsm) < eps] = 0.0 ast = self.CalcLength([1, 0, 0], 'r') bst = self.CalcLength([0, 1, 0], 'r') cst = self.CalcLength([0, 0, 1], 'r') - self._aij = np.array([[ast**2, ast*bst, ast*cst], - [bst*ast, bst**2, bst*cst], - [cst*ast, cst*bst, cst**2]]) + self._aij = np.array( + [ + [ast**2, ast * bst, ast * cst], + [bst * ast, bst**2, bst * cst], + [cst * ast, cst * bst, cst**2], + ] + ) ''' transform between any crystal space to any other space. 
choices are 'd' (direct), 'r' (reciprocal) and 'c' (cartesian)''' @@ -207,7 +235,8 @@ def TransSpace(self, v_in, inspace, outspace): v_out = np.dot(self.dsm, v_in) else: raise ValueError( - 'inspace in "d" but outspace can\'t be identified') + 'inspace in "d" but outspace can\'t be identified' + ) elif inspace == 'r': if outspace == 'd': @@ -216,7 +245,8 @@ def TransSpace(self, v_in, inspace, outspace): v_out = np.dot(self.rsm, v_in) else: raise ValueError( - 'inspace in "r" but outspace can\'t be identified') + 'inspace in "r" but outspace can\'t be identified' + ) elif inspace == 'c': if outspace == 'r': @@ -225,7 +255,8 @@ def TransSpace(self, v_in, inspace, outspace): v_out = np.dot(v_in, self.rsm) else: raise ValueError( - 'inspace in "c" but outspace can\'t be identified') + 'inspace in "c" but outspace can\'t be identified' + ) else: raise ValueError('incorrect inspace argument') @@ -268,7 +299,7 @@ def CalcLength(self, u, space): def NormVec(self, u, space): ulen = self.CalcLength(u, space) - return u/ulen + return u / ulen ''' calculate angle between two vectors in any space''' @@ -277,7 +308,7 @@ def CalcAngle(self, u, v, space): ulen = self.CalcLength(u, space) vlen = self.CalcLength(v, space) - dot = self.CalcDot(u, v, space)/ulen/vlen + dot = self.CalcDot(u, v, space) / ulen / vlen if np.isclose(np.abs(dot), 1.0): dot = np.sign(dot) angle = np.arccos(dot) @@ -304,9 +335,13 @@ def CalcCross(self, p, q, inspace, outspace, vol_divide=False): else: vol = 1.0 - pxq = np.array([p[1]*q[2]-p[2]*q[1], - p[2]*q[0]-p[0]*q[2], - p[0]*q[1]-p[1]*q[0]]) + pxq = np.array( + [ + p[1] * q[2] - p[2] * q[1], + p[2] * q[0] - p[0] * q[2], + p[0] * q[1] - p[1] * q[0], + ] + ) if inspace == 'd': ''' @@ -323,7 +358,8 @@ def CalcCross(self, p, q, inspace, outspace, vol_divide=False): pxq = self.TransSpace(pxq, 'r', 'c') else: raise ValueError( - 'inspace is ''d'' but outspace is unidentified') + 'inspace is ' 'd' ' but outspace is unidentified' + ) elif inspace == 'r': ''' @@ 
-339,7 +375,8 @@ def CalcCross(self, p, q, inspace, outspace, vol_divide=False): pxq = self.TransSpace(pxq, 'd', 'c') else: raise ValueError( - 'inspace is ''r'' but outspace is unidentified') + 'inspace is ' 'r' ' but outspace is unidentified' + ) elif inspace == 'c': ''' @@ -355,7 +392,8 @@ def CalcCross(self, p, q, inspace, outspace, vol_divide=False): pass else: raise ValueError( - 'inspace is ''c'' but outspace is unidentified') + 'inspace is ' 'c' ' but outspace is unidentified' + ) else: raise ValueError('inspace is unidentified') @@ -398,16 +436,17 @@ def GenerateCartesianPGSym(self): self.SYM_PG_c.append(np.dot(self.dsm, np.dot(sop, self.rsm.T))) self.SYM_PG_c = np.array(self.SYM_PG_c) - self.SYM_PG_c[np.abs(self.SYM_PG_c) < eps] = 0. + self.SYM_PG_c[np.abs(self.SYM_PG_c) < eps] = 0.0 if self._pointGroup == self._laueGroup: self.SYM_PG_c_laue = self.SYM_PG_c else: for sop in self.SYM_PG_d_laue: self.SYM_PG_c_laue.append( - np.dot(self.dsm, np.dot(sop, self.rsm.T))) + np.dot(self.dsm, np.dot(sop, self.rsm.T)) + ) self.SYM_PG_c_laue = np.array(self.SYM_PG_c_laue) - self.SYM_PG_c_laue[np.abs(self.SYM_PG_c_laue) < eps] = 0. + self.SYM_PG_c_laue[np.abs(self.SYM_PG_c_laue) < eps] = 0.0 ''' use the point group symmetry of the supergroup @@ -440,18 +479,21 @@ def GenerateCartesianPGSym(self): for sop in sym_supergroup: self.SYM_PG_supergroup.append( - np.dot(self.dsm, np.dot(sop, self.rsm.T))) + np.dot(self.dsm, np.dot(sop, self.rsm.T)) + ) self.SYM_PG_supergroup = np.array(self.SYM_PG_supergroup) - self.SYM_PG_supergroup[np.abs(self.SYM_PG_supergroup) < eps] = 0. + self.SYM_PG_supergroup[np.abs(self.SYM_PG_supergroup) < eps] = 0.0 for sop in sym_supergroup_laue: self.SYM_PG_supergroup_laue.append( - np.dot(self.dsm, np.dot(sop, self.rsm.T))) + np.dot(self.dsm, np.dot(sop, self.rsm.T)) + ) self.SYM_PG_supergroup_laue = np.array(self.SYM_PG_supergroup_laue) - self.SYM_PG_supergroup_laue[np.abs( - self.SYM_PG_supergroup_laue) < eps] = 0. 
+ self.SYM_PG_supergroup_laue[ + np.abs(self.SYM_PG_supergroup_laue) < eps + ] = 0.0 ''' the standard setting for the monoclinic system has the b-axis aligned @@ -465,7 +507,7 @@ def GenerateCartesianPGSym(self): ''' if self.latticeType == 'monoclinic': - om = np.array([[1., 0., 0.], [0., 0., 1.], [0., -1., 0.]]) + om = np.array([[1.0, 0.0, 0.0], [0.0, 0.0, 1.0], [0.0, -1.0, 0.0]]) for i, s in enumerate(self.SYM_PG_c): ss = np.dot(om, np.dot(s, om.T)) @@ -482,7 +524,7 @@ def GenerateCartesianPGSym(self): SS 12/10/2020 ''' if self._pointGroup == 'c1': - om = np.array([[1., 0., 0.], [0., 0., 1.], [0., -1., 0.]]) + om = np.array([[1.0, 0.0, 0.0], [0.0, 0.0, 1.0], [0.0, -1.0, 0.0]]) for i, s in enumerate(self.SYM_PG_supergroup): ss = np.dot(om, np.dot(s, om.T)) @@ -511,7 +553,7 @@ def CalcOrbit(self, v, reduceToUC=True): raise RuntimeError("fractional coordinate in not 3-d") r = v # using wigner-sietz notation - r = np.hstack((r, 1.)) + r = np.hstack((r, 1.0)) asym_pos = np.broadcast_to(r[0:3], [1, 3]) @@ -524,15 +566,15 @@ def CalcOrbit(self, v, reduceToUC=True): # reduce to fundamental unitcell with fractional # coordinates between 0-1 rr = np.modf(rr)[0] - rr[rr < 0.] += 1. - rr[np.abs(rr) < 1.0E-6] = 0. 
+ rr[rr < 0.0] += 1.0 + rr[np.abs(rr) < 1.0e-6] = 0.0 # check if this is new isnew = True for j in range(n): v = rr - asym_pos[j] dist = self.CalcLength(v, 'd') - if dist < 1E-3: + if dist < 1e-3: isnew = False break @@ -584,9 +626,7 @@ def CalcPositions(self): self.numat = np.array(numat) self.asym_pos = asym_pos - def remove_duplicate_atoms(self, - atom_pos=None, - tol=1e-3): + def remove_duplicate_atoms(self, atom_pos=None, tol=1e-3): """ @date 03/04/2021 SS 1.0 original @@ -623,12 +663,11 @@ def remove_duplicate_atoms(self, idx.append(i) else: isclose = False - for j, uniqpos in enumerate(atom_pos_fixed): + for j, uniqpos in enumerate(atom_pos_fixed): pos2 = uniqpos[0:3] occ2 = uniqpos[3] # cases with fractional occupancy on same site - if (np.all(np.isclose(pos, pos2)) and - (occ+occ2 <= 1.)): + if np.all(np.isclose(pos, pos2)) and (occ + occ2 <= 1.0): atom_pos_fixed.append(np.hstack([pos, occ])) idx.append(i) isclose = True @@ -642,7 +681,7 @@ def remove_duplicate_atoms(self, for vvv in vv: # check if distance less than tol # the factor of 10 is for A --> nm - if self.CalcLength(vvv, 'd') < tol/10.: + if self.CalcLength(vvv, 'd') < tol / 10.0: # if true then its a repeated atom isclose = True break @@ -697,11 +736,11 @@ def CalcDensity(self): occ = self.atom_pos[i, 3] # -1 due to 0 indexing in python - self.avA += numat * constants.atom_weights[atype-1] * occ + self.avA += numat * constants.atom_weights[atype - 1] * occ self.avZ += numat * atype - self.density = self.avA / (self.vol * 1.0E-21 * constants.cAvogadro) + self.density = self.avA / (self.vol * 1.0e-21 * constants.cAvogadro) av_natom = np.dot(self.numat, self.atom_pos[:, 3]) @@ -723,19 +762,25 @@ def init_max_g_index(self): def CalcMaxGIndex(self): self.init_max_g_index() - while (1.0 / self.CalcLength( - np.array([self.ih, 0, 0], - dtype=np.float64), 'r') > self.dmin): + while ( + 1.0 + / self.CalcLength(np.array([self.ih, 0, 0], dtype=np.float64), 'r') + > self.dmin + ): self.ih = self.ih + 1 - 
while (1.0 / self.CalcLength( - np.array([0, self.ik, 0], - dtype=np.float64), 'r') > self.dmin): + while ( + 1.0 + / self.CalcLength(np.array([0, self.ik, 0], dtype=np.float64), 'r') + > self.dmin + ): self.ik = self.ik + 1 - while (1.0 / self.CalcLength( - np.array([0, 0, self.il], - dtype=np.float64), 'r') > self.dmin): + while ( + 1.0 + / self.CalcLength(np.array([0, 0, self.il], dtype=np.float64), 'r') + > self.dmin + ): self.il = self.il + 1 def InitializeInterpTable(self): @@ -743,7 +788,7 @@ def InitializeInterpTable(self): f_anomalous_data = [] self.pe_cs = {} data = ( - importlib.resources.files(hexrd.resources) + importlib.resources.files(hexrd.core.resources) .joinpath('Anomalous.h5') .open('rb') ) @@ -754,15 +799,16 @@ def InitializeInterpTable(self): elem = constants.ptableinverse[Z] if Z <= 92: - gid = fid.get('/'+elem) + gid = fid.get('/' + elem) data = np.array(gid.get('data')) - self.pe_cs[elem] = interp1d(data[:, WAV_ID], - data[:, MU_ID]+data[:,COH_INCOH_ID]) + self.pe_cs[elem] = interp1d( + data[:, WAV_ID], data[:, MU_ID] + data[:, COH_INCOH_ID] + ) data = data[:, [WAV_ID, REAL_F1_ID, IMAG_F2_ID]] f_anomalous_data.append(data) else: - wav = np.linspace(1.16E2, 2.86399992e-03, 189) - zs = np.ones_like(wav)*Z + wav = np.linspace(1.16e2, 2.86399992e-03, 189) + zs = np.ones_like(wav) * Z zrs = np.zeros_like(wav) data_zs = np.vstack((wav, zs, zrs)).T self.pe_cs[elem] = interp1d(wav, zrs) @@ -771,7 +817,11 @@ def InitializeInterpTable(self): n = max([x.shape[0] for x in f_anomalous_data]) self.f_anomalous_data = np.zeros([self.atom_ntype, n, 3]) self.f_anomalous_data_sizes = np.zeros( - [self.atom_ntype, ], dtype=np.int32) + [ + self.atom_ntype, + ], + dtype=np.int32, + ) for i in range(self.atom_ntype): nd = f_anomalous_data[i].shape[0] @@ -779,20 +829,34 @@ def InitializeInterpTable(self): self.f_anomalous_data[i, :nd, :] = f_anomalous_data[i] def CalcXRSF(self, hkl): - from hexrd.wppf.xtal import _calcxrsf + # TODO: Resolve extra-core dependency + 
from hexrd.powder.wppf.xtal import _calcxrsf + ''' the 1E-2 is to convert to A^-2 since the fitting is done in those units ''' - fNT = np.zeros([self.atom_ntype, ]) - frel = np.zeros([self.atom_ntype, ]) + fNT = np.zeros( + [ + self.atom_ntype, + ] + ) + frel = np.zeros( + [ + self.atom_ntype, + ] + ) scatfac = np.zeros([self.atom_ntype, 11]) f_anomalous_data = self.f_anomalous_data hkl2d = np.atleast_2d(hkl).astype(np.float64) nref = hkl2d.shape[0] - multiplicity = np.ones([nref, ]) + multiplicity = np.ones( + [ + nref, + ] + ) w_int = 1.0 occ = self.atom_pos[:, 3] @@ -817,23 +881,25 @@ def CalcXRSF(self, hkl): frel[i] = constants.frel[elem] fNT[i] = constants.fNT[elem] - sf, sf_raw = _calcxrsf(hkl2d, - nref, - multiplicity, - w_int, - self.wavelength, - self.rmt.astype(np.float64), - self.atom_type, - self.atom_ntype, - betaij, - occ, - self.asym_pos_arr, - self.numat, - scatfac, - fNT, - frel, - f_anomalous_data, - self.f_anomalous_data_sizes) + sf, sf_raw = _calcxrsf( + hkl2d, + nref, + multiplicity, + w_int, + self.wavelength, + self.rmt.astype(np.float64), + self.atom_type, + self.atom_ntype, + betaij, + occ, + self.asym_pos_arr, + self.numat, + scatfac, + fNT, + frel, + f_anomalous_data, + self.f_anomalous_data_sizes, + ) return sf_raw @@ -844,8 +910,8 @@ def CalcXRSF(self, hkl): """ def calc_unitcell_mass(self): - a_mass = constants.atom_weights[self.atom_type-1] - return np.sum(a_mass*self.numat) + a_mass = constants.atom_weights[self.atom_type - 1] + return np.sum(a_mass * self.numat) """ calculate the number density in 1/micron^3 @@ -861,12 +927,15 @@ def calc_number_density(self): def calc_absorption_cross_sec(self): - abs_cs_total = 0. 
+ abs_cs_total = 0.0 for i in range(self.atom_ntype): Z = self.atom_type[i] elem = constants.ptableinverse[Z] - abs_cs_total += self.pe_cs[elem](self.wavelength) *\ - self.numat[i]/np.sum(self.numat) + abs_cs_total += ( + self.pe_cs[elem](self.wavelength) + * self.numat[i] + / np.sum(self.numat) + ) return abs_cs_total """ @@ -890,7 +959,7 @@ def calc_absorption_length(self): abs_cs_total = self.calc_absorption_cross_sec() # the 1e4 factor converts wavelength from cm -> micron - self.absorption_length = 1e4/(abs_cs_total*self.density) + self.absorption_length = 1e4 / (abs_cs_total * self.density) """ calculate bragg angle for a reflection. returns Nan if @@ -917,7 +986,7 @@ def ChooseSymmetric(self, hkllist, InversionSymmetry=True): geqv = self.CalcStar(g, 'r', applyLaue=laue) - for r in geqv[1:, ]: + for r in geqv[1:,]: rid = np.where(np.all(r == hkllist, axis=1)) mask[rid] = False @@ -945,8 +1014,14 @@ def SortHKL(self, hkllist): glen.append(np.round(self.CalcLength(g, 'r'), 8)) # glen = np.atleast_2d(np.array(glen,dtype=float)).T - dtype = [('glen', float), ('max', int), ('sum', int), - ('h', int), ('k', int), ('l', int)] + dtype = [ + ('glen', float), + ('max', int), + ('sum', int), + ('h', int), + ('k', int), + ('l', int), + ] a = [] for i, gl in enumerate(glen): @@ -970,16 +1045,21 @@ def getHKLs(self, dmin): ignore all l < 0 ''' - hmin = -self.ih-1 + hmin = -self.ih - 1 hmax = self.ih - kmin = -self.ik-1 + kmin = -self.ik - 1 kmax = self.ik lmin = -1 lmax = self.il - hkllist = np.array([[ih, ik, il] for ih in np.arange(hmax, hmin, -1) - for ik in np.arange(kmax, kmin, -1) - for il in np.arange(lmax, lmin, -1)]) + hkllist = np.array( + [ + [ih, ik, il] + for ih in np.arange(hmax, hmin, -1) + for ik in np.arange(kmax, kmin, -1) + for il in np.arange(lmax, lmin, -1) + ] + ) hkl_allowed = spacegroup.Allowed_HKLs(self.sgnum, hkllist) @@ -993,7 +1073,7 @@ def getHKLs(self, dmin): # ignore [0 0 0] as it is the direct beam if np.sum(np.abs(g)) != 0: - dspace = 
1./self.CalcLength(g, 'r') + dspace = 1.0 / self.CalcLength(g, 'r') if dspace >= dmin: hkl_dsp.append(g) @@ -1020,6 +1100,7 @@ def getHKLs(self, dmin): self.hkls = self.SortHKL(hkl) return self.hkls + ''' set some properties for the unitcell class. only the lattice parameters, space group and asymmetric positions can change, @@ -1035,8 +1116,10 @@ def Required_C(self, C): def MakeStiffnessMatrix(self, inp_Cvals): if len(inp_Cvals) != len(_StiffnessDict[self._laueGroup][0]): x = len(_StiffnessDict[self._laueGroup][0]) - msg = (f"number of constants entered is not correct." - f" need a total of {x} independent constants.") + msg = ( + f"number of constants entered is not correct." + f" need a total of {x} independent constants." + ) raise IOError(msg) # initialize all zeros and fill the supplied values @@ -1104,15 +1187,15 @@ def inside_spheretriangle(self, conn, dir3, hemisphere, switch): number ''' if np.abs(d1) < eps: - d1 = 0. + d1 = 0.0 if np.abs(d2) < eps: - d2 = 0. + d2 = 0.0 if np.abs(d3) < eps: - d3 = 0. + d3 = 0.0 ss = np.unique(np.sign([d1, d2, d3])) if hemisphere == 'upper': - if np.all(ss >= 0.): + if np.all(ss >= 0.0): mask.append(True) else: mask.append(False) @@ -1173,11 +1256,12 @@ def reduce_dirvector(self, dir3, switch='pg'): dir3n = dir3 else: if np.all(np.linalg.norm(dir3) > eps): - dir3n = dir3/np.tile(np.linalg.norm(dir3, axis=1), [3, 1]).T + dir3n = dir3 / np.tile(np.linalg.norm(dir3, axis=1), [3, 1]).T else: raise RuntimeError( "atleast one of the input direction seems \ - to be a null vector") + to be a null vector" + ) ''' we need both the symmetry reductions for the point group and laue group @@ -1220,18 +1304,19 @@ def reduce_dirvector(self, dir3, switch='pg'): if hemisphere == 'both': mask = np.ones(dir3_sym.shape[0], dtype=bool) elif hemisphere == 'upper': - mask = dir3_sym[:, 2] >= 0. 
+ mask = dir3_sym[:, 2] >= 0.0 else: for ii in range(ntriangle): tmpmask = self.inside_spheretriangle( - connectivity[:, ii], dir3_sym, - hemisphere, switch) + connectivity[:, ii], dir3_sym, hemisphere, switch + ) mask = np.logical_or(mask, tmpmask) if np.sum(mask) > 0: if dir3_reduced.size != 0: dir3_reduced = np.vstack( - (dir3_reduced, dir3_sym[mask, :])) + (dir3_reduced, dir3_sym[mask, :]) + ) idx_red = np.hstack((idx_red, idx[mask])) else: dir3_reduced = np.copy(dir3_sym[mask, :]) @@ -1273,7 +1358,8 @@ class which correctly color the orientations for this crystal class. the ''' dir3_red = self.reduce_dirvector(dir3, switch='laue') dir3_red_supergroup = self.reduce_dirvector( - dir3, switch='superlaue') + dir3, switch='superlaue' + ) switch = 'superlaue' else: @@ -1290,10 +1376,9 @@ class which correctly color the orientations for this crystal class. the rgb = colorspace.hsl2rgb(hsl) return rgb - def color_orientations(self, - rmats, - ref_dir=np.array([0., 0., 1.]), - laueswitch=True): + def color_orientations( + self, rmats, ref_dir=np.array([0.0, 0.0, 1.0]), laueswitch=True + ): ''' @AUTHOR Saransh Singh, Lawrence Livermore National Lab, saransh1@llnl.gov @DATE 11/12/2020 SS 1.0 original @@ -1316,7 +1401,9 @@ def color_orientations(self, if rmats.ndim == 2: rmats = np.atleast_3d(rmats).T else: - assert rmats.ndim == 3, "rotations matrices need to \ + assert ( + rmats.ndim == 3 + ), "rotations matrices need to \ be nx3x3. Please check size." 
''' @@ -1355,12 +1442,10 @@ def convert_lp_to_valunits(self, lp): lp_valunit = [] for i in range(6): if i < 3: - lp_valunit.append( - valWUnit('lp', 'length', lp[i], 'nm')) + lp_valunit.append(valWUnit('lp', 'length', lp[i], 'nm')) else: - lp_valunit.append( - valWUnit('lp', 'angle', lp[i], 'degrees')) + lp_valunit.append(valWUnit('lp', 'angle', lp[i], 'degrees')) return lp_valunit @@ -1370,8 +1455,7 @@ def fill_correct_lp_vals(self, lp, val, lp_name): """ index = list(_lpname).index(lp_name) lp[index] = val - lp_red = [lp[i] for i in - _rqpDict[self.latticeType][0]] + lp_red = [lp[i] for i in _rqpDict[self.latticeType][0]] lp = _rqpDict[self.latticeType][1](lp_red) lp_valunit = self.convert_lp_to_valunits(lp) return lp_valunit @@ -1382,20 +1466,18 @@ def compliance(self): if not hasattr(self, 'stiffness'): raise AttributeError('Stiffness not set on unit cell') - return np.linalg.inv(self.stiffness / 1.e3) + return np.linalg.inv(self.stiffness / 1.0e3) @compliance.setter def compliance(self, v): # Compliance in TPa⁻¹. Stiffness is in GPa. 
- self.stiffness = np.linalg.inv(v) * 1.e3 + self.stiffness = np.linalg.inv(v) * 1.0e3 # lattice constants as properties @property def lparms(self): - return [self.a, self.b, - self.c, self.alpha, self.beta, - self.gamma] + return [self.a, self.b, self.c, self.alpha, self.beta, self.gamma] @lparms.setter def lparms(self, lp): @@ -1417,8 +1499,7 @@ def lparms(self, lp): @property def lparms_reduced(self): lp = self.lparms - lp_red = [lp[i] for i in - _rqpDict[self.latticeType][0]] + lp_red = [lp[i] for i in _rqpDict[self.latticeType][0]] return lp_red @property @@ -1429,12 +1510,10 @@ def a(self): def a(self, val): if self.is_editable("a"): lp = self.lparms - lp_valunit = self.fill_correct_lp_vals( - lp, val, "a") + lp_valunit = self.fill_correct_lp_vals(lp, val, "a") self.lparms = lp_valunit else: - msg = (f"not an editable field" - f" for this space group") + msg = f"not an editable field" f" for this space group" raise RuntimeError(msg) @property @@ -1445,12 +1524,10 @@ def b(self): def b(self, val): if self.is_editable("b"): lp = self.lparms - lp_valunit = self.fill_correct_lp_vals( - lp, val, "b") + lp_valunit = self.fill_correct_lp_vals(lp, val, "b") self.lparms = lp_valunit else: - msg = (f"not an editable field" - f" for this space group") + msg = f"not an editable field" f" for this space group" raise RuntimeError(msg) @property @@ -1461,12 +1538,10 @@ def c(self): def c(self, val): if self.is_editable("c"): lp = self.lparms - lp_valunit = self.fill_correct_lp_vals( - lp, val, "c") + lp_valunit = self.fill_correct_lp_vals(lp, val, "c") self.lparms = lp_valunit else: - msg = (f"not an editable field" - f" for this space group") + msg = f"not an editable field" f" for this space group" raise RuntimeError(msg) @property @@ -1477,12 +1552,10 @@ def alpha(self): def alpha(self, val): if self.is_editable("alpha"): lp = self.lparms - lp_valunit = self.fill_correct_lp_vals( - lp, val, "alpha") + lp_valunit = self.fill_correct_lp_vals(lp, val, "alpha") self.lparms = 
lp_valunit else: - msg = (f"not an editable field" - f" for this space group") + msg = f"not an editable field" f" for this space group" raise RuntimeError(msg) @property @@ -1493,12 +1566,10 @@ def beta(self): def beta(self, val): if self.is_editable("beta"): lp = self.lparms - lp_valunit = self.fill_correct_lp_vals( - lp, val, "beta") + lp_valunit = self.fill_correct_lp_vals(lp, val, "beta") self.lparms = lp_valunit else: - msg = (f"not an editable field" - f" for this space group") + msg = f"not an editable field" f" for this space group" raise RuntimeError(msg) @property @@ -1509,12 +1580,10 @@ def gamma(self): def gamma(self, val): if self.is_editable("gamma"): lp = self.lparms - lp_valunit = self.fill_correct_lp_vals( - lp, val, "gamma") + lp_valunit = self.fill_correct_lp_vals(lp, val, "gamma") self.lparms = lp_valunit else: - msg = (f"not an editable field" - f" for this space group") + msg = f"not an editable field" f" for this space group" raise RuntimeError(msg) @property @@ -1565,17 +1634,21 @@ def sgnum(self): @sgnum.setter def sgnum(self, val): - if not(isinstance(val, int)): + if not (isinstance(val, int)): raise ValueError('space group should be integer') - if not((val >= 1) and (val <= 230)): + if not ((val >= 1) and (val <= 230)): raise ValueError('space group number should be between 1 and 230.') self._sym_sgnum = val - self.sg_hmsymbol = symbols.pstr_spacegroup[val-1].strip() + self.sg_hmsymbol = symbols.pstr_spacegroup[val - 1].strip() - self.SYM_SG, self.SYM_PG_d, self.SYM_PG_d_laue, \ - self.centrosymmetric, self.symmorphic = \ - symmetry.GenerateSGSym(self.sgnum, self.sgsetting) + ( + self.SYM_SG, + self.SYM_PG_d, + self.SYM_PG_d_laue, + self.centrosymmetric, + self.symmorphic, + ) = symmetry.GenerateSGSym(self.sgnum, self.sgsetting) self.latticeType = symmetry.latticeType(self.sgnum) @@ -1600,10 +1673,12 @@ def sgnum(self, val): ''' SS 11/11/2020 adding the sphere_sector class initialization here ''' - self.sphere_sector = 
sphere_sector.sector(self._pointGroup, - self._laueGroup, - self._supergroup, - self._supergroup_laue) + self.sphere_sector = sphere_sector.sector( + self._pointGroup, + self._laueGroup, + self._supergroup, + self._supergroup_laue, + ) self.CalcDensity() self.calc_absorption_length() @@ -1629,10 +1704,12 @@ def atom_pos(self, val): """ if hasattr(self, 'atom_type'): if self.atom_ntype != val.shape[0]: - msg = (f"incorrect number of atom positions." - f" number of atom type = {self.atom_ntype} " - f" and number of" - f" atom positions = {val.shape[0]}.") + msg = ( + f"incorrect number of atom positions." + f" number of atom type = {self.atom_ntype} " + f" and number of" + f" atom positions = {val.shape[0]}." + ) raise ValueError(msg) self._atom_pos = val @@ -1657,8 +1734,9 @@ def asym_pos(self): @asym_pos.setter def asym_pos(self, val): - assert(type(val) == list),\ - 'input type to asymmetric positions should be list' + assert ( + type(val) == list + ), 'input type to asymmetric positions should be list' self._asym_pos = val @property @@ -1667,8 +1745,9 @@ def numat(self): @numat.setter def numat(self, val): - assert(val.shape[0] == - self.atom_ntype), 'shape of numat is not consistent' + assert ( + val.shape[0] == self.atom_ntype + ), 'shape of numat is not consistent' self._numat = val # direct metric tensor is read only @@ -1702,7 +1781,7 @@ def vol(self): @property def vol_per_atom(self): # vol per atom in A^3 - return 1e3*self.vol/self.num_atom + return 1e3 * self.vol / self.num_atom @property @@ -1734,11 +1813,11 @@ def chemical_formula(self): 'triclinic': (tuple(range(6)), lambda p: p), # all 6 # note beta 'monoclinic': ((0, 1, 2, 4), lambda p: (p[0], p[1], p[2], 90, p[3], 90)), - 'orthorhombic': ((0, 1, 2), lambda p: (p[0], p[1], p[2], 90, 90, 90)), - 'tetragonal': ((0, 2), lambda p: (p[0], p[0], p[1], 90, 90, 90)), - 'trigonal': ((0, 2), lambda p: (p[0], p[0], p[1], 90, 90, 120)), - 'hexagonal': ((0, 2), lambda p: (p[0], p[0], p[1], 90, 90, 120)), - 
'cubic': ((0,), lambda p: (p[0], p[0], p[0], 90, 90, 90)), + 'orthorhombic': ((0, 1, 2), lambda p: (p[0], p[1], p[2], 90, 90, 90)), + 'tetragonal': ((0, 2), lambda p: (p[0], p[0], p[1], 90, 90, 90)), + 'trigonal': ((0, 2), lambda p: (p[0], p[0], p[1], 90, 90, 120)), + 'hexagonal': ((0, 2), lambda p: (p[0], p[0], p[1], 90, 90, 120)), + 'cubic': ((0,), lambda p: (p[0], p[0], p[0], 90, 90, 90)), } _lpname = np.array(['a', 'b', 'c', 'alpha', 'beta', 'gamma']) @@ -1778,78 +1857,68 @@ def chemical_formula(self): supergroup_11 = 'oh' -def _sgrange(min, max): return tuple(range(min, max + 1)) # inclusive range +def _sgrange(min, max): + return tuple(range(min, max + 1)) # inclusive range + ''' 11/20/2020 SS added supergroup to the list which is used for coloring the fundamental zone IPF ''' _pgDict = { - _sgrange(1, 1): ('c1', laue_1, - supergroup_1, supergroup_00), # Triclinic - _sgrange(2, 2): ('ci', laue_1, \ - supergroup_00, supergroup_00), # laue 1 - _sgrange(3, 5): ('c2', laue_2, \ - supergroup_2, supergroup_3), # Monoclinic - _sgrange(6, 9): ('cs', laue_2, \ - supergroup_1, supergroup_3), - _sgrange(10, 15): ('c2h', laue_2, \ - supergroup_3, supergroup_3), # laue 2 - _sgrange(16, 24): ('d2', laue_3, \ - supergroup_3, supergroup_3), # Orthorhombic - _sgrange(25, 46): ('c2v', laue_3, \ - supergroup_2, supergroup_3), - _sgrange(47, 74): ('d2h', laue_3, \ - supergroup_3, supergroup_3), # laue 3 - _sgrange(75, 80): ('c4', laue_4, \ - supergroup_4, supergroup_5), # Tetragonal - _sgrange(81, 82): ('s4', laue_4, \ - supergroup_01, supergroup_5), - _sgrange(83, 88): ('c4h', laue_4, \ - supergroup_5, supergroup_5), # laue 4 - _sgrange(89, 98): ('d4', laue_5, \ - supergroup_5, supergroup_5), - _sgrange(99, 110): ('c4v', laue_5, \ - supergroup_4, supergroup_5), - _sgrange(111, 122): ('d2d', laue_5, \ - supergroup_5, supergroup_5), - _sgrange(123, 142): ('d4h', laue_5, \ - supergroup_5, supergroup_5), # laue 5 + _sgrange(1, 1): ('c1', laue_1, supergroup_1, supergroup_00), # 
Triclinic + _sgrange(2, 2): ('ci', laue_1, supergroup_00, supergroup_00), # laue 1 + _sgrange(3, 5): ('c2', laue_2, supergroup_2, supergroup_3), # Monoclinic + _sgrange(6, 9): ('cs', laue_2, supergroup_1, supergroup_3), + _sgrange(10, 15): ('c2h', laue_2, supergroup_3, supergroup_3), # laue 2 + _sgrange(16, 24): ( + 'd2', + laue_3, + supergroup_3, + supergroup_3, + ), # Orthorhombic + _sgrange(25, 46): ('c2v', laue_3, supergroup_2, supergroup_3), + _sgrange(47, 74): ('d2h', laue_3, supergroup_3, supergroup_3), # laue 3 + _sgrange(75, 80): ('c4', laue_4, supergroup_4, supergroup_5), # Tetragonal + _sgrange(81, 82): ('s4', laue_4, supergroup_01, supergroup_5), + _sgrange(83, 88): ('c4h', laue_4, supergroup_5, supergroup_5), # laue 4 + _sgrange(89, 98): ('d4', laue_5, supergroup_5, supergroup_5), + _sgrange(99, 110): ('c4v', laue_5, supergroup_4, supergroup_5), + _sgrange(111, 122): ('d2d', laue_5, supergroup_5, supergroup_5), + _sgrange(123, 142): ('d4h', laue_5, supergroup_5, supergroup_5), # laue 5 # Trigonal # laue 6 [also c3i] - _sgrange(143, 146): ('c3', laue_6, \ - supergroup_6, supergroup_02), - _sgrange(147, 148): ('s6', laue_6, \ - supergroup_02, supergroup_02), - _sgrange(149, 155): ('d3', laue_7, \ - supergroup_7, supergroup_9), - _sgrange(156, 161): ('c3v', laue_7, \ - supergroup_6, supergroup_9), - _sgrange(162, 167): ('d3d', laue_7, \ - supergroup_9, supergroup_9), # laue 7 - _sgrange(168, 173): ('c6', laue_8, \ - supergroup_7, supergroup_9), # Hexagonal - _sgrange(174, 174): ('c3h', laue_8, \ - supergroup_7, supergroup_9), - _sgrange(175, 176): ('c6h', laue_8, \ - supergroup_9, supergroup_9), # laue 8 - _sgrange(177, 182): ('d6', laue_9, \ - supergroup_9, supergroup_9), - _sgrange(183, 186): ('c6v', laue_9, \ - supergroup_7, supergroup_9), - _sgrange(187, 190): ('d3h', laue_9, \ - supergroup_9, supergroup_9), - _sgrange(191, 194): ('d6h', laue_9, \ - supergroup_9, supergroup_9), # laue 9 - _sgrange(195, 199): ('t', laue_10, \ - supergroup_10, 
supergroup_11), # Cubic - _sgrange(200, 206): ('th', laue_10, \ - supergroup_11, supergroup_11), # laue 10 - _sgrange(207, 214): ('o', laue_11, \ - supergroup_11, supergroup_11), - _sgrange(215, 220): ('td', laue_11, \ - supergroup_10, supergroup_11), - _sgrange(221, 230): ('oh', laue_11, \ - supergroup_11, supergroup_11) # laue 11 + _sgrange(143, 146): ('c3', laue_6, supergroup_6, supergroup_02), + _sgrange(147, 148): ('s6', laue_6, supergroup_02, supergroup_02), + _sgrange(149, 155): ('d3', laue_7, supergroup_7, supergroup_9), + _sgrange(156, 161): ('c3v', laue_7, supergroup_6, supergroup_9), + _sgrange(162, 167): ('d3d', laue_7, supergroup_9, supergroup_9), # laue 7 + _sgrange(168, 173): ( + 'c6', + laue_8, + supergroup_7, + supergroup_9, + ), # Hexagonal + _sgrange(174, 174): ('c3h', laue_8, supergroup_7, supergroup_9), + _sgrange(175, 176): ('c6h', laue_8, supergroup_9, supergroup_9), # laue 8 + _sgrange(177, 182): ('d6', laue_9, supergroup_9, supergroup_9), + _sgrange(183, 186): ('c6v', laue_9, supergroup_7, supergroup_9), + _sgrange(187, 190): ('d3h', laue_9, supergroup_9, supergroup_9), + _sgrange(191, 194): ('d6h', laue_9, supergroup_9, supergroup_9), # laue 9 + _sgrange(195, 199): ('t', laue_10, supergroup_10, supergroup_11), # Cubic + _sgrange(200, 206): ( + 'th', + laue_10, + supergroup_11, + supergroup_11, + ), # laue 10 + _sgrange(207, 214): ('o', laue_11, supergroup_11, supergroup_11), + _sgrange(215, 220): ('td', laue_11, supergroup_10, supergroup_11), + _sgrange(221, 230): ( + 'oh', + laue_11, + supergroup_11, + supergroup_11, + ), # laue 11 } ''' @@ -1954,7 +2023,8 @@ def _sgrange(min, max): return tuple(range(min, max + 1)) # inclusive range ''' -def identity(x): return x +def identity(x): + return x def C_cyclictet_eq(x): @@ -1973,7 +2043,7 @@ def C_trigonal_eq(x): x[3, 5] = -x[0, 4] x[4, 4] = x[3, 3] x[4, 5] = x[0, 3] - x[5, 5] = 0.5*(x[0, 0]-x[0, 1]) + x[5, 5] = 0.5 * (x[0, 0] - x[0, 1]) return x @@ -2001,5 +2071,5 @@ def C_cubic_eq(x): 
laue_8: [type8, C_trigonal_eq], # cyclic hexagonal, 5 components needed laue_9: [type8, C_trigonal_eq], # dihedral hexagonal, 5 components laue_10: [type9, C_cubic_eq], # cubic, 3 components - laue_11: [type9, C_cubic_eq] # cubic, 3 components + laue_11: [type9, C_cubic_eq], # cubic, 3 components } diff --git a/hexrd/material/utils.py b/hexrd/core/material/utils.py similarity index 73% rename from hexrd/material/utils.py rename to hexrd/core/material/utils.py index da51c1b84..69f85030a 100644 --- a/hexrd/material/utils.py +++ b/hexrd/core/material/utils.py @@ -1,19 +1,24 @@ import importlib.resources -import hexrd.resources -from hexrd.constants import cClassicalelectronRad as re,\ -cAvogadro, ATOM_WEIGHTS_DICT +import hexrd.core.resources +from hexrd.core.constants import ( + cClassicalelectronRad as re, + cAvogadro, + ATOM_WEIGHTS_DICT, +) import chemparse import numpy as np import h5py from copy import deepcopy from scipy.interpolate import interp1d -from hexrd import constants +from hexrd.core import constants """ calculate the molecular weight given the formula unit @author Saransh Singh, LLNL @date 1.0 original 02/16/2022 """ + + def interpret_formula(formula): """ first interpret if the formula is a dictionary @@ -28,33 +33,35 @@ def interpret_formula(formula): return chemparse.parse_formula(formula) + def calculate_molecular_mass(formula): """ interpret the formula as either a dictionary or a chemical formula """ formula_dict = interpret_formula(formula) - M = 0. 
- for k,v in formula_dict.items(): + M = 0.0 + for k, v in formula_dict.items(): M += v * ATOM_WEIGHTS_DICT[k] return M + """ calculate the number density of element or compound number density is the number of atoms per unit volume @author Saransh Singh, LLNL @date 1.0 original 02/16/2022 """ -def calculate_number_density(density, - formula): + + +def calculate_number_density(density, formula): molecular_mass = calculate_molecular_mass(formula) - return 1e-21*density*cAvogadro/molecular_mass + return 1e-21 * density * cAvogadro / molecular_mass + -def calculate_linear_absorption_length(density, - formula, - energy_vector): +def calculate_linear_absorption_length(density, formula, energy_vector): """ this function calculates the absorption length (in mm) based on both coherent and incoherent scattering cross @@ -84,41 +91,42 @@ def calculate_linear_absorption_length(density, the attenuation length in microns """ - data = importlib.resources.open_binary(hexrd.resources, 'mu_en.h5') + data = importlib.resources.open_binary(hexrd.core.resources, 'mu_en.h5') fid = h5py.File(data, 'r') - formula_dict = interpret_formula(formula) + formula_dict = interpret_formula(formula) molecular_mass = calculate_molecular_mass(formula) density_conv = density mu_rho = 0.0 for k, v in formula_dict.items(): - wi = v*ATOM_WEIGHTS_DICT[k]/molecular_mass + wi = v * ATOM_WEIGHTS_DICT[k] / molecular_mass d = np.array(fid[f"/{k}/data"]) - E = d[:,0] - mu_rho_tab = d[:,1] + E = d[:, 0] + mu_rho_tab = d[:, 1] - val = np.interp(np.log(energy_vector), - np.log(E), - np.log(mu_rho_tab), - left=0.0, - right=0.0) + val = np.interp( + np.log(energy_vector), + np.log(E), + np.log(mu_rho_tab), + left=0.0, + right=0.0, + ) val = np.exp(val) mu_rho += wi * val - mu = mu_rho * density_conv # this is in cm^-1 - mu = mu * 1E-4 # this is in mm^-1 - absorption_length = 1./mu + mu = mu_rho * density_conv # this is in cm^-1 + mu = mu * 1e-4 # this is in mm^-1 + absorption_length = 1.0 / mu return 
absorption_length -def calculate_energy_absorption_length(density, - formula, - energy_vector): + +def calculate_energy_absorption_length(density, formula, energy_vector): """ this function calculates the absorption length (in mm) based on the total energy absorbed by the medium. this @@ -146,35 +154,37 @@ def calculate_energy_absorption_length(density, the attenuation length in microns """ - data = importlib.resources.open_binary(hexrd.resources, 'mu_en.h5') + data = importlib.resources.open_binary(hexrd.core.resources, 'mu_en.h5') fid = h5py.File(data, 'r') - formula_dict = interpret_formula(formula) + formula_dict = interpret_formula(formula) molecular_mass = calculate_molecular_mass(formula) density_conv = density mu_rho = 0.0 for k, v in formula_dict.items(): - wi = v*ATOM_WEIGHTS_DICT[k]/molecular_mass + wi = v * ATOM_WEIGHTS_DICT[k] / molecular_mass d = np.array(fid[f"/{k}/data"]) - E = d[:,0] - mu_rho_tab = d[:,2] + E = d[:, 0] + mu_rho_tab = d[:, 2] - val = np.interp(np.log(energy_vector), - np.log(E), - np.log(mu_rho_tab), - left=0.0, - right=0.0) + val = np.interp( + np.log(energy_vector), + np.log(E), + np.log(mu_rho_tab), + left=0.0, + right=0.0, + ) val = np.exp(val) mu_rho += wi * val - mu = mu_rho * density_conv # this is in cm^-1 - mu = mu * 1E-4 # this is in microns^-1 - absorption_length = 1./mu + mu = mu_rho * density_conv # this is in cm^-1 + mu = mu * 1e-4 # this is in microns^-1 + absorption_length = 1.0 / mu return absorption_length @@ -209,20 +219,20 @@ def convert_density_to_atoms_per_cubic_angstrom( """ # get_smallest abundance if composition is None: - return 0. 
+ return 0.0 norm_elemental_abundances = normalize_composition(composition) mean_z = 0.0 for element, concentration in norm_elemental_abundances.items(): mean_z += concentration * constants.ATOM_WEIGHTS_DICT[element] - return density / mean_z * .602214129 + return density / mean_z * 0.602214129 def calculate_coherent_scattering_factor( element: str, Q: np.ndarray, ) -> np.ndarray: - s = Q/(4. * np.pi) + s = Q / (4.0 * np.pi) sfact = constants.scatfac[element] fe = sfact[5] for jj in range(5): @@ -262,10 +272,7 @@ def calculate_f_squared_mean( norm_elemental_abundances = normalize_composition(formula) res = 0 for key, value in norm_elemental_abundances.items(): - res += ( - value * - calculate_coherent_scattering_factor(key, Q) ** 2 - ) + res += value * calculate_coherent_scattering_factor(key, Q) ** 2 return res @@ -281,11 +288,8 @@ def calculate_f_mean_squared( norm_elemental_abundances = normalize_composition(formula) res = 0 for key, value in norm_elemental_abundances.items(): - res += ( - value * - calculate_coherent_scattering_factor(key, Q) - ) - return res ** 2 + res += value * calculate_coherent_scattering_factor(key, Q) + return res**2 def calculate_incoherent_scattering( @@ -297,12 +301,8 @@ def calculate_incoherent_scattering( return np.zeros_like(Q) formula = interpret_formula(composition) - norm_elemental_abundances = normalize_composition( - formula) + norm_elemental_abundances = normalize_composition(formula) res = 0 for key, value in norm_elemental_abundances.items(): - res += ( - value * - calculate_incoherent_scattering_factor(key, Q) - ) ** 2 + res += (value * calculate_incoherent_scattering_factor(key, Q)) ** 2 return res diff --git a/hexrd/matrixutil.py b/hexrd/core/matrixutil.py similarity index 88% rename from hexrd/matrixutil.py rename to hexrd/core/matrixutil.py index 03157b945..0c6b6c682 100644 --- a/hexrd/matrixutil.py +++ b/hexrd/core/matrixutil.py @@ -34,18 +34,18 @@ import numba -from hexrd import constants +from hexrd.core import 
constants # module variables -sqr6i = 1./np.sqrt(6.) -sqr3i = 1./np.sqrt(3.) -sqr2i = 1./np.sqrt(2.) -sqr2 = np.sqrt(2.) -sqr3 = np.sqrt(3.) -sqr2b3 = np.sqrt(2./3.) +sqr6i = 1.0 / np.sqrt(6.0) +sqr3i = 1.0 / np.sqrt(3.0) +sqr2i = 1.0 / np.sqrt(2.0) +sqr2 = np.sqrt(2.0) +sqr3 = np.sqrt(3.0) +sqr2b3 = np.sqrt(2.0 / 3.0) fpTol = constants.epsf # 2.220446049250313e-16 -vTol = 100*fpTol +vTol = 100 * fpTol def columnNorm(a): @@ -78,26 +78,27 @@ def unitVector(a): """ normalize array of column vectors (hstacked, axis = 0) """ - assert a.ndim in [1, 2], \ - "incorrect arg shape; must be 1-d or 2-d, yours is %d-d" % (a.ndim) + assert a.ndim in [ + 1, + 2, + ], "incorrect arg shape; must be 1-d or 2-d, yours is %d-d" % (a.ndim) ztol = constants.ten_epsf m = a.shape[0] - nrm = np.tile(np.sqrt(np.sum(np.asarray(a)**2, axis=0)), (m, 1)) + nrm = np.tile(np.sqrt(np.sum(np.asarray(a) ** 2, axis=0)), (m, 1)) # prevent divide by zero nrm[nrm <= ztol] = 1.0 - return a/nrm + return a / nrm def nullSpace(A, tol=vTol): """ computes the null space of the real matrix A """ - assert A.ndim == 2, \ - 'input must be 2-d; yours is %d-d' % (A.ndim) + assert A.ndim == 2, 'input must be 2-d; yours is %d-d' % (A.ndim) n, m = A.shape @@ -108,7 +109,7 @@ def nullSpace(A, tol=vTol): S = np.hstack([S, np.zeros(m - n)]) - null_mask = (S <= tol) + null_mask = S <= tol null_space = V[null_mask, :] return null_space @@ -135,10 +136,10 @@ def blockSparseOfMatArray(matArray): m = matArray.shape[1] n = matArray.shape[2] - mn = m*n - jmax = p*n - imax = p*m - ntot = p*m*n + mn = m * n + jmax = p * n + imax = p * m + ntot = p * m * n rl = np.arange(p) rm = np.arange(m) @@ -146,8 +147,9 @@ def blockSparseOfMatArray(matArray): sij = matArray.transpose(0, 2, 1).reshape(1, ntot).squeeze() j = np.reshape(np.tile(rjmax, (m, 1)).T, (1, ntot)) - i = np.reshape(np.tile(rm, (1, jmax)), (1, ntot)) + \ - np.reshape(np.tile(m*rl, (mn, 1)).T, (1, ntot)) + i = np.reshape(np.tile(rm, (1, jmax)), (1, ntot)) + np.reshape( + 
np.tile(m * rl, (mn, 1)).T, (1, ntot) + ) ij = np.concatenate((i, j), axis=0) @@ -166,7 +168,7 @@ def symmToVecMV(A, scale=True): if scale: fac = sqr2 else: - fac = 1. + fac = 1.0 mvvec = np.zeros(6, dtype='float64') mvvec[0] = A[0, 0] mvvec[1] = A[1, 1] @@ -186,7 +188,7 @@ def vecMVToSymm(A, scale=True): if scale: fac = sqr2 else: - fac = 1. + fac = 1.0 symm_mat = np.zeros((3, 3), dtype='float64') symm_mat[0, 0] = A[0] symm_mat[1, 1] = A[1] @@ -214,13 +216,15 @@ def nrmlProjOfVecMV(vec): n = unitVector(vec) nmat = np.array( - [n[0, :]**2, - n[1, :]**2, - n[2, :]**2, - sqr2 * n[1, :] * n[2, :], - sqr2 * n[0, :] * n[2, :], - sqr2 * n[0, :] * n[1, :]], - dtype='float64' + [ + n[0, :] ** 2, + n[1, :] ** 2, + n[2, :] ** 2, + sqr2 * n[1, :] * n[2, :], + sqr2 * n[0, :] * n[2, :], + sqr2 * n[0, :] * n[1, :], + ], + dtype='float64', ) return nmat.T @@ -301,7 +305,7 @@ def skew(A): else: raise RuntimeError("this function only works for square arrays") - return np.squeeze(0.5*(A - A.transpose(0, 2, 1))) + return np.squeeze(0.5 * (A - A.transpose(0, 2, 1))) def symm(A): @@ -328,7 +332,7 @@ def symm(A): else: raise RuntimeError("this function only works for square arrays") - return np.squeeze(0.5*(A + A.transpose(0, 2, 1))) + return np.squeeze(0.5 * (A + A.transpose(0, 2, 1))) def skewMatrixOfVector(w): @@ -357,21 +361,11 @@ def skewMatrixOfVector(w): else: stackdim = w.shape[1] else: - raise RuntimeError( - 'input is incorrect shape; expecting ndim = 1 or 2' - ) + raise RuntimeError('input is incorrect shape; expecting ndim = 1 or 2') zs = np.zeros((1, stackdim), dtype='float64') W = np.vstack( - [zs, - -w[2, :], - w[1, :], - w[2, :], - zs, - -w[0, :], - -w[1, :], - w[0, :], - zs] + [zs, -w[2, :], w[1, :], w[2, :], zs, -w[0, :], -w[1, :], w[0, :], zs] ) return np.squeeze(np.reshape(W.T, (stackdim, 3, 3))) @@ -530,7 +524,7 @@ def findDuplicateVectors_old(vec, tol=vTol, equivPM=False): if not equivPM: diff = abs(tvec - dupl.T).sum(0) - match = abs(diff[1:]) <= tol # logical to 
find duplicates + match = abs(diff[1:]) <= tol # logical to find duplicates else: diffn = abs(tvec - dupl.T).sum(0) matchn = abs(diffn[1:]) <= tol @@ -538,7 +532,7 @@ def findDuplicateVectors_old(vec, tol=vTol, equivPM=False): matchp = abs(diffp[1:]) <= tol match = matchn + matchp - kick = np.hstack([True, match]) # pick self too + kick = np.hstack([True, match]) # pick self too if kick.sum() > 1: eqv += [torid[kick].tolist()] @@ -575,6 +569,7 @@ def findDuplicateVectors_old(vec, tol=vTol, equivPM=False): return eqv, uid + def findDuplicateVectors(vec, tol=vTol, equivPM=False): eqv = _findduplicatevectors(vec, tol, equivPM) uid = np.arange(0, vec.shape[1], dtype=np.int64) @@ -640,18 +635,18 @@ def _findduplicatevectors(vec, tol, equivPM): for ii in range(m): ctr = 0 - eqv_elem = np.zeros((m, ), dtype=np.int64) - for jj in range(ii+1, m): + eqv_elem = np.zeros((m,), dtype=np.int64) + for jj in range(ii + 1, m): if not jj in eqv_elem_master: if equivPM: - diff = np.sum(np.abs(vec[:, ii]-vec2[:, jj])) - diff2 = np.sum(np.abs(vec[:, ii]-vec[:, jj])) + diff = np.sum(np.abs(vec[:, ii] - vec2[:, jj])) + diff2 = np.sum(np.abs(vec[:, ii] - vec[:, jj])) if diff < tol or diff2 < tol: eqv_elem[ctr] = jj eqv_elem_master.append(jj) ctr += 1 else: - diff = np.sum(np.abs(vec[:, ii]-vec[:, jj])) + diff = np.sum(np.abs(vec[:, ii] - vec[:, jj])) if diff < tol: eqv_elem[ctr] = jj eqv_elem_master.append(jj) @@ -678,9 +673,9 @@ def strainTenToVec(strainTen): strainVec[0] = strainTen[0, 0] strainVec[1] = strainTen[1, 1] strainVec[2] = strainTen[2, 2] - strainVec[3] = 2*strainTen[1, 2] - strainVec[4] = 2*strainTen[0, 2] - strainVec[5] = 2*strainTen[0, 1] + strainVec[3] = 2 * strainTen[1, 2] + strainVec[4] = 2 * strainTen[0, 2] + strainVec[5] = 2 * strainTen[0, 1] strainVec = np.atleast_2d(strainVec).T return strainVec @@ -690,12 +685,12 @@ def strainVecToTen(strainVec): strainTen[0, 0] = strainVec[0] strainTen[1, 1] = strainVec[1] strainTen[2, 2] = strainVec[2] - strainTen[1, 2] = 
strainVec[3] / 2. - strainTen[0, 2] = strainVec[4] / 2. - strainTen[0, 1] = strainVec[5] / 2. - strainTen[2, 1] = strainVec[3] / 2. - strainTen[2, 0] = strainVec[4] / 2. - strainTen[1, 0] = strainVec[5] / 2. + strainTen[1, 2] = strainVec[3] / 2.0 + strainTen[0, 2] = strainVec[4] / 2.0 + strainTen[0, 1] = strainVec[5] / 2.0 + strainTen[2, 1] = strainVec[3] / 2.0 + strainTen[2, 0] = strainVec[4] / 2.0 + strainTen[1, 0] = strainVec[5] / 2.0 return strainTen @@ -734,13 +729,13 @@ def ale3dStrainOutToV(vecds): """ eps = np.zeros([3, 3], dtype='float64') # Akk_by_3 = sqr3i * vecds[5] # -p - a = np.exp(vecds[5])**(1./3.) # -p - t1 = sqr2i*vecds[0] - t2 = sqr6i*vecds[1] + a = np.exp(vecds[5]) ** (1.0 / 3.0) # -p + t1 = sqr2i * vecds[0] + t2 = sqr6i * vecds[1] eps[0, 0] = t1 - t2 eps[1, 1] = -t1 - t2 - eps[2, 2] = sqr2b3*vecds[1] + eps[2, 2] = sqr2b3 * vecds[1] eps[1, 0] = vecds[2] * sqr2i eps[2, 0] = vecds[3] * sqr2i eps[2, 1] = vecds[4] * sqr2i @@ -749,10 +744,10 @@ def ale3dStrainOutToV(vecds): eps[0, 2] = eps[2, 0] eps[1, 2] = eps[2, 1] - epstar = eps/a + epstar = eps / a - V = (constants.identity_3x3 + epstar)*a - Vinv = (constants.identity_3x3 - epstar)/a + V = (constants.identity_3x3 + epstar) * a + Vinv = (constants.identity_3x3 - epstar) / a return V, Vinv @@ -761,12 +756,12 @@ def vecdsToSymm(vecds): """convert from vecds representation to symmetry matrix""" A = np.zeros([3, 3], dtype='float64') Akk_by_3 = sqr3i * vecds[5] # -p - t1 = sqr2i*vecds[0] - t2 = sqr6i*vecds[1] + t1 = sqr2i * vecds[0] + t2 = sqr6i * vecds[1] A[0, 0] = t1 - t2 + Akk_by_3 A[1, 1] = -t1 - t2 + Akk_by_3 - A[2, 2] = sqr2b3*vecds[1] + Akk_by_3 + A[2, 2] = sqr2b3 * vecds[1] + Akk_by_3 A[1, 0] = vecds[2] * sqr2i A[2, 0] = vecds[3] * sqr2i A[2, 1] = vecds[4] * sqr2i @@ -789,7 +784,7 @@ def symmToVecds(A): """convert from symmetry matrix to vecds representation""" vecds = np.zeros(6, dtype='float64') vecds[0] = sqr2i * (A[0, 0] - A[1, 1]) - vecds[1] = sqr6i * (2. 
* A[2, 2] - A[0, 0] - A[1, 1]) + vecds[1] = sqr6i * (2.0 * A[2, 2] - A[0, 0] - A[1, 1]) vecds[2] = sqr2 * A[1, 0] vecds[3] = sqr2 * A[2, 0] vecds[4] = sqr2 * A[2, 1] @@ -834,15 +829,16 @@ def solve_wahba(v, w, weights=None): # compute weighted outer product sum B = np.zeros((3, 3)) for i in range(n_vecs): - B += weights[i]*np.dot(w[i].reshape(3, 1), v[i].reshape(1, 3)) + B += weights[i] * np.dot(w[i].reshape(3, 1), v[i].reshape(1, 3)) # compute svd Us, _, VsT = svd(B) # form diagonal matrix for solution - M = np.diag([1., 1., np.linalg.det(Us)*np.linalg.det(VsT)]) + M = np.diag([1.0, 1.0, np.linalg.det(Us) * np.linalg.det(VsT)]) return np.dot(Us, np.dot(M, VsT)) + # ============================================================================= # Numba-fied frame cache writer # ============================================================================= diff --git a/hexrd/extensions/__init__.py b/hexrd/core/projections/__init__.py similarity index 100% rename from hexrd/extensions/__init__.py rename to hexrd/core/projections/__init__.py diff --git a/hexrd/projections/polar.py b/hexrd/core/projections/polar.py similarity index 85% rename from hexrd/projections/polar.py rename to hexrd/core/projections/polar.py index d8f171a2b..c8129e0a7 100644 --- a/hexrd/projections/polar.py +++ b/hexrd/core/projections/polar.py @@ -2,14 +2,14 @@ import numpy as np -from hexrd import constants -from hexrd.instrument.detector import _interpolate_bilinear_in_place -from hexrd.material.crystallography import PlaneData -from hexrd.xrdutil.utils import ( +from hexrd.core import constants +from hexrd.core.instrument.detector import _interpolate_bilinear_in_place +from hexrd.core.material.crystallography import PlaneData +from hexrd.hedm.xrdutil.utils import ( _project_on_detector_cylinder, _project_on_detector_plane, ) -from hexrd.utils.panel_buffer import panel_buffer_as_2d_array +from hexrd.core.utils.panel_buffer import panel_buffer_as_2d_array class PolarView: @@ -17,10 +17,15 @@ class 
PolarView: Create (two-theta, eta) plot of detector images. """ - def __init__(self, plane_data, instrument, - eta_min=0., eta_max=360., - pixel_size=(0.1, 0.25), - cache_coordinate_map=False): + def __init__( + self, + plane_data, + instrument, + eta_min=0.0, + eta_max=360.0, + pixel_size=(0.1, 0.25), + cache_coordinate_map=False, + ): """ Instantiates a PolarView class. @@ -32,7 +37,7 @@ def __init__(self, plane_data, instrument, as defined but the active hkls and the tThWidth (or strainMag). If array_like, the input must be (2, ) specifying the [min, maz] 2theta values explicitly in degrees. - instrument : hexrd.instrument.HEDMInstrument + instrument : hexrd.core.instrument.HEDMInstrument The instruemnt object. eta_min : scalar, optional The minimum azimuthal extent in degrees. The default is 0. @@ -72,8 +77,9 @@ def __init__(self, plane_data, instrument, self._eta_min = np.radians(eta_min) self._eta_max = np.radians(eta_max) - assert np.all(np.asarray(pixel_size) > 0), \ - 'pixel sizes must be non-negative' + assert np.all( + np.asarray(pixel_size) > 0 + ), 'pixel sizes must be non-negative' self._tth_pixel_size = pixel_size[0] self._eta_pixel_size = pixel_size[1] @@ -175,12 +181,12 @@ def eta_pixel_size(self, x): @property def ntth(self): # return int(np.ceil(np.degrees(self.tth_range)/self.tth_pixel_size)) - return int(round(np.degrees(self.tth_range)/self.tth_pixel_size)) + return int(round(np.degrees(self.tth_range) / self.tth_pixel_size)) @property def neta(self): # return int(np.ceil(np.degrees(self.eta_range)/self.eta_pixel_size)) - return int(round(np.degrees(self.eta_range)/self.eta_pixel_size)) + return int(round(np.degrees(self.eta_range) / self.eta_pixel_size)) @property def shape(self): @@ -188,19 +194,29 @@ def shape(self): @property def angular_grid(self): - tth_vec = np.radians(self.tth_pixel_size*(np.arange(self.ntth)))\ - + self.tth_min + 0.5*np.radians(self.tth_pixel_size) - eta_vec = np.radians(self.eta_pixel_size*(np.arange(self.neta)))\ - 
+ self.eta_min + 0.5*np.radians(self.eta_pixel_size) + tth_vec = ( + np.radians(self.tth_pixel_size * (np.arange(self.ntth))) + + self.tth_min + + 0.5 * np.radians(self.tth_pixel_size) + ) + eta_vec = ( + np.radians(self.eta_pixel_size * (np.arange(self.neta))) + + self.eta_min + + 0.5 * np.radians(self.eta_pixel_size) + ) return np.meshgrid(eta_vec, tth_vec, indexing='ij') @property def extent(self): ev, tv = self.angular_grid - heps = np.radians(0.5*self.eta_pixel_size) - htps = np.radians(0.5*self.tth_pixel_size) - return [np.min(tv) - htps, np.max(tv) + htps, - np.max(ev) + heps, np.min(ev) - heps] + heps = np.radians(0.5 * self.eta_pixel_size) + htps = np.radians(0.5 * self.tth_pixel_size) + return [ + np.min(tv) - htps, + np.max(tv) + htps, + np.max(ev) + heps, + np.min(ev) - heps, + ] def _func_project_on_detector(self, detector): ''' @@ -214,32 +230,37 @@ def _func_project_on_detector(self, detector): def _args_project_on_detector(self, gvec_angs, detector): kwargs = {'beamVec': detector.bvec} - arg = (gvec_angs, - detector.rmat, - constants.identity_3x3, - self.chi, - detector.tvec, - constants.zeros_3, - self.tvec, - detector.distortion) + arg = ( + gvec_angs, + detector.rmat, + constants.identity_3x3, + self.chi, + detector.tvec, + constants.zeros_3, + self.tvec, + detector.distortion, + ) if detector.detector_type == 'cylindrical': - arg = (gvec_angs, - self.chi, - detector.tvec, - detector.caxis, - detector.paxis, - detector.radius, - detector.physical_size, - detector.angle_extent, - detector.distortion) + arg = ( + gvec_angs, + self.chi, + detector.tvec, + detector.caxis, + detector.paxis, + detector.radius, + detector.physical_size, + detector.angle_extent, + detector.distortion, + ) return arg, kwargs # ========================================================================= # ####### METHODS ####### # ========================================================================= - def warp_image(self, image_dict, pad_with_nans=False, - 
do_interpolation=True): + def warp_image( + self, image_dict, pad_with_nans=False, do_interpolation=True + ): """ Performs the polar mapping of the input images. @@ -306,22 +327,21 @@ def _generate_coordinate_mapping(self) -> dict[str, dict[str, np.ndarray]]: respective arrays as the values. """ angpts = self.angular_grid - dummy_ome = np.zeros((self.ntth*self.neta)) + dummy_ome = np.zeros((self.ntth * self.neta)) mapping = {} for detector_id, panel in self.detectors.items(): _project_on_detector = self._func_project_on_detector(panel) - gvec_angs = np.vstack([ - angpts[1].flatten(), - angpts[0].flatten(), - dummy_ome]).T + gvec_angs = np.vstack( + [angpts[1].flatten(), angpts[0].flatten(), dummy_ome] + ).T - args, kwargs = self._args_project_on_detector(gvec_angs, - panel) + args, kwargs = self._args_project_on_detector(gvec_angs, panel) - xypts = np.nan*np.ones((len(gvec_angs), 2)) - valid_xys, rmats_s, on_plane = _project_on_detector(*args, - **kwargs) + xypts = np.nan * np.ones((len(gvec_angs), 2)) + valid_xys, rmats_s, on_plane = _project_on_detector( + *args, **kwargs + ) xypts[on_plane, :] = valid_xys _, on_panel = panel.clip_to_panel(xypts, buffer_edges=True) diff --git a/hexrd/projections/spherical.py b/hexrd/core/projections/spherical.py similarity index 59% rename from hexrd/projections/spherical.py rename to hexrd/core/projections/spherical.py index e0a35fe56..0ca57acdd 100644 --- a/hexrd/projections/spherical.py +++ b/hexrd/core/projections/spherical.py @@ -1,20 +1,28 @@ import numpy as np from skimage.transform import PiecewiseAffineTransform, warp -from hexrd import constants -from hexrd.xrdutils.util import zproject_sph_angles +from hexrd.core import constants + +# TODO: Resolve extra-core-dependency +from hexrd.hedm.xrdutil.utils import zproject_sph_angles class SphericalView: """ Creates a spherical mapping of detector images. """ + MAPPING_TYPES = ('stereographic', 'equal-area') VECTOR_TYPES = ('d', 'q') - PROJ_IMG_DIM = 3. 
# 2*np.sqrt(2) rounded up - - def __init__(self, mapping='stereographic', vector_type='d', - output_dim=512, rmat=constants.identity_3x3): + PROJ_IMG_DIM = 3.0 # 2*np.sqrt(2) rounded up + + def __init__( + self, + mapping='stereographic', + vector_type='d', + output_dim=512, + rmat=constants.identity_3x3, + ): self._mapping = mapping self._vector_type = vector_type @@ -61,8 +69,10 @@ def rmat(self): def rmat(self, x): x = np.atleast_2d(x) assert x.shape == (3, 3), "rmat must be (3, 3)" - assert np.linalg.norm(np.dot(x.T, x) - constants.identity_3x3) \ - < constants.ten_epsf, "input matrix is not orthogonal" + assert ( + np.linalg.norm(np.dot(x.T, x) - constants.identity_3x3) + < constants.ten_epsf + ), "input matrix is not orthogonal" self._rmat = x def warp_eta_ome_map(self, eta_ome, map_ids=None, skip=10): @@ -79,14 +89,14 @@ def warp_eta_ome_map(self, eta_ome, map_ids=None, skip=10): etas = eta_ome.etas[::skip] # make grid of angular values - op, ep = np.meshgrid(omes, - etas, - indexing='ij') + op, ep = np.meshgrid(omes, etas, indexing='ij') # make grid of output pixel values - oc, ec = np.meshgrid(np.arange(nrows_in)[::skip], - np.arange(ncols_in)[::skip], - indexing='ij') + oc, ec = np.meshgrid( + np.arange(nrows_in)[::skip], + np.arange(ncols_in)[::skip], + indexing='ij', + ) ps = self.PROJ_IMG_DIM / self.output_dim # output pixel size @@ -98,30 +108,45 @@ def warp_eta_ome_map(self, eta_ome, map_ids=None, skip=10): img = eta_ome.dataStore[map_id] # ??? do we need to use iHKLlist? 
- angs = np.vstack([ - tths[map_id]*np.ones_like(ep.flatten()), - ep.flatten(), - op.flatten() - ]).T + angs = np.vstack( + [ + tths[map_id] * np.ones_like(ep.flatten()), + ep.flatten(), + op.flatten(), + ] + ).T ppts, nmask = zproject_sph_angles( - angs, method=self.mapping, source=self.vector_type, - invert_z=self.invert_z, use_mask=True + angs, + method=self.mapping, + source=self.vector_type, + invert_z=self.invert_z, + use_mask=True, ) # pixel coords in output image - rp = 0.5*self.output_dim - ppts[:, 1]/ps - cp = ppts[:, 0]/ps + 0.5*self.output_dim + rp = 0.5 * self.output_dim - ppts[:, 1] / ps + cp = ppts[:, 0] / ps + 0.5 * self.output_dim # compute piecewise affine transform - src = np.vstack([ec.flatten(), oc.flatten(), ]).T - dst = np.vstack([cp.flatten(), rp.flatten(), ]).T + src = np.vstack( + [ + ec.flatten(), + oc.flatten(), + ] + ).T + dst = np.vstack( + [ + cp.flatten(), + rp.flatten(), + ] + ).T paxf.estimate(src, dst) wimg = warp( img, inverse_map=paxf.inverse, - output_shape=(self.output_dim, self.output_dim) + output_shape=(self.output_dim, self.output_dim), ) if len(map_ids) == 1: return wimg @@ -144,38 +169,49 @@ def warp_polar_image(self, pimg, skip=10): tth_cen = np.array(pimg['tth_coordinates'])[0, :] eta_cen = np.array(pimg['eta_coordinates'])[:, 0] - tp, ep = np.meshgrid(tth_cen[::skip], - eta_cen[::skip]) - tc, ec = np.meshgrid(np.arange(ncols_in)[::skip], - np.arange(nrows_in)[::skip]) + tp, ep = np.meshgrid(tth_cen[::skip], eta_cen[::skip]) + tc, ec = np.meshgrid( + np.arange(ncols_in)[::skip], np.arange(nrows_in)[::skip] + ) op = np.zeros_like(tp.flatten()) angs = np.radians( - np.vstack([tp.flatten(), - ep.flatten(), - op.flatten()]).T + np.vstack([tp.flatten(), ep.flatten(), op.flatten()]).T ) ppts = zproject_sph_angles( - angs, method='stereographic', source='d', invert_z=self.invert_z, - rmat=self.rmat + angs, + method='stereographic', + source='d', + invert_z=self.invert_z, + rmat=self.rmat, ) # output pixel size ps = 
self.PROJ_IMG_DIM / self.output_dim # pixel coords in output image - rp = 0.5*self.output_dim - ppts[:, 1]/ps - cp = ppts[:, 0]/ps + 0.5*self.output_dim - - src = np.vstack([tc.flatten(), ec.flatten(), ]).T - dst = np.vstack([cp.flatten(), rp.flatten(), ]).T + rp = 0.5 * self.output_dim - ppts[:, 1] / ps + cp = ppts[:, 0] / ps + 0.5 * self.output_dim + + src = np.vstack( + [ + tc.flatten(), + ec.flatten(), + ] + ).T + dst = np.vstack( + [ + cp.flatten(), + rp.flatten(), + ] + ).T paxf.estimate(src, dst) wimg = warp( img, inverse_map=paxf.inverse, - output_shape=(self.output_dim, self.output_dim) + output_shape=(self.output_dim, self.output_dim), ) return wimg diff --git a/hexrd/resources/Anomalous.h5 b/hexrd/core/resources/Anomalous.h5 similarity index 100% rename from hexrd/resources/Anomalous.h5 rename to hexrd/core/resources/Anomalous.h5 diff --git a/hexrd/resources/BBXRD_IMAGE-PLATE-BACK_bnd.txt b/hexrd/core/resources/BBXRD_IMAGE-PLATE-BACK_bnd.txt similarity index 100% rename from hexrd/resources/BBXRD_IMAGE-PLATE-BACK_bnd.txt rename to hexrd/core/resources/BBXRD_IMAGE-PLATE-BACK_bnd.txt diff --git a/hexrd/resources/BBXRD_IMAGE-PLATE-BOTTOM_bnd.txt b/hexrd/core/resources/BBXRD_IMAGE-PLATE-BOTTOM_bnd.txt similarity index 100% rename from hexrd/resources/BBXRD_IMAGE-PLATE-BOTTOM_bnd.txt rename to hexrd/core/resources/BBXRD_IMAGE-PLATE-BOTTOM_bnd.txt diff --git a/hexrd/resources/BBXRD_IMAGE-PLATE-LEFT_bnd.txt b/hexrd/core/resources/BBXRD_IMAGE-PLATE-LEFT_bnd.txt similarity index 100% rename from hexrd/resources/BBXRD_IMAGE-PLATE-LEFT_bnd.txt rename to hexrd/core/resources/BBXRD_IMAGE-PLATE-LEFT_bnd.txt diff --git a/hexrd/resources/BBXRD_IMAGE-PLATE-RIGHT_bnd.txt b/hexrd/core/resources/BBXRD_IMAGE-PLATE-RIGHT_bnd.txt similarity index 100% rename from hexrd/resources/BBXRD_IMAGE-PLATE-RIGHT_bnd.txt rename to hexrd/core/resources/BBXRD_IMAGE-PLATE-RIGHT_bnd.txt diff --git a/hexrd/resources/BBXRD_IMAGE-PLATE-TOP_bnd.txt 
b/hexrd/core/resources/BBXRD_IMAGE-PLATE-TOP_bnd.txt similarity index 100% rename from hexrd/resources/BBXRD_IMAGE-PLATE-TOP_bnd.txt rename to hexrd/core/resources/BBXRD_IMAGE-PLATE-TOP_bnd.txt diff --git a/hexrd/resources/FIDDLE_IMAGE-PLATE-1_bnd.txt b/hexrd/core/resources/FIDDLE_IMAGE-PLATE-1_bnd.txt similarity index 100% rename from hexrd/resources/FIDDLE_IMAGE-PLATE-1_bnd.txt rename to hexrd/core/resources/FIDDLE_IMAGE-PLATE-1_bnd.txt diff --git a/hexrd/resources/PXRDIP_IMAGE-PLATE-B_bnd.txt b/hexrd/core/resources/PXRDIP_IMAGE-PLATE-B_bnd.txt similarity index 100% rename from hexrd/resources/PXRDIP_IMAGE-PLATE-B_bnd.txt rename to hexrd/core/resources/PXRDIP_IMAGE-PLATE-B_bnd.txt diff --git a/hexrd/resources/PXRDIP_IMAGE-PLATE-D_bnd.txt b/hexrd/core/resources/PXRDIP_IMAGE-PLATE-D_bnd.txt similarity index 100% rename from hexrd/resources/PXRDIP_IMAGE-PLATE-D_bnd.txt rename to hexrd/core/resources/PXRDIP_IMAGE-PLATE-D_bnd.txt diff --git a/hexrd/resources/PXRDIP_IMAGE-PLATE-L_bnd.txt b/hexrd/core/resources/PXRDIP_IMAGE-PLATE-L_bnd.txt similarity index 100% rename from hexrd/resources/PXRDIP_IMAGE-PLATE-L_bnd.txt rename to hexrd/core/resources/PXRDIP_IMAGE-PLATE-L_bnd.txt diff --git a/hexrd/resources/PXRDIP_IMAGE-PLATE-R_bnd.txt b/hexrd/core/resources/PXRDIP_IMAGE-PLATE-R_bnd.txt similarity index 100% rename from hexrd/resources/PXRDIP_IMAGE-PLATE-R_bnd.txt rename to hexrd/core/resources/PXRDIP_IMAGE-PLATE-R_bnd.txt diff --git a/hexrd/resources/PXRDIP_IMAGE-PLATE-U_bnd.txt b/hexrd/core/resources/PXRDIP_IMAGE-PLATE-U_bnd.txt similarity index 100% rename from hexrd/resources/PXRDIP_IMAGE-PLATE-U_bnd.txt rename to hexrd/core/resources/PXRDIP_IMAGE-PLATE-U_bnd.txt diff --git a/hexrd/resources/TARDIS_IMAGE-PLATE-2_bnd.txt b/hexrd/core/resources/TARDIS_IMAGE-PLATE-2_bnd.txt similarity index 100% rename from hexrd/resources/TARDIS_IMAGE-PLATE-2_bnd.txt rename to hexrd/core/resources/TARDIS_IMAGE-PLATE-2_bnd.txt diff --git a/hexrd/resources/TARDIS_IMAGE-PLATE-3_bnd.txt 
b/hexrd/core/resources/TARDIS_IMAGE-PLATE-3_bnd.txt similarity index 100% rename from hexrd/resources/TARDIS_IMAGE-PLATE-3_bnd.txt rename to hexrd/core/resources/TARDIS_IMAGE-PLATE-3_bnd.txt diff --git a/hexrd/resources/TARDIS_IMAGE-PLATE-3_bnd_cropped.txt b/hexrd/core/resources/TARDIS_IMAGE-PLATE-3_bnd_cropped.txt similarity index 100% rename from hexrd/resources/TARDIS_IMAGE-PLATE-3_bnd_cropped.txt rename to hexrd/core/resources/TARDIS_IMAGE-PLATE-3_bnd_cropped.txt diff --git a/hexrd/resources/TARDIS_IMAGE-PLATE-4_bnd.txt b/hexrd/core/resources/TARDIS_IMAGE-PLATE-4_bnd.txt similarity index 100% rename from hexrd/resources/TARDIS_IMAGE-PLATE-4_bnd.txt rename to hexrd/core/resources/TARDIS_IMAGE-PLATE-4_bnd.txt diff --git a/hexrd/ipfcolor/__init__.py b/hexrd/core/resources/__init__.py similarity index 100% rename from hexrd/ipfcolor/__init__.py rename to hexrd/core/resources/__init__.py diff --git a/hexrd/resources/characteristic_xray_energies.h5 b/hexrd/core/resources/characteristic_xray_energies.h5 similarity index 100% rename from hexrd/resources/characteristic_xray_energies.h5 rename to hexrd/core/resources/characteristic_xray_energies.h5 diff --git a/hexrd/resources/detector_templates/GE-detector.yml b/hexrd/core/resources/detector_templates/GE-detector.yml similarity index 100% rename from hexrd/resources/detector_templates/GE-detector.yml rename to hexrd/core/resources/detector_templates/GE-detector.yml diff --git a/hexrd/resources/detector_templates/Hydra_Feb19.yml b/hexrd/core/resources/detector_templates/Hydra_Feb19.yml similarity index 100% rename from hexrd/resources/detector_templates/Hydra_Feb19.yml rename to hexrd/core/resources/detector_templates/Hydra_Feb19.yml diff --git a/hexrd/resources/detector_templates/Pilatus3X_2M-detector.yml b/hexrd/core/resources/detector_templates/Pilatus3X_2M-detector.yml similarity index 100% rename from hexrd/resources/detector_templates/Pilatus3X_2M-detector.yml rename to 
hexrd/core/resources/detector_templates/Pilatus3X_2M-detector.yml diff --git a/hexrd/resources/detector_templates/Pixirad2-detector.yml b/hexrd/core/resources/detector_templates/Pixirad2-detector.yml similarity index 100% rename from hexrd/resources/detector_templates/Pixirad2-detector.yml rename to hexrd/core/resources/detector_templates/Pixirad2-detector.yml diff --git a/hexrd/resources/detector_templates/Varex_4343CT-detector.yml b/hexrd/core/resources/detector_templates/Varex_4343CT-detector.yml similarity index 100% rename from hexrd/resources/detector_templates/Varex_4343CT-detector.yml rename to hexrd/core/resources/detector_templates/Varex_4343CT-detector.yml diff --git a/hexrd/preprocess/__init__.py b/hexrd/core/resources/detector_templates/__init__.py similarity index 100% rename from hexrd/preprocess/__init__.py rename to hexrd/core/resources/detector_templates/__init__.py diff --git a/hexrd/resources/detector_templates/dexela-2923-detector-subpanel.yml b/hexrd/core/resources/detector_templates/dexela-2923-detector-subpanel.yml similarity index 100% rename from hexrd/resources/detector_templates/dexela-2923-detector-subpanel.yml rename to hexrd/core/resources/detector_templates/dexela-2923-detector-subpanel.yml diff --git a/hexrd/resources/detector_templates/dexela-2923-detector.yml b/hexrd/core/resources/detector_templates/dexela-2923-detector.yml similarity index 100% rename from hexrd/resources/detector_templates/dexela-2923-detector.yml rename to hexrd/core/resources/detector_templates/dexela-2923-detector.yml diff --git a/hexrd/resources/fiddle_reference_config.yml b/hexrd/core/resources/fiddle_reference_config.yml similarity index 100% rename from hexrd/resources/fiddle_reference_config.yml rename to hexrd/core/resources/fiddle_reference_config.yml diff --git a/hexrd/projections/__init__.py b/hexrd/core/resources/instrument_templates/__init__.py similarity index 100% rename from hexrd/projections/__init__.py rename to 
hexrd/core/resources/instrument_templates/__init__.py diff --git a/hexrd/resources/instrument_templates/dcs.yml b/hexrd/core/resources/instrument_templates/dcs.yml similarity index 100% rename from hexrd/resources/instrument_templates/dcs.yml rename to hexrd/core/resources/instrument_templates/dcs.yml diff --git a/hexrd/resources/instrument_templates/dual_dexelas.yml b/hexrd/core/resources/instrument_templates/dual_dexelas.yml similarity index 100% rename from hexrd/resources/instrument_templates/dual_dexelas.yml rename to hexrd/core/resources/instrument_templates/dual_dexelas.yml diff --git a/hexrd/resources/instrument_templates/rigaku.hexrd b/hexrd/core/resources/instrument_templates/rigaku.hexrd similarity index 100% rename from hexrd/resources/instrument_templates/rigaku.hexrd rename to hexrd/core/resources/instrument_templates/rigaku.hexrd diff --git a/hexrd/resources/instrument_templates/varex.yml b/hexrd/core/resources/instrument_templates/varex.yml similarity index 100% rename from hexrd/resources/instrument_templates/varex.yml rename to hexrd/core/resources/instrument_templates/varex.yml diff --git a/hexrd/resources/mu_en.h5 b/hexrd/core/resources/mu_en.h5 similarity index 100% rename from hexrd/resources/mu_en.h5 rename to hexrd/core/resources/mu_en.h5 diff --git a/hexrd/resources/pinhole_materials.h5 b/hexrd/core/resources/pinhole_materials.h5 similarity index 100% rename from hexrd/resources/pinhole_materials.h5 rename to hexrd/core/resources/pinhole_materials.h5 diff --git a/hexrd/resources/pxrdip_reference_config.yml b/hexrd/core/resources/pxrdip_reference_config.yml old mode 100755 new mode 100644 similarity index 100% rename from hexrd/resources/pxrdip_reference_config.yml rename to hexrd/core/resources/pxrdip_reference_config.yml diff --git a/hexrd/resources/tardis_2xrs_reference_config.yml b/hexrd/core/resources/tardis_2xrs_reference_config.yml similarity index 100% rename from hexrd/resources/tardis_2xrs_reference_config.yml rename to 
hexrd/core/resources/tardis_2xrs_reference_config.yml diff --git a/hexrd/resources/tardis_reference_config.yml b/hexrd/core/resources/tardis_reference_config.yml similarity index 100% rename from hexrd/resources/tardis_reference_config.yml rename to hexrd/core/resources/tardis_reference_config.yml diff --git a/hexrd/resources/window_materials.h5 b/hexrd/core/resources/window_materials.h5 similarity index 100% rename from hexrd/resources/window_materials.h5 rename to hexrd/core/resources/window_materials.h5 diff --git a/hexrd/rotations.py b/hexrd/core/rotations.py similarity index 97% rename from hexrd/rotations.py rename to hexrd/core/rotations.py index bf8cfc828..8e8fc4c27 100644 --- a/hexrd/rotations.py +++ b/hexrd/core/rotations.py @@ -35,16 +35,16 @@ from scipy.optimize import leastsq from scipy.spatial.transform import Rotation as R -from hexrd.deprecation import deprecated -from hexrd import constants as cnst -from hexrd.matrixutil import ( +from hexrd.core.deprecation import deprecated +from hexrd.core import constants as cnst +from hexrd.core.matrixutil import ( columnNorm, unitVector, findDuplicateVectors, multMatArray, nullSpace, ) -from hexrd.utils.warnings import ignore_warnings +from hexrd.core.utils.warnings import ignore_warnings # ============================================================================= @@ -95,6 +95,7 @@ def arccosSafe(cosines): raise RuntimeError("unrecoverable error") return np.arccos(np.clip(cosines, -1.0, 1.0)) + # # ==================== Quaternions # @@ -127,7 +128,7 @@ def fixQuat(q): qfix = unitVector(q) - q0negative = qfix[0, ] < 0 + q0negative = qfix[0,] < 0 qfix[:, q0negative] = -1 * qfix[:, q0negative] if qdims == 3: @@ -316,15 +317,47 @@ def quatProductMatrix(quats, mult='right'): q2 = quats[2, :].copy() q3 = quats[3, :].copy() if mult == 'right': - qmats = np.array([[q0], [q1], [q2], [q3], - [-q1], [q0], [-q3], [q2], - [-q2], [q3], [q0], [-q1], - [-q3], [-q2], [q1], [q0]]) + qmats = np.array( + [ + [q0], + [q1], + 
[q2], + [q3], + [-q1], + [q0], + [-q3], + [q2], + [-q2], + [q3], + [q0], + [-q1], + [-q3], + [-q2], + [q1], + [q0], + ] + ) elif mult == 'left': - qmats = np.array([[q0], [q1], [q2], [q3], - [-q1], [q0], [q3], [-q2], - [-q2], [-q3], [q0], [q1], - [-q3], [q2], [-q1], [q0]]) + qmats = np.array( + [ + [q0], + [q1], + [q2], + [q3], + [-q1], + [q0], + [q3], + [-q2], + [-q2], + [-q3], + [q0], + [q1], + [-q3], + [q2], + [-q1], + [q0], + ] + ) # some fancy reshuffling... qmats = qmats.T.reshape((nq, 4, 4)).transpose(0, 2, 1) return qmats @@ -637,8 +670,7 @@ def angleAxisOfRotMat(rot_mat): else: raise RuntimeError( "rot_mat array must be (3, 3) or (n, 3, 3); " - "input has dimension %d" - % (rdim) + "input has dimension %d" % (rdim) ) rot_vec = R.from_matrix(rot_mat).as_rotvec() @@ -1095,9 +1127,7 @@ def mapAngle(ang, ang_range=None, units=angularUnits): elif units.lower() == 'radians': period = 2.0 * np.pi else: - raise RuntimeError( - "unknown angular units: " + units - ) + raise RuntimeError("unknown angular units: " + units) ang = np.nan_to_num(np.atleast_1d(np.float_(ang))) @@ -1111,7 +1141,7 @@ def mapAngle(ang, ang_range=None, units=angularUnits): min_val = ang_range.min() max_val = ang_range.max() - if not np.allclose(max_val-min_val, period): + if not np.allclose(max_val - min_val, period): raise RuntimeError('range is incomplete!') val = np.mod(ang - min_val, max_val - min_val) + min_val @@ -1504,8 +1534,8 @@ def quatOfLaueGroup(tag): + "Oh, and have a great day ;-)" ) - angle = angleAxis[0, ] - axis = angleAxis[1:, ] + angle = angleAxis[0,] + axis = angleAxis[1:,] # Note: Axis does not need to be normalized in call to quatOfAngleAxis # 05/01/2014 JVB -- made output a contiguous C-ordered array diff --git a/hexrd/transforms/Makefile b/hexrd/core/transforms/Makefile similarity index 100% rename from hexrd/transforms/Makefile rename to hexrd/core/transforms/Makefile diff --git a/hexrd/transforms/__init__.py b/hexrd/core/transforms/__init__.py similarity index 82% 
rename from hexrd/transforms/__init__.py rename to hexrd/core/transforms/__init__.py index a39f26377..12ff60c85 100644 --- a/hexrd/transforms/__init__.py +++ b/hexrd/core/transforms/__init__.py @@ -1,29 +1,28 @@ # ============================================================ -# Copyright (c) 2012, Lawrence Livermore National Security, LLC. -# Produced at the Lawrence Livermore National Laboratory. -# Written by Joel Bernier and others. -# LLNL-CODE-529294. +# Copyright (c) 2012, Lawrence Livermore National Security, LLC. +# Produced at the Lawrence Livermore National Laboratory. +# Written by Joel Bernier and others. +# LLNL-CODE-529294. # All rights reserved. -# +# # This file is part of HEXRD. For details on dowloading the source, # see the file COPYING. -# +# # Please also see the file LICENSE. -# +# # This program is free software; you can redistribute it and/or modify it under the # terms of the GNU Lesser General Public License (as published by the Free Software # Foundation) version 2.1 dated February 1999. -# +# # This program is distributed in the hope that it will be useful, but -# WITHOUT ANY WARRANTY; without even the IMPLIED WARRANTY OF MERCHANTABILITY -# or FITNESS FOR A PARTICULAR PURPOSE. See the terms and conditions of the +# WITHOUT ANY WARRANTY; without even the IMPLIED WARRANTY OF MERCHANTABILITY +# or FITNESS FOR A PARTICULAR PURPOSE. See the terms and conditions of the # GNU General Public License for more details. -# +# # You should have received a copy of the GNU Lesser General Public # License along with this program (see file LICENSE); if not, write to # the Free Software Foundation, Inc., 59 Temple Place, Suite 330, # Boston, MA 02111-1307 USA or visit . # ============================================================ -"""Tools or X-ray diffraction analysis -""" +"""Tools or X-ray diffraction analysis""" from . 
import xfcapi diff --git a/hexrd/transforms/cpp_sublibrary/Makefile b/hexrd/core/transforms/cpp_sublibrary/Makefile similarity index 100% rename from hexrd/transforms/cpp_sublibrary/Makefile rename to hexrd/core/transforms/cpp_sublibrary/Makefile diff --git a/hexrd/transforms/cpp_sublibrary/src/inverse_distortion.cpp b/hexrd/core/transforms/cpp_sublibrary/src/inverse_distortion.cpp similarity index 100% rename from hexrd/transforms/cpp_sublibrary/src/inverse_distortion.cpp rename to hexrd/core/transforms/cpp_sublibrary/src/inverse_distortion.cpp diff --git a/hexrd/transforms/cpp_sublibrary/src/transforms.cpp b/hexrd/core/transforms/cpp_sublibrary/src/transforms.cpp similarity index 100% rename from hexrd/transforms/cpp_sublibrary/src/transforms.cpp rename to hexrd/core/transforms/cpp_sublibrary/src/transforms.cpp diff --git a/hexrd/transforms/debug_helpers.h b/hexrd/core/transforms/debug_helpers.h similarity index 100% rename from hexrd/transforms/debug_helpers.h rename to hexrd/core/transforms/debug_helpers.h diff --git a/hexrd/transforms/new_capi/README.md b/hexrd/core/transforms/new_capi/README.md similarity index 100% rename from hexrd/transforms/new_capi/README.md rename to hexrd/core/transforms/new_capi/README.md diff --git a/hexrd/transforms/new_capi/angles_to_dvec.c b/hexrd/core/transforms/new_capi/angles_to_dvec.c similarity index 100% rename from hexrd/transforms/new_capi/angles_to_dvec.c rename to hexrd/core/transforms/new_capi/angles_to_dvec.c diff --git a/hexrd/transforms/new_capi/angles_to_gvec.c b/hexrd/core/transforms/new_capi/angles_to_gvec.c similarity index 100% rename from hexrd/transforms/new_capi/angles_to_gvec.c rename to hexrd/core/transforms/new_capi/angles_to_gvec.c diff --git a/hexrd/transforms/new_capi/gvec_to_xy.c b/hexrd/core/transforms/new_capi/gvec_to_xy.c similarity index 100% rename from hexrd/transforms/new_capi/gvec_to_xy.c rename to hexrd/core/transforms/new_capi/gvec_to_xy.c diff --git 
a/hexrd/transforms/new_capi/make_beam_rmat.c b/hexrd/core/transforms/new_capi/make_beam_rmat.c similarity index 100% rename from hexrd/transforms/new_capi/make_beam_rmat.c rename to hexrd/core/transforms/new_capi/make_beam_rmat.c diff --git a/hexrd/transforms/new_capi/make_binary_rmat.c b/hexrd/core/transforms/new_capi/make_binary_rmat.c similarity index 100% rename from hexrd/transforms/new_capi/make_binary_rmat.c rename to hexrd/core/transforms/new_capi/make_binary_rmat.c diff --git a/hexrd/transforms/new_capi/make_detector_rmat.c b/hexrd/core/transforms/new_capi/make_detector_rmat.c similarity index 100% rename from hexrd/transforms/new_capi/make_detector_rmat.c rename to hexrd/core/transforms/new_capi/make_detector_rmat.c diff --git a/hexrd/transforms/new_capi/make_rmat_of_expmap.c b/hexrd/core/transforms/new_capi/make_rmat_of_expmap.c similarity index 100% rename from hexrd/transforms/new_capi/make_rmat_of_expmap.c rename to hexrd/core/transforms/new_capi/make_rmat_of_expmap.c diff --git a/hexrd/transforms/new_capi/make_sample_rmat.c b/hexrd/core/transforms/new_capi/make_sample_rmat.c similarity index 100% rename from hexrd/transforms/new_capi/make_sample_rmat.c rename to hexrd/core/transforms/new_capi/make_sample_rmat.c diff --git a/hexrd/transforms/new_capi/module.c b/hexrd/core/transforms/new_capi/module.c similarity index 100% rename from hexrd/transforms/new_capi/module.c rename to hexrd/core/transforms/new_capi/module.c diff --git a/hexrd/transforms/new_capi/ndargs_helper.c b/hexrd/core/transforms/new_capi/ndargs_helper.c similarity index 100% rename from hexrd/transforms/new_capi/ndargs_helper.c rename to hexrd/core/transforms/new_capi/ndargs_helper.c diff --git a/hexrd/transforms/new_capi/ndargs_helper.h b/hexrd/core/transforms/new_capi/ndargs_helper.h similarity index 100% rename from hexrd/transforms/new_capi/ndargs_helper.h rename to hexrd/core/transforms/new_capi/ndargs_helper.h diff --git a/hexrd/transforms/new_capi/new_func.c 
b/hexrd/core/transforms/new_capi/new_func.c similarity index 100% rename from hexrd/transforms/new_capi/new_func.c rename to hexrd/core/transforms/new_capi/new_func.c diff --git a/hexrd/transforms/new_capi/oscill_angles_of_HKLs.c b/hexrd/core/transforms/new_capi/oscill_angles_of_HKLs.c similarity index 100% rename from hexrd/transforms/new_capi/oscill_angles_of_HKLs.c rename to hexrd/core/transforms/new_capi/oscill_angles_of_HKLs.c diff --git a/hexrd/transforms/new_capi/quat_distance.c b/hexrd/core/transforms/new_capi/quat_distance.c similarity index 100% rename from hexrd/transforms/new_capi/quat_distance.c rename to hexrd/core/transforms/new_capi/quat_distance.c diff --git a/hexrd/transforms/new_capi/reference.py b/hexrd/core/transforms/new_capi/reference.py similarity index 97% rename from hexrd/transforms/new_capi/reference.py rename to hexrd/core/transforms/new_capi/reference.py index e90e49e06..36dfe681e 100644 --- a/hexrd/transforms/new_capi/reference.py +++ b/hexrd/core/transforms/new_capi/reference.py @@ -4,6 +4,7 @@ # easily testable and clear. They will be use to test against in unit tests. # They may be slow and not vectorized. + def intersect_ray_plane(ro, rv, p): ''' ray-plane intersection @@ -51,6 +52,6 @@ def intersect_ray_plane(ro, rv, p): # the behavior of the function actually relies in IEEE754 with a division # by 0 generating the appropriate infinity, or a NAN if it is a 0/0. 
with np.errstate(divide='ignore', invalid='ignore'): - t = (D - normal@ro)/(normal@rv) + t = (D - normal @ ro) / (normal @ rv) return t diff --git a/hexrd/transforms/new_capi/rotate_vecs_about_axis.c b/hexrd/core/transforms/new_capi/rotate_vecs_about_axis.c similarity index 100% rename from hexrd/transforms/new_capi/rotate_vecs_about_axis.c rename to hexrd/core/transforms/new_capi/rotate_vecs_about_axis.c diff --git a/hexrd/transforms/new_capi/transforms_prototypes.h b/hexrd/core/transforms/new_capi/transforms_prototypes.h similarity index 100% rename from hexrd/transforms/new_capi/transforms_prototypes.h rename to hexrd/core/transforms/new_capi/transforms_prototypes.h diff --git a/hexrd/transforms/new_capi/transforms_types.h b/hexrd/core/transforms/new_capi/transforms_types.h similarity index 100% rename from hexrd/transforms/new_capi/transforms_types.h rename to hexrd/core/transforms/new_capi/transforms_types.h diff --git a/hexrd/transforms/new_capi/transforms_utils.h b/hexrd/core/transforms/new_capi/transforms_utils.h similarity index 100% rename from hexrd/transforms/new_capi/transforms_utils.h rename to hexrd/core/transforms/new_capi/transforms_utils.h diff --git a/hexrd/transforms/new_capi/unit_row_vector.c b/hexrd/core/transforms/new_capi/unit_row_vector.c similarity index 100% rename from hexrd/transforms/new_capi/unit_row_vector.c rename to hexrd/core/transforms/new_capi/unit_row_vector.c diff --git a/hexrd/transforms/new_capi/validate_angle_ranges.c b/hexrd/core/transforms/new_capi/validate_angle_ranges.c similarity index 100% rename from hexrd/transforms/new_capi/validate_angle_ranges.c rename to hexrd/core/transforms/new_capi/validate_angle_ranges.c diff --git a/hexrd/transforms/new_capi/xf_new_capi.py b/hexrd/core/transforms/new_capi/xf_new_capi.py similarity index 99% rename from hexrd/transforms/new_capi/xf_new_capi.py rename to hexrd/core/transforms/new_capi/xf_new_capi.py index 33f31da75..287a829c7 100644 --- 
a/hexrd/transforms/new_capi/xf_new_capi.py +++ b/hexrd/core/transforms/new_capi/xf_new_capi.py @@ -26,10 +26,10 @@ from typing import Optional, Tuple, Union import numpy as np -from hexrd.extensions import _new_transforms_capi as _impl -from hexrd.extensions import transforms as cpp_transforms -from hexrd.distortion.distortionabc import DistortionABC -from hexrd import constants as cnst +from hexrd.core.extensions import _new_transforms_capi as _impl +from hexrd.core.extensions import transforms as cpp_transforms +from hexrd.core.distortion.distortionabc import DistortionABC +from hexrd.core import constants as cnst def angles_to_gvec( diff --git a/hexrd/transforms/new_capi/xy_to_gvec.c b/hexrd/core/transforms/new_capi/xy_to_gvec.c similarity index 100% rename from hexrd/transforms/new_capi/xy_to_gvec.c rename to hexrd/core/transforms/new_capi/xy_to_gvec.c diff --git a/hexrd/transforms/old_xfcapi.py b/hexrd/core/transforms/old_xfcapi.py similarity index 83% rename from hexrd/transforms/old_xfcapi.py rename to hexrd/core/transforms/old_xfcapi.py index 1b328af6a..220094c68 100644 --- a/hexrd/transforms/old_xfcapi.py +++ b/hexrd/core/transforms/old_xfcapi.py @@ -29,28 +29,29 @@ import numpy as np import sys -from hexrd.extensions import _transforms_CAPI +from hexrd.core.extensions import _transforms_CAPI + # Imports so that others can import from this module -from hexrd.rotations import mapAngle -from hexrd.matrixutil import columnNorm, rowNorm +from hexrd.core.rotations import mapAngle +from hexrd.core.matrixutil import columnNorm, rowNorm # ###################################################################### # Module Data -epsf = np.finfo(float).eps # ~2.2e-16 -ten_epsf = 10 * epsf # ~2.2e-15 -sqrt_epsf = np.sqrt(epsf) # ~1.5e-8 +epsf = np.finfo(float).eps # ~2.2e-16 +ten_epsf = 10 * epsf # ~2.2e-15 +sqrt_epsf = np.sqrt(epsf) # ~1.5e-8 -periodDict = {'degrees': 360.0, 'radians': 2*np.pi} -angularUnits = 'radians' # module-level angle units +periodDict = 
{'degrees': 360.0, 'radians': 2 * np.pi} +angularUnits = 'radians' # module-level angle units # basis vectors -I3 = np.eye(3) # (3, 3) identity -Xl = np.ascontiguousarray(I3[:, 0].reshape(3, 1)) # X in the lab frame -Yl = np.ascontiguousarray(I3[:, 1].reshape(3, 1)) # Y in the lab frame -Zl = np.ascontiguousarray(I3[:, 2].reshape(3, 1)) # Z in the lab frame +I3 = np.eye(3) # (3, 3) identity +Xl = np.ascontiguousarray(I3[:, 0].reshape(3, 1)) # X in the lab frame +Yl = np.ascontiguousarray(I3[:, 1].reshape(3, 1)) # Y in the lab frame +Zl = np.ascontiguousarray(I3[:, 2].reshape(3, 1)) # Z in the lab frame # reference stretch -vInv_ref = np.array([[1., 1., 1., 0., 0., 0.]], order='C').T +vInv_ref = np.array([[1.0, 1.0, 1.0, 0.0, 0.0, 0.0]], order='C').T # reference beam direction and eta=0 ref in LAB FRAME for standard geometry bVec_ref = -Zl @@ -60,7 +61,7 @@ # Funtions -def anglesToGVec(angs, bHat_l=bVec_ref, eHat_l=eta_ref, chi=0., rMat_c=I3): +def anglesToGVec(angs, bHat_l=bVec_ref, eHat_l=eta_ref, chi=0.0, rMat_c=I3): """ from 'eta' frame out to lab (with handy kwargs to go to crystal or sample) @@ -72,12 +73,10 @@ def anglesToGVec(angs, bHat_l=bVec_ref, eHat_l=eta_ref, chi=0., rMat_c=I3): eHat_l = np.ascontiguousarray(eHat_l.flatten()) rMat_c = np.ascontiguousarray(rMat_c) chi = float(chi) - return _transforms_CAPI.anglesToGVec(angs, - bHat_l, eHat_l, - chi, rMat_c) + return _transforms_CAPI.anglesToGVec(angs, bHat_l, eHat_l, chi, rMat_c) -def anglesToDVec(angs, bHat_l=bVec_ref, eHat_l=eta_ref, chi=0., rMat_c=I3): +def anglesToDVec(angs, bHat_l=bVec_ref, eHat_l=eta_ref, chi=0.0, rMat_c=I3): """ from 'eta' frame out to lab (with handy kwargs to go to crystal or sample) @@ -89,9 +88,7 @@ def anglesToDVec(angs, bHat_l=bVec_ref, eHat_l=eta_ref, chi=0., rMat_c=I3): eHat_l = np.ascontiguousarray(eHat_l.flatten()) rMat_c = np.ascontiguousarray(rMat_c) chi = float(chi) - return _transforms_CAPI.anglesToDVec(angs, - bHat_l, eHat_l, - chi, rMat_c) + return 
_transforms_CAPI.anglesToDVec(angs, bHat_l, eHat_l, chi, rMat_c) def makeGVector(hkl, bMat): @@ -117,10 +114,9 @@ def makeGVector(hkl, bMat): return unitRowVector(np.dot(bMat, hkl)) -def gvecToDetectorXY(gVec_c, - rMat_d, rMat_s, rMat_c, - tVec_d, tVec_s, tVec_c, - beamVec=bVec_ref): +def gvecToDetectorXY( + gVec_c, rMat_d, rMat_s, rMat_c, tVec_d, tVec_s, tVec_c, beamVec=bVec_ref +): """ Takes a list of unit reciprocal lattice vectors in crystal frame to the specified detector-relative frame, subject to the conditions: @@ -156,16 +152,14 @@ def gvecToDetectorXY(gVec_c, tVec_s = np.ascontiguousarray(tVec_s.flatten()) tVec_c = np.ascontiguousarray(tVec_c.flatten()) beamVec = np.ascontiguousarray(beamVec.flatten()) - return _transforms_CAPI.gvecToDetectorXY(gVec_c, - rMat_d, rMat_s, rMat_c, - tVec_d, tVec_s, tVec_c, - beamVec) + return _transforms_CAPI.gvecToDetectorXY( + gVec_c, rMat_d, rMat_s, rMat_c, tVec_d, tVec_s, tVec_c, beamVec + ) -def gvecToDetectorXYArray(gVec_c, - rMat_d, rMat_s, rMat_c, - tVec_d, tVec_s, tVec_c, - beamVec=bVec_ref): +def gvecToDetectorXYArray( + gVec_c, rMat_d, rMat_s, rMat_c, tVec_d, tVec_s, tVec_c, beamVec=bVec_ref +): """ Takes a list of unit reciprocal lattice vectors in crystal frame to the specified detector-relative frame, subject to the conditions: @@ -201,16 +195,21 @@ def gvecToDetectorXYArray(gVec_c, tVec_s = np.ascontiguousarray(tVec_s.flatten()) tVec_c = np.ascontiguousarray(tVec_c.flatten()) beamVec = np.ascontiguousarray(beamVec.flatten()) - return _transforms_CAPI.gvecToDetectorXYArray(gVec_c, - rMat_d, rMat_s, rMat_c, - tVec_d, tVec_s, tVec_c, - beamVec) + return _transforms_CAPI.gvecToDetectorXYArray( + gVec_c, rMat_d, rMat_s, rMat_c, tVec_d, tVec_s, tVec_c, beamVec + ) -def detectorXYToGvec(xy_det, - rMat_d, rMat_s, - tVec_d, tVec_s, tVec_c, - beamVec=bVec_ref, etaVec=eta_ref): +def detectorXYToGvec( + xy_det, + rMat_d, + rMat_s, + tVec_d, + tVec_s, + tVec_c, + beamVec=bVec_ref, + etaVec=eta_ref, +): """ Takes a list 
cartesian (x, y) pairs in the detector coordinates and calculates the associated reciprocal lattice (G) vectors and @@ -249,16 +248,21 @@ def detectorXYToGvec(xy_det, tVec_c = np.ascontiguousarray(tVec_c.flatten()) beamVec = np.ascontiguousarray(beamVec.flatten()) etaVec = np.ascontiguousarray(etaVec.flatten()) - return _transforms_CAPI.detectorXYToGvec(xy_det, - rMat_d, rMat_s, - tVec_d, tVec_s, tVec_c, - beamVec, etaVec) + return _transforms_CAPI.detectorXYToGvec( + xy_det, rMat_d, rMat_s, tVec_d, tVec_s, tVec_c, beamVec, etaVec + ) -def detectorXYToGvecArray(xy_det, - rMat_d, rMat_s, - tVec_d, tVec_s, tVec_c, - beamVec=bVec_ref, etaVec=eta_ref): +def detectorXYToGvecArray( + xy_det, + rMat_d, + rMat_s, + tVec_d, + tVec_s, + tVec_c, + beamVec=bVec_ref, + etaVec=eta_ref, +): """ Takes a list cartesian (x, y) pairs in the detector coordinates and calculates the associated reciprocal lattice (G) vectors and @@ -297,14 +301,21 @@ def detectorXYToGvecArray(xy_det, tVec_c = np.ascontiguousarray(tVec_c.flatten()) beamVec = np.ascontiguousarray(beamVec.flatten()) etaVec = np.ascontiguousarray(etaVec.flatten()) - return _transforms_CAPI.detectorXYToGvec(xy_det, - rMat_d, rMat_s, - tVec_d, tVec_s, tVec_c, - beamVec, etaVec) + return _transforms_CAPI.detectorXYToGvec( + xy_det, rMat_d, rMat_s, tVec_d, tVec_s, tVec_c, beamVec, etaVec + ) -def oscillAnglesOfHKLs(hkls, chi, rMat_c, bMat, wavelength, - vInv=None, beamVec=bVec_ref, etaVec=eta_ref): +def oscillAnglesOfHKLs( + hkls, + chi, + rMat_c, + bMat, + wavelength, + vInv=None, + beamVec=bVec_ref, + etaVec=eta_ref, +): """ Takes a list of unit reciprocal lattice vectors in crystal frame to the specified detector-relative frame, subject to the conditions: @@ -416,8 +427,8 @@ def arccosSafe(temp): print("attempt to take arccos of %s" % temp, file=sys.stderr) raise RuntimeError("unrecoverable error") - gte1 = temp >= 1. - lte1 = temp <= -1. 
+ gte1 = temp >= 1.0 + lte1 = temp <= -1.0 temp[gte1] = 1 temp[lte1] = -1 @@ -437,7 +448,7 @@ def angularDifference(angList0, angList1, units=angularUnits): # take difference as arrays diffAngles = np.atleast_1d(angList0) - np.atleast_1d(angList1) - return abs(np.remainder(diffAngles + 0.5*period, period) - 0.5*period) + return abs(np.remainder(diffAngles + 0.5 * period, period) - 0.5 * period) def unitRowVector(vecIn): @@ -447,8 +458,10 @@ def unitRowVector(vecIn): elif vecIn.ndim == 2: return _transforms_CAPI.unitRowVectors(vecIn) else: - assert vecIn.ndim in [1, 2], \ - "arg shape must be 1-d or 2-d, yours is %d-d" % (vecIn.ndim) + assert vecIn.ndim in [ + 1, + 2, + ], "arg shape must be 1-d or 2-d, yours is %d-d" % (vecIn.ndim) def makeDetectorRotMat(tiltAngles): @@ -536,5 +549,6 @@ def homochoricOfQuat(quats): q = np.ascontiguousarray(quats.T) return _transforms_CAPI.homochoricOfQuat(q) + # def rotateVecsAboutAxis(angle, axis, vecs): # return _transforms_CAPI.rotateVecsAboutAxis(angle, axis, vecs) diff --git a/hexrd/transforms/stdbool.h b/hexrd/core/transforms/stdbool.h similarity index 100% rename from hexrd/transforms/stdbool.h rename to hexrd/core/transforms/stdbool.h diff --git a/hexrd/transforms/transforms_CAPI.c b/hexrd/core/transforms/transforms_CAPI.c similarity index 100% rename from hexrd/transforms/transforms_CAPI.c rename to hexrd/core/transforms/transforms_CAPI.c diff --git a/hexrd/transforms/transforms_CAPI.h b/hexrd/core/transforms/transforms_CAPI.h similarity index 100% rename from hexrd/transforms/transforms_CAPI.h rename to hexrd/core/transforms/transforms_CAPI.h diff --git a/hexrd/transforms/transforms_CFUNC.c b/hexrd/core/transforms/transforms_CFUNC.c similarity index 100% rename from hexrd/transforms/transforms_CFUNC.c rename to hexrd/core/transforms/transforms_CFUNC.c diff --git a/hexrd/transforms/transforms_CFUNC.h b/hexrd/core/transforms/transforms_CFUNC.h similarity index 100% rename from hexrd/transforms/transforms_CFUNC.h rename to 
hexrd/core/transforms/transforms_CFUNC.h diff --git a/hexrd/transforms/xf.py b/hexrd/core/transforms/xf.py similarity index 99% rename from hexrd/transforms/xf.py rename to hexrd/core/transforms/xf.py index 74c62de70..32be30ff3 100644 --- a/hexrd/transforms/xf.py +++ b/hexrd/core/transforms/xf.py @@ -34,14 +34,17 @@ import scipy.sparse as sparse -from hexrd import matrixutil as mutil +from hexrd.core import matrixutil as mutil # Added to not break people importing these methods -from hexrd.rotations import (mapAngle, - quatProductMatrix as quat_product_matrix, - arccosSafe, angularDifference) -from hexrd.matrixutil import columnNorm, rowNorm +from hexrd.core.rotations import ( + mapAngle, + quatProductMatrix as quat_product_matrix, + arccosSafe, + angularDifference, +) +from hexrd.core.matrixutil import columnNorm, rowNorm # ============================================================================= diff --git a/hexrd/core/transforms/xfcapi.py b/hexrd/core/transforms/xfcapi.py new file mode 100644 index 000000000..1e96661b4 --- /dev/null +++ b/hexrd/core/transforms/xfcapi.py @@ -0,0 +1,43 @@ +# We will replace these functions with the new versions as we +# add and test them. +# NOTE: we are only importing what is currently being used in hexrd +# and hexrdgui. This is so that we can see clearly what is in use. 
+from .old_xfcapi import ( + anglesToDVec, + anglesToGVec, + detectorXYToGvec, + gvecToDetectorXY, + gvecToDetectorXYArray, + oscillAnglesOfHKLs, + angularDifference, + makeDetectorRotMat, + makeEtaFrameRotMat, + makeOscillRotMat, + makeOscillRotMatArray, + makeRotMatOfExpMap, + makeRotMatOfQuat, + mapAngle, + rowNorm, + unitRowVector, + bVec_ref, + eta_ref, + Xl, + Yl, +) + + +from .new_capi.xf_new_capi import ( + angles_to_dvec, + angles_to_gvec, + gvec_to_xy, + make_beam_rmat, + make_detector_rmat, + make_rmat_of_expmap, + make_sample_rmat, + oscill_angles_of_hkls, + quat_distance, + rotate_vecs_about_axis, + unit_vector, + validate_angle_ranges, + xy_to_gvec, +) diff --git a/hexrd/utils/__init__.py b/hexrd/core/utils/__init__.py similarity index 100% rename from hexrd/utils/__init__.py rename to hexrd/core/utils/__init__.py diff --git a/hexrd/utils/compatibility.py b/hexrd/core/utils/compatibility.py similarity index 100% rename from hexrd/utils/compatibility.py rename to hexrd/core/utils/compatibility.py diff --git a/hexrd/utils/concurrent.py b/hexrd/core/utils/concurrent.py similarity index 100% rename from hexrd/utils/concurrent.py rename to hexrd/core/utils/concurrent.py diff --git a/hexrd/utils/decorators.py b/hexrd/core/utils/decorators.py similarity index 94% rename from hexrd/utils/decorators.py rename to hexrd/core/utils/decorators.py index 81c254247..b9aa391eb 100644 --- a/hexrd/utils/decorators.py +++ b/hexrd/core/utils/decorators.py @@ -2,8 +2,8 @@ """Decorators that don't go anywhere else. This module contains decorators that don't really go with another module -in :mod:`hexrd.utils`. Before putting something here please see if it should -go into another topical module in :mod:`hexrd.utils`. +in :mod:`hexrd.core.utils`. Before putting something here please see if it should +go into another topical module in :mod:`hexrd.core.utils`. 
""" from collections import OrderedDict @@ -112,8 +112,7 @@ def convert(x): # Create an sha1 of the data, and throw in a string # and the shape. x = np.ascontiguousarray(x) - return ('__type_np.ndarray', x.shape, - xxhash.xxh3_128_hexdigest(x)) + return ('__type_np.ndarray', x.shape, xxhash.xxh3_128_hexdigest(x)) elif isinstance(x, (list, tuple)): return _make_hashable(x) elif isinstance(x, dict): diff --git a/hexrd/utils/hdf5.py b/hexrd/core/utils/hdf5.py similarity index 88% rename from hexrd/utils/hdf5.py rename to hexrd/core/utils/hdf5.py index f9380daa0..e95310169 100644 --- a/hexrd/utils/hdf5.py +++ b/hexrd/core/utils/hdf5.py @@ -1,4 +1,4 @@ -from hexrd.utils.compatibility import h5py_read_string +from hexrd.core.utils.compatibility import h5py_read_string import numpy as np @@ -45,8 +45,12 @@ def unwrap_dict_to_h5(grp, d, asattr=False): else: # probably a string badness - if isinstance(item, np.ndarray) and np.issubdtype(item.dtype, 'U'): - item = str(item) # hdf5 files do not support unicode arrays + if isinstance(item, np.ndarray) and np.issubdtype( + item.dtype, 'U' + ): + item = str( + item + ) # hdf5 files do not support unicode arrays grp.create_dataset(key, data=item) diff --git a/hexrd/utils/hkl.py b/hexrd/core/utils/hkl.py similarity index 80% rename from hexrd/utils/hkl.py rename to hexrd/core/utils/hkl.py index 10706c603..ef9a44155 100644 --- a/hexrd/utils/hkl.py +++ b/hexrd/core/utils/hkl.py @@ -1,6 +1,6 @@ import numpy as np -from hexrd.material.crystallography import hklToStr +from hexrd.core.material.crystallography import hklToStr def hkl_to_str(hkl): diff --git a/hexrd/utils/json.py b/hexrd/core/utils/json.py similarity index 92% rename from hexrd/utils/json.py rename to hexrd/core/utils/json.py index f56e707f1..f11f827ba 100644 --- a/hexrd/utils/json.py +++ b/hexrd/core/utils/json.py @@ -25,9 +25,7 @@ def default(self, obj): np.save(bytes_io, obj, allow_pickle=False) data = bytes_io.getvalue() - return { - ndarray_key: 
data.decode('raw_unicode_escape') - } + return {ndarray_key: data.decode('raw_unicode_escape')} return super().default(obj) diff --git a/hexrd/utils/multiprocess_generic.py b/hexrd/core/utils/multiprocess_generic.py similarity index 80% rename from hexrd/utils/multiprocess_generic.py rename to hexrd/core/utils/multiprocess_generic.py index f45a943e5..04a7a526f 100644 --- a/hexrd/utils/multiprocess_generic.py +++ b/hexrd/core/utils/multiprocess_generic.py @@ -17,8 +17,8 @@ def __init__(self): pass def _func_queue(self, func, q_in, q_out, *args, **kwargs): - """ - Retrive processes from the queue + """ + Retrive processes from the queue """ while True: pos, var = q_in.get() @@ -27,13 +27,18 @@ def _func_queue(self, func, q_in, q_out, *args, **kwargs): res = func(var, *args, **kwargs) q_out.put((pos, res)) - print("finished azimuthal position #", - pos, "with rwp = ", res[2]*100., "%") + print( + "finished azimuthal position #", + pos, + "with rwp = ", + res[2] * 100.0, + "%", + ) return def parallelise_function(self, var, func, *args, **kwargs): - """ - Split evaluations of func across processors + """ + Split evaluations of func across processors """ n = len(var) @@ -48,9 +53,9 @@ def parallelise_function(self, var, func, *args, **kwargs): for i in range(nprocs): pass_args = [func, q_in, q_out] - p = Process(target=self._func_queue, - args=tuple(pass_args), - kwargs=kwargs) + p = Process( + target=self._func_queue, args=tuple(pass_args), kwargs=kwargs + ) processes.append(p) diff --git a/hexrd/utils/panel_buffer.py b/hexrd/core/utils/panel_buffer.py similarity index 68% rename from hexrd/utils/panel_buffer.py rename to hexrd/core/utils/panel_buffer.py index 273eca27d..f88b408d7 100644 --- a/hexrd/utils/panel_buffer.py +++ b/hexrd/core/utils/panel_buffer.py @@ -1,6 +1,6 @@ import numpy as np -from hexrd.instrument.detector import Detector +from hexrd.core.instrument.detector import Detector def panel_buffer_as_2d_array(panel: Detector) -> np.ndarray: @@ -11,18 +11,22 
@@ def panel_buffer_as_2d_array(panel: Detector) -> np.ndarray: elif panel.panel_buffer.shape == (2,): # The two floats are specifying the borders in mm for x and y. # Convert to pixel borders. Swap x and y so we have i, j in pixels. - borders = np.round([ - panel.panel_buffer[1] / panel.pixel_size_row, - panel.panel_buffer[0] / panel.pixel_size_col, - ]).astype(int) + borders = np.round( + [ + panel.panel_buffer[1] / panel.pixel_size_row, + panel.panel_buffer[0] / panel.pixel_size_col, + ] + ).astype(int) # Convert to array panel_buffer = np.zeros(panel.shape, dtype=bool) # We can't do `-borders[i]` since that doesn't work for 0, # so we must do `panel.shape[i] - borders[i]` instead. - panel_buffer[borders[0]:panel.shape[0] - borders[0], - borders[1]:panel.shape[1] - borders[1]] = True + panel_buffer[ + borders[0] : panel.shape[0] - borders[0], + borders[1] : panel.shape[1] - borders[1], + ] = True return panel_buffer elif panel.panel_buffer.ndim == 2: return panel.panel_buffer diff --git a/hexrd/utils/profiler.py b/hexrd/core/utils/profiler.py similarity index 91% rename from hexrd/utils/profiler.py rename to hexrd/core/utils/profiler.py index aeb9076ac..142ddc56c 100644 --- a/hexrd/utils/profiler.py +++ b/hexrd/core/utils/profiler.py @@ -21,7 +21,6 @@ pass - def instrument_function(fn_desc): """Interpret a record for an instrumented function, and instrument accordingly. 
The record, fn_desc, contains: @@ -48,7 +47,7 @@ def instrument_function(fn_desc): # consume as many as possible with import (ignore last part that is # the function name) pos = 0 - for i in range(1, path_parts+1): + for i in range(1, path_parts + 1): try: m = importlib.import_module('.'.join(parts[0:i])) pos = i @@ -71,11 +70,11 @@ def instrument_function(fn_desc): warnings.warn('Could not instrument "{0}"'.format(full_name)) - def parse_file(filename): """Parse a file and instrument the associated functions""" try: import yaml + with open(filename, 'r') as f: cfg = yaml.load(f) @@ -87,8 +86,10 @@ def parse_file(filename): profile_cfg = cfg['profile'] if 'instrument' in profile_cfg: # instrument all - [instrument_function(fn_desc) for fn_desc in - profile_cfg['instrument']] + [ + instrument_function(fn_desc) + for fn_desc in profile_cfg['instrument'] + ] except Exception as e: msg = 'Failed to include profile file: {0}' warnings.warn(msg.format(filename)) @@ -96,18 +97,17 @@ def parse_file(filename): def instrument_all(filenames): - """Instrument functions based on a list of profiler configuration files. 
- - """ + """Instrument functions based on a list of profiler configuration files.""" [parse_file(filename) for filename in filenames] - + def dump_results(args): print(" STATS ".center(72, '=')) fmt = "{2:>14}, {1:>8}, {0:<40}" print(fmt.format("FUNCTION", "CALLS", "TIME")) fmt = "{2:>14F}, {1:>8}, {0:<40}" - sorted_by_time = sorted(nvtx.getstats().iteritems(), key=lambda tup: tup[1][1]) + sorted_by_time = sorted( + nvtx.getstats().iteritems(), key=lambda tup: tup[1][1] + ) for key, val in sorted_by_time: print(fmt.format(key, *val)) - diff --git a/hexrd/utils/progressbar.py b/hexrd/core/utils/progressbar.py similarity index 93% rename from hexrd/utils/progressbar.py rename to hexrd/core/utils/progressbar.py index af515b5a1..2f9c8bd66 100644 --- a/hexrd/utils/progressbar.py +++ b/hexrd/core/utils/progressbar.py @@ -6,12 +6,13 @@ class ProgressBar(_ProgressBar): "overriding the default to delete the progress bar when finished" + def finish(self): 'Puts the ProgressBar bar in the finished state.' self.finished = True self.update(self.maxval) # clear the progress bar: - self.fd.write('\r'+' '*self.term_width+'\r') + self.fd.write('\r' + ' ' * self.term_width + '\r') if self.signal_set: signal.signal(signal.SIGWINCH, signal.SIG_DFL) diff --git a/hexrd/utils/warnings.py b/hexrd/core/utils/warnings.py similarity index 100% rename from hexrd/utils/warnings.py rename to hexrd/core/utils/warnings.py diff --git a/hexrd/utils/yaml.py b/hexrd/core/utils/yaml.py similarity index 99% rename from hexrd/utils/yaml.py rename to hexrd/core/utils/yaml.py index 88555ec41..89f1eff11 100644 --- a/hexrd/utils/yaml.py +++ b/hexrd/core/utils/yaml.py @@ -11,6 +11,7 @@ class NumpyToNativeDumper(yaml.SafeDumper): For instance, np.float128 will raise an error, since it cannot be converted to a basic type. 
""" + def represent_data(self, data): if isinstance(data, np.ndarray): return self.represent_list(data.tolist()) diff --git a/hexrd/valunits.py b/hexrd/core/valunits.py similarity index 82% rename from hexrd/valunits.py rename to hexrd/core/valunits.py index 3dd0b9462..e0d00dbe7 100644 --- a/hexrd/valunits.py +++ b/hexrd/core/valunits.py @@ -37,7 +37,7 @@ import doctest import math -from hexrd.constants import keVToAngstrom +from hexrd.core.constants import keVToAngstrom __all__ = ['valWUnit', 'toFloat', 'valWithDflt'] @@ -58,6 +58,7 @@ class UNames(object): """Units used in this module""" + degrees = 'degrees' radians = 'radians' @@ -72,36 +73,31 @@ class UNames(object): cv_dict = { - (UNames.degrees, UNames.radians): math.pi/180.0, - (UNames.radians, UNames.degrees): 180/math.pi, - - (UNames.m, UNames.mm): 1.0e3, - (UNames.m, UNames.meter): 1.0, + (UNames.degrees, UNames.radians): math.pi / 180.0, + (UNames.radians, UNames.degrees): 180 / math.pi, + (UNames.m, UNames.mm): 1.0e3, + (UNames.m, UNames.meter): 1.0, (UNames.m, UNames.nm): 1.0e9, - (UNames.m, UNames.angstrom): 1.0e10, - - (UNames.meter, UNames.mm): 1.0e3, - (UNames.meter, UNames.m): 1.0, + (UNames.m, UNames.angstrom): 1.0e10, + (UNames.meter, UNames.mm): 1.0e3, + (UNames.meter, UNames.m): 1.0, (UNames.meter, UNames.nm): 1.0e9, - (UNames.meter, UNames.angstrom): 1.0e10, - - (UNames.mm, UNames.m): 1.0e-3, - (UNames.mm, UNames.meter): 1.0e-3, + (UNames.meter, UNames.angstrom): 1.0e10, + (UNames.mm, UNames.m): 1.0e-3, + (UNames.mm, UNames.meter): 1.0e-3, (UNames.mm, UNames.nm): 1.0e6, - (UNames.mm, UNames.angstrom): 1.0e7, - - (UNames.angstrom, UNames.m): 1.0e-10, - (UNames.angstrom, UNames.meter): 1.0e-10, - (UNames.angstrom, UNames.mm): 1.0e-7, + (UNames.mm, UNames.angstrom): 1.0e7, + (UNames.angstrom, UNames.m): 1.0e-10, + (UNames.angstrom, UNames.meter): 1.0e-10, + (UNames.angstrom, UNames.mm): 1.0e-7, (UNames.angstrom, UNames.nm): 1.0e-1, - (UNames.keV, UNames.J): 1.60217646e-16, - (UNames.J, 
UNames.keV): (1/1.60217646e-16) - } + (UNames.J, UNames.keV): (1 / 1.60217646e-16), +} class valWUnit: - "Value with units""" + "Value with units" "" def __init__(self, name, unitType, value, unit): """Initialization @@ -139,14 +135,15 @@ def __repr__(self): def __mul__(self, other): if isinstance(other, float): - new = valWUnit(self.name, self.uT, self.value*other, self.unit) + new = valWUnit(self.name, self.uT, self.value * other, self.unit) return new elif isinstance(other, valWUnit): - new = valWUnit('%s_times_%s' % (self.name, other.name), - '%s %s' % (self.uT, other.uT), - self.value*other.value, - '(%s)*(%s)' % (self.unit, other.unit) - ) + new = valWUnit( + '%s_times_%s' % (self.name, other.name), + '%s %s' % (self.uT, other.uT), + self.value * other.value, + '(%s)*(%s)' % (self.unit, other.unit), + ) # really need to put in here something to resolve new.uT return new else: @@ -154,12 +151,15 @@ def __mul__(self, other): def __add__(self, other): if isinstance(other, float): - new = valWUnit(self.name, self.uT, - self.value + other, self.unit) + new = valWUnit(self.name, self.uT, self.value + other, self.unit) return new elif isinstance(other, valWUnit): - new = valWUnit(self.name, self.uT, - self.value + other.getVal(self.unit), self.unit) + new = valWUnit( + self.name, + self.uT, + self.value + other.getVal(self.unit), + self.unit, + ) return new else: raise RuntimeError("add with unsupported operand") @@ -169,8 +169,12 @@ def __sub__(self, other): new = valWUnit(self.name, self.uT, self.value - other, self.unit) return new elif isinstance(other, valWUnit): - new = valWUnit(self.name, self.uT, self.value - - other.getVal(self.unit), self.unit) + new = valWUnit( + self.name, + self.uT, + self.value - other.getVal(self.unit), + self.unit, + ) return new else: raise RuntimeError("add with unsupported operand") @@ -212,13 +216,15 @@ def _convert(self, toUnit): # from_to = (self.unit, toUnit) try: - return cv_dict[from_to]*self.value - except(KeyError): + 
return cv_dict[from_to] * self.value + except KeyError: special_case = ('keV', 'angstrom') if from_to == special_case or from_to == special_case[::-1]: return keVToAngstrom(self.value) - raise RuntimeError(f"Unit conversion '{from_to[0]} --> " - + f"{from_to[1]}' not recognized") + raise RuntimeError( + f"Unit conversion '{from_to[0]} --> " + + f"{from_to[1]}' not recognized" + ) def isLength(self): """Return true if quantity is a length""" @@ -332,4 +338,5 @@ def testConversions(): for u in ulist: print((' in ', u, ': ', v.getVal(u))) return + testConversions() diff --git a/hexrd/extensions/.gitignore b/hexrd/extensions/.gitignore deleted file mode 100644 index 72e8ffc0d..000000000 --- a/hexrd/extensions/.gitignore +++ /dev/null @@ -1 +0,0 @@ -* diff --git a/hexrd/file_table.tsv b/hexrd/file_table.tsv new file mode 100644 index 000000000..ead815f99 --- /dev/null +++ b/hexrd/file_table.tsv @@ -0,0 +1,538 @@ +setup.py setup.py +docs/source/conf.py docs/source/conf.py +scripts/install/install_build_dependencies.py scripts/install/install_build_dependencies.py +hexrd/valunits.py hexrd/core/valunits.py +hexrd/fitgrains.py hexrd/hedm/fitgrains.py +hexrd/imageutil.py hexrd/core/imageutil.py +hexrd/rotations.py hexrd/core/rotations.py +hexrd/findorientations.py hexrd/hedm/findorientations.py +hexrd/deprecation.py hexrd/core/deprecation.py +hexrd/indexer.py hexrd/hedm/indexer.py +hexrd/matrixutil.py hexrd/core/matrixutil.py +hexrd/constants.py hexrd/core/constants.py +hexrd/gridutil.py hexrd/core/gridutil.py +hexrd/resources/__init__.py hexrd/core/resources/__init__.py +hexrd/resources/detector_templates/__init__.py hexrd/core/resources/detector_templates/__init__.py +hexrd/resources/instrument_templates/__init__.py hexrd/core/resources/instrument_templates/__init__.py +hexrd/cli/help.py hexrd/hedm/cli/help.py +hexrd/cli/find_orientations.py hexrd/hedm/cli/find_orientations.py +hexrd/cli/fit_grains.py hexrd/hedm/cli/fit_grains.py +hexrd/cli/__init__.py 
hexrd/hedm/cli/__init__.py +hexrd/cli/main.py hexrd/hedm/cli/main.py +hexrd/cli/test.py hexrd/hedm/cli/test.py +hexrd/cli/pickle23.py hexrd/hedm/cli/pickle23.py +hexrd/cli/documentation.py hexrd/hedm/cli/documentation.py +hexrd/config/fitgrains.py hexrd/hedm/config/fitgrains.py +hexrd/config/material.py hexrd/core/config/material.py +hexrd/config/root.py hexrd/core/config/root.py +hexrd/config/dumper.py hexrd/core/config/dumper.py +hexrd/config/loader.py hexrd/hedm/config/loader.py +hexrd/config/loader.py hexrd/core/config/loader.py +hexrd/config/__init__.py hexrd/hedm/config/__init__.py +hexrd/config/__init__.py hexrd/core/config/__init__.py +hexrd/config/findorientations.py hexrd/hedm/config/findorientations.py +hexrd/config/config.py hexrd/core/config/config.py +hexrd/config/utils.py hexrd/core/config/utils.py +hexrd/config/instrument.py hexrd/hedm/config/instrument.py +hexrd/config/instrument.py hexrd/core/config/instrument.py +hexrd/config/beam.py hexrd/core/config/beam.py +hexrd/config/imageseries.py hexrd/core/config/imageseries.py +hexrd/sampleOrientations/conversions.py hexrd/hedm/sampleOrientations/conversions.py +hexrd/sampleOrientations/rfz.py hexrd/hedm/sampleOrientations/rfz.py +hexrd/sampleOrientations/__init__.py hexrd/hedm/sampleOrientations/__init__.py +hexrd/sampleOrientations/sampleRFZ.py hexrd/hedm/sampleOrientations/sampleRFZ.py +hexrd/transforms/xf.py hexrd/core/transforms/xf.py +hexrd/transforms/xfcapi.py hexrd/core/transforms/xfcapi.py +hexrd/transforms/__init__.py hexrd/core/transforms/__init__.py +hexrd/transforms/old_xfcapi.py hexrd/core/transforms/old_xfcapi.py +hexrd/transforms/new_capi/xf_new_capi.py hexrd/core/transforms/new_capi/xf_new_capi.py +hexrd/transforms/new_capi/reference.py hexrd/core/transforms/new_capi/reference.py +hexrd/extensions/__init__.py hexrd/core/extensions/__init__.py +hexrd/imageseries/imageseriesiter.py hexrd/core/imageseries/imageseriesiter.py +hexrd/imageseries/process.py hexrd/core/imageseries/process.py 
+hexrd/imageseries/stats.py hexrd/core/imageseries/stats.py +hexrd/imageseries/baseclass.py hexrd/core/imageseries/baseclass.py +hexrd/imageseries/__init__.py hexrd/core/imageseries/__init__.py +hexrd/imageseries/imageseriesabc.py hexrd/core/imageseries/imageseriesabc.py +hexrd/imageseries/omega.py hexrd/core/imageseries/omega.py +hexrd/imageseries/save.py hexrd/core/imageseries/save.py +hexrd/imageseries/load/hdf5.py hexrd/core/imageseries/load/hdf5.py +hexrd/imageseries/load/eiger_stream_v1.py hexrd/core/imageseries/load/eiger_stream_v1.py +hexrd/imageseries/load/framecache.py hexrd/core/imageseries/load/framecache.py +hexrd/imageseries/load/imagefiles.py hexrd/core/imageseries/load/imagefiles.py +hexrd/imageseries/load/rawimage.py hexrd/core/imageseries/load/rawimage.py +hexrd/imageseries/load/array.py hexrd/core/imageseries/load/array.py +hexrd/imageseries/load/__init__.py hexrd/core/imageseries/load/__init__.py +hexrd/imageseries/load/function.py hexrd/core/imageseries/load/function.py +hexrd/imageseries/load/registry.py hexrd/core/imageseries/load/registry.py +hexrd/imageseries/load/trivial.py hexrd/core/imageseries/load/trivial.py +hexrd/imageseries/load/metadata.py hexrd/core/imageseries/load/metadata.py +hexrd/projections/polar.py hexrd/core/projections/polar.py +hexrd/projections/__init__.py hexrd/core/projections/__init__.py +hexrd/projections/spherical.py hexrd/core/projections/spherical.py +hexrd/wppf/LeBailCalibration.py hexrd/powder/wppf/LeBailCalibration.py +hexrd/wppf/spectrum.py hexrd/powder/wppf/spectrum.py +hexrd/wppf/xtal.py hexrd/powder/wppf/xtal.py +hexrd/wppf/derivatives.py hexrd/powder/wppf/derivatives.py +hexrd/wppf/WPPF.py hexrd/powder/wppf/WPPF.py +hexrd/wppf/RietveldHEDM.py hexrd/powder/wppf/RietveldHEDM.py +hexrd/wppf/__init__.py hexrd/powder/wppf/__init__.py +hexrd/wppf/wppfsupport.py hexrd/powder/wppf/wppfsupport.py +hexrd/wppf/peakfunctions.py hexrd/powder/wppf/peakfunctions.py +hexrd/wppf/texture.py hexrd/powder/wppf/texture.py 
+hexrd/wppf/parameters.py hexrd/powder/wppf/parameters.py +hexrd/wppf/phase.py hexrd/powder/wppf/phase.py +hexrd/material/mksupport.py hexrd/core/material/mksupport.py +hexrd/material/symmetry.py hexrd/core/material/symmetry.py +hexrd/material/material.py hexrd/core/material/material.py +hexrd/material/crystallography.py hexrd/core/material/crystallography.py +hexrd/material/unitcell.py hexrd/core/material/unitcell.py +hexrd/material/__init__.py hexrd/core/material/__init__.py +hexrd/material/symbols.py hexrd/core/material/symbols.py +hexrd/material/utils.py hexrd/core/material/utils.py +hexrd/material/jcpds.py hexrd/core/material/jcpds.py +hexrd/material/spacegroup.py hexrd/core/material/spacegroup.py +hexrd/utils/profiler.py hexrd/core/utils/profiler.py +hexrd/utils/hdf5.py hexrd/core/utils/hdf5.py +hexrd/utils/progressbar.py hexrd/core/utils/progressbar.py +hexrd/utils/warnings.py hexrd/core/utils/warnings.py +hexrd/utils/concurrent.py hexrd/core/utils/concurrent.py +hexrd/utils/multiprocess_generic.py hexrd/core/utils/multiprocess_generic.py +hexrd/utils/__init__.py hexrd/core/utils/__init__.py +hexrd/utils/json.py hexrd/core/utils/json.py +hexrd/utils/compatibility.py hexrd/core/utils/compatibility.py +hexrd/utils/yaml.py hexrd/core/utils/yaml.py +hexrd/utils/hkl.py hexrd/core/utils/hkl.py +hexrd/utils/decorators.py hexrd/core/utils/decorators.py +hexrd/instrument/hedm_instrument.py hexrd/hed/instrument/hedm_instrument.py +hexrd/instrument/hedm_instrument.py hexrd/hedm/instrument/hedm_instrument.py +hexrd/instrument/hedm_instrument.py hexrd/laue/instrument/hedm_instrument.py +hexrd/instrument/hedm_instrument.py hexrd/powder/instrument/hedm_instrument.py +hexrd/instrument/hedm_instrument.py hexrd/core/instrument/hedm_instrument.py +hexrd/instrument/physics_package.py hexrd/core/instrument/physics_package.py +hexrd/instrument/detector.py hexrd/core/instrument/detector.py +hexrd/instrument/cylindrical_detector.py hexrd/core/instrument/cylindrical_detector.py 
+hexrd/instrument/__init__.py hexrd/core/instrument/__init__.py +hexrd/instrument/planar_detector.py hexrd/core/instrument/planar_detector.py +hexrd/instrument/detector_coatings.py hexrd/core/instrument/detector_coatings.py +hexrd/instrument/constants.py hexrd/core/instrument/constants.py +hexrd/fitting/fitpeak.py hexrd/core/fitting/fitpeak.py +hexrd/fitting/spectrum.py hexrd/core/fitting/spectrum.py +hexrd/fitting/grains.py hexrd/hedm/fitting/grains.py +hexrd/fitting/__init__.py hexrd/core/fitting/__init__.py +hexrd/fitting/peakfunctions.py hexrd/core/fitting/peakfunctions.py +hexrd/fitting/utils.py hexrd/core/fitting/utils.py +hexrd/fitting/calibration/laue.py hexrd/laue/fitting/calibration/laue.py +hexrd/fitting/calibration/structureless.py hexrd/powder/fitting/calibration/structureless.py +hexrd/fitting/calibration/powder.py hexrd/powder/fitting/calibration/powder.py +hexrd/fitting/calibration/__init__.py hexrd/core/fitting/calibration/__init__.py +hexrd/fitting/calibration/multigrain.py hexrd/hedm/fitting/calibration/multigrain.py +hexrd/fitting/calibration/calibrator.py hexrd/powder/fitting/calibration/calibrator.py +hexrd/fitting/calibration/lmfit_param_handling.py hexrd/powder/fitting/calibration/lmfit_param_handling.py +hexrd/fitting/calibration/instrument.py hexrd/powder/fitting/calibration/instrument.py +hexrd/fitting/calibration/laue.py hexrd/core/fitting/calibration/laue.py +hexrd/fitting/calibration/structureless.py hexrd/core/fitting/calibration/structureless.py +hexrd/fitting/calibration/powder.py hexrd/core/fitting/calibration/powder.py +hexrd/fitting/calibration/multigrain.py hexrd/core/fitting/calibration/multigrain.py +hexrd/fitting/calibration/calibrator.py hexrd/core/fitting/calibration/calibrator.py +hexrd/fitting/calibration/lmfit_param_handling.py hexrd/core/fitting/calibration/lmfit_param_handling.py +hexrd/fitting/calibration/instrument.py hexrd/core/fitting/calibration/instrument.py +hexrd/ipfcolor/sphere_sector.py 
hexrd/hedm/ipfcolor/sphere_sector.py +hexrd/ipfcolor/colorspace.py hexrd/hedm/ipfcolor/colorspace.py +hexrd/ipfcolor/__init__.py hexrd/hedm/ipfcolor/__init__.py +hexrd/xrdutil/__init__.py hexrd/hedm/xrdutil/__init__.py +hexrd/xrdutil/utils.py hexrd/hedm/xrdutil/utils.py +hexrd/xrdutil/utils.py hexrd/hed/xrdutil/utils.py +hexrd/xrdutil/utils.py hexrd/laue/xrdutil/utils.py +hexrd/xrdutil/phutil.py hexrd/hed/xrdutil/phutil.py +hexrd/distortion/ge_41rt.py hexrd/core/distortion/ge_41rt.py +hexrd/distortion/identity.py hexrd/core/distortion/identity.py +hexrd/distortion/dexela_2923.py hexrd/core/distortion/dexela_2923.py +hexrd/distortion/__init__.py hexrd/core/distortion/__init__.py +hexrd/distortion/registry.py hexrd/core/distortion/registry.py +hexrd/distortion/distortionabc.py hexrd/core/distortion/distortionabc.py +hexrd/distortion/utils.py hexrd/core/distortion/utils.py +hexrd/distortion/nyi.py hexrd/core/distortion/nyi.py +hexrd/grainmap/tomoutil.py hexrd/hedm/grainmap/tomoutil.py +hexrd/grainmap/__init__.py hexrd/hedm/grainmap/__init__.py +hexrd/grainmap/vtkutil.py hexrd/hedm/grainmap/vtkutil.py +hexrd/grainmap/nfutil.py hexrd/hedm/grainmap/nfutil.py +hexrd/convolution/__init__.py hexrd/core/convolution/__init__.py +hexrd/convolution/utils.py hexrd/core/convolution/utils.py +hexrd/convolution/convolve.py hexrd/core/convolution/convolve.py +tests/test_material.py tests/test_material.py +tests/test_graindata.py tests/test_graindata.py +tests/test_utils_json.py tests/test_utils_json.py +tests/test_utils_yaml.py tests/test_utils_yaml.py +tests/common.py tests/common.py +tests/test_rotations.py tests/test_rotations.py +tests/test_inverse_distortion.py tests/test_inverse_distortion.py +tests/conftest.py tests/conftest.py +tests/test_absorption_correction.py tests/test_absorption_correction.py +tests/test_memoize.py tests/test_memoize.py +tests/test_transforms.py tests/test_transforms.py +tests/test_matrix_utils.py tests/test_matrix_utils.py +tests/fit_grains_check.py 
tests/fit_grains_check.py +tests/test_fit-grains.py tests/test_fit-grains.py +tests/find_orientations_testing.py tests/find_orientations_testing.py +tests/test_concurrent.py tests/test_concurrent.py +tests/test_find_orientations.py tests/test_find_orientations.py +tests/config/test_material.py tests/config/test_material.py +tests/config/test_instrument.py tests/config/test_instrument.py +tests/config/test_fit_grains.py tests/config/test_fit_grains.py +tests/config/common.py tests/config/common.py +tests/config/test_root.py tests/config/test_root.py +tests/config/__init__.py tests/config/__init__.py +tests/config/test_find_orientations.py tests/config/test_find_orientations.py +tests/config/test_image_series.py tests/config/test_image_series.py +tests/transforms/test_rotate_vecs_about_axis.py tests/transforms/test_rotate_vecs_about_axis.py +tests/transforms/test_quat_distance_from_file.py tests/transforms/test_quat_distance_from_file.py +tests/transforms/test_angles_to_dvec_from_file.py tests/transforms/test_angles_to_dvec_from_file.py +tests/transforms/test_xy_to_gvec_from_file.py tests/transforms/test_xy_to_gvec_from_file.py +tests/transforms/test_make_detector_rmat_from_file.py tests/transforms/test_make_detector_rmat_from_file.py +tests/transforms/common.py tests/transforms/common.py +tests/transforms/test_validate_angle_ranges_from_file.py tests/transforms/test_validate_angle_ranges_from_file.py +tests/transforms/test_gvec_to_xy.py tests/transforms/test_gvec_to_xy.py +tests/transforms/test_make_sample_rmat_from_file.py tests/transforms/test_make_sample_rmat_from_file.py +tests/transforms/test_angles_to_gvec_from_file.py tests/transforms/test_angles_to_gvec_from_file.py +tests/transforms/test_gvec_to_xy_from_file.py tests/transforms/test_gvec_to_xy_from_file.py +tests/transforms/test_make_rmat_of_expmap_from_file.py tests/transforms/test_make_rmat_of_expmap_from_file.py +tests/transforms/test_make_beam_rmat_from_file.py 
tests/transforms/test_make_beam_rmat_from_file.py +tests/transforms/test_unit_vector.py tests/transforms/test_unit_vector.py +tests/transforms/test_xy_to_gvec.py tests/transforms/test_xy_to_gvec.py +tests/unitcell/test_vec_math.py tests/unitcell/test_vec_math.py +tests/planedata/test_init.py tests/planedata/test_init.py +tests/planedata/test_exclusion.py tests/planedata/test_exclusion.py +tests/planedata/test_with_data.py tests/planedata/test_with_data.py +tests/planedata/test_misc.py tests/planedata/test_misc.py +tests/matrix_util/test_strain_stress_reps.py tests/matrix_util/test_strain_stress_reps.py +tests/matrix_util/test_norms.py tests/matrix_util/test_norms.py +tests/matrix_util/test_vector_and_matrix_math.py tests/matrix_util/test_vector_and_matrix_math.py +tests/imageseries/test_stats.py tests/imageseries/test_stats.py +tests/imageseries/common.py tests/imageseries/common.py +tests/imageseries/test_formats.py tests/imageseries/test_formats.py +tests/imageseries/__init__.py tests/imageseries/__init__.py +tests/imageseries/test_omega.py tests/imageseries/test_omega.py +tests/imageseries/test_process.py tests/imageseries/test_process.py +tests/imageseries/test_properties.py tests/imageseries/test_properties.py +tests/rotations/test_eulers.py tests/rotations/test_eulers.py +tests/rotations/test_quat_math.py tests/rotations/test_quat_math.py +.codecov.yml .codecov.yml +.gitattributes .gitattributes +.github/workflows/container_build.sh .github/workflows/container_build.sh +.github/workflows/package.yml .github/workflows/package.yml +.github/workflows/test.yml .github/workflows/test.yml +.gitignore .gitignore +.readthedocs.yml .readthedocs.yml +conda.recipe/bld.bat conda.recipe/bld.bat +conda.recipe/build.sh conda.recipe/build.sh +conda.recipe/conda_build_config.yaml conda.recipe/conda_build_config.yaml +conda.recipe/meta.yaml conda.recipe/meta.yaml +docs/Makefile docs/Makefile +docs/README.md docs/README.md +docs/requirements.txt docs/requirements.txt 
+docs/run_apidoc.sh docs/run_apidoc.sh +docs/run_sphinx.sh docs/run_sphinx.sh +docs/source/.gitignore docs/source/.gitignore +docs/source/dev/.gitignore docs/source/dev/.gitignore +docs/source/users/transforms.md docs/source/users/transforms.md +docs/source/_static/transforms.pdf docs/source/_static/transforms.pdf +environment.yml environment.yml +hexrd/convolution/src/convolve.c hexrd/core/convolution/src/convolve.c +hexrd/convolution/src/convolve.h hexrd/core/convolution/src/convolve.h +hexrd/copyright.py hexrd/copyright.py +hexrd/fitting/calibration/relative_constraints.py hexrd/core/fitting/calibration/relative_constraints.py +hexrd/resources/Anomalous.h5 hexrd/core/resources/Anomalous.h5 +hexrd/resources/BBXRD_IMAGE-PLATE-BACK_bnd.txt hexrd/core/resources/BBXRD_IMAGE-PLATE-BACK_bnd.txt +hexrd/resources/BBXRD_IMAGE-PLATE-BOTTOM_bnd.txt hexrd/core/resources/BBXRD_IMAGE-PLATE-BOTTOM_bnd.txt +hexrd/resources/BBXRD_IMAGE-PLATE-LEFT_bnd.txt hexrd/core/resources/BBXRD_IMAGE-PLATE-LEFT_bnd.txt +hexrd/resources/BBXRD_IMAGE-PLATE-RIGHT_bnd.txt hexrd/core/resources/BBXRD_IMAGE-PLATE-RIGHT_bnd.txt +hexrd/resources/BBXRD_IMAGE-PLATE-TOP_bnd.txt hexrd/core/resources/BBXRD_IMAGE-PLATE-TOP_bnd.txt +hexrd/resources/characteristic_xray_energies.h5 hexrd/core/resources/characteristic_xray_energies.h5 +hexrd/resources/detector_templates/dexela-2923-detector-subpanel.yml hexrd/core/resources/detector_templates/dexela-2923-detector-subpanel.yml +hexrd/resources/detector_templates/dexela-2923-detector.yml hexrd/core/resources/detector_templates/dexela-2923-detector.yml +hexrd/resources/detector_templates/GE-detector.yml hexrd/core/resources/detector_templates/GE-detector.yml +hexrd/resources/detector_templates/Hydra_Feb19.yml hexrd/core/resources/detector_templates/Hydra_Feb19.yml +hexrd/resources/detector_templates/Pilatus3X_2M-detector.yml hexrd/core/resources/detector_templates/Pilatus3X_2M-detector.yml +hexrd/resources/detector_templates/Pixirad2-detector.yml 
hexrd/core/resources/detector_templates/Pixirad2-detector.yml +hexrd/resources/detector_templates/Varex_4343CT-detector.yml hexrd/core/resources/detector_templates/Varex_4343CT-detector.yml +hexrd/resources/instrument_templates/dcs.yml hexrd/core/resources/instrument_templates/dcs.yml +hexrd/resources/instrument_templates/dual_dexelas.yml hexrd/core/resources/instrument_templates/dual_dexelas.yml +hexrd/resources/instrument_templates/rigaku.yml hexrd/core/resources/instrument_templates/rigaku.yml +hexrd/resources/instrument_templates/varex.yml hexrd/core/resources/instrument_templates/varex.yml +hexrd/resources/mu_en.h5 hexrd/core/resources/mu_en.h5 +hexrd/resources/pinhole_materials.h5 hexrd/core/resources/pinhole_materials.h5 +hexrd/resources/PXRDIP_IMAGE-PLATE-B_bnd.txt hexrd/core/resources/PXRDIP_IMAGE-PLATE-B_bnd.txt +hexrd/resources/PXRDIP_IMAGE-PLATE-D_bnd.txt hexrd/core/resources/PXRDIP_IMAGE-PLATE-D_bnd.txt +hexrd/resources/PXRDIP_IMAGE-PLATE-L_bnd.txt hexrd/core/resources/PXRDIP_IMAGE-PLATE-L_bnd.txt +hexrd/resources/PXRDIP_IMAGE-PLATE-R_bnd.txt hexrd/core/resources/PXRDIP_IMAGE-PLATE-R_bnd.txt +hexrd/resources/PXRDIP_IMAGE-PLATE-U_bnd.txt hexrd/core/resources/PXRDIP_IMAGE-PLATE-U_bnd.txt +hexrd/resources/pxrdip_reference_config.yml hexrd/core/resources/pxrdip_reference_config.yml +hexrd/resources/surface_harmonics.h5 hexrd/core/resources/surface_harmonics.h5 +hexrd/resources/tardis_2xrs_reference_config.yml hexrd/core/resources/tardis_2xrs_reference_config.yml +hexrd/resources/TARDIS_IMAGE-PLATE-2_bnd.txt hexrd/core/resources/TARDIS_IMAGE-PLATE-2_bnd.txt +hexrd/resources/TARDIS_IMAGE-PLATE-3_bnd.txt hexrd/core/resources/TARDIS_IMAGE-PLATE-3_bnd.txt +hexrd/resources/TARDIS_IMAGE-PLATE-3_bnd_cropped.txt hexrd/core/resources/TARDIS_IMAGE-PLATE-3_bnd_cropped.txt +hexrd/resources/TARDIS_IMAGE-PLATE-4_bnd.txt hexrd/core/resources/TARDIS_IMAGE-PLATE-4_bnd.txt +hexrd/resources/tardis_reference_config.yml hexrd/core/resources/tardis_reference_config.yml 
+hexrd/resources/window_materials.h5 hexrd/core/resources/window_materials.h5 +hexrd/transforms/cpp_sublibrary/Makefile hexrd/core/transforms/cpp_sublibrary/Makefile +hexrd/transforms/cpp_sublibrary/src/inverse_distortion.cpp hexrd/core/transforms/cpp_sublibrary/src/inverse_distortion.cpp +hexrd/transforms/debug_helpers.h hexrd/core/transforms/debug_helpers.h +hexrd/transforms/Makefile hexrd/core/transforms/Makefile +hexrd/transforms/new_capi/angles_to_dvec.c hexrd/core/transforms/new_capi/angles_to_dvec.c +hexrd/transforms/new_capi/angles_to_gvec.c hexrd/core/transforms/new_capi/angles_to_gvec.c +hexrd/transforms/new_capi/gvec_to_xy.c hexrd/core/transforms/new_capi/gvec_to_xy.c +hexrd/transforms/new_capi/make_beam_rmat.c hexrd/core/transforms/new_capi/make_beam_rmat.c +hexrd/transforms/new_capi/make_binary_rmat.c hexrd/core/transforms/new_capi/make_binary_rmat.c +hexrd/transforms/new_capi/make_detector_rmat.c hexrd/core/transforms/new_capi/make_detector_rmat.c +hexrd/transforms/new_capi/make_rmat_of_expmap.c hexrd/core/transforms/new_capi/make_rmat_of_expmap.c +hexrd/transforms/new_capi/make_sample_rmat.c hexrd/core/transforms/new_capi/make_sample_rmat.c +hexrd/transforms/new_capi/module.c hexrd/core/transforms/new_capi/module.c +hexrd/transforms/new_capi/ndargs_helper.c hexrd/core/transforms/new_capi/ndargs_helper.c +hexrd/transforms/new_capi/ndargs_helper.h hexrd/core/transforms/new_capi/ndargs_helper.h +hexrd/transforms/new_capi/new_func.c hexrd/core/transforms/new_capi/new_func.c +hexrd/transforms/new_capi/oscill_angles_of_HKLs.c hexrd/core/transforms/new_capi/oscill_angles_of_HKLs.c +hexrd/transforms/new_capi/quat_distance.c hexrd/core/transforms/new_capi/quat_distance.c +hexrd/transforms/new_capi/README.md hexrd/core/transforms/new_capi/README.md +hexrd/transforms/new_capi/rotate_vecs_about_axis.c hexrd/core/transforms/new_capi/rotate_vecs_about_axis.c +hexrd/transforms/new_capi/transforms_prototypes.h hexrd/core/transforms/new_capi/transforms_prototypes.h 
+hexrd/transforms/new_capi/transforms_types.h hexrd/core/transforms/new_capi/transforms_types.h +hexrd/transforms/new_capi/transforms_utils.h hexrd/core/transforms/new_capi/transforms_utils.h +hexrd/transforms/new_capi/unit_row_vector.c hexrd/core/transforms/new_capi/unit_row_vector.c +hexrd/transforms/new_capi/validate_angle_ranges.c hexrd/core/transforms/new_capi/validate_angle_ranges.c +hexrd/transforms/new_capi/xy_to_gvec.c hexrd/core/transforms/new_capi/xy_to_gvec.c +hexrd/transforms/stdbool.h hexrd/core/transforms/stdbool.h +hexrd/transforms/transforms_CAPI.c hexrd/core/transforms/transforms_CAPI.c +hexrd/transforms/transforms_CAPI.h hexrd/core/transforms/transforms_CAPI.h +hexrd/transforms/transforms_CFUNC.c hexrd/core/transforms/transforms_CFUNC.c +hexrd/transforms/transforms_CFUNC.h hexrd/core/transforms/transforms_CFUNC.h +hexrd/__init__.py hexrd/__init__.py +LICENSE LICENSE +NOTICE NOTICE +pyproject.toml pyproject.toml +README.md README.md +tests/calibration/test_2xrs_calibration.py tests/calibration/test_2xrs_calibration.py +tests/calibration/test_calibration.py tests/calibration/test_calibration.py +tests/calibration/test_relative_constraints.py tests/calibration/test_relative_constraints.py +tests/data/calibration_expected.npy tests/data/calibration_expected.npy +tests/data/gvec_to_xy.json tests/data/gvec_to_xy.json +tests/data/ideal_tardis_transmissions.npy tests/data/ideal_tardis_transmissions.npy +tests/data/inverse_distortion_in_out.json tests/data/inverse_distortion_in_out.json +tests/data/materials/Ag(TeMo)6.cif tests/data/materials/Ag(TeMo)6.cif +tests/data/materials/Al2SiO5.cif tests/data/materials/Al2SiO5.cif +tests/data/materials/AlCuO2.cif tests/data/materials/AlCuO2.cif +tests/data/materials/C.cif tests/data/materials/C.cif +tests/data/materials/Cs.cif tests/data/materials/Cs.cif +tests/data/materials/Mg.cif tests/data/materials/Mg.cif +tests/data/materials/Si.cif tests/data/materials/Si.cif +tests/data/materials/U.cif 
tests/data/materials/U.cif +tests/data/plane_data_test.npy tests/data/plane_data_test.npy +tests/data/testmat.h5 tests/data/testmat.h5 +tests/data/test_correct_angles_to_dvec.npy tests/data/test_correct_angles_to_dvec.npy +tests/data/test_correct_angles_to_gvec.npy tests/data/test_correct_angles_to_gvec.npy +tests/data/test_correct_gvec_to_xy.npy tests/data/test_correct_gvec_to_xy.npy +tests/data/test_correct_make_beam_rmat.npy tests/data/test_correct_make_beam_rmat.npy +tests/data/test_correct_make_detector_rmat.npy tests/data/test_correct_make_detector_rmat.npy +tests/data/test_correct_make_rmat_of_expmap.npy tests/data/test_correct_make_rmat_of_expmap.npy +tests/data/test_correct_make_sample_rmat.npy tests/data/test_correct_make_sample_rmat.npy +tests/data/test_correct_quat_distance.npy tests/data/test_correct_quat_distance.npy +tests/data/test_correct_validate_angle_ranges.npy tests/data/test_correct_validate_angle_ranges.npy +tests/data/test_correct_xy_to_gvec.npy tests/data/test_correct_xy_to_gvec.npy +tests/data/test_polar_view_expected.npy tests/data/test_polar_view_expected.npy +tests/imageseries/test_pickleable.py tests/imageseries/test_pickleable.py +tests/requirements-dev.txt tests/requirements-dev.txt +tests/rotations/test_utilities.py tests/rotations/test_utilities.py +tests/test_polar_view.py tests/test_polar_view.py +file_table.tsv file_table.tsv +hexrd\core\config\__init__.py hexrd\core\config\__init__.py +hexrd\core\config\beam.py hexrd\core\config\beam.py +hexrd\core\config\config.py hexrd\core\config\config.py +hexrd\core\config\dumper.py hexrd\core\config\dumper.py +hexrd\core\config\imageseries.py hexrd\core\config\imageseries.py +hexrd\core\config\instrument.py hexrd\core\config\instrument.py +hexrd\core\config\loader.py hexrd\core\config\loader.py +hexrd\core\config\material.py hexrd\core\config\material.py +hexrd\core\config\root.py hexrd\core\config\root.py +hexrd\core\config\utils.py hexrd\core\config\utils.py +hexrd\core\constants.py 
hexrd\core\constants.py +hexrd\core\convolution\__init__.py hexrd\core\convolution\__init__.py +hexrd\core\convolution\convolve.py hexrd\core\convolution\convolve.py +hexrd\core\convolution\src\convolve.c hexrd\core\convolution\src\convolve.c +hexrd\core\convolution\src\convolve.h hexrd\core\convolution\src\convolve.h +hexrd\core\convolution\utils.py hexrd\core\convolution\utils.py +hexrd\core\deprecation.py hexrd\core\deprecation.py +hexrd\core\distortion\__init__.py hexrd\core\distortion\__init__.py +hexrd\core\distortion\dexela_2923.py hexrd\core\distortion\dexela_2923.py +hexrd\core\distortion\distortionabc.py hexrd\core\distortion\distortionabc.py +hexrd\core\distortion\ge_41rt.py hexrd\core\distortion\ge_41rt.py +hexrd\core\distortion\identity.py hexrd\core\distortion\identity.py +hexrd\core\distortion\nyi.py hexrd\core\distortion\nyi.py +hexrd\core\distortion\registry.py hexrd\core\distortion\registry.py +hexrd\core\distortion\utils.py hexrd\core\distortion\utils.py +hexrd\core\extensions\__init__.py hexrd\core\extensions\__init__.py +hexrd\core\fitting\__init__.py hexrd\core\fitting\__init__.py +hexrd\core\fitting\calibration\__init__.py hexrd\core\fitting\calibration\__init__.py +hexrd\core\fitting\calibration\relative_constraints.py hexrd\core\fitting\calibration\relative_constraints.py +hexrd\core\fitting\fitpeak.py hexrd\core\fitting\fitpeak.py +hexrd\core\fitting\peakfunctions.py hexrd\core\fitting\peakfunctions.py +hexrd\core\fitting\spectrum.py hexrd\core\fitting\spectrum.py +hexrd\core\fitting\utils.py hexrd\core\fitting\utils.py +hexrd\core\gridutil.py hexrd\core\gridutil.py +hexrd\core\imageseries\__init__.py hexrd\core\imageseries\__init__.py +hexrd\core\imageseries\baseclass.py hexrd\core\imageseries\baseclass.py +hexrd\core\imageseries\imageseriesabc.py hexrd\core\imageseries\imageseriesabc.py +hexrd\core\imageseries\imageseriesiter.py hexrd\core\imageseries\imageseriesiter.py +hexrd\core\imageseries\load\__init__.py 
hexrd\core\imageseries\load\__init__.py +hexrd\core\imageseries\load\array.py hexrd\core\imageseries\load\array.py +hexrd\core\imageseries\load\eiger_stream_v1.py hexrd\core\imageseries\load\eiger_stream_v1.py +hexrd\core\imageseries\load\framecache.py hexrd\core\imageseries\load\framecache.py +hexrd\core\imageseries\load\function.py hexrd\core\imageseries\load\function.py +hexrd\core\imageseries\load\hdf5.py hexrd\core\imageseries\load\hdf5.py +hexrd\core\imageseries\load\imagefiles.py hexrd\core\imageseries\load\imagefiles.py +hexrd\core\imageseries\load\metadata.py hexrd\core\imageseries\load\metadata.py +hexrd\core\imageseries\load\rawimage.py hexrd\core\imageseries\load\rawimage.py +hexrd\core\imageseries\load\registry.py hexrd\core\imageseries\load\registry.py +hexrd\core\imageseries\load\trivial.py hexrd\core\imageseries\load\trivial.py +hexrd\core\imageseries\omega.py hexrd\core\imageseries\omega.py +hexrd\core\imageseries\process.py hexrd\core\imageseries\process.py +hexrd\core\imageseries\save.py hexrd\core\imageseries\save.py +hexrd\core\imageseries\stats.py hexrd\core\imageseries\stats.py +hexrd\core\imageutil.py hexrd\core\imageutil.py +hexrd\core\instrument\__init__.py hexrd\core\instrument\__init__.py +hexrd\core\instrument\constants.py hexrd\core\instrument\constants.py +hexrd\core\instrument\cylindrical_detector.py hexrd\core\instrument\cylindrical_detector.py +hexrd\core\instrument\detector.py hexrd\core\instrument\detector.py +hexrd\core\instrument\detector.py hexrd\hed\instrument\detector.py +hexrd\core\instrument\detector.py hexrd\hedm\instrument\detector.py +hexrd\core\instrument\detector.py hexrd\laue\instrument\detector.py +hexrd\core\instrument\detector.py hexrd\powder\instrument\detector.py +hexrd\core\instrument\detector_coatings.py hexrd\core\instrument\detector_coatings.py +hexrd\core\instrument\hedm_instrument.py hexrd\core\instrument\hedm_instrument.py +hexrd\core\instrument\physics_package.py hexrd\core\instrument\physics_package.py 
+hexrd\core\instrument\planar_detector.py hexrd\core\instrument\planar_detector.py +hexrd\core\material\__init__.py hexrd\core\material\__init__.py +hexrd\core\material\crystallography.py hexrd\core\material\crystallography.py +hexrd\core\material\jcpds.py hexrd\core\material\jcpds.py +hexrd\core\material\material.py hexrd\core\material\material.py +hexrd\core\material\mksupport.py hexrd\core\material\mksupport.py +hexrd\core\material\spacegroup.py hexrd\core\material\spacegroup.py +hexrd\core\material\symbols.py hexrd\core\material\symbols.py +hexrd\core\material\symmetry.py hexrd\core\material\symmetry.py +hexrd\core\material\unitcell.py hexrd\core\material\unitcell.py +hexrd\core\material\utils.py hexrd\core\material\utils.py +hexrd\core\matrixutil.py hexrd\core\matrixutil.py +hexrd\core\projections\__init__.py hexrd\core\projections\__init__.py +hexrd\core\projections\polar.py hexrd\core\projections\polar.py +hexrd\core\projections\spherical.py hexrd\core\projections\spherical.py +hexrd\core\resources\__init__.py hexrd\core\resources\__init__.py +hexrd\core\resources\Anomalous.h5 hexrd\core\resources\Anomalous.h5 +hexrd\core\resources\BBXRD_IMAGE-PLATE-BACK_bnd.txt hexrd\core\resources\BBXRD_IMAGE-PLATE-BACK_bnd.txt +hexrd\core\resources\BBXRD_IMAGE-PLATE-BOTTOM_bnd.txt hexrd\core\resources\BBXRD_IMAGE-PLATE-BOTTOM_bnd.txt +hexrd\core\resources\BBXRD_IMAGE-PLATE-LEFT_bnd.txt hexrd\core\resources\BBXRD_IMAGE-PLATE-LEFT_bnd.txt +hexrd\core\resources\BBXRD_IMAGE-PLATE-RIGHT_bnd.txt hexrd\core\resources\BBXRD_IMAGE-PLATE-RIGHT_bnd.txt +hexrd\core\resources\BBXRD_IMAGE-PLATE-TOP_bnd.txt hexrd\core\resources\BBXRD_IMAGE-PLATE-TOP_bnd.txt +hexrd\core\resources\characteristic_xray_energies.h5 hexrd\core\resources\characteristic_xray_energies.h5 +hexrd\core\resources\detector_templates\__init__.py hexrd\core\resources\detector_templates\__init__.py +hexrd\core\resources\detector_templates\dexela-2923-detector-subpanel.yml 
hexrd\core\resources\detector_templates\dexela-2923-detector-subpanel.yml +hexrd\core\resources\detector_templates\dexela-2923-detector.yml hexrd\core\resources\detector_templates\dexela-2923-detector.yml +hexrd\core\resources\detector_templates\GE-detector.yml hexrd\core\resources\detector_templates\GE-detector.yml +hexrd\core\resources\detector_templates\Hydra_Feb19.yml hexrd\core\resources\detector_templates\Hydra_Feb19.yml +hexrd\core\resources\detector_templates\Pilatus3X_2M-detector.yml hexrd\core\resources\detector_templates\Pilatus3X_2M-detector.yml +hexrd\core\resources\detector_templates\Pixirad2-detector.yml hexrd\core\resources\detector_templates\Pixirad2-detector.yml +hexrd\core\resources\detector_templates\Varex_4343CT-detector.yml hexrd\core\resources\detector_templates\Varex_4343CT-detector.yml +hexrd\core\resources\instrument_templates\__init__.py hexrd\core\resources\instrument_templates\__init__.py +hexrd\core\resources\instrument_templates\dcs.yml hexrd\core\resources\instrument_templates\dcs.yml +hexrd\core\resources\instrument_templates\dual_dexelas.yml hexrd\core\resources\instrument_templates\dual_dexelas.yml +hexrd\core\resources\instrument_templates\rigaku.hexrd hexrd\core\resources\instrument_templates\rigaku.hexrd +hexrd\core\resources\instrument_templates\rigaku.yml hexrd\core\resources\instrument_templates\rigaku.yml +hexrd\core\resources\instrument_templates\varex.yml hexrd\core\resources\instrument_templates\varex.yml +hexrd\core\resources\mu_en.h5 hexrd\core\resources\mu_en.h5 +hexrd\core\resources\pinhole_materials.h5 hexrd\core\resources\pinhole_materials.h5 +hexrd\core\resources\PXRDIP_IMAGE-PLATE-B_bnd.txt hexrd\core\resources\PXRDIP_IMAGE-PLATE-B_bnd.txt +hexrd\core\resources\PXRDIP_IMAGE-PLATE-D_bnd.txt hexrd\core\resources\PXRDIP_IMAGE-PLATE-D_bnd.txt +hexrd\core\resources\PXRDIP_IMAGE-PLATE-L_bnd.txt hexrd\core\resources\PXRDIP_IMAGE-PLATE-L_bnd.txt +hexrd\core\resources\PXRDIP_IMAGE-PLATE-R_bnd.txt 
hexrd\core\resources\PXRDIP_IMAGE-PLATE-R_bnd.txt +hexrd\core\resources\PXRDIP_IMAGE-PLATE-U_bnd.txt hexrd\core\resources\PXRDIP_IMAGE-PLATE-U_bnd.txt +hexrd\core\resources\pxrdip_reference_config.yml hexrd\core\resources\pxrdip_reference_config.yml +hexrd\core\resources\surface_harmonics.h5 hexrd\core\resources\surface_harmonics.h5 +hexrd\core\resources\tardis_2xrs_reference_config.yml hexrd\core\resources\tardis_2xrs_reference_config.yml +hexrd\core\resources\TARDIS_IMAGE-PLATE-2_bnd.txt hexrd\core\resources\TARDIS_IMAGE-PLATE-2_bnd.txt +hexrd\core\resources\TARDIS_IMAGE-PLATE-3_bnd.txt hexrd\core\resources\TARDIS_IMAGE-PLATE-3_bnd.txt +hexrd\core\resources\TARDIS_IMAGE-PLATE-3_bnd_cropped.txt hexrd\core\resources\TARDIS_IMAGE-PLATE-3_bnd_cropped.txt +hexrd\core\resources\TARDIS_IMAGE-PLATE-4_bnd.txt hexrd\core\resources\TARDIS_IMAGE-PLATE-4_bnd.txt +hexrd\core\resources\tardis_reference_config.yml hexrd\core\resources\tardis_reference_config.yml +hexrd\core\resources\window_materials.h5 hexrd\core\resources\window_materials.h5 +hexrd\core\rotations.py hexrd\core\rotations.py +hexrd\core\transforms\__init__.py hexrd\core\transforms\__init__.py +hexrd\core\transforms\cpp_sublibrary\Makefile hexrd\core\transforms\cpp_sublibrary\Makefile +hexrd\core\transforms\cpp_sublibrary\src\inverse_distortion.cpp hexrd\core\transforms\cpp_sublibrary\src\inverse_distortion.cpp +hexrd\core\transforms\debug_helpers.h hexrd\core\transforms\debug_helpers.h +hexrd\core\transforms\Makefile hexrd\core\transforms\Makefile +hexrd\core\transforms\new_capi\angles_to_dvec.c hexrd\core\transforms\new_capi\angles_to_dvec.c +hexrd\core\transforms\new_capi\angles_to_gvec.c hexrd\core\transforms\new_capi\angles_to_gvec.c +hexrd\core\transforms\new_capi\gvec_to_xy.c hexrd\core\transforms\new_capi\gvec_to_xy.c +hexrd\core\transforms\new_capi\make_beam_rmat.c hexrd\core\transforms\new_capi\make_beam_rmat.c +hexrd\core\transforms\new_capi\make_binary_rmat.c 
hexrd\core\transforms\new_capi\make_binary_rmat.c +hexrd\core\transforms\new_capi\make_detector_rmat.c hexrd\core\transforms\new_capi\make_detector_rmat.c +hexrd\core\transforms\new_capi\make_rmat_of_expmap.c hexrd\core\transforms\new_capi\make_rmat_of_expmap.c +hexrd\core\transforms\new_capi\make_sample_rmat.c hexrd\core\transforms\new_capi\make_sample_rmat.c +hexrd\core\transforms\new_capi\module.c hexrd\core\transforms\new_capi\module.c +hexrd\core\transforms\new_capi\ndargs_helper.c hexrd\core\transforms\new_capi\ndargs_helper.c +hexrd\core\transforms\new_capi\ndargs_helper.h hexrd\core\transforms\new_capi\ndargs_helper.h +hexrd\core\transforms\new_capi\new_func.c hexrd\core\transforms\new_capi\new_func.c +hexrd\core\transforms\new_capi\oscill_angles_of_HKLs.c hexrd\core\transforms\new_capi\oscill_angles_of_HKLs.c +hexrd\core\transforms\new_capi\quat_distance.c hexrd\core\transforms\new_capi\quat_distance.c +hexrd\core\transforms\new_capi\README.md hexrd\core\transforms\new_capi\README.md +hexrd\core\transforms\new_capi\reference.py hexrd\core\transforms\new_capi\reference.py +hexrd\core\transforms\new_capi\rotate_vecs_about_axis.c hexrd\core\transforms\new_capi\rotate_vecs_about_axis.c +hexrd\core\transforms\new_capi\transforms_prototypes.h hexrd\core\transforms\new_capi\transforms_prototypes.h +hexrd\core\transforms\new_capi\transforms_types.h hexrd\core\transforms\new_capi\transforms_types.h +hexrd\core\transforms\new_capi\transforms_utils.h hexrd\core\transforms\new_capi\transforms_utils.h +hexrd\core\transforms\new_capi\unit_row_vector.c hexrd\core\transforms\new_capi\unit_row_vector.c +hexrd\core\transforms\new_capi\validate_angle_ranges.c hexrd\core\transforms\new_capi\validate_angle_ranges.c +hexrd\core\transforms\new_capi\xf_new_capi.py hexrd\core\transforms\new_capi\xf_new_capi.py +hexrd\core\transforms\new_capi\xy_to_gvec.c hexrd\core\transforms\new_capi\xy_to_gvec.c +hexrd\core\transforms\old_xfcapi.py hexrd\core\transforms\old_xfcapi.py 
+hexrd\core\transforms\stdbool.h hexrd\core\transforms\stdbool.h +hexrd\core\transforms\transforms_CAPI.c hexrd\core\transforms\transforms_CAPI.c +hexrd\core\transforms\transforms_CAPI.h hexrd\core\transforms\transforms_CAPI.h +hexrd\core\transforms\transforms_CFUNC.c hexrd\core\transforms\transforms_CFUNC.c +hexrd\core\transforms\transforms_CFUNC.h hexrd\core\transforms\transforms_CFUNC.h +hexrd\core\transforms\xf.py hexrd\core\transforms\xf.py +hexrd\core\transforms\xfcapi.py hexrd\core\transforms\xfcapi.py +hexrd\core\utils\__init__.py hexrd\core\utils\__init__.py +hexrd\core\utils\compatibility.py hexrd\core\utils\compatibility.py +hexrd\core\utils\concurrent.py hexrd\core\utils\concurrent.py +hexrd\core\utils\decorators.py hexrd\core\utils\decorators.py +hexrd\core\utils\hdf5.py hexrd\core\utils\hdf5.py +hexrd\core\utils\hkl.py hexrd\core\utils\hkl.py +hexrd\core\utils\json.py hexrd\core\utils\json.py +hexrd\core\utils\multiprocess_generic.py hexrd\core\utils\multiprocess_generic.py +hexrd\core\utils\profiler.py hexrd\core\utils\profiler.py +hexrd\core\utils\progressbar.py hexrd\core\utils\progressbar.py +hexrd\core\utils\warnings.py hexrd\core\utils\warnings.py +hexrd\core\utils\yaml.py hexrd\core\utils\yaml.py +hexrd\core\valunits.py hexrd\core\valunits.py +hexrd\hed\instrument\hedm_instrument.py hexrd\hed\instrument\hedm_instrument.py +hexrd\hed\xrdutil\phutil.py hexrd\hed\xrdutil\phutil.py +hexrd\hed\xrdutil\utils.py hexrd\hed\xrdutil\utils.py +hexrd\cli\preprocess.py hexrd\hedm\cli\preprocess.py +hexrd\module_map.py hexrd\module_map.py +hexrd\preprocess\argument_classes_factory.py hexrd\hedm\preprocess\argument_classes_factory.py +hexrd\preprocess\preprocessors.py hexrd\hedm\preprocess\preprocessors.py +hexrd\preprocess\profiles.py hexrd\hedm\preprocess\profiles.py +hexrd\preprocess\yaml_internals.py hexrd\hedm\preprocess\yaml_internals.py +hexrd\preprocess\__init__.py hexrd\hedm\preprocess\__init__.py +tests\test_preprocess.py tests\test_preprocess.py 
+tests\test_polar_view.py tests\test_polar_view.py +hexrd\transforms\new_capi hexrd\core\transforms\new_capi +hexrd\transforms\cpp_sublibrary hexrd\core\transforms\cpp_sublibrary +hexrd\transforms\cpp_sublibrary\src hexrd\core\transforms\cpp_sublibrary\src +hexrd\convolution\src hexrd\core\convolution\src +hexrd\distortion\dexela_2923_quad.py hexrd\core\distortion\dexela_2923_quad.py +hexrd\resources\instrument_templates\rigaku.hexrd hexrd\core\resources\instrument_templates\rigaku.hexrd +hexrd\transforms\cpp_sublibrary\src\transforms.cpp hexrd\core\transforms\cpp_sublibrary\src\transforms.cpp +hexrd\utils\panel_buffer.py hexrd\core\utils\panel_buffer.py +tests\test_distortion.py tests\test_distortion.py +tests\transforms\test_make_binary_rmat.py tests\transforms\test_make_binary_rmat.py diff --git a/hexrd/grainmap/tomoutil.py b/hexrd/grainmap/tomoutil.py deleted file mode 100644 index 1b55c976f..000000000 --- a/hexrd/grainmap/tomoutil.py +++ /dev/null @@ -1,178 +0,0 @@ -#%% - -import numpy as np -#import scipy as sp - -import scipy.ndimage as img -try: - import imageio as imgio -except(ImportError): - from skimage import io as imgio - - -import skimage.transform as xformimg - - - - - -#%% - - -def gen_bright_field(tbf_data_folder,tbf_img_start,tbf_num_imgs,nrows,ncols,stem='nf_',num_digits=5,ext='.tif'): - - - tbf_img_nums=np.arange(tbf_img_start,tbf_img_start+tbf_num_imgs,1) - - - tbf_stack=np.zeros([tbf_num_imgs,nrows,ncols]) - - print('Loading data for median bright field...') - for ii in np.arange(tbf_num_imgs): - print('Image #: ' + str(ii)) - tbf_stack[ii,:,:]=imgio.imread(tbf_data_folder+'%s'%(stem)+str(tbf_img_nums[ii]).zfill(num_digits)+ext) - #image_stack[ii,:,:]=np.flipud(tmp_img>threshold) - print('making median...') - - tbf=np.median(tbf_stack,axis=0) - - return tbf - - - -def gen_median_image(data_folder,img_start,num_imgs,nrows,ncols,stem='nf_',num_digits=5,ext='.tif'): - - - img_nums=np.arange(img_start,img_start+num_imgs,1) - - - 
stack=np.zeros([num_imgs,nrows,ncols]) - - print('Loading data for median image...') - for ii in np.arange(num_imgs): - print('Image #: ' + str(ii)) - stack[ii,:,:]=imgio.imread(data_folder+'%s'%(stem)+str(img_nums[ii]).zfill(num_digits)+ext) - #image_stack[ii,:,:]=np.flipud(tmp_img>threshold) - print('making median...') - - med=np.median(stack,axis=0) - - return med - -def gen_attenuation_rads(tomo_data_folder,tbf,tomo_img_start,tomo_num_imgs,nrows,ncols,stem='nf_',num_digits=5,ext='.tif',tdf=None): - - - - #Reconstructs a single tompgrahy layer to find the extent of the sample - tomo_img_nums=np.arange(tomo_img_start,tomo_img_start+tomo_num_imgs,1) - - #if tdf==None: - if len(tdf) == None: - tdf=np.zeros([nrows,ncols]) - - rad_stack=np.zeros([tomo_num_imgs,nrows,ncols]) - - print('Loading and Calculating Absorption Radiographs ...') - for ii in np.arange(tomo_num_imgs): - print('Image #: ' + str(ii)) - tmp_img=imgio.imread(tomo_data_folder+'%s'%(stem)+str(tomo_img_nums[ii]).zfill(num_digits)+ext) - - rad_stack[ii,:,:]=-np.log((tmp_img.astype(float)-tdf)/(tbf.astype(float)-tdf)) - - return rad_stack - - -def tomo_reconstruct_layer(rad_stack,cross_sectional_dim,layer_row=1024,start_tomo_ang=0., end_tomo_ang=360.,tomo_num_imgs=360, center=0.,pixel_size=0.00148): - sinogram=np.squeeze(rad_stack[:,layer_row,:]) - - rotation_axis_pos=-int(np.round(center/pixel_size)) - #rotation_axis_pos=13 - - theta = np.linspace(start_tomo_ang, end_tomo_ang, tomo_num_imgs, endpoint=False) - - max_rad=int(cross_sectional_dim/pixel_size/2.*1.1) #10% slack to avoid edge effects - - if rotation_axis_pos>=0: - sinogram_cut=sinogram[:,2*rotation_axis_pos:] - else: - sinogram_cut=sinogram[:,:(2*rotation_axis_pos)] - - dist_from_edge=np.round(sinogram_cut.shape[1]/2.).astype(int)-max_rad - - sinogram_cut=sinogram_cut[:,dist_from_edge:-dist_from_edge] - - print('Inverting Sinogram....') - reconstruction_fbp = xformimg.iradon(sinogram_cut.T, theta=theta, circle=True) - - 
reconstruction_fbp=np.rot90(reconstruction_fbp,3)#Rotation to get the result consistent with hexrd, needs to be checked - - return reconstruction_fbp - - -def threshold_and_clean_tomo_layer(reconstruction_fbp,recon_thresh, noise_obj_size,min_hole_size,edge_cleaning_iter=None,erosion_iter=1,dilation_iter=4): - binary_recon=reconstruction_fbp>recon_thresh - - #hard coded cleaning, grinding sausage... - binary_recon=img.morphology.binary_dilation(binary_recon,iterations=dilation_iter) - binary_recon=img.morphology.binary_erosion(binary_recon,iterations=erosion_iter) - - - - labeled_img,num_labels=img.label(binary_recon) - - print('Cleaning...') - print('Removing Noise...') - for ii in np.arange(1,num_labels): - obj1=np.where(labeled_img==ii) - if obj1[0].shape[0]=1 and obj1[0].shape[0] radius*radius - - binary_recon_bin[mask]=0 - - return binary_recon_bin - - diff --git a/hexrd/grainmap/vtkutil.py b/hexrd/grainmap/vtkutil.py deleted file mode 100644 index 3af28e407..000000000 --- a/hexrd/grainmap/vtkutil.py +++ /dev/null @@ -1,126 +0,0 @@ -import numpy as np - -import os - - - -#%% - - -def output_grain_map_vtk(data_location,data_stems,output_stem,vol_spacing,top_down=True): - - - - num_scans=len(data_stems) - - confidence_maps=[None]*num_scans - grain_maps=[None]*num_scans - Xss=[None]*num_scans - Yss=[None]*num_scans - Zss=[None]*num_scans - - - for ii in np.arange(num_scans): - print('Loading Volume %d ....'%(ii)) - conf_data=np.load(os.path.join(data_location,data_stems[ii]+'_grain_map_data.npz')) - - confidence_maps[ii]=conf_data['confidence_map'] - grain_maps[ii]=conf_data['grain_map'] - Xss[ii]=conf_data['Xs'] - Yss[ii]=conf_data['Ys'] - Zss[ii]=conf_data['Zs'] - - #assumes all volumes to be the same size - num_layers=grain_maps[0].shape[0] - - total_layers=num_layers*num_scans - - num_rows=grain_maps[0].shape[1] - num_cols=grain_maps[0].shape[2] - - grain_map_stitched=np.zeros((total_layers,num_rows,num_cols)) - 
confidence_stitched=np.zeros((total_layers,num_rows,num_cols)) - Xs_stitched=np.zeros((total_layers,num_rows,num_cols)) - Ys_stitched=np.zeros((total_layers,num_rows,num_cols)) - Zs_stitched=np.zeros((total_layers,num_rows,num_cols)) - - - for i in np.arange(num_scans): - if top_down==True: - grain_map_stitched[((i)*num_layers):((i)*num_layers+num_layers),:,:]=grain_maps[num_scans-1-i] - confidence_stitched[((i)*num_layers):((i)*num_layers+num_layers),:,:]=confidence_maps[num_scans-1-i] - Xs_stitched[((i)*num_layers):((i)*num_layers+num_layers),:,:]=Xss[num_scans-1-i] - Zs_stitched[((i)*num_layers):((i)*num_layers+num_layers),:,:]=Zss[num_scans-1-i] - Ys_stitched[((i)*num_layers):((i)*num_layers+num_layers),:,:]=Yss[num_scans-1-i]+vol_spacing*i - else: - - grain_map_stitched[((i)*num_layers):((i)*num_layers+num_layers),:,:]=grain_maps[i] - confidence_stitched[((i)*num_layers):((i)*num_layers+num_layers),:,:]=confidence_maps[i] - Xs_stitched[((i)*num_layers):((i)*num_layers+num_layers),:,:]=Xss[i] - Zs_stitched[((i)*num_layers):((i)*num_layers+num_layers),:,:]=Zss[i] - Ys_stitched[((i)*num_layers):((i)*num_layers+num_layers),:,:]=Yss[i]+vol_spacing*i - - - - - print('Writing VTK data...') - # VTK Dump - Xslist=Xs_stitched[:,:,:].ravel() - Yslist=Ys_stitched[:,:,:].ravel() - Zslist=Zs_stitched[:,:,:].ravel() - - grainlist=grain_map_stitched[:,:,:].ravel() - conflist=confidence_stitched[:,:,:].ravel() - - num_pts=Xslist.shape[0] - num_cells=(total_layers-1)*(num_rows-1)*(num_cols-1) - - f = open(os.path.join(data_location, output_stem +'_stitch.vtk'), 'w') - - - f.write('# vtk DataFile Version 3.0\n') - f.write('grainmap Data\n') - f.write('ASCII\n') - f.write('DATASET UNSTRUCTURED_GRID\n') - f.write('POINTS %d double\n' % (num_pts)) - - for i in np.arange(num_pts): - f.write('%e %e %e \n' %(Xslist[i],Yslist[i],Zslist[i])) - - scale2=num_cols*num_rows - scale1=num_cols - - f.write('CELLS %d %d\n' % (num_cells, 9*num_cells)) - for k in 
np.arange(Xs_stitched.shape[0]-1): - for j in np.arange(Xs_stitched.shape[1]-1): - for i in np.arange(Xs_stitched.shape[2]-1): - base=scale2*k+scale1*j+i - p1=base - p2=base+1 - p3=base+1+scale1 - p4=base+scale1 - p5=base+scale2 - p6=base+scale2+1 - p7=base+scale2+scale1+1 - p8=base+scale2+scale1 - - f.write('8 %d %d %d %d %d %d %d %d \n' %(p1,p2,p3,p4,p5,p6,p7,p8)) - - - f.write('CELL_TYPES %d \n' % (num_cells)) - for i in np.arange(num_cells): - f.write('12 \n') - - f.write('POINT_DATA %d \n' % (num_pts)) - f.write('SCALARS grain_id int \n') - f.write('LOOKUP_TABLE default \n') - for i in np.arange(num_pts): - f.write('%d \n' %(grainlist[i])) - - f.write('FIELD FieldData 1 \n' ) - f.write('confidence 1 %d float \n' % (num_pts)) - for i in np.arange(num_pts): - f.write('%e \n' %(conflist[i])) - - - f.close() \ No newline at end of file diff --git a/hexrd/hed/instrument/__init__.py b/hexrd/hed/instrument/__init__.py new file mode 100644 index 000000000..396a0d078 --- /dev/null +++ b/hexrd/hed/instrument/__init__.py @@ -0,0 +1,13 @@ +from .hedm_instrument import ( + calc_angles_from_beam_vec, + calc_beam_vec, + centers_of_edge_vec, + GenerateEtaOmeMaps, + GrainDataWriter, + HEDMInstrument, + max_tth, + switch_xray_source, + unwrap_dict_to_h5, + unwrap_h5_to_dict, +) +from hexrd.core.instrument.detector import Detector diff --git a/hexrd/hed/xrdutil/__init__.py b/hexrd/hed/xrdutil/__init__.py new file mode 100644 index 000000000..15fb5638f --- /dev/null +++ b/hexrd/hed/xrdutil/__init__.py @@ -0,0 +1 @@ +from .utils import _project_on_detector_plane, _project_on_detector_cylinder diff --git a/hexrd/xrdutil/phutil.py b/hexrd/hed/xrdutil/phutil.py similarity index 80% rename from hexrd/xrdutil/phutil.py rename to hexrd/hed/xrdutil/phutil.py index 71f2f7e22..329496383 100644 --- a/hexrd/xrdutil/phutil.py +++ b/hexrd/hed/xrdutil/phutil.py @@ -12,10 +12,10 @@ import numpy as np from numba import njit -from hexrd import constants as ct -from hexrd.instrument import Detector 
-from hexrd.transforms import xfcapi -from hexrd.utils.concurrent import distribute_tasks +from hexrd.core import constants as ct +from hexrd.core.instrument import Detector +from hexrd.core.transforms import xfcapi +from hexrd.core.utils.concurrent import distribute_tasks class LayerDistortion: @@ -85,8 +85,7 @@ def apply(self, xy_pts, return_nominal=True): class JHEPinholeDistortion: - def __init__(self, panel, - pinhole_thickness, pinhole_radius): + def __init__(self, panel, pinhole_thickness, pinhole_radius): self._panel = panel self._pinhole_thickness = pinhole_thickness self._pinhole_radius = pinhole_radius @@ -117,11 +116,14 @@ def pinhole_radius(self, x): self._pinhole_radius = float(x) def apply(self, xy_pts, return_nominal=True): - """ - """ - return tth_corr_pinhole(self.panel, xy_pts, - self.pinhole_thickness, self.pinhole_radius, - return_nominal=return_nominal) + """ """ + return tth_corr_pinhole( + self.panel, + xy_pts, + self.pinhole_thickness, + self.pinhole_radius, + return_nominal=return_nominal, + ) # Make an alias to the name for backward compatibility @@ -129,8 +131,14 @@ def apply(self, xy_pts, return_nominal=True): class RyggPinholeDistortion: - def __init__(self, panel, absorption_length, - pinhole_thickness, pinhole_radius, num_phi_elements=60): + def __init__( + self, + panel, + absorption_length, + pinhole_thickness, + pinhole_radius, + num_phi_elements=60, + ): self.panel = panel self.absorption_length = absorption_length @@ -139,11 +147,15 @@ def __init__(self, panel, absorption_length, self.num_phi_elements = num_phi_elements def apply(self, xy_pts, return_nominal=True): - return tth_corr_rygg_pinhole(self.panel, self.absorption_length, - xy_pts, self.pinhole_thickness, - self.pinhole_radius, - return_nominal=return_nominal, - num_phi_elements=self.num_phi_elements) + return tth_corr_rygg_pinhole( + self.panel, + self.absorption_length, + xy_pts, + self.pinhole_thickness, + self.pinhole_radius, + return_nominal=return_nominal, + 
num_phi_elements=self.num_phi_elements, + ) def tth_corr_layer(panel, xy_pts, @@ -157,7 +169,7 @@ def tth_corr_layer(panel, xy_pts, Parameters ---------- - panel : hexrd.instrument.Detector + panel : hexrd.core.instrument.Detector A panel instance. xy_pts : array_like The (n, 2) array of n (x, y) coordinates to be transformed in the raw @@ -185,11 +197,11 @@ def tth_corr_layer(panel, xy_pts, xy_pts = np.atleast_2d(xy_pts) # !!! full z offset from center of pinhole to center of layer - zs = layer_standoff + 0.5*layer_thickness + 0.5*pinhole_thickness + zs = layer_standoff + 0.5 * layer_thickness + 0.5 * pinhole_thickness - ref_angs, _ = panel.cart_to_angles(xy_pts, - rmat_s=None, tvec_s=None, - tvec_c=None, apply_distortion=True) + ref_angs, _ = panel.cart_to_angles( + xy_pts, rmat_s=None, tvec_s=None, tvec_c=None, apply_distortion=True + ) ref_tth = ref_angs[:, 0] dhats = xfcapi.unit_vector(panel.cart_to_dvecs(xy_pts)) @@ -198,7 +210,9 @@ def tth_corr_layer(panel, xy_pts, cos_beta[np.arccos(cos_beta) > critical_beta] = np.nan cos_tthn = np.cos(ref_tth) sin_tthn = np.sin(ref_tth) - tth_corr = np.arctan(sin_tthn/(source_distance*cos_beta/zs - cos_tthn)) + tth_corr = np.arctan( + sin_tthn / (source_distance * cos_beta / zs - cos_tthn) + ) if return_nominal: return np.vstack([ref_tth - tth_corr, ref_angs[:, 1]]).T else: @@ -206,9 +220,12 @@ def tth_corr_layer(panel, xy_pts, return np.vstack([-tth_corr, ref_angs[:, 1]]).T -def invalidate_past_critical_beta(panel: Detector, xy_pts: np.ndarray, - pinhole_thickness: float, - pinhole_radius: float) -> None: +def invalidate_past_critical_beta( + panel: Detector, + xy_pts: np.ndarray, + pinhole_thickness: float, + pinhole_radius: float, +) -> None: """Set any xy_pts past critical beta to be nan""" # Compute the critical beta angle. Anything past this is invalid. 
critical_beta = np.arctan(2 * pinhole_radius / pinhole_thickness) @@ -227,7 +244,7 @@ def tth_corr_map_layer(instrument, Parameters ---------- - instrument : hexrd.instrument.HEDMInstrument + instrument : hexrd.core.instrument.HEDMInstrument The pionhole camera instrument object. layer_standoff : scalar The sample layer standoff from the upstream face of the pinhole @@ -257,7 +274,7 @@ def tth_corr_map_layer(instrument, # view. But that is something we could do in the future: # critical_beta = np.arctan(2 * pinhole_radius / pinhole_thickness) - zs = layer_standoff + 0.5*layer_thickness + 0.5*pinhole_thickness + zs = layer_standoff + 0.5 * layer_thickness + 0.5 * pinhole_thickness tth_corr = dict.fromkeys(instrument.detectors) for det_key, panel in instrument.detectors.items(): ref_ptth, _ = panel.pixel_angles() @@ -270,7 +287,7 @@ def tth_corr_map_layer(instrument, cos_tthn = np.cos(ref_ptth.flatten()) sin_tthn = np.sin(ref_ptth.flatten()) tth_corr[det_key] = np.arctan( - sin_tthn/(instrument.source_distance*cos_beta/zs - cos_tthn) + sin_tthn / (instrument.source_distance * cos_beta / zs - cos_tthn) ).reshape(panel.shape) return tth_corr @@ -291,7 +308,7 @@ def tth_corr_pinhole(panel, xy_pts, Parameters ---------- - panel : hexrd.instrument.Detector + panel : hexrd.core.instrument.Detector A detector instance. xy_pts : array_like The (n, 2) array of n (x, y) coordinates to be transformed in the raw @@ -319,17 +336,13 @@ def tth_corr_pinhole(panel, xy_pts, cp_det = copy.deepcopy(panel) cp_det.bvec = np.sign(cp_det.bvec[2])*ct.beam_vec # !!! 
[0, 0, -1] ref_angs, _ = cp_det.cart_to_angles( - xy_pts, - rmat_s=None, tvec_s=None, - tvec_c=None, apply_distortion=True + xy_pts, rmat_s=None, tvec_s=None, tvec_c=None, apply_distortion=True ) ref_eta = ref_angs[:, 1] # These are the nominal tth values nom_angs, _ = panel.cart_to_angles( - xy_pts, - rmat_s=None, tvec_s=None, - tvec_c=None, apply_distortion=True + xy_pts, rmat_s=None, tvec_s=None, tvec_c=None, apply_distortion=True ) nom_tth = nom_angs[:, 0] @@ -337,8 +350,8 @@ def tth_corr_pinhole(panel, xy_pts, for i, (pxy, reta) in enumerate(zip(xy_pts, ref_eta)): # !!! JHE used pinhole center, but the back surface # seems to hew a bit closer to JRR's solution - origin = -pinhole_radius*np.array( - [np.cos(reta), np.sin(reta), 0.5*pinhole_thickness] + origin = -pinhole_radius * np.array( + [np.cos(reta), np.sin(reta), 0.5 * pinhole_thickness] ) angs, _ = panel.cart_to_angles(np.atleast_2d(pxy), tvec_c=origin) pin_tth[i] = angs[:, 0] @@ -356,7 +369,7 @@ def tth_corr_map_pinhole(instrument, pinhole_thickness, pinhole_radius): Parameters ---------- - instrument : hexrd.instrument.HEDMInstrument + instrument : hexrd.core.instrument.HEDMInstrument The pionhole camera instrument object. pinhole_thickness : scalar The thickenss (height) of the pinhole (cylinder) in mm @@ -391,8 +404,8 @@ def tth_corr_map_pinhole(instrument, pinhole_thickness, pinhole_radius): for i, (pxy, reta) in enumerate(zip(pcrds, ref_peta)): # !!! JHE used pinhole center, but the back surface # seems to hew a bit closer to JRR's solution - origin = -pinhole_radius*np.array( - [np.cos(reta), np.sin(reta), 0.5*pinhole_thickness] + origin = -pinhole_radius * np.array( + [np.cos(reta), np.sin(reta), 0.5 * pinhole_thickness] ) angs, _ = panel.cart_to_angles(np.atleast_2d(pxy), tvec_c=origin) new_ptth[i] = angs[:, 0] @@ -405,10 +418,10 @@ def calc_phi_x(bvec, eHat_l): returns phi_x in RADIANS """ bv = np.array(bvec) - bv[2] = 0. 
+ bv[2] = 0.0 bv_norm = np.linalg.norm(bv) if np.isclose(bv_norm, 0): - return 0. + return 0.0 else: bv = bv / bv_norm return np.arccos(np.dot(bv, -eHat_l)).item() @@ -492,9 +505,16 @@ def _infer_eta_shift(panel): return eta_shift_dict[instr_type] -def calc_tth_rygg_pinhole(panels, absorption_length, tth, eta, - pinhole_thickness, pinhole_radius, - num_phi_elements=60, clip_to_panel=True): +def calc_tth_rygg_pinhole( + panels, + absorption_length, + tth, + eta, + pinhole_thickness, + pinhole_radius, + num_phi_elements=60, + clip_to_panel=True, +): """Return pinhole twotheta [rad] and effective scattering volume [mm3]. num_phi_elements: number of pinhole phi elements for integration @@ -545,8 +565,9 @@ def calc_tth_rygg_pinhole(panels, absorption_length, tth, eta, mu_p = 1000 * mu_p # convert to [mm^-1] # Convert tth and eta to phi_d, beta, and r_d - dvec_arg = np.vstack((tth.flatten(), eta.flatten(), - np.zeros(np.prod(eta.shape)))) + dvec_arg = np.vstack( + (tth.flatten(), eta.flatten(), np.zeros(np.prod(eta.shape))) + ) dvectors = xfcapi.angles_to_dvec(dvec_arg.T, bvec, eta_vec=eHat_l) v0 = np.array([0, 0, 1]) @@ -578,7 +599,7 @@ def calc_tth_rygg_pinhole(panels, absorption_length, tth, eta, dvecs = panel.cart_to_dvecs(cart) full_dvecs = dvecs.T.reshape(3, *tth.shape).T - panel_r_d = np.sqrt(np.sum((full_dvecs)**2, axis=2)).T + panel_r_d = np.sqrt(np.sum((full_dvecs) ** 2, axis=2)).T # Only overwrite positions that are still nan on r_d r_d[np.isnan(r_d)] = panel_r_d[np.isnan(r_d)] @@ -605,24 +626,28 @@ def calc_tth_rygg_pinhole(panels, absorption_length, tth, eta, phi_vec = np.arange(dphi / 2, 2 * np.pi, dphi) # includes elements for X and D edges - z_vec = np.arange(-h_p/2 - dz/2, h_p/2 + dz/1.999, dz) - z_vec[0] = -h_p/2 # X-side edge (negative z) - z_vec[-1] = h_p/2 # D-side edge (positive z) + z_vec = np.arange(-h_p / 2 - dz / 2, h_p / 2 + dz / 1.999, dz) + z_vec[0] = -h_p / 2 # X-side edge (negative z) + z_vec[-1] = h_p / 2 # D-side edge (positive z) phi_i, 
z_i = np.meshgrid(phi_vec, z_vec) # [Nz x Np] - phi_i = phi_i[:, :, None, None] # [Nz x Np x 1 x 1] - z_i = z_i[:, :, None, None] # axes 0,1 => P; axes 2,3 => D + phi_i = phi_i[:, :, None, None] # [Nz x Np x 1 x 1] + z_i = z_i[:, :, None, None] # axes 0,1 => P; axes 2,3 => D # ------ calculate twotheta_i [a.k.a. qq_i], for each grid element ------ - bx, bd = (d_p / (2 * r_x), d_p / (2 * r_d)) + bx, bd = (d_p / (2 * r_x), d_p / (2 * r_d)) sin_a, cos_a, tan_a = np.sin(alpha), np.cos(alpha), np.tan(alpha) - sin_b, cos_b, tan_b = np.sin(beta), np.cos(beta), np.tan(beta) + sin_b, cos_b, tan_b = np.sin(beta), np.cos(beta), np.tan(beta) sin_phii, cos_phii = np.sin(phi_i), np.cos(phi_i) cos_dphi_x = np.cos(phi_i - phi_x + np.pi) # [Nz x Np x Nu x Nv] - alpha_i = np.arctan2(np.sqrt(sin_a**2 + 2*bx*sin_a*cos_dphi_x + bx**2), - cos_a + z_i/r_x) - phi_xi = np.arctan2(sin_a * np.sin(phi_x) - bx*sin_phii, - sin_a * np.cos(phi_x) - bx * cos_phii) + alpha_i = np.arctan2( + np.sqrt(sin_a**2 + 2 * bx * sin_a * cos_dphi_x + bx**2), + cos_a + z_i / r_x, + ) + phi_xi = np.arctan2( + sin_a * np.sin(phi_x) - bx * sin_phii, + sin_a * np.cos(phi_x) - bx * cos_phii, + ) # !!! This section used 4D arrays before, which was very time consuming # for large grids. 
Instead, we now loop over the columns and do them @@ -716,26 +741,51 @@ def _compute_qq_p(use_numba=True, *args, **kwargs): with np.errstate(divide='ignore', invalid='ignore'): # Ignore the errors this will inevitably produce - return np.nansum(V_i * qq_i, - axis=(0, 1)) / V_p # [Nu x Nv] <= detector - - -def _compute_vi_qq_i(phi_d, sin_b, bd, sin_phii, cos_phii, alpha_i, phi_xi, - sin_a, cos_dphi_x, cos_a, cos_b, dV_s, dV_e, z_i, h_p, - d_p, tan_b, tan_a, phi_i, r_d): + return ( + np.nansum(V_i * qq_i, axis=(0, 1)) / V_p + ) # [Nu x Nv] <= detector + + +def _compute_vi_qq_i( + phi_d, + sin_b, + bd, + sin_phii, + cos_phii, + alpha_i, + phi_xi, + sin_a, + cos_dphi_x, + cos_a, + cos_b, + dV_s, + dV_e, + z_i, + h_p, + d_p, + tan_b, + tan_a, + phi_i, + r_d, +): # This function can be numbafied, and has a numbafied version below. # Compute V_i and qq_i cos_dphi_d = np.cos(phi_i - phi_d + np.pi) - beta_i = np.arctan2(np.sqrt(sin_b**2 + 2*bd*sin_b*cos_dphi_d + bd**2), - cos_b - z_i/r_d) + beta_i = np.arctan2( + np.sqrt(sin_b**2 + 2 * bd * sin_b * cos_dphi_d + bd**2), + cos_b - z_i / r_d, + ) - phi_di = np.arctan2(sin_b * np.sin(phi_d) - bd*sin_phii, - sin_b * np.cos(phi_d) - bd * cos_phii) + phi_di = np.arctan2( + sin_b * np.sin(phi_d) - bd * sin_phii, + sin_b * np.cos(phi_d) - bd * cos_phii, + ) - arg = (np.cos(alpha_i) * np.cos(beta_i) - np.sin(alpha_i) * - np.sin(beta_i) * np.cos(phi_di - phi_xi)) + arg = np.cos(alpha_i) * np.cos(beta_i) - np.sin(alpha_i) * np.sin( + beta_i + ) * np.cos(phi_di - phi_xi) # scattering angle for each P to each D qq_i = np.arccos(np.clip(arg, -1, 1)) @@ -755,12 +805,14 @@ def _compute_vi_qq_i(phi_d, sin_b, bd, sin_phii, cos_phii, alpha_i, phi_xi, # ------ visibility of each grid element ------ # pinhole surface - is_seen = np.logical_and(z_i > h_p/2 - d_p/tan_b * cos_dphi_d, - z_i < -h_p/2 + d_p/tan_a * cos_dphi_x) + is_seen = np.logical_and( + z_i > h_p / 2 - d_p / tan_b * cos_dphi_d, + z_i < -h_p / 2 + d_p / tan_a * cos_dphi_x, + ) # 
X-side edge - is_seen[0] = np.where(h_p/d_p * tan_b < cos_dphi_d[0], 1, 0) + is_seen[0] = np.where(h_p / d_p * tan_b < cos_dphi_d[0], 1, 0) # D-side edge - is_seen[-1] = np.where(h_p/d_p * tan_a < cos_dphi_x[-1], 1, 0) + is_seen[-1] = np.where(h_p / d_p * tan_a < cos_dphi_x[-1], 1, 0) # ------ weighted sum over elements to obtain average ------ V_i *= is_seen # zero weight to elements with no view of both X and D @@ -768,25 +820,35 @@ def _compute_vi_qq_i(phi_d, sin_b, bd, sin_phii, cos_phii, alpha_i, phi_xi, # The numba version (works better in conjunction with multi-threading) -_compute_vi_qq_i_numba = njit( - nogil=True, cache=True)(_compute_vi_qq_i) - - -def tth_corr_rygg_pinhole(panel, absorption_length, xy_pts, - pinhole_thickness, pinhole_radius, - return_nominal=True, num_phi_elements=60): +_compute_vi_qq_i_numba = njit(nogil=True, cache=True)(_compute_vi_qq_i) + + +def tth_corr_rygg_pinhole( + panel, + absorption_length, + xy_pts, + pinhole_thickness, + pinhole_radius, + return_nominal=True, + num_phi_elements=60, +): # These are the nominal tth values nom_angs, _ = panel.cart_to_angles( - xy_pts, - rmat_s=None, tvec_s=None, - tvec_c=None, apply_distortion=True + xy_pts, rmat_s=None, tvec_s=None, tvec_c=None, apply_distortion=True ) nom_tth, nom_eta = nom_angs[:, :2].T # Don't clip these values to the panel because they will be shifted qq_p = calc_tth_rygg_pinhole( - panel, absorption_length, nom_tth, nom_eta, pinhole_thickness, - pinhole_radius, num_phi_elements, clip_to_panel=False) + panel, + absorption_length, + nom_tth, + nom_eta, + pinhole_thickness, + pinhole_radius, + num_phi_elements, + clip_to_panel=False, + ) # Make the distortion shift to the left instead of the right # FIXME: why is qq_p shifting the data to the right instead of the left? 
@@ -805,23 +867,49 @@ def tth_corr_rygg_pinhole(panel, absorption_length, xy_pts, return angs -def tth_corr_map_rygg_pinhole(instrument, absorption_length, pinhole_thickness, - pinhole_radius, num_phi_elements=60): +def tth_corr_map_rygg_pinhole( + instrument, + absorption_length, + pinhole_thickness, + pinhole_radius, + num_phi_elements=60, +): tth_corr = {} for det_key, panel in instrument.detectors.items(): nom_ptth, nom_peta = panel.pixel_angles() qq_p = calc_tth_rygg_pinhole( - panel, absorption_length, nom_ptth, nom_peta, pinhole_thickness, - pinhole_radius, num_phi_elements) + panel, + absorption_length, + nom_ptth, + nom_peta, + pinhole_thickness, + pinhole_radius, + num_phi_elements, + ) tth_corr[det_key] = nom_ptth - qq_p return tth_corr -def polar_tth_corr_map_rygg_pinhole(tth, eta, instrument, absorption_length, - pinhole_thickness, pinhole_radius, - num_phi_elements=60): +def polar_tth_corr_map_rygg_pinhole( + tth, + eta, + instrument, + absorption_length, + pinhole_thickness, + pinhole_radius, + num_phi_elements=60, +): """Generate a polar tth corr map directly for all panels""" panels = list(instrument.detectors.values()) - return calc_tth_rygg_pinhole(panels, absorption_length, tth, eta, - pinhole_thickness, pinhole_radius, - num_phi_elements) - tth + return ( + calc_tth_rygg_pinhole( + panels, + absorption_length, + tth, + eta, + pinhole_thickness, + pinhole_radius, + num_phi_elements, + ) + - tth + ) diff --git a/hexrd/hed/xrdutil/utils.py b/hexrd/hed/xrdutil/utils.py new file mode 100644 index 000000000..52d86bb3e --- /dev/null +++ b/hexrd/hed/xrdutil/utils.py @@ -0,0 +1,426 @@ +#! /usr/bin/env python3 +# ============================================================ +# Copyright (c) 2012, Lawrence Livermore National Security, LLC. +# Produced at the Lawrence Livermore National Laboratory. +# Written by Joel Bernier and others. +# LLNL-CODE-529294. +# All rights reserved. +# +# This file is part of HEXRD. 
For details on dowloading the source, +# see the file COPYING. +# +# Please also see the file LICENSE. +# +# This program is free software; you can redistribute it and/or modify it under +# the terms of the GNU Lesser General Public License (as published by the Free +# Software Foundation) version 2.1 dated February 1999. +# +# This program is distributed in the hope that it will be useful, but +# WITHOUT ANY WARRANTY; without even the IMPLIED WARRANTY OF MERCHANTABILITY +# or FITNESS FOR A PARTICULAR PURPOSE. See the terms and conditions of the +# GNU General Public License for more details. +# +# You should have received a copy of the GNU Lesser General Public +# License along with this program (see file LICENSE); if not, write to +# the Free Software Foundation, Inc., 59 Temple Place, Suite 330, +# Boston, MA 02111-1307 USA or visit . +# ============================================================ + + +# TODO: Resolve extra-workflow dependency +from hexrd.core.distortion.distortionabc import DistortionABC + +import numpy as np + +from hexrd.core import constants +from hexrd.core.material.crystallography import processWavelength, PlaneData +from hexrd.core.transforms import xfcapi + + +simlp = 'hexrd.hedm.instrument.hedm_instrument.HEDMInstrument.simulate_laue_pattern' + +# ============================================================================= +# PARAMETERS +# ============================================================================= + +distortion_key = 'distortion' + +d2r = piby180 = constants.d2r +r2d = constants.r2d + +epsf = constants.epsf # ~2.2e-16 +ten_epsf = 10 * epsf # ~2.2e-15 +sqrt_epsf = constants.sqrt_epsf # ~1.5e-8 + +bHat_l_DFLT = constants.beam_vec.flatten() +eHat_l_DFLT = constants.eta_vec.flatten() + +nans_1x2 = np.nan * np.ones((1, 2)) + +# ============================================================================= +# FUNCTIONS +# ============================================================================= + +validateAngleRanges = 
xfcapi.validate_angle_ranges + +def _project_on_detector_plane( + allAngs: np.ndarray, + rMat_d: np.ndarray, + rMat_c: np.ndarray, + chi: float, + tVec_d: np.ndarray, + tVec_c: np.ndarray, + tVec_s: np.ndarray, + distortion: DistortionABC, + beamVec: np.ndarray = constants.beam_vec, +) -> tuple[np.ndarray, np.ndarray, np.ndarray]: + """ + utility routine for projecting a list of (tth, eta, ome) onto the + detector plane parameterized by the args + """ + gVec_cs = xfcapi.angles_to_gvec( + allAngs, chi=chi, rmat_c=rMat_c, beam_vec=beamVec + ) + + rMat_ss = xfcapi.make_sample_rmat(chi, allAngs[:, 2]) + + tmp_xys = xfcapi.gvec_to_xy( + gVec_cs, + rMat_d, + rMat_ss, + rMat_c, + tVec_d, + tVec_s, + tVec_c, + beam_vec=beamVec, + ) + + valid_mask = ~(np.isnan(tmp_xys[:, 0]) | np.isnan(tmp_xys[:, 1])) + + det_xy = np.atleast_2d(tmp_xys[valid_mask, :]) + + # apply distortion if specified + if distortion is not None: + det_xy = distortion.apply_inverse(det_xy) + + return det_xy, rMat_ss, valid_mask + + +def _project_on_detector_cylinder( + allAngs: np.ndarray, + chi: float, + tVec_d: np.ndarray, + caxis: np.ndarray, + paxis: np.ndarray, + radius: float, + physical_size: np.ndarray, + angle_extent: float, + distortion: DistortionABC = None, + beamVec: np.ndarray = constants.beam_vec, + etaVec: np.ndarray = constants.eta_vec, + tVec_s: np.ndarray = constants.zeros_3x1, + rmat_s: np.ndarray = constants.identity_3x3, + tVec_c: np.ndarray = constants.zeros_3x1, +) -> tuple[np.ndarray, np.ndarray, np.ndarray]: + """ + utility routine for projecting a list of (tth, eta, ome) onto the + detector plane parameterized by the args. 
this function does the + computation for a cylindrical detector + """ + dVec_cs = xfcapi.angles_to_dvec( + allAngs, chi=chi, rmat_c=np.eye(3), beam_vec=beamVec, eta_vec=etaVec + ) + + rMat_ss = np.tile(rmat_s, [allAngs.shape[0], 1, 1]) + + tmp_xys, valid_mask = _dvecToDetectorXYcylinder( + dVec_cs, + tVec_d, + caxis, + paxis, + radius, + physical_size, + angle_extent, + tVec_s=tVec_s, + rmat_s=rmat_s, + tVec_c=tVec_c, + ) + + det_xy = np.atleast_2d(tmp_xys[valid_mask, :]) + + # apply distortion if specified + if distortion is not None: + det_xy = distortion.apply_inverse(det_xy) + + return det_xy, rMat_ss, valid_mask + + +def _unitvec_to_cylinder( + uvw: np.ndarray, + caxis: np.ndarray, + paxis: np.ndarray, + radius: float, + tvec: np.ndarray, + tVec_s: np.ndarray = constants.zeros_3x1, + tVec_c: np.ndarray = constants.zeros_3x1, + rmat_s: np.ndarray = constants.identity_3x3, +) -> np.ndarray: + """ + get point where unitvector uvw + intersect the cylindrical detector. + this will give points which are + outside the actual panel. 
the points + will be clipped to the panel later + + Parameters + ---------- + uvw : numpy.ndarray + unit vectors stacked row wise (nx3) shape + + Returns + ------- + numpy.ndarray + (x,y,z) vectors point which intersect with + the cylinder with (nx3) shape + """ + naxis = np.cross(caxis, paxis) + naxis = naxis / np.linalg.norm(naxis) + + tvec_c_l = np.dot(rmat_s, tVec_c) + + delta = tvec - (radius * naxis + np.squeeze(tVec_s) + np.squeeze(tvec_c_l)) + num = uvw.shape[0] + cx = np.atleast_2d(caxis).T + + delta_t = np.tile(delta, [num, 1]) + + t1 = np.dot(uvw, delta.T) + t2 = np.squeeze(np.dot(uvw, cx)) + t3 = np.squeeze(np.dot(delta, cx)) + t4 = np.dot(uvw, cx) + + A = np.squeeze(1 - t4**2) + B = t1 - t2 * t3 + C = radius**2 - np.linalg.norm(delta) ** 2 + t3**2 + + mask = np.abs(A) < 1e-10 + beta = np.zeros( + [ + num, + ] + ) + + beta[~mask] = (B[~mask] + np.sqrt(B[~mask] ** 2 + A[~mask] * C)) / A[~mask] + + beta[mask] = np.nan + return np.tile(beta, [3, 1]).T * uvw + + +def _clip_to_cylindrical_detector( + uvw: np.ndarray, + tVec_d: np.ndarray, + caxis: np.ndarray, + paxis: np.ndarray, + radius: float, + physical_size: np.ndarray, + angle_extent: float, + tVec_s: np.ndarray = constants.zeros_3x1, + tVec_c: np.ndarray = constants.zeros_3x1, + rmat_s: np.ndarray = constants.identity_3x3, +) -> tuple[np.ndarray, np.ndarray]: + """ + takes in the intersection points uvw + with the cylindrical detector and + prunes out points which don't actually + hit the actual panel + + Parameters + ---------- + uvw : numpy.ndarray + unit vectors stacked row wise (nx3) shape + + Returns + ------- + numpy.ndarray + (x,y,z) vectors point which fall on panel + with (mx3) shape + """ + # first get rid of points which are above + # or below the detector + naxis = np.cross(caxis, paxis) + num = uvw.shape[0] + + cx = np.atleast_2d(caxis).T + nx = np.atleast_2d(naxis).T + + tvec_c_l = np.dot(rmat_s, tVec_c) + + delta = tVec_d - ( + radius * naxis + np.squeeze(tVec_s) + np.squeeze(tvec_c_l) 
+ ) + + delta_t = np.tile(delta, [num, 1]) + + uvwp = uvw - delta_t + dp = np.dot(uvwp, cx) + + uvwpxy = uvwp - np.tile(dp, [1, 3]) * np.tile(cx, [1, num]).T + + size = physical_size + tvec = np.atleast_2d(tVec_d).T + + # ycomp = uvwp - np.tile(tVec_d,[num, 1]) + mask1 = np.squeeze(np.abs(dp) > size[0] * 0.5) + uvwp[mask1, :] = np.nan + + # next get rid of points that fall outside + # the polar angle range + + ang = np.dot(uvwpxy, nx) / radius + ang[np.abs(ang) > 1.0] = np.sign(ang[np.abs(ang) > 1.0]) + + ang = np.arccos(ang) + mask2 = np.squeeze(ang >= angle_extent) + mask = np.logical_or(mask1, mask2) + res = uvw.copy() + res[mask, :] = np.nan + + return res, ~mask + + +def _dewarp_from_cylinder( + uvw: np.ndarray, + tVec_d: np.ndarray, + caxis: np.ndarray, + paxis: np.ndarray, + radius: float, + tVec_s: np.ndarray = constants.zeros_3x1, + tVec_c: np.ndarray = constants.zeros_3x1, + rmat_s: np.ndarray = constants.identity_3x3, +): + """ + routine to convert cylindrical coordinates + to cartesian coordinates in image frame + """ + naxis = np.cross(caxis, paxis) + naxis = naxis / np.linalg.norm(naxis) + + cx = np.atleast_2d(caxis).T + px = np.atleast_2d(paxis).T + nx = np.atleast_2d(naxis).T + num = uvw.shape[0] + + tvec_c_l = np.dot(rmat_s, tVec_c) + + delta = tVec_d - ( + radius * naxis + np.squeeze(tVec_s) + np.squeeze(tvec_c_l) + ) + + delta_t = np.tile(delta, [num, 1]) + + uvwp = uvw - delta_t + + uvwpxy = uvwp - np.tile(np.dot(uvwp, cx), [1, 3]) * np.tile(cx, [1, num]).T + + sgn = np.sign(np.dot(uvwpxy, px)) + sgn[sgn == 0.0] = 1.0 + ang = np.dot(uvwpxy, nx) / radius + ang[np.abs(ang) > 1.0] = np.sign(ang[np.abs(ang) > 1.0]) + ang = np.arccos(ang) + xcrd = np.squeeze(radius * ang * sgn) + ycrd = np.squeeze(np.dot(uvwp, cx)) + return np.vstack((xcrd, ycrd)).T + + +def _warp_to_cylinder( + cart: np.ndarray, + tVec_d: np.ndarray, + radius: float, + caxis: np.ndarray, + paxis: np.ndarray, + tVec_s: np.ndarray = constants.zeros_3x1, + rmat_s: np.ndarray = 
constants.identity_3x3, + tVec_c: np.ndarray = constants.zeros_3x1, + normalize: bool = True, +) -> np.ndarray: + """ + routine to convert cartesian coordinates + in image frame to cylindrical coordinates + """ + tvec = np.atleast_2d(tVec_d).T + if tVec_s.ndim == 1: + tVec_s = np.atleast_2d(tVec_s).T + if tVec_c.ndim == 1: + tVec_c = np.atleast_2d(tVec_c).T + num = cart.shape[0] + naxis = np.cross(paxis, caxis) + x = cart[:, 0] + y = cart[:, 1] + th = x / radius + xp = radius * np.sin(th) + xn = radius * (1 - np.cos(th)) + + ccomp = np.tile(y, [3, 1]).T * np.tile(caxis, [num, 1]) + pcomp = np.tile(xp, [3, 1]).T * np.tile(paxis, [num, 1]) + ncomp = np.tile(xn, [3, 1]).T * np.tile(naxis, [num, 1]) + cart3d = pcomp + ccomp + ncomp + + tVec_c_l = np.dot(rmat_s, tVec_c) + + res = cart3d + np.tile(tvec - tVec_s - tVec_c_l, [1, num]).T + + if normalize: + return res / np.tile(np.linalg.norm(res, axis=1), [3, 1]).T + else: + return res + + +def _dvecToDetectorXYcylinder( + dVec_cs: np.ndarray, + tVec_d: np.ndarray, + caxis: np.ndarray, + paxis: np.ndarray, + radius: float, + physical_size: np.ndarray, + angle_extent: float, + tVec_s: np.ndarray = constants.zeros_3x1, + tVec_c: np.ndarray = constants.zeros_3x1, + rmat_s: np.ndarray = constants.identity_3x3, +) -> tuple[np.ndarray, np.ndarray]: + + cvec = _unitvec_to_cylinder( + dVec_cs, + caxis, + paxis, + radius, + tVec_d, + tVec_s=tVec_s, + tVec_c=tVec_c, + rmat_s=rmat_s, + ) + + cvec_det, valid_mask = _clip_to_cylindrical_detector( + cvec, + tVec_d, + caxis, + paxis, + radius, + physical_size, + angle_extent, + tVec_s=tVec_s, + tVec_c=tVec_c, + rmat_s=rmat_s, + ) + + xy_det = _dewarp_from_cylinder( + cvec_det, + tVec_d, + caxis, + paxis, + radius, + tVec_s=tVec_s, + tVec_c=tVec_c, + rmat_s=rmat_s, + ) + + return xy_det, valid_mask diff --git a/hexrd/cli/__init__.py b/hexrd/hedm/cli/__init__.py similarity index 99% rename from hexrd/cli/__init__.py rename to hexrd/hedm/cli/__init__.py index 09b5763f7..634ae5cd1 100644 --- 
a/hexrd/cli/__init__.py +++ b/hexrd/hedm/cli/__init__.py @@ -7,5 +7,4 @@ # Consult LICENSE.txt or http://opensource.org/licenses/BSD-3-Clause. - # from .main import main diff --git a/hexrd/cli/documentation.py b/hexrd/hedm/cli/documentation.py similarity index 99% rename from hexrd/cli/documentation.py rename to hexrd/hedm/cli/documentation.py index 69c2b481a..4ac2559aa 100644 --- a/hexrd/cli/documentation.py +++ b/hexrd/hedm/cli/documentation.py @@ -12,4 +12,5 @@ def execute(args, parser): import webbrowser import hexrd + webbrowser.open_new_tab(hexrd.doc_url) diff --git a/hexrd/cli/find_orientations.py b/hexrd/hedm/cli/find_orientations.py similarity index 71% rename from hexrd/cli/find_orientations.py rename to hexrd/hedm/cli/find_orientations.py index 7b3792b8d..8e299d08e 100644 --- a/hexrd/cli/find_orientations.py +++ b/hexrd/hedm/cli/find_orientations.py @@ -6,11 +6,14 @@ import numpy as np -from hexrd import constants as const -from hexrd import config -from hexrd import instrument -from hexrd.transforms import xfcapi -from hexrd.findorientations import find_orientations, write_scored_orientations +from hexrd.core import constants as const +from hexrd.hedm import config +from hexrd.core import instrument +from hexrd.core.transforms import xfcapi +from hexrd.hedm.findorientations import ( + find_orientations, + write_scored_orientations, +) descr = 'Process rotation image series to find grain orientations' @@ -22,36 +25,42 @@ def configure_parser(sub_parsers): p = sub_parsers.add_parser( - 'find-orientations', - description=descr, - help=descr - ) - p.add_argument( - 'yml', type=str, - help='YAML configuration file' - ) + 'find-orientations', description=descr, help=descr + ) + p.add_argument('yml', type=str, help='YAML configuration file') p.add_argument( - '-q', '--quiet', action='store_true', - help="don't report progress in terminal" - ) + '-q', + '--quiet', + action='store_true', + help="don't report progress in terminal", + ) p.add_argument( - '-f', 
'--force', action='store_true', - help='overwrites existing analysis' - ) + '-f', + '--force', + action='store_true', + help='overwrites existing analysis', + ) p.add_argument( - '-c', '--clean', action='store_true', - help='overwrites existing analysis, including maps' - ) + '-c', + '--clean', + action='store_true', + help='overwrites existing analysis, including maps', + ) p.add_argument( - '--hkls', metavar='HKLs', type=str, default=None, + '--hkls', + metavar='HKLs', + type=str, + default=None, help="""\ list hkl entries in the materials file to use for fitting; - if None, defaults to list specified in the yml file""" - ) + if None, defaults to list specified in the yml file""", + ) p.add_argument( - '-p', '--profile', action='store_true', + '-p', + '--profile', + action='store_true', help='runs the analysis with cProfile enabled', - ) + ) p.set_defaults(func=execute) @@ -61,16 +70,15 @@ def write_results(results, cfg): # Write accepted orientations. qbar_filename = str(cfg.find_orientations.accepted_orientations_file) - np.savetxt(qbar_filename, results['qbar'].T, - fmt='%.18e', delimiter='\t') + np.savetxt(qbar_filename, results['qbar'].T, fmt='%.18e', delimiter='\t') # Write grains.out. 
gw = instrument.GrainDataWriter(cfg.find_orientations.grains_file) for gid, q in enumerate(results['qbar'].T): - phi = 2*np.arccos(q[0]) + phi = 2 * np.arccos(q[0]) n = xfcapi.unit_vector(q[1:]) - grain_params = np.hstack([phi*n, const.zeros_3, const.identity_6x1]) - gw.dump_grain(gid, 1., 0., grain_params) + grain_params = np.hstack([phi * n, const.zeros_3, const.identity_6x1]) + gw.dump_grain(gid, 1.0, 0.0, grain_params) gw.close() @@ -93,7 +101,7 @@ def execute(args, parser): ch.setLevel(logging.CRITICAL if args.quiet else log_level) ch.setFormatter( logging.Formatter('%(asctime)s - %(message)s', '%y-%m-%d %H:%M:%S') - ) + ) logger.addHandler(ch) logger.info('=== begin find-orientations ===') @@ -106,7 +114,7 @@ def execute(args, parser): if (quats_f.exists()) and not (args.force or args.clean): logger.error( '%s already exists. Change yml file or specify "force" or "clean"', - quats_f + quats_f, ) sys.exit() @@ -119,10 +127,9 @@ def execute(args, parser): fh.setLevel(log_level) fh.setFormatter( logging.Formatter( - '%(asctime)s - %(name)s - %(message)s', - '%m-%d %H:%M:%S' - ) + '%(asctime)s - %(name)s - %(message)s', '%m-%d %H:%M:%S' ) + ) logger.info("logging to %s", logfile) logger.addHandler(fh) @@ -130,15 +137,13 @@ def execute(args, parser): import cProfile as profile import pstats from io import StringIO + pr = profile.Profile() pr.enable() # process the data results = find_orientations( - cfg, - hkls=args.hkls, - clean=args.clean, - profile=args.profile + cfg, hkls=args.hkls, clean=args.clean, profile=args.profile ) # Write out the results diff --git a/hexrd/cli/fit_grains.py b/hexrd/hedm/cli/fit_grains.py similarity index 82% rename from hexrd/cli/fit_grains.py rename to hexrd/hedm/cli/fit_grains.py index 615807f0e..3860b60a5 100644 --- a/hexrd/cli/fit_grains.py +++ b/hexrd/hedm/cli/fit_grains.py @@ -5,13 +5,13 @@ import numpy as np -from hexrd import config -from hexrd import constants as cnst -from hexrd import rotations -from hexrd import instrument
-from hexrd.findorientations import find_orientations -from hexrd.fitgrains import fit_grains -from hexrd.transforms import xfcapi +from hexrd.hedm import config +from hexrd.core import constants as cnst +from hexrd.core import rotations +from hexrd.core import instrument +from hexrd.hedm.findorientations import find_orientations +from hexrd.hedm.fitgrains import fit_grains +from hexrd.core.transforms import xfcapi descr = 'Extracts G vectors, grain position and strain' @@ -21,7 +22,13 @@ """ _flds = [ - "id", "completeness", "chisq", "expmap", "centroid", "inv_Vs", "ln_Vs" + "id", + "completeness", + "chisq", + "expmap", + "centroid", + "inv_Vs", + "ln_Vs", ] _BaseGrainData = namedtuple("_BaseGrainData", _flds) del _flds @@ -31,7 +38,7 @@ class GrainData(_BaseGrainData): """Simple class for storing grain output data To read the grains file, use the `load` method, like this: - > from hexrd.fitgrains import GrainData + > from hexrd.hedm.fitgrains import GrainData > gd = GrainData.load("grains.npz") """ @@ -64,7 +71,7 @@ def from_grains_out(cls, fname): def from_array(cls, a): """Return GrainData instance from numpy datatype array""" return cls( - id=a[:,0].astype(int), + id=a[:, 0].astype(int), completeness=a[:, 1], chisq=a[:, 2], expmap=a[:, 3:6], @@ -92,13 +99,12 @@ def num_grains(self): @property def quaternions(self): - """Return quaternions as array(num_grains, 4). - """ + """Return quaternions as array(num_grains, 4).""" return rotations.quatOfExpMap(self.expmap.T).T @property def rotation_matrices(self): - """"Return rotation matrices from exponential map parameters""" + """ "Return rotation matrices from exponential map parameters""" # # Compute the rotation matrices only once, the first time this is # called, and save the results.
@@ -139,44 +145,55 @@ def select(self, min_completeness=0.0, max_chisq=None): sel = sel_comp & (self.chisq <= max_chisq) if has_chisq else sel_comp return __class__( - self.id[sel], self.completeness[sel], self.chisq[sel], - self.expmap[sel], self.centroid[sel], self.inv_Vs[sel], - self.ln_Vs[sel] + self.id[sel], + self.completeness[sel], + self.chisq[sel], + self.expmap[sel], + self.centroid[sel], + self.inv_Vs[sel], + self.ln_Vs[sel], ) def configure_parser(sub_parsers): p = sub_parsers.add_parser('fit-grains', description=descr, help=descr) + p.add_argument('yml', type=str, help='YAML configuration file') p.add_argument( - 'yml', type=str, - help='YAML configuration file' - ) + '-g', + '--grains', + type=str, + default=None, + help="comma-separated list of IDs to refine, defaults to all", + ) p.add_argument( - '-g', '--grains', type=str, default=None, - help="comma-separated list of IDs to refine, defaults to all" - ) - p.add_argument( - '-q', '--quiet', action='store_true', - help="don't report progress in terminal" - ) + '-q', + '--quiet', + action='store_true', + help="don't report progress in terminal", + ) p.add_argument( - '-c', '--clean', action='store_true', - help='overwrites existing analysis, uses initial orientations' - ) + '-c', + '--clean', + action='store_true', + help='overwrites existing analysis, uses initial orientations', + ) p.add_argument( - '-f', '--force', action='store_true', - help='overwrites existing analysis' - ) + '-f', + '--force', + action='store_true', + help='overwrites existing analysis', + ) p.add_argument( - '-p', '--profile', action='store_true', + '-p', + '--profile', + action='store_true', help='runs the analysis with cProfile enabled', - ) + ) p.set_defaults(func=execute) def write_results( - fit_results, cfg, - grains_filename='grains.out', grains_npz='grains.npz' + fit_results, cfg, grains_filename='grains.out', grains_npz='grains.npz' ): instr = cfg.instrument.hedm nfit = len(fit_results) @@ -186,7 +203,7 @@ def 
write_results( for det_key in instr.detectors: (cfg.analysis_dir / det_key).mkdir(parents=True, exist_ok=True) - gw = instrument.GrainDataWriter(str(cfg.analysis_dir /grains_filename)) + gw = instrument.GrainDataWriter(str(cfg.analysis_dir / grains_filename)) gd_array = np.zeros((nfit, 21)) gwa = instrument.GrainDataWriter(array=gd_array) for fit_result in fit_results: @@ -241,9 +258,11 @@ def execute(args, parser): if have_orientations: try: qbar = np.loadtxt(quats_f, ndmin=2).T - except(IOError): - raise(RuntimeError, - "error loading indexing results '%s'" % quats_f) + except IOError: + raise ( + RuntimeError, + "error loading indexing results '%s'" % quats_f, + ) else: logger.info("Missing %s, running find-orientations", quats_f) logger.removeHandler(ch) @@ -262,8 +281,8 @@ def execute(args, parser): logger.error( 'Analysis "%s" already exists. ' 'Change yml file or specify "force"', - cfg.analysis_name - ) + cfg.analysis_name, + ) sys.exit() # Set up analysis directory and output directories. 
@@ -287,9 +306,7 @@ def execute(args, parser): maxtth = np.radians(cfg.fit_grains.tth_max) excl_p = excl_p._replace(tthmax=maxtth) - cfg.material.plane_data.exclude( - **excl_p._asdict() - ) + cfg.material.plane_data.exclude(**excl_p._asdict()) using_nhkls = np.count_nonzero( np.logical_not(cfg.material.plane_data.exclusions) ) @@ -302,9 +319,8 @@ def execute(args, parser): fh = logging.FileHandler(logfile, mode='w') fh.setLevel(log_level) ff = logging.Formatter( - '%(asctime)s - %(name)s - %(message)s', - '%m-%d %H:%M:%S' - ) + '%(asctime)s - %(name)s - %(message)s', '%m-%d %H:%M:%S' + ) fh.setFormatter(ff) logger.info("logging to %s", logfile) logger.addHandler(fh) @@ -317,7 +333,6 @@ def execute(args, parser): pr = profile.Profile() pr.enable() - # some conditionals for arg handling existing_analysis = grains_filename.exists() fit_estimate = cfg.fit_grains.estimate @@ -351,29 +366,32 @@ def execute(args, parser): # grains.out file gw = instrument.GrainDataWriter(grains_filename) for i_g, q in enumerate(qbar.T): - phi = 2*np.arccos(q[0]) + phi = 2 * np.arccos(q[0]) n = xfcapi.unit_vector(q[1:]) grain_params = np.hstack( - [phi*n, cnst.zeros_3, cnst.identity_6x1] + [phi * n, cnst.zeros_3, cnst.identity_6x1] ) - gw.dump_grain(int(i_g), 1., 0., grain_params) + gw.dump_grain(int(i_g), 1.0, 0.0, grain_params) gw.close() - except(IOError): - raise(RuntimeError, - "indexing results '%s' not found!" - % str(grains_filename)) + except IOError: + raise ( + RuntimeError, + "indexing results '%s' not found!" 
% str(grains_filename), + ) elif force_with_estimate or new_with_estimate: grains_filename = fit_estimate logger.info("using initial estimate '%s'", fit_estimate) elif existing_analysis and not clobber: - raise(RuntimeError, - "fit results '%s' exist, " % grains_filename - + "but --clean or --force options not specified") + raise ( + RuntimeError, + "fit results '%s' exist, " % grains_filename + + "but --clean or --force options not specified", + ) # get grain parameters by loading grains table try: grains_table = np.loadtxt(grains_filename, ndmin=2) - except(IOError): + except IOError: raise RuntimeError("problem loading '%s'" % grains_filename) # process the data @@ -386,7 +404,7 @@ def execute(args, parser): grains_table, show_progress=not args.quiet, ids_to_refine=gid_list, - ) + ) if args.profile: pr.disable() diff --git a/hexrd/cli/help.py b/hexrd/hedm/cli/help.py similarity index 56% rename from hexrd/cli/help.py rename to hexrd/hedm/cli/help.py index 5a7afbe90..0f2a09dc5 100644 --- a/hexrd/cli/help.py +++ b/hexrd/hedm/cli/help.py @@ -1,18 +1,15 @@ - - descr = "Displays a list of available conda commands and their help strings." 
+ def configure_parser(sub_parsers): - p = sub_parsers.add_parser('help', - description = descr, - help = descr) + p = sub_parsers.add_parser('help', description=descr, help=descr) p.add_argument( 'command', - metavar = 'COMMAND', - action = "store", - nargs = '?', - help = "print help information for COMMAND " - "(same as: conda COMMAND -h)", + metavar='COMMAND', + action="store", + nargs='?', + help="print help information for COMMAND " + "(same as: conda COMMAND -h)", ) p.set_defaults(func=execute) diff --git a/hexrd/cli/main.py b/hexrd/hedm/cli/main.py similarity index 87% rename from hexrd/cli/main.py rename to hexrd/hedm/cli/main.py index eb5825acf..6e827b5d1 100644 --- a/hexrd/cli/main.py +++ b/hexrd/hedm/cli/main.py @@ -8,15 +8,15 @@ # These can't be relative imports on Windows because of the hack # in main() for multiprocessing.freeze_support() -from hexrd.cli import help -from hexrd.cli import test -from hexrd.cli import documentation -from hexrd.utils import profiler +from hexrd.hedm.cli import help +from hexrd.hedm.cli import test +from hexrd.hedm.cli import documentation +from hexrd.core.utils import profiler -from hexrd.cli import find_orientations -from hexrd.cli import fit_grains -from hexrd.cli import pickle23 -from hexrd.cli import preprocess +from hexrd.hedm.cli import find_orientations +from hexrd.hedm.cli import fit_grains +from hexrd.hedm.cli import pickle23 +from hexrd.hedm.cli import preprocess try: @@ -71,6 +71,7 @@ def main(): try: import argcomplete + argcomplete.autocomplete(p) except ImportError: pass diff --git a/hexrd/cli/pickle23.py b/hexrd/hedm/cli/pickle23.py similarity index 65% rename from hexrd/cli/pickle23.py rename to hexrd/hedm/cli/pickle23.py index 34b23712d..e499e67a2 100644 --- a/hexrd/cli/pickle23.py +++ b/hexrd/hedm/cli/pickle23.py @@ -1,4 +1,5 @@ """Convert python 2 hexrd pickles to hexrd3""" + import sys import shutil @@ -8,15 +9,10 @@ def configure_parser(sub_parsers): - p = sub_parsers.add_parser('pickle23', - 
description = descr, - help = descr) + p = sub_parsers.add_parser('pickle23', description=descr, help=descr) p.set_defaults(func=execute) - p.add_argument( - 'file', type=str, - help='name of file to convert' - ) + p.add_argument('file', type=str, help='name of file to convert') def execute(args, p): @@ -24,9 +20,9 @@ def execute(args, p): fname = args.file fback = fname + ".bak" shutil.copy(fname, fback) - with open(fname, "w") as fnew: + with open(fname, "w") as fnew: with open(fback, "r") as f: - for l in f: + for l in f: l = l.replace('hexrd.xrd.', 'hexrd.') fnew.write(l) return diff --git a/hexrd/cli/preprocess.py b/hexrd/hedm/cli/preprocess.py similarity index 97% rename from hexrd/cli/preprocess.py rename to hexrd/hedm/cli/preprocess.py index 02791ef1b..1713a2726 100644 --- a/hexrd/cli/preprocess.py +++ b/hexrd/hedm/cli/preprocess.py @@ -1,6 +1,6 @@ import dataclasses -from hexrd.preprocess.profiles import HexrdPPScript_Arguments -from hexrd.preprocess.preprocessors import preprocess +from hexrd.hedm.preprocess.profiles import HexrdPPScript_Arguments +from hexrd.hedm.preprocess.preprocessors import preprocess from dataclasses import fields import json import copy diff --git a/hexrd/cli/test.py b/hexrd/hedm/cli/test.py similarity index 54% rename from hexrd/cli/test.py rename to hexrd/hedm/cli/test.py index 9387e9525..7d92a854b 100644 --- a/hexrd/cli/test.py +++ b/hexrd/hedm/cli/test.py @@ -1,6 +1,5 @@ """Command to run tests""" - descr = 'runs the hexrd test suite' example = """ examples: @@ -9,17 +8,19 @@ def configure_parser(sub_parsers): - p = sub_parsers.add_parser('test', description = descr, help = descr) + p = sub_parsers.add_parser('test', description=descr, help=descr) p.set_defaults(func=execute) p.add_argument( - '-v', '--verbose', action='store_true', - help="report detailed results in terminal" - ) + '-v', + '--verbose', + action='store_true', + help="report detailed results in terminal", + ) def execute(args, parser): import unittest suite = 
unittest.TestLoader().discover('hexrd') - unittest.TextTestRunner(verbosity = args.verbose + 1).run(suite) + unittest.TextTestRunner(verbosity=args.verbose + 1).run(suite) diff --git a/hexrd/hedm/config/__init__.py b/hexrd/hedm/config/__init__.py new file mode 100644 index 000000000..5a51db4e0 --- /dev/null +++ b/hexrd/hedm/config/__init__.py @@ -0,0 +1,53 @@ +import os + +import yaml + + + +# The following were moved to core +from hexrd.core.config import config, root, utils +from hexrd.core.config import material + +""" +Note that we need to use the open() builtin in what was formerly the "open()" +function. So we define the _open(), and then redefine open() to the new +function. +""" +open_file = open + + +def open(file_name=None): + """ + Reads configuration settings from a yaml file. + + Returns a list of configuration objects, one for each document section in + the file. + """ + if file_name is None: + return [root.RootConfig({})] + + if not os.path.isfile(file_name): + raise ValueError(f'Config file not found: "{file_name}"') + + with open_file(file_name) as f: + res = [] + for cfg in yaml.load_all(f, Loader=yaml.SafeLoader): + try: + # take the previous config section and update with values + # from the current one + res.append(utils.merge_dicts(res[-1], cfg)) + except IndexError: + # this is the first config section + res.append(cfg) + + return [root.RootConfig(i) for i in res] + + +def save(config_list, file_name): + res = [cfg._cfg for cfg in config_list] + + with open_file(file_name, 'w') as f: + if len(res) > 1: + yaml.safe_dump_all(res, f) + else: + yaml.safe_dump(res, f) diff --git a/hexrd/config/findorientations.py b/hexrd/hedm/config/findorientations.py similarity index 80% rename from hexrd/config/findorientations.py rename to hexrd/hedm/config/findorientations.py index 8c75829b3..1b86e2ab4 100644 --- a/hexrd/config/findorientations.py +++ b/hexrd/hedm/config/findorientations.py @@ -4,7 +4,7 @@ import numpy as np -from .config import Config +from 
hexrd.core.config.config import Config logger = logging.getLogger('hexrd.config') @@ -13,12 +13,16 @@ # TODO: set these as defaults seed_search_methods = { 'label': dict(filter_radius=1, threshold=1), - 'blob_log': dict(min_sigma=0.5, max_sigma=5, - num_sigma=10, threshold=0.01, - overlap=0.1), - 'blob_dog': dict(min_sigma=0.5, max_sigma=5, - sigma_ratio=1.6, - threshold=0.01, overlap=0.1) + 'blob_log': dict( + min_sigma=0.5, max_sigma=5, num_sigma=10, threshold=0.01, overlap=0.1 + ), + 'blob_dog': dict( + min_sigma=0.5, + max_sigma=5, + sigma_ratio=1.6, + threshold=0.01, + overlap=0.1, + ), } @@ -50,9 +54,7 @@ def accepted_orientations_file(self): newname = f"accepted-orientations-{actmat}.dat" aof_path = self.parent.analysis_dir / newname else: - oldname = ( - 'accepted_orientations_%s.dat' % self.parent.analysis_id - ) + oldname = 'accepted_orientations_%s.dat' % self.parent.analysis_id aof_path = self.parent.working_dir / oldname return aof_path @@ -98,16 +100,13 @@ def use_quaternion_grid(self): temp = os.path.join(self._cfg.working_dir, temp) if os.path.isfile(temp): return temp - raise IOError( - '"%s": "%s" does not exist' % (key, temp) - ) + raise IOError('"%s": "%s" does not exist' % (key, temp)) @property def extract_measured_g_vectors(self): return self._cfg.get( - 'find_orientations:extract_measured_g_vectors', - False - ) + 'find_orientations:extract_measured_g_vectors', False + ) class ClusteringConfig(Config): @@ -122,7 +121,7 @@ def algorithm(self): raise RuntimeError( '"%s": "%s" not recognized, must be one of %s' % (key, temp, choices) - ) + ) @property def completeness(self): @@ -130,9 +129,7 @@ def completeness(self): temp = self._cfg.get(key, None) if temp is not None: return temp - raise RuntimeError( - '"%s" must be specified' % key - ) + raise RuntimeError('"%s" must be specified' % key) @property def radius(self): @@ -140,9 +137,7 @@ def radius(self): temp = self._cfg.get(key, None) if temp is not None: return temp - raise RuntimeError( - 
'"%s" must be specified' % key - ) + raise RuntimeError('"%s" must be specified' % key) class OmegaConfig(Config): @@ -153,22 +148,21 @@ class OmegaConfig(Config): def period(self): # FIXME: this is deprecated and now set from the imageseries key = 'find_orientations:omega:period' - temp = self._cfg.get(key, [-180., 180]) - range = np.abs(temp[1]-temp[0]) + temp = self._cfg.get(key, [-180.0, 180]) + range = np.abs(temp[1] - temp[0]) logger.warning('omega period specification is deprecated') if range != 360: raise RuntimeError( '"%s": range must be 360 degrees, range of %s is %g' % (key, temp, range) - ) + ) return temp @property def tolerance(self): return self._cfg.get( - 'find_orientations:omega:tolerance', - self.tolerance_dflt - ) + 'find_orientations:omega:tolerance', self.tolerance_dflt + ) class EtaConfig(Config): @@ -178,9 +172,8 @@ class EtaConfig(Config): @property def tolerance(self): return self._cfg.get( - 'find_orientations:eta:tolerance', - self.tolerance_dflt - ) + 'find_orientations:eta:tolerance', self.tolerance_dflt + ) @property def mask(self): @@ -191,7 +184,9 @@ def range(self): mask = self.mask if mask is None: return mask - return np.array([[-90. + mask, 90. - mask], [90. + mask, 270. 
- mask]]) + return np.array( + [[-90.0 + mask, 90.0 - mask], [90.0 + mask, 270.0 - mask]] + ) class SeedSearchConfig(Config): @@ -202,41 +197,39 @@ def hkl_seeds(self): try: temp = self._cfg.get(key) if isinstance(temp, int): - temp = [temp, ] + temp = [ + temp, + ] return temp except: if self._cfg.find_orientations.use_quaternion_grid is None: raise RuntimeError( '"%s" must be defined for seeded search' % key - ) + ) @property def fiber_step(self): return self._cfg.get( 'find_orientations:seed_search:fiber_step', - self._cfg.find_orientations.omega.tolerance - ) + self._cfg.find_orientations.omega.tolerance, + ) @property def method(self): key = 'find_orientations:seed_search:method' try: temp = self._cfg.get(key) - assert len(temp) == 1., \ - "method must have exactly one key" + assert len(temp) == 1.0, "method must have exactly one key" if isinstance(temp, dict): method_spec = next(iter(list(temp.keys()))) if method_spec.lower() not in seed_search_methods: raise RuntimeError( - 'invalid seed search method "%s"' - % method_spec + 'invalid seed search method "%s"' % method_spec ) else: return temp except: - raise RuntimeError( - '"%s" must be defined for seeded search' % key - ) + raise RuntimeError('"%s" must be defined for seeded search' % key) @property def fiber_ndiv(self): @@ -249,7 +242,7 @@ class OrientationMapsConfig(Config): def active_hkls(self): temp = self._cfg.get( 'find_orientations:orientation_maps:active_hkls', default='all' - ) + ) if isinstance(temp, int): temp = [temp] if temp == 'all': @@ -260,13 +253,13 @@ def active_hkls(self): def bin_frames(self): return self._cfg.get( 'find_orientations:orientation_maps:bin_frames', default=1 - ) + ) @property def eta_step(self): return self._cfg.get( 'find_orientations:orientation_maps:eta_step', default=0.25 - ) + ) @property def file(self): @@ -287,8 +280,7 @@ def file(self): # Now check the YAML. 
temp = self._cfg.get( - 'find_orientations:orientation_maps:file', - default=None + 'find_orientations:orientation_maps:file', default=None ) if temp is None: return mapf @@ -321,5 +313,6 @@ def threshold(self): @property def filter_maps(self): - return self._cfg.get('find_orientations:orientation_maps:filter_maps', - default=False) + return self._cfg.get( + 'find_orientations:orientation_maps:filter_maps', default=False + ) diff --git a/hexrd/config/fitgrains.py b/hexrd/hedm/config/fitgrains.py similarity index 95% rename from hexrd/config/fitgrains.py rename to hexrd/hedm/config/fitgrains.py index 8a708efec..fe4ade161 100644 --- a/hexrd/config/fitgrains.py +++ b/hexrd/hedm/config/fitgrains.py @@ -1,8 +1,8 @@ import logging import os -from .config import Config -from .utils import get_exclusion_parameters +from hexrd.core.config.config import Config +from hexrd.core.config.utils import get_exclusion_parameters logger = logging.getLogger('hexrd.config') @@ -99,7 +99,7 @@ def refit(self): raise RuntimeError( '"%s" must be None, a scalar, or a list, got "%s"' % (key, temp) - ) + ) if isinstance(temp, (int, float)): temp = [temp, temp] return temp @@ -107,6 +107,7 @@ def refit(self): """ TODO: evaluate the need for this """ + @property def skip_on_estimate(self): key = 'fit_grains:skip_on_estimate' @@ -115,7 +116,7 @@ def skip_on_estimate(self): return temp raise RuntimeError( '"%s" must be true or false, got "%s"' % (key, temp) - ) + ) @property def fit_only(self): @@ -125,7 +126,7 @@ def fit_only(self): return temp raise RuntimeError( '"%s" must be true or false, got "%s"' % (key, temp) - ) + ) @property def tth_max(self): @@ -138,4 +139,4 @@ def tth_max(self): return temp raise RuntimeError( '"%s" must be > 0, true, or false, got "%s"' % (key, temp) - ) + ) diff --git a/hexrd/findorientations.py b/hexrd/hedm/findorientations.py old mode 100755 new mode 100644 similarity index 82% rename from hexrd/findorientations.py rename to hexrd/hedm/findorientations.py index 
f6f7cce0f..1dbbb8a7d --- a/hexrd/findorientations.py +++ b/hexrd/hedm/findorientations.py @@ -5,6 +5,7 @@ import timeit import numpy as np + # np.seterr(over='ignore', invalid='ignore') # import tqdm @@ -12,20 +13,21 @@ import scipy.cluster as cluster from scipy import ndimage -from hexrd import constants as const -from hexrd import matrixutil as mutil -from hexrd import indexer -from hexrd import instrument -from hexrd.imageutil import find_peaks_2d -from hexrd import rotations as rot -from hexrd.transforms import xfcapi -from hexrd.xrdutil import EtaOmeMaps +from hexrd.core import constants as const +from hexrd.core import matrixutil as mutil +from hexrd.hedm import indexer +from hexrd.core import instrument +from hexrd.core.imageutil import find_peaks_2d +from hexrd.core import rotations as rot +from hexrd.core.transforms import xfcapi +from hexrd.hedm.xrdutil import EtaOmeMaps # just require scikit-learn? have_sklearn = False try: from sklearn.cluster import dbscan from sklearn.metrics.pairwise import pairwise_distances + have_sklearn = True except ImportError: pass @@ -54,18 +56,15 @@ def write_scored_orientations(results, cfg): """ np.savez_compressed( cfg.find_orientations.orientation_maps.scored_orientations_file, - **results['scored_orientations'] + **results['scored_orientations'], ) def _process_omegas(omegaimageseries_dict): """Extract omega period and ranges from an OmegaImageseries dictionary.""" oims = next(iter(omegaimageseries_dict.values())) - ome_period = oims.omega[0, 0] + np.r_[0., 360.] - ome_ranges = [ - ([i['ostart'], i['ostop']]) - for i in oims.omegawedges.wedges - ] + ome_period = oims.omega[0, 0] + np.r_[0.0, 360.0] + ome_ranges = [([i['ostart'], i['ostop']]) for i in oims.omegawedges.wedges] return ome_period, ome_ranges @@ -97,8 +96,7 @@ def generate_orientation_fibers(cfg, eta_ome): # default values for each case? They must be specified as of now. 
method = next(iter(method_dict.keys())) method_kwargs = method_dict[method] - logger.info('\tusing "%s" method for fiber generation' - % method) + logger.info('\tusing "%s" method for fiber generation' % method) # crystallography data from the pd object pd = eta_ome.planeData @@ -108,24 +106,16 @@ def generate_orientation_fibers(cfg, eta_ome): # !!! changed recently where iHKLList are now master hklIDs pd_hkl_ids = eta_ome.iHKLList[seed_hkl_ids] - pd_hkl_idx = pd.getHKLID( - pd.getHKLs(*eta_ome.iHKLList).T, - master=False - ) + pd_hkl_idx = pd.getHKLID(pd.getHKLs(*eta_ome.iHKLList).T, master=False) seed_hkls = pd.getHKLs(*pd_hkl_ids) seed_tths = tTh[pd_hkl_idx][seed_hkl_ids] - logger.info('\tusing seed hkls: %s' - % [str(i) for i in seed_hkls]) + logger.info('\tusing seed hkls: %s' % [str(i) for i in seed_hkls]) # grab angular grid infor from maps del_ome = eta_ome.omegas[1] - eta_ome.omegas[0] del_eta = eta_ome.etas[1] - eta_ome.etas[0] - params = dict( - bMat=bMat, - chi=chi, - csym=csym, - fiber_ndiv=fiber_ndiv) + params = dict(bMat=bMat, chi=chi, csym=csym, fiber_ndiv=fiber_ndiv) # ========================================================================= # Labeling of spots from seed hkls @@ -144,8 +134,12 @@ def generate_orientation_fibers(cfg, eta_ome): for i, (this_hkl, this_tth) in enumerate(zip(seed_hkls, seed_tths)): for ispot in range(numSpots[i]): if not np.isnan(coms[i][ispot][0]): - ome_c = eta_ome.omeEdges[0] + (0.5 + coms[i][ispot][0])*del_ome - eta_c = eta_ome.etaEdges[0] + (0.5 + coms[i][ispot][1])*del_eta + ome_c = ( + eta_ome.omeEdges[0] + (0.5 + coms[i][ispot][0]) * del_ome + ) + eta_c = ( + eta_ome.etaEdges[0] + (0.5 + coms[i][ispot][1]) * del_eta + ) input_p.append(np.hstack([this_hkl, this_tth, eta_c, ome_c])) # do the mapping @@ -154,12 +148,9 @@ def generate_orientation_fibers(cfg, eta_ome): if ncpus > 1: # multiple process version # ???: Need a chunksize in map? 
- chunksize = max(1, len(input_p)//(10*ncpus)) - pool = mp.Pool(ncpus, discretefiber_init, (params, )) - qfib = pool.map( - discretefiber_reduced, input_p, - chunksize=chunksize - ) + chunksize = max(1, len(input_p) // (10 * ncpus)) + pool = mp.Pool(ncpus, discretefiber_init, (params,)) + qfib = pool.map(discretefiber_reduced, input_p, chunksize=chunksize) ''' # This is an experiment... ntotal= 10*ncpus + np.remainder(len(input_p), 10*ncpus) > 0 @@ -182,7 +173,7 @@ def generate_orientation_fibers(cfg, eta_ome): qfib = list(map(discretefiber_reduced, input_p)) discretefiber_cleanup() - elapsed = (timeit.default_timer() - start) + elapsed = timeit.default_timer() - start logger.info("\tfiber generation took %.3f seconds", elapsed) return np.hstack(qfib) @@ -212,25 +203,20 @@ def discretefiber_reduced(params_in): gVec_s = xfcapi.angles_to_gvec( np.atleast_2d(params_in[3:]), chi=chi, - ).T + ).T tmp = mutil.uniqueVectors( rot.discreteFiber( - hkl, - gVec_s, - B=bMat, - ndiv=fiber_ndiv, - invert=False, - csym=csym - )[0] - ) + hkl, gVec_s, B=bMat, ndiv=fiber_ndiv, invert=False, csym=csym + )[0] + ) return tmp -def run_cluster(compl, qfib, qsym, cfg, - min_samples=None, compl_thresh=None, radius=None): - """ - """ +def run_cluster( + compl, qfib, qsym, cfg, min_samples=None, compl_thresh=None, radius=None +): + """ """ algorithm = cfg.find_orientations.clustering.algorithm cl_radius = cfg.find_orientations.clustering.radius @@ -263,8 +249,7 @@ def run_cluster(compl, qfib, qsym, cfg, def quat_distance(x, y): return xfcapi.quat_distance( - np.array(x, order='C'), np.array(y, order='C'), - qsym + np.array(x, order='C'), np.array(y, order='C'), qsym ) qfib_r = qfib[:, np.array(compl) > min_compl] @@ -282,19 +267,22 @@ def quat_distance(x, y): logger.info( "Feeding %d orientations above %.1f%% to clustering", - num_ors, 100*min_compl - ) + num_ors, + 100 * min_compl, + ) if algorithm == 'dbscan' and not have_sklearn: algorithm = 'fclusterdata' logger.warning( "sklearn >= 0.14 
required for dbscan; using fclusterdata" - ) + ) if algorithm in ['dbscan', 'ort-dbscan', 'sph-dbscan']: # munge min_samples according to options - if min_samples is None \ - or cfg.find_orientations.use_quaternion_grid is not None: + if ( + min_samples is None + or cfg.find_orientations.use_quaternion_grid is not None + ): min_samples = 1 if algorithm == 'sph-dbscan': @@ -302,7 +290,7 @@ def quat_distance(x, y): # compute distance matrix pdist = pairwise_distances( qfib_r.T, metric=quat_distance, n_jobs=1 - ) + ) # run dbscan core_samples, labels = dbscan( @@ -311,16 +299,16 @@ def quat_distance(x, y): min_samples=min_samples, metric='precomputed', n_jobs=ncpus, - ) + ) else: if algorithm == 'ort-dbscan': logger.info("using euclidean orthographic DBSCAN") pts = qfib_r[1:, :].T - eps = 0.25*np.radians(cl_radius) + eps = 0.25 * np.radians(cl_radius) else: logger.info("using euclidean DBSCAN") pts = qfib_r.T - eps = 0.5*np.radians(cl_radius) + eps = 0.5 * np.radians(cl_radius) # run dbscan core_samples, labels = dbscan( @@ -330,7 +318,7 @@ def quat_distance(x, y): metric='minkowski', p=2, n_jobs=ncpus, - ) + ) # extract cluster labels cl = np.array(labels, dtype=int) # convert to array @@ -344,12 +332,12 @@ def quat_distance(x, y): qfib_r.T, np.radians(cl_radius), criterion='distance', - metric=quat_distance - ) + metric=quat_distance, + ) else: raise RuntimeError( "Clustering algorithm %s not recognized" % algorithm - ) + ) # extract number of clusters if np.any(cl == -1): @@ -365,18 +353,20 @@ def quat_distance(x, y): qfib_r[:, cl == i + 1], qsym ).flatten() - if algorithm in ('dbscan', 'ort-dbscan') and qbar.size/4 > 1: + if algorithm in ('dbscan', 'ort-dbscan') and qbar.size / 4 > 1: logger.info("\tchecking for duplicate orientations...") cl = cluster.hierarchy.fclusterdata( qbar.T, np.radians(cl_radius), criterion='distance', - metric=quat_distance) + metric=quat_distance, + ) nblobs_new = len(np.unique(cl)) if nblobs_new < nblobs: logger.info( "\tfound %d 
duplicates within %f degrees", - nblobs - nblobs_new, cl_radius + nblobs - nblobs_new, + cl_radius, ) tmp = np.zeros((4, nblobs_new)) for i in range(nblobs_new): @@ -390,10 +380,10 @@ def quat_distance(x, y): logger.info( "Found %d orientation clusters with >=%.1f%% completeness" " and %2f misorientation", - qbar.size/4, - 100.*min_compl, - cl_radius - ) + qbar.size / 4, + 100.0 * min_compl, + cl_radius, + ) return np.atleast_2d(qbar), cl @@ -438,7 +428,7 @@ def load_eta_ome_maps(cfg, pd, image_series, hkls=None, clean=False): shkls = pd.getHKLs(*res.iHKLList, asStr=True) logger.info( 'hkls used to generate orientation maps: %s', - [f'[{i}]' for i in shkls] + [f'[{i}]' for i in shkls], ) except (AttributeError, IOError): logger.info( @@ -462,12 +452,10 @@ def filter_maps_if_requested(eta_ome, cfg): if filter_maps: if not isinstance(filter_maps, bool): sigm = const.fwhm_to_sigma * filter_maps - logger.info("filtering eta/ome maps incl LoG with %.2f std dev", - sigm) - _filter_eta_ome_maps( - eta_ome, - filter_stdev=sigm + logger.info( + "filtering eta/ome maps incl LoG with %.2f std dev", sigm ) + _filter_eta_ome_maps(eta_ome, filter_stdev=sigm) else: logger.info("filtering eta/ome maps") _filter_eta_ome_maps(eta_ome) @@ -479,7 +467,7 @@ def generate_eta_ome_maps(cfg, hkls=None, save=True): Parameters ---------- - cfg : hexrd.config.root.RootConfig + cfg : hexrd.core.config.root.RootConfig A hexrd far-field HEDM config instance. hkls : array_like, optional If not None, an override for the hkls used to generate maps. 
This can @@ -532,8 +520,9 @@ def generate_eta_ome_maps(cfg, hkls=None, save=True): # we have actual hkls hkls = plane_data.getHKLID(temp.tolist(), master=True) else: - raise RuntimeError('active_hkls spec must be 1-d or 2-d, not %d-d' - % temp.ndim) + raise RuntimeError( + 'active_hkls spec must be 1-d or 2-d, not %d-d' % temp.ndim + ) # apply some checks to active_hkls specificaton if not use_all: @@ -558,8 +547,7 @@ def generate_eta_ome_maps(cfg, hkls=None, save=True): # logging output shkls = plane_data.getHKLs(*active_hklIDs, asStr=True) logger.info( - "building eta_ome maps using hkls: %s", - [f'[{i}]' for i in shkls] + "building eta_ome maps using hkls: %s", [f'[{i}]' for i in shkls] ) # grad imageseries dict from cfg @@ -572,11 +560,14 @@ def generate_eta_ome_maps(cfg, hkls=None, save=True): # make eta_ome maps eta_ome = instrument.GenerateEtaOmeMaps( - imsd, cfg.instrument.hedm, plane_data, + imsd, + cfg.instrument.hedm, + plane_data, active_hkls=active_hklIDs, eta_step=cfg.find_orientations.orientation_maps.eta_step, threshold=cfg.find_orientations.orientation_maps.threshold, - ome_period=ome_period) + ome_period=ome_period, + ) logger.info("\t\t...took %f seconds", timeit.default_timer() - start) @@ -667,18 +658,22 @@ def create_clustering_parameters(cfg, eta_ome): # !!! 
default to use 100 grains ngrains = 100 rand_q = mutil.unitVector(np.random.randn(4, ngrains)) - rand_e = np.tile(2.*np.arccos(rand_q[0, :]), (3, 1)) \ - * mutil.unitVector(rand_q[1:, :]) + rand_e = np.tile(2.0 * np.arccos(rand_q[0, :]), (3, 1)) * mutil.unitVector( + rand_q[1:, :] + ) grain_param_list = np.vstack( - [rand_e, - np.zeros((3, ngrains)), - np.tile(const.identity_6x1, (ngrains, 1)).T] - ).T + [ + rand_e, + np.zeros((3, ngrains)), + np.tile(const.identity_6x1, (ngrains, 1)).T, + ] + ).T sim_results = instr.simulate_rotation_series( - plane_data, grain_param_list, - eta_ranges=np.radians(eta_ranges), - ome_ranges=np.radians(ome_ranges), - ome_period=np.radians(ome_period) + plane_data, + grain_param_list, + eta_ranges=np.radians(eta_ranges), + ome_ranges=np.radians(ome_ranges), + ome_period=np.radians(ome_period), ) refl_per_grain = np.zeros(ngrains) @@ -687,21 +682,20 @@ def create_clustering_parameters(cfg, eta_ome): for i, refl_ids in enumerate(sim_result[0]): refl_per_grain[i] += len(refl_ids) seed_refl_per_grain[i] += np.sum( - [sum(refl_ids == hkl_id) for hkl_id in seed_hkl_ids] - ) + [sum(refl_ids == hkl_id) for hkl_id in seed_hkl_ids] + ) min_samples = max( - int(np.floor(0.5*compl_thresh*min(seed_refl_per_grain))), - 2 + int(np.floor(0.5 * compl_thresh * min(seed_refl_per_grain))), 2 ) mean_rpg = int(np.round(np.average(refl_per_grain))) return min_samples, mean_rpg -def find_orientations(cfg, - hkls=None, clean=False, profile=False, - use_direct_testing=False): +def find_orientations( + cfg, hkls=None, clean=False, profile=False, use_direct_testing=False +): """ @@ -758,16 +752,17 @@ def find_orientations(cfg, if use_direct_testing: npdiv_DFLT = 2 params = dict( - plane_data=plane_data, - instrument=instr, - imgser_dict=imsd, - tth_tol=tth_tol, - eta_tol=eta_tol, - ome_tol=ome_tol, - eta_ranges=np.radians(eta_ranges), - ome_period=np.radians(ome_period), - npdiv=npdiv_DFLT, - threshold=image_threshold) + plane_data=plane_data, + 
instrument=instr, + imgser_dict=imsd, + tth_tol=tth_tol, + eta_tol=eta_tol, + ome_tol=ome_tol, + eta_ranges=np.radians(eta_ranges), + ome_period=np.radians(ome_period), + npdiv=npdiv_DFLT, + threshold=image_threshold, + ) logger.info("\tusing direct search on %d processes", ncpus) @@ -776,36 +771,33 @@ def find_orientations(cfg, # doing seeded search logger.info("Will perform seeded search") logger.info( - "\tgenerating search quaternion list using %d processes", - ncpus + "\tgenerating search quaternion list using %d processes", ncpus ) start = timeit.default_timer() # need maps - eta_ome = load_eta_ome_maps(cfg, plane_data, imsd, - hkls=hkls, clean=clean) + eta_ome = load_eta_ome_maps( + cfg, plane_data, imsd, hkls=hkls, clean=clean + ) # generate trial orientations qfib = generate_orientation_fibers(cfg, eta_ome) - logger.info("\t\t...took %f seconds", - timeit.default_timer() - start) + logger.info( + "\t\t...took %f seconds", timeit.default_timer() - start + ) else: # doing grid search try: qfib = np.load(cfg.find_orientations.use_quaternion_grid) - except(IOError): + except IOError: raise RuntimeError( "specified quaternion grid file '%s' not found!" 
% cfg.find_orientations.use_quaternion_grid ) # execute direct search - pool = mp.Pool( - ncpus, - indexer.test_orientation_FF_init, - (params, ) - ) + pool = mp.Pool(ncpus, indexer.test_orientation_FF_init, (params,)) completeness = pool.map(indexer.test_orientation_FF_reduced, qfib.T) pool.close() pool.join() @@ -815,26 +807,27 @@ def find_orientations(cfg, start = timeit.default_timer() # handle eta-ome maps - eta_ome = load_eta_ome_maps(cfg, plane_data, imsd, - hkls=hkls, clean=clean) + eta_ome = load_eta_ome_maps( + cfg, plane_data, imsd, hkls=hkls, clean=clean + ) # handle search space if cfg.find_orientations.use_quaternion_grid is None: # doing seeded search logger.info( - "\tgenerating search quaternion list using %d processes", - ncpus + "\tgenerating search quaternion list using %d processes", ncpus ) start = timeit.default_timer() qfib = generate_orientation_fibers(cfg, eta_ome) - logger.info("\t\t...took %f seconds", - timeit.default_timer() - start) + logger.info( + "\t\t...took %f seconds", timeit.default_timer() - start + ) else: # doing grid search try: qfib = np.load(cfg.find_orientations.use_quaternion_grid) - except(IOError): + except IOError: raise RuntimeError( "specified quaternion grid file '%s' not found!" 
% cfg.find_orientations.use_quaternion_grid @@ -842,8 +835,9 @@ def find_orientations(cfg, # do map-based indexing start = timeit.default_timer() - logger.info("will test %d quaternions using %d processes", - qfib.shape[1], ncpus) + logger.info( + "will test %d quaternions using %d processes", qfib.shape[1], ncpus + ) completeness = indexer.paintGrid( qfib, @@ -854,19 +848,21 @@ def find_orientations(cfg, omePeriod=np.radians(cfg.find_orientations.omega.period), threshold=on_map_threshold, doMultiProc=ncpus > 1, - nCPUs=ncpus - ) - logger.info("\t\t...took %f seconds", - timeit.default_timer() - start) + nCPUs=ncpus, + ) + logger.info("\t\t...took %f seconds", timeit.default_timer() - start) completeness = np.array(completeness) - logger.info("\tSaving %d scored orientations with max completeness %f%%", - qfib.shape[1], 100*np.max(completeness)) + logger.info( + "\tSaving %d scored orientations with max completeness %f%%", + qfib.shape[1], + 100 * np.max(completeness), + ) results = {} results['scored_orientations'] = { 'test_quaternions': qfib, - 'score': completeness + 'score': completeness, } # ========================================================================= @@ -887,10 +883,14 @@ def find_orientations(cfg, logger.info("\tneighborhood size: %d", min_samples) qbar, cl = run_cluster( - completeness, qfib, plane_data.q_sym, cfg, + completeness, + qfib, + plane_data.q_sym, + cfg, min_samples=min_samples, compl_thresh=compl_thresh, - radius=cl_radius) + radius=cl_radius, + ) logger.info("\t\t...took %f seconds", (timeit.default_timer() - start)) logger.info("\tfound %d grains", qbar.shape[1]) diff --git a/hexrd/fitgrains.py b/hexrd/hedm/fitgrains.py similarity index 76% rename from hexrd/fitgrains.py rename to hexrd/hedm/fitgrains.py index caa2a7e30..19cc2afac 100644 --- a/hexrd/fitgrains.py +++ b/hexrd/hedm/fitgrains.py @@ -12,10 +12,10 @@ import timeit import warnings -from hexrd import instrument -from hexrd.transforms import xfcapi -from hexrd import 
rotations -from hexrd.fitting import fitGrain, objFuncFitGrain, gFlag_ref +from hexrd.core import instrument +from hexrd.core.transforms import xfcapi +from hexrd.core import rotations +from hexrd.core.fitting import fitGrain, objFuncFitGrain, gFlag_ref logger = logging.getLogger(__name__) @@ -107,17 +107,23 @@ def fit_grain_FF_reduced(grain_id): for tols in zip(tth_tol, eta_tol, ome_tol): complvec, results = instrument.pull_spots( - plane_data, grain_params, + plane_data, + grain_params, imgser_dict, tth_tol=tols[0], eta_tol=tols[1], ome_tol=tols[2], - npdiv=npdiv, threshold=threshold, + npdiv=npdiv, + threshold=threshold, eta_ranges=eta_ranges, ome_period=ome_period, - dirname=analysis_dirname, filename=spots_filename, + dirname=analysis_dirname, + filename=spots_filename, return_spot_list=False, - quiet=True, check_only=False, interp='nearest') + quiet=True, + check_only=False, + interp='nearest', + ) # ======= DETERMINE VALID REFLECTIONS ======= @@ -153,8 +159,9 @@ def fit_grain_FF_reduced(grain_id): # find unsaturated spots on this panel unsat_spots = np.ones(len(valid_refl_ids), dtype=bool) if panel.saturation_level is not None: - unsat_spots[valid_refl_ids] = \ + unsat_spots[valid_refl_ids] = ( max_int[valid_refl_ids] < panel.saturation_level + ) idx = np.logical_and(valid_refl_ids, unsat_spots) @@ -163,15 +170,15 @@ def fit_grain_FF_reduced(grain_id): try: ot = np.load( os.path.join( - analysis_dirname, os.path.join( - det_key, OVERLAP_TABLE_FILE - ) + analysis_dirname, + os.path.join(det_key, OVERLAP_TABLE_FILE), ) ) for key in ot.keys(): for this_table in ot[key]: these_overlaps = np.where( - this_table[:, 0] == grain_id)[0] + this_table[:, 0] == grain_id + )[0] if len(these_overlaps) > 0: mark_these = np.array( this_table[these_overlaps, 1], dtype=int @@ -183,7 +190,7 @@ def fit_grain_FF_reduced(grain_id): overlaps[otidx] = True idx = np.logical_and(idx, ~overlaps) # logger.info("found overlap table for '%s'", det_key) - except(IOError, IndexError): + 
except (IOError, IndexError): # logger.info("no overlap table found for '%s'", det_key) pass @@ -198,7 +205,7 @@ def fit_grain_FF_reduced(grain_id): # try: completeness = num_refl_valid / float(num_refl_tot) - except(ZeroDivisionError): + except ZeroDivisionError: raise RuntimeError( "simulated number of relfections is 0; " + "check instrument config or grain parameters" @@ -206,38 +213,51 @@ def fit_grain_FF_reduced(grain_id): # ======= DO LEASTSQ FIT ======= - if num_refl_valid <= 12: # not enough reflections to fit... exit + if num_refl_valid <= 12: # not enough reflections to fit... exit warnings.warn( f'Not enough valid reflections ({num_refl_valid}) to fit, ' f'exiting', - RuntimeWarning + RuntimeWarning, ) return grain_id, completeness, np.inf, grain_params else: grain_params = fitGrain( - grain_params, instrument, culled_results, - plane_data.latVecOps['B'], plane_data.wavelength - ) + grain_params, + instrument, + culled_results, + plane_data.latVecOps['B'], + plane_data.wavelength, + ) # get chisq # TODO: do this while evaluating fit??? 
chisq = objFuncFitGrain( - grain_params[gFlag_ref], grain_params, gFlag_ref, - instrument, - culled_results, - plane_data.latVecOps['B'], plane_data.wavelength, - ome_period, - simOnly=False, return_value_flag=2) + grain_params[gFlag_ref], + grain_params, + gFlag_ref, + instrument, + culled_results, + plane_data.latVecOps['B'], + plane_data.wavelength, + ome_period, + simOnly=False, + return_value_flag=2, + ) if refit is not None: # first get calculated x, y, ome from previous solution # NOTE: this result is a dict xyo_det_fit_dict = objFuncFitGrain( - grain_params[gFlag_ref], grain_params, gFlag_ref, + grain_params[gFlag_ref], + grain_params, + gFlag_ref, instrument, culled_results, - plane_data.latVecOps['B'], plane_data.wavelength, + plane_data.latVecOps['B'], + plane_data.wavelength, ome_period, - simOnly=True, return_value_flag=2) + simOnly=True, + return_value_flag=2, + ) # make dict to contain new culled results culled_results_r = dict.fromkeys(culled_results) @@ -250,7 +270,7 @@ def fit_grain_FF_reduced(grain_id): continue ims = next(iter(imgser_dict.values())) # grab first for the omes - ome_step = sum(np.r_[-1, 1]*ims.metadata['omega'][0, :]) + ome_step = sum(np.r_[-1, 1] * ims.metadata['omega'][0, :]) xyo_det = np.atleast_2d( np.vstack([np.r_[x[7], x[6][-1]] for x in presults]) @@ -258,25 +278,25 @@ def fit_grain_FF_reduced(grain_id): xyo_det_fit = xyo_det_fit_dict[det_key] - xpix_tol = refit[0]*panel.pixel_size_col - ypix_tol = refit[0]*panel.pixel_size_row - fome_tol = refit[1]*ome_step + xpix_tol = refit[0] * panel.pixel_size_col + ypix_tol = refit[0] * panel.pixel_size_row + fome_tol = refit[1] * ome_step # define difference vectors for spot fits x_diff = abs(xyo_det[:, 0] - xyo_det_fit['calc_xy'][:, 0]) y_diff = abs(xyo_det[:, 1] - xyo_det_fit['calc_xy'][:, 1]) ome_diff = np.degrees( - rotations.angularDifference(xyo_det[:, 2], - xyo_det_fit['calc_omes']) + rotations.angularDifference( + xyo_det[:, 2], xyo_det_fit['calc_omes'] ) + ) # filter out 
reflections with centroids more than # a pixel and delta omega away from predicted value idx_new = np.logical_and( x_diff <= xpix_tol, - np.logical_and(y_diff <= ypix_tol, - ome_diff <= fome_tol) - ) + np.logical_and(y_diff <= ypix_tol, ome_diff <= fome_tol), + ) # attach to proper dict entry culled_results_r[det_key] = [ @@ -288,28 +308,37 @@ def fit_grain_FF_reduced(grain_id): # only execute fit if left with enough reflections if num_refl_valid > 12: grain_params = fitGrain( - grain_params, instrument, culled_results_r, - plane_data.latVecOps['B'], plane_data.wavelength + grain_params, + instrument, + culled_results_r, + plane_data.latVecOps['B'], + plane_data.wavelength, ) # get chisq # TODO: do this while evaluating fit??? chisq = objFuncFitGrain( - grain_params[gFlag_ref], - grain_params, gFlag_ref, - instrument, - culled_results_r, - plane_data.latVecOps['B'], plane_data.wavelength, - ome_period, - simOnly=False, return_value_flag=2) + grain_params[gFlag_ref], + grain_params, + gFlag_ref, + instrument, + culled_results_r, + plane_data.latVecOps['B'], + plane_data.wavelength, + ome_period, + simOnly=False, + return_value_flag=2, + ) return grain_id, completeness, chisq, grain_params -def fit_grains(cfg, - grains_table, - show_progress=False, - ids_to_refine=None, - write_spots_files=True, - check_if_canceled_func=None): +def fit_grains( + cfg, + grains_table, + show_progress=False, + ids_to_refine=None, + write_spots_files=True, + check_if_canceled_func=None, +): """ Performs optimization of grain parameters. @@ -340,7 +369,7 @@ def fit_grains(cfg, # handle omega period # !!! we assume all detector ims have the same ome ranges, so any will do! 
oims = next(iter(imsd.values())) - ome_period = np.radians(oims.omega[0, 0] + np.r_[0., 360.]) + ome_period = np.radians(oims.omega[0, 0] + np.r_[0.0, 360.0]) # number of processes ncpus = cfg.multiprocessing @@ -353,20 +382,21 @@ def fit_grains(cfg, spots_filename = SPOTS_OUT_FILE if write_spots_files else None params = dict( - grains_table=grains_table, - plane_data=cfg.material.plane_data, - instrument=instr, - imgser_dict=imsd, - tth_tol=cfg.fit_grains.tolerance.tth, - eta_tol=cfg.fit_grains.tolerance.eta, - ome_tol=cfg.fit_grains.tolerance.omega, - npdiv=cfg.fit_grains.npdiv, - refit=cfg.fit_grains.refit, - threshold=threshold, - eta_ranges=eta_ranges, - ome_period=ome_period, - analysis_dirname=cfg.analysis_dir, - spots_filename=spots_filename) + grains_table=grains_table, + plane_data=cfg.material.plane_data, + instrument=instr, + imgser_dict=imsd, + tth_tol=cfg.fit_grains.tolerance.tth, + eta_tol=cfg.fit_grains.tolerance.eta, + ome_tol=cfg.fit_grains.tolerance.omega, + npdiv=cfg.fit_grains.npdiv, + refit=cfg.fit_grains.refit, + threshold=threshold, + eta_ranges=eta_ranges, + ome_period=ome_period, + analysis_dirname=cfg.analysis_dir, + spots_filename=spots_filename, + ) # ===================================================================== # EXECUTE MP FIT @@ -378,8 +408,7 @@ def fit_grains(cfg, start = timeit.default_timer() fit_grain_FF_init(params) fit_results = list( - map(fit_grain_FF_reduced, - np.array(grains_table[:, 0], dtype=int)) + map(fit_grain_FF_reduced, np.array(grains_table[:, 0], dtype=int)) ) fit_grain_FF_cleanup() elapsed = timeit.default_timer() - start @@ -399,16 +428,12 @@ def fit_grains(cfg, logger.info("\tstarting fit on %d processes with chunksize %d", nproc, chunksize) start = timeit.default_timer() - pool = multiprocessing.Pool( - nproc, - fit_grain_FF_init, - (params, ) - ) + pool = multiprocessing.Pool(nproc, fit_grain_FF_init, (params,)) async_result = pool.map_async( fit_grain_FF_reduced, np.array(grains_table[:, 0], 
dtype=int), - chunksize=chunksize + chunksize=chunksize, ) while not async_result.ready(): if check_if_canceled_func and check_if_canceled_func(): diff --git a/hexrd/hedm/fitting/calibration/__init__.py b/hexrd/hedm/fitting/calibration/__init__.py new file mode 100644 index 000000000..1d2652b41 --- /dev/null +++ b/hexrd/hedm/fitting/calibration/__init__.py @@ -0,0 +1,19 @@ +from .grain import GrainCalibrator +from hexrd.core.fitting.calibration import ( + fix_detector_y, + InstrumentCalibrator, + LaueCalibrator, + StructureLessCalibrator, + StructurelessCalibrator, + PowderCalibrator, +) + +__all__ = [ + 'fix_detector_y', + 'GrainCalibrator', + 'InstrumentCalibrator', + 'LaueCalibrator', + 'PowderCalibrator', + 'StructurelessCalibrator', + 'StructureLessCalibrator', +] diff --git a/hexrd/hedm/fitting/calibration/grain.py b/hexrd/hedm/fitting/calibration/grain.py new file mode 100644 index 000000000..0995e5c44 --- /dev/null +++ b/hexrd/hedm/fitting/calibration/grain.py @@ -0,0 +1,214 @@ +import logging + +import numpy as np + +from hexrd.core import matrixutil as mutil +from hexrd.core.rotations import angularDifference +from hexrd.core.transforms import xfcapi + +from ....core.fitting.calibration.abstract_grain import AbstractGrainCalibrator +from ....core.fitting.calibration.lmfit_param_handling import ( + DEFAULT_EULER_CONVENTION, +) +from .. 
import grains as grainutil + +logger = logging.getLogger(__name__) + + +class GrainCalibrator(AbstractGrainCalibrator): + """This is for HEDM grain calibration""" + + type = 'grain' + + def __init__( + self, + instr, + material, + grain_params, + ome_period, + index=0, + default_refinements=None, + calibration_picks=None, + euler_convention=DEFAULT_EULER_CONVENTION, + ): + super().__init__( + instr, + material, + grain_params, + default_refinements, + calibration_picks, + euler_convention, + ) + self.ome_period = ome_period + self.index = index + + @property + def name(self): + return f'{self.material.name}_{self.index}' + + def autopick_points(self): + # We could call `pull_spots()` here to perform auto-picking. + raise NotImplementedError + + def _evaluate(self): + data_dict = self.data_dict + + # grab reflection data from picks input + pick_hkls_dict = {} + pick_xys_dict = {} + for det_key in self.instr.detectors: + # find valid reflections and recast hkls to int + xys = np.asarray(data_dict['pick_xys'][det_key], dtype=float) + hkls = np.asarray(data_dict['hkls'][det_key], dtype=int) + + valid_idx = ~np.isnan(xys[:, 0]) + + # fill local dicts + pick_hkls_dict[det_key] = [np.atleast_2d(hkls[valid_idx, :])] + pick_xys_dict[det_key] = [np.atleast_2d(xys[valid_idx, :])] + + return pick_hkls_dict, pick_xys_dict + + def residual(self): + pick_hkls_dict, pick_xys_dict = self._evaluate() + + return sxcal_obj_func( + [self.grain_params], + self.instr, + pick_xys_dict, + pick_hkls_dict, + self.bmatx, + self.ome_period, + ) + + def model(self): + pick_hkls_dict, pick_xys_dict = self._evaluate() + + return sxcal_obj_func( + [self.grain_params], + self.instr, + pick_xys_dict, + pick_hkls_dict, + self.bmatx, + self.ome_period, + sim_only=True, + ) + + +# Objective function for multigrain fitting +def sxcal_obj_func( + grain_params, instr, xyo_det, hkls_idx, bmat, ome_period, sim_only=False +): + ngrains = len(grain_params) + + # assign some useful params + wavelength = 
instr.beam_wavelength + bvec = instr.beam_vector + chi = instr.chi + tvec_s = instr.tvec + + # right now just stuck on the end and assumed + # to all be the same length... FIX THIS + xy_unwarped = {} + meas_omes = {} + calc_omes = {} + calc_xy = {} + + # loop over panels + npts_tot = 0 + for det_key, panel in instr.detectors.items(): + rmat_d = panel.rmat + tvec_d = panel.tvec + + xy_unwarped[det_key] = [] + meas_omes[det_key] = [] + calc_omes[det_key] = [] + calc_xy[det_key] = [] + + for ig, grain in enumerate(grain_params): + ghkls = hkls_idx[det_key][ig] + xyo = xyo_det[det_key][ig] + + npts_tot += len(xyo) + + xy_unwarped[det_key].append(xyo[:, :2]) + meas_omes[det_key].append(xyo[:, 2]) + if panel.distortion is not None: # do unwarping + xy_unwarped[det_key][ig] = panel.distortion.apply( + xy_unwarped[det_key][ig] + ) + + # transform G-vectors: + # 1) convert inv. stretch tensor from MV notation in to 3x3 + # 2) take reciprocal lattice vectors from CRYSTAL to SAMPLE frame + # 3) apply stretch tensor + # 4) normalize reciprocal lattice vectors in SAMPLE frame + # 5) transform unit reciprocal lattice vetors back to CRYSAL frame + rmat_c = xfcapi.make_rmat_of_expmap(grain[:3]) + tvec_c = grain[3:6] + vinv_s = grain[6:] + gvec_c = np.dot(bmat, ghkls.T) + vmat_s = mutil.vecMVToSymm(vinv_s) + ghat_s = mutil.unitVector(np.dot(vmat_s, np.dot(rmat_c, gvec_c))) + ghat_c = np.dot(rmat_c.T, ghat_s) + + match_omes, calc_omes_tmp = grainutil.matchOmegas( + xyo, + ghkls.T, + chi, + rmat_c, + bmat, + wavelength, + vInv=vinv_s, + beamVec=bvec, + omePeriod=ome_period, + ) + + rmat_s_arr = xfcapi.make_sample_rmat( + chi, np.ascontiguousarray(calc_omes_tmp) + ) + calc_xy_tmp = xfcapi.gvec_to_xy( + ghat_c.T, rmat_d, rmat_s_arr, rmat_c, tvec_d, tvec_s, tvec_c + ) + if np.any(np.isnan(calc_xy_tmp)): + logger.warning( + "infeasible parameters: may want to scale back " + "finite difference step size" + ) + + calc_omes[det_key].append(calc_omes_tmp) + 
calc_xy[det_key].append(calc_xy_tmp) + + # return values + if sim_only: + retval = {} + for det_key in calc_xy.keys(): + # ??? calc_xy is always 2-d + retval[det_key] = [] + for ig in range(ngrains): + retval[det_key].append( + np.vstack( + [calc_xy[det_key][ig].T, calc_omes[det_key][ig]] + ).T + ) + else: + meas_xy_all = [] + calc_xy_all = [] + meas_omes_all = [] + calc_omes_all = [] + for det_key in xy_unwarped.keys(): + meas_xy_all.append(np.vstack(xy_unwarped[det_key])) + calc_xy_all.append(np.vstack(calc_xy[det_key])) + meas_omes_all.append(np.hstack(meas_omes[det_key])) + calc_omes_all.append(np.hstack(calc_omes[det_key])) + meas_xy_all = np.vstack(meas_xy_all) + calc_xy_all = np.vstack(calc_xy_all) + meas_omes_all = np.hstack(meas_omes_all) + calc_omes_all = np.hstack(calc_omes_all) + + diff_vecs_xy = calc_xy_all - meas_xy_all + diff_ome = angularDifference(calc_omes_all, meas_omes_all) + retval = np.hstack( + [diff_vecs_xy, diff_ome.reshape(npts_tot, 1)] + ).flatten() + return retval diff --git a/hexrd/fitting/grains.py b/hexrd/hedm/fitting/grains.py similarity index 84% rename from hexrd/fitting/grains.py rename to hexrd/hedm/fitting/grains.py index 37169f79e..843f194dd 100644 --- a/hexrd/fitting/grains.py +++ b/hexrd/hedm/fitting/grains.py @@ -4,13 +4,13 @@ from scipy import optimize -from hexrd import matrixutil as mutil +from hexrd.core import matrixutil as mutil -from hexrd.transforms import xfcapi -from hexrd import constants -from hexrd import rotations +from hexrd.core.transforms import xfcapi +from hexrd.core import constants +from hexrd.core import rotations -from hexrd.xrdutil import ( +from hexrd.hedm.xrdutil import ( apply_correction_to_wavelength, extract_detector_transformation, ) @@ -22,7 +22,7 @@ bVec_ref = constants.beam_vec eta_ref = constants.eta_vec -vInv_ref = np.r_[1., 1., 1., 0., 0., 0.] 
+vInv_ref = np.r_[1.0, 1.0, 1.0, 0.0, 0.0, 0.0] # for grain parameters @@ -30,11 +30,19 @@ gScl_ref = np.ones(12, dtype=bool) -def fitGrain(gFull, instrument, reflections_dict, - bMat, wavelength, - gFlag=gFlag_ref, gScl=gScl_ref, - omePeriod=None, - factor=0.1, xtol=sqrt_epsf, ftol=sqrt_epsf): +def fitGrain( + gFull, + instrument, + reflections_dict, + bMat, + wavelength, + gFlag=gFlag_ref, + gScl=gScl_ref, + omePeriod=None, + factor=0.1, + xtol=sqrt_epsf, + ftol=sqrt_epsf, +): """ Perform least-squares optimization of grain parameters. @@ -120,13 +128,18 @@ def fitGrain(gFull, instrument, reflections_dict, return retval -def objFuncFitGrain(gFit, gFull, gFlag, - instrument, - reflections_dict, - bMat, wavelength, - omePeriod, - simOnly=False, - return_value_flag=return_value_flag): +def objFuncFitGrain( + gFit, + gFull, + gFlag, + instrument, + reflections_dict, + bMat, + wavelength, + omePeriod, + simOnly=False, + return_value_flag=return_value_flag, +): """ Calculate residual between measured and simulated ff-HEDM G-vectors. @@ -221,7 +234,8 @@ def objFuncFitGrain(gFit, gFull, gFlag, det_keys_ordered.append(det_key) rMat_d, tVec_d, chi, tVec_s = extract_detector_transformation( - instrument.detector_parameters[det_key]) + instrument.detector_parameters[det_key] + ) results = reflections_dict[det_key] if not isinstance(results, dict) and len(results) == 0: @@ -243,9 +257,7 @@ def objFuncFitGrain(gFit, gFull, gFlag, # WARNING: hkls and derived vectors below must be columnwise; # strictly necessary??? change affected APIs instead? 
# - hkls = np.atleast_2d( - np.vstack([x[2] for x in results]) - ).T + hkls = np.atleast_2d(np.vstack([x[2] for x in results])).T meas_xyo = np.atleast_2d( np.vstack([np.r_[x[7], x[6][-1]] for x in results]) @@ -287,10 +299,16 @@ def objFuncFitGrain(gFit, gFull, gFlag, # TODO: try Numba implementations rMat_s = xfcapi.make_sample_rmat(chi, calc_omes) - calc_xy = xfcapi.gvec_to_xy(gHat_c.T, - rMat_d, rMat_s, rMat_c, - tVec_d, tVec_s, tVec_c, - beam_vec=bVec) + calc_xy = xfcapi.gvec_to_xy( + gHat_c.T, + rMat_d, + rMat_s, + rMat_c, + tVec_d, + tVec_s, + tVec_c, + beam_vec=bVec, + ) # append to xy dict calc_xy_dict[det_key] = calc_xy @@ -307,8 +325,9 @@ def objFuncFitGrain(gFit, gFull, gFlag, npts = len(meas_xyo_all) if np.any(np.isnan(calc_xy)): raise RuntimeError( - "infeasible pFull: may want to scale" + - "back finite difference step size") + "infeasible pFull: may want to scale" + + "back finite difference step size" + ) # return values if simOnly: @@ -318,8 +337,10 @@ def objFuncFitGrain(gFit, gFull, gFlag, else: rd = dict.fromkeys(det_keys_ordered) for det_key in det_keys_ordered: - rd[det_key] = {'calc_xy': calc_xy_dict[det_key], - 'calc_omes': calc_omes_dict[det_key]} + rd[det_key] = { + 'calc_xy': calc_xy_dict[det_key], + 'calc_omes': calc_omes_dict[det_key], + } retval = rd else: # return residual vector @@ -328,27 +349,34 @@ def objFuncFitGrain(gFit, gFull, gFlag, diff_ome = rotations.angularDifference( calc_omes_all, meas_xyo_all[:, 2] ) - retval = np.hstack([diff_vecs_xy, - diff_ome.reshape(npts, 1) - ]).flatten() + retval = np.hstack([diff_vecs_xy, diff_ome.reshape(npts, 1)]).flatten() if return_value_flag == 1: # return scalar sum of squared residuals retval = sum(abs(retval)) elif return_value_flag == 2: # return DOF-normalized chisq # TODO: check this calculation - denom = 3*npts - len(gFit) - 1. + denom = 3 * npts - len(gFit) - 1.0 if denom != 0: - nu_fac = 1. / denom + nu_fac = 1.0 / denom else: - nu_fac = 1. 
+ nu_fac = 1.0 retval = nu_fac * sum(retval**2) return retval -def matchOmegas(xyo_det, hkls_idx, chi, rMat_c, bMat, wavelength, - vInv=vInv_ref, beamVec=bVec_ref, etaVec=eta_ref, - omePeriod=None): +def matchOmegas( + xyo_det, + hkls_idx, + chi, + rMat_c, + bMat, + wavelength, + vInv=vInv_ref, + beamVec=bVec_ref, + etaVec=eta_ref, + omePeriod=None, +): """ For a given list of (x, y, ome) points, outputs the index into the results from oscillAnglesOfHKLs, including the calculated omega values. @@ -360,10 +388,15 @@ def matchOmegas(xyo_det, hkls_idx, chi, rMat_c, bMat, wavelength, meas_omes = xyo_det[:, 2] oangs0, oangs1 = xfcapi.oscill_angles_of_hkls( - hkls_idx.T, chi, rMat_c, bMat, wavelength, - v_inv=vInv, - beam_vec=beamVec, - eta_vec=etaVec) + hkls_idx.T, + chi, + rMat_c, + bMat, + wavelength, + v_inv=vInv, + beam_vec=beamVec, + eta_vec=etaVec, + ) if np.any(np.isnan(oangs0)): # debugging # TODO: remove this @@ -379,8 +412,12 @@ def matchOmegas(xyo_det, hkls_idx, chi, rMat_c, bMat, wavelength, # CAPI version gives vstacked angles... must be (2, nhkls) calc_omes = np.vstack([oangs0[:, 2], oangs1[:, 2]]) if omePeriod is not None: - calc_omes = np.vstack([rotations.mapAngle(oangs0[:, 2], omePeriod), - rotations.mapAngle(oangs1[:, 2], omePeriod)]) + calc_omes = np.vstack( + [ + rotations.mapAngle(oangs0[:, 2], omePeriod), + rotations.mapAngle(oangs1[:, 2], omePeriod), + ] + ) # do angular difference diff_omes = rotations.angularDifference( np.tile(meas_omes, (2, 1)), calc_omes diff --git a/hexrd/grainmap/__init__.py b/hexrd/hedm/grainmap/__init__.py similarity index 82% rename from hexrd/grainmap/__init__.py rename to hexrd/hedm/grainmap/__init__.py index 25873f0f5..d91f97694 100644 --- a/hexrd/grainmap/__init__.py +++ b/hexrd/hedm/grainmap/__init__.py @@ -1,28 +1,27 @@ # ============================================================ -# Copyright (c) 2012, Lawrence Livermore National Security, LLC. -# Produced at the Lawrence Livermore National Laboratory. 
-# Written by Joel Bernier and others. -# LLNL-CODE-529294. +# Copyright (c) 2012, Lawrence Livermore National Security, LLC. +# Produced at the Lawrence Livermore National Laboratory. +# Written by Joel Bernier and others. +# LLNL-CODE-529294. # All rights reserved. -# +# # This file is part of HEXRD. For details on dowloading the source, # see the file COPYING. -# +# # Please also see the file LICENSE. -# +# # This program is free software; you can redistribute it and/or modify it under the # terms of the GNU Lesser General Public License (as published by the Free Software # Foundation) version 2.1 dated February 1999. -# +# # This program is distributed in the hope that it will be useful, but -# WITHOUT ANY WARRANTY; without even the IMPLIED WARRANTY OF MERCHANTABILITY -# or FITNESS FOR A PARTICULAR PURPOSE. See the terms and conditions of the +# WITHOUT ANY WARRANTY; without even the IMPLIED WARRANTY OF MERCHANTABILITY +# or FITNESS FOR A PARTICULAR PURPOSE. See the terms and conditions of the # GNU General Public License for more details. -# +# # You should have received a copy of the GNU Lesser General Public # License along with this program (see file LICENSE); if not, write to # the Free Software Foundation, Inc., 59 Temple Place, Suite 330, # Boston, MA 02111-1307 USA or visit . 
# ============================================================ -"""Tools or X-ray diffraction analysis -""" +"""Tools or X-ray diffraction analysis""" diff --git a/hexrd/grainmap/nfutil.py b/hexrd/hedm/grainmap/nfutil.py similarity index 64% rename from hexrd/grainmap/nfutil.py rename to hexrd/hedm/grainmap/nfutil.py index 171f7c0f5..0513b45c8 100644 --- a/hexrd/grainmap/nfutil.py +++ b/hexrd/hedm/grainmap/nfutil.py @@ -22,13 +22,13 @@ # import of hexrd modules # import hexrd -from hexrd import constants -from hexrd import instrument -from hexrd import material -from hexrd import rotations -from hexrd.transforms import xfcapi -from hexrd import valunits -from hexrd import xrdutil +from hexrd.core import constants +from hexrd.core import instrument +from hexrd.core import material +from hexrd.core import rotations +from hexrd.core.transforms import xfcapi +from hexrd.core import valunits +from hexrd.hedm import xrdutil from skimage.morphology import dilation as ski_dilation @@ -40,6 +40,7 @@ rank = 0 try: from mpi4py import MPI + comm = MPI.COMM_WORLD world_size = comm.Get_size() rank = comm.Get_rank() @@ -56,7 +57,7 @@ try: import imageio as imgio -except(ImportError): +except ImportError: from skimage import io as imgio @@ -65,6 +66,7 @@ def load_instrument(yml): icfg = yaml.load(f, Loader=yaml.FullLoader) return instrument.HEDMInstrument(instrument_config=icfg) + # %% @@ -83,8 +85,13 @@ class ProcessController: track the results of the process as well as to provide clues of the progress of the process""" - def __init__(self, result_handler=None, progress_observer=None, ncpus=1, - chunk_size=100): + def __init__( + self, + result_handler=None, + progress_observer=None, + ncpus=1, + chunk_size=100, + ): self.rh = result_handler self.po = progress_observer self.ncpus = ncpus @@ -105,8 +112,12 @@ def finish(self, name): entry = self.timing.pop() assert name == entry[0] total = t - entry[2] - logging.info("%s took %8.3fs (%8.6fs per item).", - entry[0], total, 
total/entry[1]) + logging.info( + "%s took %8.3fs (%8.6fs per item).", + entry[0], + total, + total / entry[1], + ) def update(self, value): self.po.update(value) @@ -163,8 +174,9 @@ class ProgressBarProgressObserver: def start(self, name, count): from progressbar import ProgressBar, Percentage, Bar - self.pbar = ProgressBar(widgets=[name, Percentage(), Bar()], - maxval=count) + self.pbar = ProgressBar( + widgets=[name, Percentage(), Bar()], maxval=count + ) self.pbar.start() def update(self, value): @@ -187,6 +199,7 @@ def handle_result(self, key, value): def saving_result_handler(filename): """returns a result handler that saves the resulting arrays into a file with name filename""" + class SavingResultHandler: def __init__(self, file_name): self.filename = file_name @@ -216,6 +229,7 @@ def checking_result_handler(filename): match. A FULL PASS will happen when all existing results match """ + class CheckingResultHandler: def __init__(self, reference_file): """Checks the result against those save in 'reference_file'""" @@ -242,8 +256,9 @@ def handle_result(self, key, value): value = value.T check_len = min(len(reference), len(value)) - test_passed = np.allclose(value[:check_len], - reference[:check_len]) + test_passed = np.allclose( + value[:check_len], reference[:check_len] + ) if not test_passed: msg = "'{0}': FAIL" @@ -267,10 +282,11 @@ def handle_result(self, key, value): # %% OPTIMIZED BITS # ============================================================================= + # Some basic 3d algebra ======================================================= @numba.njit(nogil=True, cache=True) def _v3_dot(a, b): - return a[0]*b[0] + a[1]*b[1] + a[2]*b[2] + return a[0] * b[0] + a[1] * b[1] + a[2] * b[2] @numba.njit(nogil=True, cache=True) @@ -278,9 +294,9 @@ def _m33_v3_multiply(m, v, dst): v0 = v[0] v1 = v[1] v2 = v[2] - dst[0] = m[0, 0]*v0 + m[0, 1]*v1 + m[0, 2]*v2 - dst[1] = m[1, 0]*v0 + m[1, 1]*v1 + m[1, 2]*v2 - dst[2] = m[2, 0]*v0 + m[2, 1]*v1 + m[2, 2]*v2 + 
dst[0] = m[0, 0] * v0 + m[0, 1] * v1 + m[0, 2] * v2 + dst[1] = m[1, 0] * v0 + m[1, 1] * v1 + m[1, 2] * v2 + dst[2] = m[2, 0] * v0 + m[2, 1] * v1 + m[2, 2] * v2 return dst @@ -290,8 +306,8 @@ def _v3_normalized(src, dst): v0 = src[0] v1 = src[1] v2 = src[2] - sqr_norm = v0*v0 + v1*v1 + v2*v2 - inv_norm = 1.0 if sqr_norm == 0.0 else 1./np.sqrt(sqr_norm) + sqr_norm = v0 * v0 + v1 * v1 + v2 * v2 + inv_norm = 1.0 if sqr_norm == 0.0 else 1.0 / np.sqrt(sqr_norm) dst[0] = v0 * inv_norm dst[1] = v1 * inv_norm @@ -306,21 +322,22 @@ def _make_binary_rot_mat(src, dst): v1 = src[1] v2 = src[2] - dst[0, 0] = 2.0*v0*v0 - 1.0 - dst[0, 1] = 2.0*v0*v1 - dst[0, 2] = 2.0*v0*v2 - dst[1, 0] = 2.0*v1*v0 - dst[1, 1] = 2.0*v1*v1 - 1.0 - dst[1, 2] = 2.0*v1*v2 - dst[2, 0] = 2.0*v2*v0 - dst[2, 1] = 2.0*v2*v1 - dst[2, 2] = 2.0*v2*v2 - 1.0 + dst[0, 0] = 2.0 * v0 * v0 - 1.0 + dst[0, 1] = 2.0 * v0 * v1 + dst[0, 2] = 2.0 * v0 * v2 + dst[1, 0] = 2.0 * v1 * v0 + dst[1, 1] = 2.0 * v1 * v1 - 1.0 + dst[1, 2] = 2.0 * v1 * v2 + dst[2, 0] = 2.0 * v2 * v0 + dst[2, 1] = 2.0 * v2 * v1 + dst[2, 2] = 2.0 * v2 * v2 - 1.0 return dst # code transcribed in numba from transforms module ============================ + # This is equivalent to the transform module anglesToGVec, but written in # numba. 
This should end in a module to share with other scripts @numba.njit(nogil=True, cache=True) @@ -328,29 +345,41 @@ def _anglesToGVec(angs, rMat_ss, rMat_c): """From a set of angles return them in crystal space""" result = np.empty_like(angs) for i in range(len(angs)): - cx = np.cos(0.5*angs[i, 0]) - sx = np.sin(0.5*angs[i, 0]) + cx = np.cos(0.5 * angs[i, 0]) + sx = np.sin(0.5 * angs[i, 0]) cy = np.cos(angs[i, 1]) sy = np.sin(angs[i, 1]) - g0 = cx*cy - g1 = cx*sy + g0 = cx * cy + g1 = cx * sy g2 = sx # with g being [cx*xy, cx*sy, sx] # result = dot(rMat_c, dot(rMat_ss[i], g)) - t0_0 = \ - rMat_ss[i, 0, 0]*g0 + rMat_ss[i, 1, 0]*g1 + rMat_ss[i, 2, 0]*g2 - t0_1 = \ - rMat_ss[i, 0, 1]*g0 + rMat_ss[i, 1, 1]*g1 + rMat_ss[i, 2, 1]*g2 - t0_2 = \ - rMat_ss[i, 0, 2]*g0 + rMat_ss[i, 1, 2]*g1 + rMat_ss[i, 2, 2]*g2 - - result[i, 0] = \ - rMat_c[0, 0]*t0_0 + rMat_c[1, 0]*t0_1 + rMat_c[2, 0]*t0_2 - result[i, 1] = \ - rMat_c[0, 1]*t0_0 + rMat_c[1, 1]*t0_1 + rMat_c[2, 1]*t0_2 - result[i, 2] = \ - rMat_c[0, 2]*t0_0 + rMat_c[1, 2]*t0_1 + rMat_c[2, 2]*t0_2 + t0_0 = ( + rMat_ss[i, 0, 0] * g0 + + rMat_ss[i, 1, 0] * g1 + + rMat_ss[i, 2, 0] * g2 + ) + t0_1 = ( + rMat_ss[i, 0, 1] * g0 + + rMat_ss[i, 1, 1] * g1 + + rMat_ss[i, 2, 1] * g2 + ) + t0_2 = ( + rMat_ss[i, 0, 2] * g0 + + rMat_ss[i, 1, 2] * g1 + + rMat_ss[i, 2, 2] * g2 + ) + + result[i, 0] = ( + rMat_c[0, 0] * t0_0 + rMat_c[1, 0] * t0_1 + rMat_c[2, 0] * t0_2 + ) + result[i, 1] = ( + rMat_c[0, 1] * t0_0 + rMat_c[1, 1] * t0_1 + rMat_c[2, 1] * t0_2 + ) + result[i, 2] = ( + rMat_c[0, 2] * t0_0 + rMat_c[1, 2] * t0_1 + rMat_c[2, 2] * t0_2 + ) return result @@ -361,13 +390,14 @@ def _anglesToGVec(angs, rMat_ss, rMat_c): # temporary arrays is not competitive with the stack allocation using in # the C version of the code (WiP) + # tC varies per coord # gvec_cs, rSm varies per grain # # gvec_cs @numba.njit(nogil=True, cache=True) def _gvec_to_detector_array(vG_sn, rD, rSn, rC, tD, tS, tC): - """ beamVec is the beam vector: (0, 0, -1) in this 
case """ + """beamVec is the beam vector: (0, 0, -1) in this case""" ztol = xrdutil.epsf p3_l = np.empty((3,)) tmp_vec = np.empty((3,)) @@ -409,8 +439,8 @@ def _gvec_to_detector_array(vG_sn, rD, rSn, rC, tD, tS, tC): result[i, 1] = np.nan continue - u = num/denom - tmp_res = u*tD_l - p3_minus_p1_l + u = num / denom + tmp_res = u * tD_l - p3_minus_p1_l result[i, 0] = _v3_dot(tmp_res, rD[:, 0]) result[i, 1] = _v3_dot(tmp_res, rD[:, 1]) @@ -418,8 +448,9 @@ def _gvec_to_detector_array(vG_sn, rD, rSn, rC, tD, tS, tC): @numba.njit(nogil=True, cache=True) -def _quant_and_clip_confidence(coords, angles, image, - base, inv_deltas, clip_vals, bsp): +def _quant_and_clip_confidence( + coords, angles, image, base, inv_deltas, clip_vals, bsp +): """quantize and clip the parametric coordinates in coords + angles coords - (..., 2) array: input 2d parametric coordinates @@ -444,9 +475,8 @@ def _quant_and_clip_confidence(coords, angles, image, xf = coords[i, 0] yf = coords[i, 1] - # does not count intensity which is covered by the beamstop dcp 5.13.21 - if np.abs(yf-bsp[0])<(bsp[1]/2.): + if np.abs(yf - bsp[0]) < (bsp[1] / 2.0): continue xf = np.floor((xf - base[0]) * inv_deltas[0]) @@ -476,22 +506,24 @@ def _quant_and_clip_confidence(coords, angles, image, if image[z, y, x]: matches += 1 - return 0 if in_sensor == 0 else float(matches)/float(in_sensor) + return 0 if in_sensor == 0 else float(matches) / float(in_sensor) # ============================================================================== # %% DIFFRACTION SIMULATION # ============================================================================== -def get_simulate_diffractions(grain_params, experiment, - cache_file='gold_cubes.npy', - controller=None): + +def get_simulate_diffractions( + grain_params, experiment, cache_file='gold_cubes.npy', controller=None +): """getter functions that handles the caching of the simulation""" try: image_stack = np.load(cache_file, mmap_mode='r', allow_pickle=False) except Exception: - 
image_stack = simulate_diffractions(grain_params, experiment, - controller=controller) + image_stack = simulate_diffractions( + grain_params, experiment, controller=controller + ) np.save(cache_file, image_stack) controller.handle_result('image_stack', image_stack) @@ -503,9 +535,11 @@ def simulate_diffractions(grain_params, experiment, controller): """actual forward simulation of the diffraction""" # use a packed array for the image_stack - array_dims = (experiment.nframes, - experiment.ncols, - ((experiment.nrows - 1)//8) + 1) + array_dims = ( + experiment.nframes, + experiment.ncols, + ((experiment.nrows - 1) // 8) + 1, + ) image_stack = np.zeros(array_dims, dtype=np.uint8) count = len(grain_params) @@ -518,7 +552,9 @@ def simulate_diffractions(grain_params, experiment, controller): tS = experiment.tVec_s distortion = experiment.distortion - eta_range = [(-np.pi, np.pi), ] + eta_range = [ + (-np.pi, np.pi), + ] ome_range = experiment.ome_range ome_period = (-np.pi, np.pi) @@ -542,11 +578,16 @@ def simulate_diffractions(grain_params, experiment, controller): ) all_angs[:, 2] = rotations.mapAngle(all_angs[:, 2], ome_period) - proj_pts = _project(all_angs, rD, rC, chi, tD, - tC, tS, distortion) + proj_pts = _project(all_angs, rD, rC, chi, tD, tC, tS, distortion) det_xy = proj_pts[0] - _write_pixels(det_xy, all_angs[:, 2], image_stack, experiment.base, - experiment.inv_deltas, experiment.clip_vals) + _write_pixels( + det_xy, + all_angs[:, 2], + image_stack, + experiment.base, + experiment.inv_deltas, + experiment.clip_vals, + ) controller.update(i + 1) @@ -559,15 +600,18 @@ def simulate_diffractions(grain_params, experiment, controller): # ============================================================================== -def get_dilated_image_stack(image_stack, experiment, controller, - cache_file='gold_cubes_dilated.npy'): +def get_dilated_image_stack( + image_stack, experiment, controller, cache_file='gold_cubes_dilated.npy' +): try: - dilated_image_stack = 
np.load(cache_file, mmap_mode='r', - allow_pickle=False) + dilated_image_stack = np.load( + cache_file, mmap_mode='r', allow_pickle=False + ) except Exception: - dilated_image_stack = dilate_image_stack(image_stack, experiment, - controller) + dilated_image_stack = dilate_image_stack( + image_stack, experiment, controller + ) np.save(cache_file, dilated_image_stack) return dilated_image_stack @@ -577,20 +621,19 @@ def dilate_image_stack(image_stack, experiment, controller): # first, perform image dilation =========================================== # perform image dilation (using scikit_image dilation) subprocess = 'dilate image_stack' - dilation_shape = np.ones((2*experiment.row_dilation + 1, - 2*experiment.col_dilation + 1), - dtype=np.uint8) + dilation_shape = np.ones( + (2 * experiment.row_dilation + 1, 2 * experiment.col_dilation + 1), + dtype=np.uint8, + ) image_stack_dilated = np.empty_like(image_stack) dilated = np.empty( - (image_stack.shape[-2], image_stack.shape[-1] << 3), - dtype=bool + (image_stack.shape[-2], image_stack.shape[-1] << 3), dtype=bool ) n_images = len(image_stack) controller.start(subprocess, n_images) for i_image in range(n_images): to_dilate = np.unpackbits(image_stack[i_image], axis=-1) - ski_dilation(to_dilate, dilation_shape, - out=dilated) + ski_dilation(to_dilate, dilation_shape, out=dilated) image_stack_dilated[i_image] = np.packbits(dilated, axis=-1) controller.update(i_image + 1) controller.finish(subprocess) @@ -607,6 +650,7 @@ def dilate_image_stack(image_stack, experiment, controller): # booleans, an array of uint8 could be used so the image is stored # with a bit per pixel. 
+ @numba.njit(nogil=True, cache=True) def _write_pixels(coords, angles, image, base, inv_deltas, clip_vals): count = len(coords) @@ -625,7 +669,8 @@ def _write_pixels(coords, angles, image, base, inv_deltas, clip_vals): x_byte = x // 8 x_off = 7 - (x % 8) - image[z, y, x_byte] |= (1 << x_off) + image[z, y, x_byte] |= 1 << x_off + def get_offset_size(n_coords): offset = 0 @@ -640,6 +685,7 @@ def get_offset_size(n_coords): return (offset, size) + def gather_confidence(controller, confidence, n_grains, n_coords): if rank == 0: global_confidence = np.empty(n_grains * n_coords, dtype=np.float64) @@ -649,7 +695,9 @@ def gather_confidence(controller, confidence, n_grains, n_coords): # Calculate the send buffer sizes coords_per_rank = n_coords // world_size send_counts = np.full(world_size, coords_per_rank * n_grains) - send_counts[-1] = (n_coords - (coords_per_rank * (world_size-1))) * n_grains + send_counts[-1] = ( + n_coords - (coords_per_rank * (world_size - 1)) + ) * n_grains if rank == 0: # Time how long it takes to perform the MPI gather @@ -657,16 +705,25 @@ def gather_confidence(controller, confidence, n_grains, n_coords): # Transpose so the data will be more easily re-shaped into its final shape # Must be flattened as well so the underlying data is modified... 
- comm.Gatherv(confidence.T.flatten(), (global_confidence, send_counts), root=0) + comm.Gatherv( + confidence.T.flatten(), (global_confidence, send_counts), root=0 + ) if rank == 0: controller.finish('gather_confidence') confidence = global_confidence.reshape(n_coords, n_grains).T controller.handle_result("confidence", confidence) + # ============================================================================== # %% ORIENTATION TESTING # ============================================================================== -def test_orientations(image_stack, experiment, test_crds, controller, multiprocessing_start_method='fork'): +def test_orientations( + image_stack, + experiment, + test_crds, + controller, + multiprocessing_start_method='fork', +): """grand loop precomputing the grown image stack image-stack -- is the dilated image stack to be tested against. @@ -716,30 +773,43 @@ def test_orientations(image_stack, experiment, test_crds, controller, multiproce # grand loop ============================================================== # The near field simulation 'grand loop'. Where the bulk of computing is # performed. 
We are looking for a confidence matrix that has a n_grains - chunks = range(offset, offset+size, chunk_size) + chunks = range(offset, offset + size, chunk_size) subprocess = 'grand_loop' controller.start(subprocess, n_coords) finished = 0 ncpus = min(ncpus, len(chunks)) - logging.info(f'For {rank=}, {offset=}, {size=}, {chunks=}, {len(chunks)=}, {ncpus=}') + logging.info( + f'For {rank=}, {offset=}, {size=}, {chunks=}, {len(chunks)=}, {ncpus=}' + ) - logging.info('Checking confidence for %d coords, %d grains.', - n_coords, n_grains) + logging.info( + 'Checking confidence for %d coords, %d grains.', n_coords, n_grains + ) confidence = np.empty((n_grains, size)) if ncpus > 1: global _multiprocessing_start_method - _multiprocessing_start_method=multiprocessing_start_method - logging.info('Running multiprocess %d processes (%s)', - ncpus, _multiprocessing_start_method) - with grand_loop_pool(ncpus=ncpus, - state=(chunk_size, - image_stack, - all_angles, precomp, - test_crds, experiment)) as pool: - for rslice, rvalues in pool.imap_unordered(multiproc_inner_loop, - chunks): + _multiprocessing_start_method = multiprocessing_start_method + logging.info( + 'Running multiprocess %d processes (%s)', + ncpus, + _multiprocessing_start_method, + ) + with grand_loop_pool( + ncpus=ncpus, + state=( + chunk_size, + image_stack, + all_angles, + precomp, + test_crds, + experiment, + ), + ) as pool: + for rslice, rvalues in pool.imap_unordered( + multiproc_inner_loop, chunks + ): count = rvalues.shape[1] # We need to adjust this slice for the offset rslice = slice(rslice.start - offset, rslice.stop - offset) @@ -749,12 +819,15 @@ def test_orientations(image_stack, experiment, test_crds, controller, multiproce else: logging.info('Running in a single process') for chunk_start in chunks: - chunk_stop = min(n_coords, chunk_start+chunk_size) + chunk_stop = min(n_coords, chunk_start + chunk_size) rslice, rvalues = _grand_loop_inner( - image_stack, all_angles, - precomp, test_crds, 
experiment, + image_stack, + all_angles, + precomp, + test_crds, + experiment, start=chunk_start, - stop=chunk_stop + stop=chunk_stop, ) count = rvalues.shape[1] # We need to adjust this slice for the offset @@ -771,7 +844,6 @@ def test_orientations(image_stack, experiment, test_crds, controller, multiproce else: controller.handle_result("confidence", confidence) - return confidence @@ -794,17 +866,19 @@ def evaluate_diffraction_angles(experiment, controller=None): subprocess = 'evaluate diffraction angles' pbar = controller.start(subprocess, len(exp_maps)) all_angles = [] - ref_gparams = np.array([0., 0., 0., 1., 1., 1., 0., 0., 0.]) + ref_gparams = np.array([0.0, 0.0, 0.0, 1.0, 1.0, 1.0, 0.0, 0.0, 0.0]) for i, exp_map in enumerate(exp_maps): gparams = np.hstack([exp_map, ref_gparams]) - sim_results = xrdutil.simulateGVecs(plane_data, - detector_params, - gparams, - panel_dims=panel_dims_expanded, - pixel_pitch=pixel_size, - ome_range=ome_range, - ome_period=ome_period, - distortion=None) + sim_results = xrdutil.simulateGVecs( + plane_data, + detector_params, + gparams, + panel_dims=panel_dims_expanded, + pixel_pitch=pixel_size, + ome_range=ome_range, + ome_period=ome_period, + distortion=None, + ) all_angles.append(sim_results[2]) controller.update(i + 1) controller.finish(subprocess) @@ -812,8 +886,9 @@ def evaluate_diffraction_angles(experiment, controller=None): return all_angles -def _grand_loop_inner(image_stack, angles, precomp, - coords, experiment, start=0, stop=None): +def _grand_loop_inner( + image_stack, angles, precomp, coords, experiment, start=0, stop=None +): """Actual simulation code for a chunk of data. It will be used both, in single processor and multiprocessor cases. Chunking is performed on the coords. 
@@ -842,7 +917,7 @@ def _grand_loop_inner(image_stack, angles, precomp, inv_deltas = experiment.inv_deltas clip_vals = experiment.clip_vals distortion = experiment.distortion - bsp = experiment.bsp #beam stop vertical center and width + bsp = experiment.bsp # beam stop vertical center and width _to_detector = xfcapi.gvec_to_xy # _to_detector = _gvec_to_detector_array @@ -856,7 +931,7 @@ def _grand_loop_inner(image_stack, angles, precomp, acc_detector = 0.0 acc_distortion = 0.0 acc_quant_clip = 0.0 - confidence = np.zeros((n_angles, stop-start)) + confidence = np.zeros((n_angles, stop - start)) grains = 0 crds = 0 @@ -872,8 +947,15 @@ def _grand_loop_inner(image_stack, angles, precomp, gvec_cs, rD, rMat_ss, rC, tD, tS, coords[icrd] ) t1 = timeit.default_timer() - c = _quant_and_clip_confidence(det_xy, angs[:, 2], image_stack, - base, inv_deltas, clip_vals, bsp) + c = _quant_and_clip_confidence( + det_xy, + angs[:, 2], + image_stack, + base, + inv_deltas, + clip_vals, + bsp, + ) t2 = timeit.default_timer() acc_detector += t1 - t0 acc_quant_clip += t2 - t1 @@ -889,12 +971,19 @@ def _grand_loop_inner(image_stack, angles, precomp, t0 = timeit.default_timer() tmp_xys = _to_detector( gvec_cs, rD, rMat_ss, rC, tD, tS, coords[icrd] - ) #changed to tmp_xys from det_xy, dcp 2021_05_30 + ) # changed to tmp_xys from det_xy, dcp 2021_05_30 t1 = timeit.default_timer() det_xy = distortion_fn(tmp_xys, distortion_args, invert=True) t2 = timeit.default_timer() - c = _quant_and_clip_confidence(det_xy, angs[:, 2], image_stack, - base, inv_deltas, clip_vals,bsp) + c = _quant_and_clip_confidence( + det_xy, + angs[:, 2], + image_stack, + base, + inv_deltas, + clip_vals, + bsp, + ) t3 = timeit.default_timer() acc_detector += t1 - t0 acc_distortion += t2 - t1 @@ -926,6 +1015,7 @@ def generate_test_grid(low, top, samples): # would be less efficient in memory (as joblib memmaps by default the big # arrays, meaning they may be shared between processes). 
+ def multiproc_inner_loop(chunk): """function to use in multiprocessing that computes the simulation over the task's alloted chunk of data""" @@ -935,7 +1025,7 @@ def multiproc_inner_loop(chunk): (offset, size) = get_offset_size(n_coords) - chunk_stop = min(offset+size, chunk+chunk_size) + chunk_stop = min(offset + size, chunk + chunk_size) return _grand_loop_inner(*_mp_state[1:], start=chunk, stop=chunk_stop) @@ -982,7 +1072,7 @@ def grand_loop_pool(ncpus, state): _mp_state = state pool = multiprocessing.Pool(ncpus) yield pool - del (_mp_state) + del _mp_state else: # Use SPAWN multiprocessing. @@ -994,19 +1084,23 @@ def grand_loop_pool(ncpus, state): # joblib). In theory, joblib uses memmap for arrays if they are not # compressed, so no compression is used for the bigger arrays. import joblib + tmp_dir = tempfile.mkdtemp(suffix='-nf-grand-loop') try: # dumb dumping doesn't seem to work very well.. do something ad-hoc logging.info('Using "%s" as temporary directory.', tmp_dir) - id_exp = joblib.dump(state[-1], - os.path.join(tmp_dir, - 'grand-loop-experiment.gz'), - compress=True) - id_state = joblib.dump(state[:-1], - os.path.join(tmp_dir, 'grand-loop-data')) - pool = multiprocessing.Pool(ncpus, worker_init, - (id_state[0], id_exp[0])) + id_exp = joblib.dump( + state[-1], + os.path.join(tmp_dir, 'grand-loop-experiment.gz'), + compress=True, + ) + id_state = joblib.dump( + state[:-1], os.path.join(tmp_dir, 'grand-loop-data') + ) + pool = multiprocessing.Pool( + ncpus, worker_init, (id_state[0], id_exp[0]) + ) yield pool finally: logging.info('Deleting "%s".', tmp_dir) @@ -1018,37 +1112,53 @@ def grand_loop_pool(ncpus, state): def gen_nf_test_grid(cross_sectional_dim, v_bnds, voxel_spacing): - Zs_list=np.arange(-cross_sectional_dim/2.+voxel_spacing/2.,cross_sectional_dim/2.,voxel_spacing) - Xs_list=np.arange(-cross_sectional_dim/2.+voxel_spacing/2.,cross_sectional_dim/2.,voxel_spacing) - + Zs_list = np.arange( + -cross_sectional_dim / 2.0 + voxel_spacing / 2.0, + 
cross_sectional_dim / 2.0, + voxel_spacing, + ) + Xs_list = np.arange( + -cross_sectional_dim / 2.0 + voxel_spacing / 2.0, + cross_sectional_dim / 2.0, + voxel_spacing, + ) - if v_bnds[0]==v_bnds[1]: - Xs,Ys,Zs=np.meshgrid(Xs_list,v_bnds[0],Zs_list) + if v_bnds[0] == v_bnds[1]: + Xs, Ys, Zs = np.meshgrid(Xs_list, v_bnds[0], Zs_list) else: - Xs,Ys,Zs=np.meshgrid(Xs_list,np.arange(v_bnds[0]+voxel_spacing/2.,v_bnds[1],voxel_spacing),Zs_list) - #note numpy shaping of arrays is goofy, returns(length(y),length(x),length(z)) - - + Xs, Ys, Zs = np.meshgrid( + Xs_list, + np.arange( + v_bnds[0] + voxel_spacing / 2.0, v_bnds[1], voxel_spacing + ), + Zs_list, + ) + # note numpy shaping of arrays is goofy, returns(length(y),length(x),length(z)) test_crds = np.vstack([Xs.flatten(), Ys.flatten(), Zs.flatten()]).T n_crds = len(test_crds) - return test_crds, n_crds, Xs, Ys, Zs def gen_nf_test_grid_tomo(x_dim_pnts, z_dim_pnts, v_bnds, voxel_spacing): - if v_bnds[0]==v_bnds[1]: - Xs,Ys,Zs=np.meshgrid(np.arange(x_dim_pnts),v_bnds[0],np.arange(z_dim_pnts)) + if v_bnds[0] == v_bnds[1]: + Xs, Ys, Zs = np.meshgrid( + np.arange(x_dim_pnts), v_bnds[0], np.arange(z_dim_pnts) + ) else: - Xs,Ys,Zs=np.meshgrid(np.arange(x_dim_pnts),np.arange(v_bnds[0]+voxel_spacing/2.,v_bnds[1],voxel_spacing),np.arange(z_dim_pnts)) - #note numpy shaping of arrays is goofy, returns(length(y),length(x),length(z)) - - - Zs=(Zs-(z_dim_pnts/2))*voxel_spacing - Xs=(Xs-(x_dim_pnts/2))*voxel_spacing + Xs, Ys, Zs = np.meshgrid( + np.arange(x_dim_pnts), + np.arange( + v_bnds[0] + voxel_spacing / 2.0, v_bnds[1], voxel_spacing + ), + np.arange(z_dim_pnts), + ) + # note numpy shaping of arrays is goofy, returns(length(y),length(x),length(z)) + Zs = (Zs - (z_dim_pnts / 2)) * voxel_spacing + Xs = (Xs - (x_dim_pnts / 2)) * voxel_spacing test_crds = np.vstack([Xs.flatten(), Ys.flatten(), Zs.flatten()]).T n_crds = len(test_crds) @@ -1058,22 +1168,38 @@ def gen_nf_test_grid_tomo(x_dim_pnts, z_dim_pnts, v_bnds, voxel_spacing): # 
%% -def gen_nf_dark(data_folder,img_nums,num_for_dark,nrows,ncols,dark_type='median',stem='nf_',num_digits=5,ext='.tif'): - dark_stack=np.zeros([num_for_dark,nrows,ncols]) +def gen_nf_dark( + data_folder, + img_nums, + num_for_dark, + nrows, + ncols, + dark_type='median', + stem='nf_', + num_digits=5, + ext='.tif', +): + + dark_stack = np.zeros([num_for_dark, nrows, ncols]) print('Loading data for dark generation...') for ii in np.arange(num_for_dark): print('Image #: ' + str(ii)) - dark_stack[ii,:,:]=imgio.imread(data_folder+'%s'%(stem)+str(img_nums[ii]).zfill(num_digits)+ext) - #image_stack[ii,:,:]=np.flipud(tmp_img>threshold) + dark_stack[ii, :, :] = imgio.imread( + data_folder + + '%s' % (stem) + + str(img_nums[ii]).zfill(num_digits) + + ext + ) + # image_stack[ii,:,:]=np.flipud(tmp_img>threshold) - if dark_type=='median': + if dark_type == 'median': print('making median...') - dark=np.median(dark_stack,axis=0) - elif dark_type=='min': + dark = np.median(dark_stack, axis=0) + elif dark_type == 'min': print('making min...') - dark=np.min(dark_stack,axis=0) + dark = np.min(dark_stack, axis=0) return dark @@ -1081,49 +1207,77 @@ def gen_nf_dark(data_folder,img_nums,num_for_dark,nrows,ncols,dark_type='median' # %% -def gen_nf_cleaned_image_stack(data_folder,img_nums,dark,nrows,ncols, \ - process_type='gaussian',process_args=[4.5,5], \ - threshold=1.5,ome_dilation_iter=1,stem='nf_', \ - num_digits=5,ext='.tif'): +def gen_nf_cleaned_image_stack( + data_folder, + img_nums, + dark, + nrows, + ncols, + process_type='gaussian', + process_args=[4.5, 5], + threshold=1.5, + ome_dilation_iter=1, + stem='nf_', + num_digits=5, + ext='.tif', +): - image_stack=np.zeros([img_nums.shape[0],nrows,ncols],dtype=bool) + image_stack = np.zeros([img_nums.shape[0], nrows, ncols], dtype=bool) print('Loading and Cleaning Images...') - - if process_type=='gaussian': - sigma=process_args[0] - size=process_args[1].astype(int) #needs to be int + if process_type == 'gaussian': + sigma = 
process_args[0] + size = process_args[1].astype(int) # needs to be int for ii in np.arange(img_nums.shape[0]): print('Image #: ' + str(ii)) - tmp_img=imgio.imread(data_folder+'%s'%(stem)+str(img_nums[ii]).zfill(num_digits)+ext)-dark - #image procesing + tmp_img = ( + imgio.imread( + data_folder + + '%s' % (stem) + + str(img_nums[ii]).zfill(num_digits) + + ext + ) + - dark + ) + # image procesing tmp_img = filters.gaussian(tmp_img, sigma=sigma) - tmp_img = img.morphology.grey_closing(tmp_img,size=(size,size)) + tmp_img = img.morphology.grey_closing(tmp_img, size=(size, size)) - binary_img = img.morphology.binary_fill_holes(tmp_img>threshold) - image_stack[ii,:,:]=binary_img + binary_img = img.morphology.binary_fill_holes(tmp_img > threshold) + image_stack[ii, :, :] = binary_img else: - num_erosions=process_args[0] - num_dilations=process_args[1] - + num_erosions = process_args[0] + num_dilations = process_args[1] for ii in np.arange(img_nums.shape[0]): print('Image #: ' + str(ii)) - tmp_img=imgio.imread(data_folder+'%s'%(stem)+str(img_nums[ii]).zfill(num_digits)+ext)-dark - #image procesing - image_stack[ii,:,:]=img.morphology.binary_erosion(tmp_img>threshold,iterations=num_erosions) - image_stack[ii,:,:]=img.morphology.binary_dilation(image_stack[ii,:,:],iterations=num_dilations) - + tmp_img = ( + imgio.imread( + data_folder + + '%s' % (stem) + + str(img_nums[ii]).zfill(num_digits) + + ext + ) + - dark + ) + # image procesing + image_stack[ii, :, :] = img.morphology.binary_erosion( + tmp_img > threshold, iterations=num_erosions + ) + image_stack[ii, :, :] = img.morphology.binary_dilation( + image_stack[ii, :, :], iterations=num_dilations + ) - #%A final dilation that includes omega + # %A final dilation that includes omega print('Final Dilation Including Omega....') - image_stack=img.morphology.binary_dilation(image_stack,iterations=ome_dilation_iter) - + image_stack = img.morphology.binary_dilation( + image_stack, iterations=ome_dilation_iter + ) return 
image_stack @@ -1131,87 +1285,104 @@ def gen_nf_cleaned_image_stack(data_folder,img_nums,dark,nrows,ncols, \ # %% -def gen_trial_exp_data(grain_out_file,det_file,mat_file, mat_name, max_tth, comp_thresh, chi2_thresh, misorientation_bnd, \ - misorientation_spacing,ome_range_deg, nframes, beam_stop_parms): - +def gen_trial_exp_data( + grain_out_file, + det_file, + mat_file, + mat_name, + max_tth, + comp_thresh, + chi2_thresh, + misorientation_bnd, + misorientation_spacing, + ome_range_deg, + nframes, + beam_stop_parms, +): print('Loading Grain Data...') - #gen_grain_data - ff_data=np.loadtxt(grain_out_file) + # gen_grain_data + ff_data = np.loadtxt(grain_out_file) - #ff_data=np.atleast_2d(ff_data[2,:]) - - exp_maps=ff_data[:,3:6] - t_vec_ds=ff_data[:,6:9] + # ff_data=np.atleast_2d(ff_data[2,:]) + exp_maps = ff_data[:, 3:6] + t_vec_ds = ff_data[:, 6:9] # - completeness=ff_data[:,1] + completeness = ff_data[:, 1] - chi2=ff_data[:,2] + chi2 = ff_data[:, 2] - n_grains=exp_maps.shape[0] + n_grains = exp_maps.shape[0] rMat_c = rotations.rotMatOfExpMap(exp_maps.T) - cut=np.where(np.logical_and(completeness>comp_thresh,chi2 comp_thresh, chi2 < chi2_thresh) + )[0] + exp_maps = exp_maps[cut, :] + t_vec_ds = t_vec_ds[cut, :] + chi2 = chi2[cut] # Add Misorientation - mis_amt=misorientation_bnd*np.pi/180. - spacing=misorientation_spacing*np.pi/180. 
+ mis_amt = misorientation_bnd * np.pi / 180.0 + spacing = misorientation_spacing * np.pi / 180.0 - mis_steps = int(misorientation_bnd/misorientation_spacing) - - ori_pts = np.arange(-mis_amt, (mis_amt+(spacing*0.999)),spacing) - num_ori_grid_pts=ori_pts.shape[0]**3 - num_oris=exp_maps.shape[0] + mis_steps = int(misorientation_bnd / misorientation_spacing) + ori_pts = np.arange(-mis_amt, (mis_amt + (spacing * 0.999)), spacing) + num_ori_grid_pts = ori_pts.shape[0] ** 3 + num_oris = exp_maps.shape[0] XsO, YsO, ZsO = np.meshgrid(ori_pts, ori_pts, ori_pts) grid0 = np.vstack([XsO.flatten(), YsO.flatten(), ZsO.flatten()]).T - - exp_maps_expanded=np.zeros([num_ori_grid_pts*num_oris,3]) - t_vec_ds_expanded=np.zeros([num_ori_grid_pts*num_oris,3]) - + exp_maps_expanded = np.zeros([num_ori_grid_pts * num_oris, 3]) + t_vec_ds_expanded = np.zeros([num_ori_grid_pts * num_oris, 3]) for ii in np.arange(num_oris): - pts_to_use=np.arange(num_ori_grid_pts)+ii*num_ori_grid_pts - exp_maps_expanded[pts_to_use,:]=grid0+np.r_[exp_maps[ii,:] ] - t_vec_ds_expanded[pts_to_use,:]=np.r_[t_vec_ds[ii,:] ] - + pts_to_use = np.arange(num_ori_grid_pts) + ii * num_ori_grid_pts + exp_maps_expanded[pts_to_use, :] = grid0 + np.r_[exp_maps[ii, :]] + t_vec_ds_expanded[pts_to_use, :] = np.r_[t_vec_ds[ii, :]] - exp_maps=exp_maps_expanded - t_vec_ds=t_vec_ds_expanded + exp_maps = exp_maps_expanded + t_vec_ds = t_vec_ds_expanded - n_grains=exp_maps.shape[0] + n_grains = exp_maps.shape[0] rMat_c = rotations.rotMatOfExpMap(exp_maps.T) - print('Loading Instrument Data...') - ome_period_deg=(ome_range_deg[0][0], (ome_range_deg[0][0]+360.)) #degrees - ome_step_deg=(ome_range_deg[0][1]-ome_range_deg[0][0])/nframes #degrees - - - ome_period = (ome_period_deg[0]*np.pi/180.,ome_period_deg[1]*np.pi/180.) - ome_range = [(ome_range_deg[0][0]*np.pi/180.,ome_range_deg[0][1]*np.pi/180.)] - ome_step = ome_step_deg*np.pi/180. 
- - - - ome_edges = np.arange(nframes+1)*ome_step+ome_range[0][0]#fixed 2/26/17 + ome_period_deg = ( + ome_range_deg[0][0], + (ome_range_deg[0][0] + 360.0), + ) # degrees + ome_step_deg = ( + ome_range_deg[0][1] - ome_range_deg[0][0] + ) / nframes # degrees + + ome_period = ( + ome_period_deg[0] * np.pi / 180.0, + ome_period_deg[1] * np.pi / 180.0, + ) + ome_range = [ + ( + ome_range_deg[0][0] * np.pi / 180.0, + ome_range_deg[0][1] * np.pi / 180.0, + ) + ] + ome_step = ome_step_deg * np.pi / 180.0 + ome_edges = ( + np.arange(nframes + 1) * ome_step + ome_range[0][0] + ) # fixed 2/26/17 - instr=load_instrument(det_file) + instr = load_instrument(det_file) panel = next(iter(instr.detectors.values())) # !!! there is only 1 - # tranform paramters + # tranform paramters # Sample chi = instr.chi tVec_s = instr.tvec @@ -1228,8 +1399,7 @@ def gen_trial_exp_data(grain_out_file,det_file,mat_file, mat_name, max_tth, comp ncols = panel.cols # panel dimensions - panel_dims = [tuple(panel.corner_ll), - tuple(panel.corner_ur)] + panel_dims = [tuple(panel.corner_ll), tuple(panel.corner_ur)] x_col_edges = panel.col_edge_vec y_row_edges = panel.row_edge_vec @@ -1242,13 +1412,15 @@ def gen_trial_exp_data(grain_out_file,det_file,mat_file, mat_name, max_tth, comp # a different parametrization for the sensor # (makes for faster quantization) - base = np.array([x_col_edges[0], - y_row_edges[0], - ome_edges[0]]) - deltas = np.array([x_col_edges[1] - x_col_edges[0], - y_row_edges[1] - y_row_edges[0], - ome_edges[1] - ome_edges[0]]) - inv_deltas = 1.0/deltas + base = np.array([x_col_edges[0], y_row_edges[0], ome_edges[0]]) + deltas = np.array( + [ + x_col_edges[1] - x_col_edges[0], + y_row_edges[1] - y_row_edges[0], + ome_edges[1] - ome_edges[0], + ] + ) + inv_deltas = 1.0 / deltas clip_vals = np.array([ncols, nrows]) # # dilation @@ -1256,11 +1428,11 @@ def gen_trial_exp_data(grain_out_file,det_file,mat_file, mat_name, max_tth, comp # row_dilation = int(np.ceil(0.5 * max_diameter/row_ps)) 
# col_dilation = int(np.ceil(0.5 * max_diameter/col_ps)) - - print('Loading Materials Data...') # crystallography data - beam_energy = valunits.valWUnit("beam_energy", "energy", instr.beam_energy, "keV") + beam_energy = valunits.valWUnit( + "beam_energy", "energy", instr.beam_energy, "keV" + ) beam_wavelength = constants.keVToAngstrom(beam_energy.getVal('keV')) if max_tth is not None: dmin = valWUnit("dmin", "length", @@ -1272,16 +1444,14 @@ def gen_trial_exp_data(grain_out_file,det_file,mat_file, mat_name, max_tth, comp "angstrom") # material loading - mats = material.load_materials_hdf5(mat_file, dmin=dmin,kev=beam_energy) + mats = material.load_materials_hdf5(mat_file, dmin=dmin, kev=beam_energy) pd = mats[mat_name].planeData if max_tth is not None: - pd.tThMax = np.amax(np.radians(max_tth)) + pd.tThMax = np.amax(np.radians(max_tth)) else: pd.tThMax = np.amax(max_pixel_tth) - - print('Final Assembly...') experiment = argparse.Namespace() # grains related information @@ -1314,62 +1484,109 @@ def gen_trial_exp_data(grain_out_file,det_file,mat_file, mat_name, max_tth, comp experiment.clip_vals = clip_vals experiment.bsp = beam_stop_parms - - if mis_steps ==0: + if mis_steps == 0: nf_to_ff_id_map = cut else: - nf_to_ff_id_map=np.tile(cut,3**3*mis_steps) + nf_to_ff_id_map = np.tile(cut, 3**3 * mis_steps) return experiment, nf_to_ff_id_map -def process_raw_confidence(raw_confidence,vol_shape=None,id_remap=None,min_thresh=0.0): + +def process_raw_confidence( + raw_confidence, vol_shape=None, id_remap=None, min_thresh=0.0 +): print('Compiling Confidence Map...') if vol_shape == None: - confidence_map=np.max(raw_confidence,axis=0) - grain_map=np.argmax(raw_confidence,axis=0) + confidence_map = np.max(raw_confidence, axis=0) + grain_map = np.argmax(raw_confidence, axis=0) else: - confidence_map=np.max(raw_confidence,axis=0).reshape(vol_shape) - grain_map=np.argmax(raw_confidence,axis=0).reshape(vol_shape) - - - #fix grain indexing - 
not_indexed=np.where(confidence_map<=min_thresh) - grain_map[not_indexed] =-1 + confidence_map = np.max(raw_confidence, axis=0).reshape(vol_shape) + grain_map = np.argmax(raw_confidence, axis=0).reshape(vol_shape) + # fix grain indexing + not_indexed = np.where(confidence_map <= min_thresh) + grain_map[not_indexed] = -1 if id_remap is not None: - max_grain_no=np.max(grain_map) - grain_map_copy=copy.copy(grain_map) + max_grain_no = np.max(grain_map) + grain_map_copy = copy.copy(grain_map) print('Remapping grain ids to ff...') for ii in np.arange(max_grain_no): - this_grain=np.where(grain_map==ii) - grain_map_copy[this_grain]=id_remap[ii] - grain_map=grain_map_copy + this_grain = np.where(grain_map == ii) + grain_map_copy[this_grain] = id_remap[ii] + grain_map = grain_map_copy return grain_map.astype(int), confidence_map # %% -def save_raw_confidence(save_dir,save_stem,raw_confidence,id_remap=None): +def save_raw_confidence(save_dir, save_stem, raw_confidence, id_remap=None): print('Saving raw confidence, might take a while...') if id_remap is not None: - np.savez(save_dir+save_stem+'_raw_confidence.npz',raw_confidence=raw_confidence,id_remap=id_remap) + np.savez( + save_dir + save_stem + '_raw_confidence.npz', + raw_confidence=raw_confidence, + id_remap=id_remap, + ) else: - np.savez(save_dir+save_stem+'_raw_confidence.npz',raw_confidence=raw_confidence) + np.savez( + save_dir + save_stem + '_raw_confidence.npz', + raw_confidence=raw_confidence, + ) + + # %% -def save_nf_data(save_dir,save_stem,grain_map,confidence_map,Xs,Ys,Zs,ori_list,id_remap=None): + +def save_nf_data( + save_dir, + save_stem, + grain_map, + confidence_map, + Xs, + Ys, + Zs, + ori_list, + id_remap=None, +): print('Saving grain map data...') if id_remap is not None: - np.savez(save_dir+save_stem+'_grain_map_data.npz',grain_map=grain_map,confidence_map=confidence_map,Xs=Xs,Ys=Ys,Zs=Zs,ori_list=ori_list,id_remap=id_remap) + np.savez( + save_dir + save_stem + '_grain_map_data.npz', + 
grain_map=grain_map, + confidence_map=confidence_map, + Xs=Xs, + Ys=Ys, + Zs=Zs, + ori_list=ori_list, + id_remap=id_remap, + ) else: - np.savez(save_dir+save_stem+'_grain_map_data.npz',grain_map=grain_map,confidence_map=confidence_map,Xs=Xs,Ys=Ys,Zs=Zs,ori_list=ori_list) + np.savez( + save_dir + save_stem + '_grain_map_data.npz', + grain_map=grain_map, + confidence_map=confidence_map, + Xs=Xs, + Ys=Ys, + Zs=Zs, + ori_list=ori_list, + ) # %% -def scan_detector_parm(image_stack, experiment,test_crds,controller,parm_to_opt,parm_range,slice_shape,ang='deg'): + +def scan_detector_parm( + image_stack, + experiment, + test_crds, + controller, + parm_to_opt, + parm_range, + slice_shape, + ang='deg', +): # 0-distance # 1-x center # 2-y center @@ -1377,42 +1594,46 @@ def scan_detector_parm(image_stack, experiment,test_crds,controller,parm_to_opt, # 4-ytilt # 5-ztilt - parm_vector=np.arange(parm_range[0],parm_range[1]+1e-6,(parm_range[1]-parm_range[0])/parm_range[2]) + parm_vector = np.arange( + parm_range[0], + parm_range[1] + 1e-6, + (parm_range[1] - parm_range[0]) / parm_range[2], + ) - if parm_to_opt>2 and ang=='deg': - parm_vector=parm_vector*np.pi/180. 
+ if parm_to_opt > 2 and ang == 'deg': + parm_vector = parm_vector * np.pi / 180.0 multiprocessing_start_method = 'fork' if hasattr(os, 'fork') else 'spawn' # current detector parameters, note the value for the actively optimized # parameters will be ignored - distance=experiment.detector_params[5]#mm - x_cen=experiment.detector_params[3]#mm - y_cen=experiment.detector_params[4]#mm - xtilt=experiment.detector_params[0] - ytilt=experiment.detector_params[1] - ztilt=experiment.detector_params[2] - ome_range=copy.copy(experiment.ome_range) - ome_period=copy.copy(experiment.ome_period) - ome_edges=copy.copy(experiment.ome_edges) + distance = experiment.detector_params[5] # mm + x_cen = experiment.detector_params[3] # mm + y_cen = experiment.detector_params[4] # mm + xtilt = experiment.detector_params[0] + ytilt = experiment.detector_params[1] + ztilt = experiment.detector_params[2] + ome_range = copy.copy(experiment.ome_range) + ome_period = copy.copy(experiment.ome_period) + ome_edges = copy.copy(experiment.ome_edges) - num_parm_pts=len(parm_vector) + num_parm_pts = len(parm_vector) - trial_data=np.zeros([num_parm_pts,slice_shape[0],slice_shape[1]]) + trial_data = np.zeros([num_parm_pts, slice_shape[0], slice_shape[1]]) - tmp_td=copy.copy(experiment.tVec_d) + tmp_td = copy.copy(experiment.tVec_d) for jj in np.arange(num_parm_pts): - print('cycle %d of %d'%(jj+1,num_parm_pts)) + print('cycle %d of %d' % (jj + 1, num_parm_pts)) # overwrite translation vector components - if parm_to_opt==0: - tmp_td[2]=parm_vector[jj] + if parm_to_opt == 0: + tmp_td[2] = parm_vector[jj] - if parm_to_opt==1: - tmp_td[0]=parm_vector[jj] + if parm_to_opt == 1: + tmp_td[0] = parm_vector[jj] - if parm_to_opt==2: - tmp_td[1]=parm_vector[jj] + if parm_to_opt == 2: + tmp_td[1] = parm_vector[jj] if parm_to_opt == 3: rMat_d_tmp = xfcapi.make_detector_rmat( @@ -1432,7 +1653,7 @@ def scan_detector_parm(image_stack, experiment,test_crds,controller,parm_to_opt, experiment.rMat_d = rMat_d_tmp 
experiment.tVec_d = tmp_td - if parm_to_opt==6: + if parm_to_opt == 6: experiment.ome_range = [ ( @@ -1452,24 +1673,34 @@ def scan_detector_parm(image_stack, experiment,test_crds,controller,parm_to_opt, # print(experiment.ome_edges) # print(experiment.base) - conf=test_orientations(image_stack, experiment,test_crds,controller, \ - multiprocessing_start_method) + conf = test_orientations( + image_stack, + experiment, + test_crds, + controller, + multiprocessing_start_method, + ) - trial_data[jj]=np.max(conf,axis=0).reshape(slice_shape) + trial_data[jj] = np.max(conf, axis=0).reshape(slice_shape) return trial_data, parm_vector + # %% -def plot_ori_map(grain_map, confidence_map, exp_maps, layer_no,mat,id_remap=None): - grains_plot=np.squeeze(grain_map[layer_no,:,:]) - conf_plot=np.squeeze(confidence_map[layer_no,:,:]) - n_grains=len(exp_maps) +def plot_ori_map( + grain_map, confidence_map, exp_maps, layer_no, mat, id_remap=None +): + + grains_plot = np.squeeze(grain_map[layer_no, :, :]) + conf_plot = np.squeeze(confidence_map[layer_no, :, :]) + n_grains = len(exp_maps) rgb_image = np.zeros( - [grains_plot.shape[0], grains_plot.shape[1], 4], dtype='float32') - rgb_image[:, :, 3] = 1. 
+ [grains_plot.shape[0], grains_plot.shape[1], 4], dtype='float32' + ) + rgb_image[:, :, 3] = 1.0 for ii in np.arange(n_grains): if id_remap is not None: @@ -1482,25 +1713,32 @@ def plot_ori_map(grain_map, confidence_map, exp_maps, layer_no,mat,id_remap=None rmats = rotations.rotMatOfExpMap(ori) rgb = mat.unitcell.color_orientations( - rmats, ref_dir=np.array([0., 1., 0.])) + rmats, ref_dir=np.array([0.0, 1.0, 0.0]) + ) - #color mapping + # color mapping rgb_image[this_grain[0], this_grain[1], 0] = rgb[0][0] rgb_image[this_grain[0], this_grain[1], 1] = rgb[0][1] rgb_image[this_grain[0], this_grain[1], 2] = rgb[0][2] - - fig1 = plt.figure() plt.imshow(rgb_image, interpolation='none') plt.title('Layer %d Grain Map' % layer_no) - #plt.show() + # plt.show() plt.hold(True) - #fig2 = plt.figure() - plt.imshow(conf_plot, vmin=0.0, vmax=1., - interpolation='none', cmap=plt.cm.gray, alpha=0.5) + # fig2 = plt.figure() + plt.imshow( + conf_plot, + vmin=0.0, + vmax=1.0, + interpolation='none', + cmap=plt.cm.gray, + alpha=0.5, + ) plt.title('Layer %d Confidence Map' % layer_no) plt.show() + + # ============================================================================== # %% SCRIPT ENTRY AND PARAMETER HANDLING # ============================================================================== @@ -1555,12 +1793,15 @@ def plot_ori_map(grain_map, confidence_map, exp_maps, layer_no,mat,id_remap=None # return args -def build_controller(check=None,generate=None,ncpus=2,chunk_size=10,limit=None): +def build_controller( + check=None, generate=None, ncpus=2, chunk_size=10, limit=None +): # builds the controller to use based on the args # result handle try: import progressbar + progress_handler = progressbar_progress_observer() except ImportError: progress_handler = null_progress_observer() @@ -1569,7 +1810,8 @@ def build_controller(check=None,generate=None,ncpus=2,chunk_size=10,limit=None): if generate is not None: logging.warn( "generating and checking can not happen at the same time, " 
- + "going with checking") + + "going with checking" + ) result_handler = checking_result_handler(check) elif generate is not None: @@ -1581,71 +1823,99 @@ def build_controller(check=None,generate=None,ncpus=2,chunk_size=10,limit=None): # logging.warn("Multiprocessing on Windows is disabled for now") # args.ncpus = 1 - controller = ProcessController(result_handler, progress_handler, - ncpus=ncpus, - chunk_size=chunk_size) + controller = ProcessController( + result_handler, progress_handler, ncpus=ncpus, chunk_size=chunk_size + ) if limit is not None: controller.set_limit('coords', lambda x: min(x, limit)) return controller -def output_grain_map(data_location,data_stems,output_stem,vol_spacing,top_down=True,save_type=['npz']): - num_scans=len(data_stems) +def output_grain_map( + data_location, + data_stems, + output_stem, + vol_spacing, + top_down=True, + save_type=['npz'], +): - confidence_maps=[None]*num_scans - grain_maps=[None]*num_scans - Xss=[None]*num_scans - Yss=[None]*num_scans - Zss=[None]*num_scans + num_scans = len(data_stems) - if len(vol_spacing)==1: - vol_shifts=np.arange(0,vol_spacing[0]*num_scans+1e-12,vol_spacing[0]) - else: - vol_shifts=vol_spacing + confidence_maps = [None] * num_scans + grain_maps = [None] * num_scans + Xss = [None] * num_scans + Yss = [None] * num_scans + Zss = [None] * num_scans + if len(vol_spacing) == 1: + vol_shifts = np.arange( + 0, vol_spacing[0] * num_scans + 1e-12, vol_spacing[0] + ) + else: + vol_shifts = vol_spacing for ii in np.arange(num_scans): - print('Loading Volume %d ....'%(ii)) - conf_data=np.load(os.path.join(data_location,data_stems[ii]+'_grain_map_data.npz')) - - confidence_maps[ii]=conf_data['confidence_map'] - grain_maps[ii]=conf_data['grain_map'] - Xss[ii]=conf_data['Xs'] - Yss[ii]=conf_data['Ys'] - Zss[ii]=conf_data['Zs'] + print('Loading Volume %d ....' 
% (ii)) + conf_data = np.load( + os.path.join(data_location, data_stems[ii] + '_grain_map_data.npz') + ) - #assumes all volumes to be the same size - num_layers=grain_maps[0].shape[0] + confidence_maps[ii] = conf_data['confidence_map'] + grain_maps[ii] = conf_data['grain_map'] + Xss[ii] = conf_data['Xs'] + Yss[ii] = conf_data['Ys'] + Zss[ii] = conf_data['Zs'] - total_layers=num_layers*num_scans + # assumes all volumes to be the same size + num_layers = grain_maps[0].shape[0] - num_rows=grain_maps[0].shape[1] - num_cols=grain_maps[0].shape[2] + total_layers = num_layers * num_scans - grain_map_stitched=np.zeros((total_layers,num_rows,num_cols)) - confidence_stitched=np.zeros((total_layers,num_rows,num_cols)) - Xs_stitched=np.zeros((total_layers,num_rows,num_cols)) - Ys_stitched=np.zeros((total_layers,num_rows,num_cols)) - Zs_stitched=np.zeros((total_layers,num_rows,num_cols)) + num_rows = grain_maps[0].shape[1] + num_cols = grain_maps[0].shape[2] + grain_map_stitched = np.zeros((total_layers, num_rows, num_cols)) + confidence_stitched = np.zeros((total_layers, num_rows, num_cols)) + Xs_stitched = np.zeros((total_layers, num_rows, num_cols)) + Ys_stitched = np.zeros((total_layers, num_rows, num_cols)) + Zs_stitched = np.zeros((total_layers, num_rows, num_cols)) for ii in np.arange(num_scans): - if top_down==True: - grain_map_stitched[((ii)*num_layers):((ii)*num_layers+num_layers),:,:]=grain_maps[num_scans-1-ii] - confidence_stitched[((ii)*num_layers):((ii)*num_layers+num_layers),:,:]=confidence_maps[num_scans-1-ii] - Xs_stitched[((ii)*num_layers):((ii)*num_layers+num_layers),:,:]=\ - Xss[num_scans-1-ii] - Zs_stitched[((ii)*num_layers):((ii)*num_layers+num_layers),:,:]=\ - Zss[num_scans-1-ii] - Ys_stitched[((ii)*num_layers):((ii)*num_layers+num_layers),:,:]=Yss[num_scans-1-ii]+vol_shifts[ii] + if top_down == True: + grain_map_stitched[ + ((ii) * num_layers) : ((ii) * num_layers + num_layers), :, : + ] = grain_maps[num_scans - 1 - ii] + confidence_stitched[ + ((ii) * 
num_layers) : ((ii) * num_layers + num_layers), :, : + ] = confidence_maps[num_scans - 1 - ii] + Xs_stitched[ + ((ii) * num_layers) : ((ii) * num_layers + num_layers), :, : + ] = Xss[num_scans - 1 - ii] + Zs_stitched[ + ((ii) * num_layers) : ((ii) * num_layers + num_layers), :, : + ] = Zss[num_scans - 1 - ii] + Ys_stitched[ + ((ii) * num_layers) : ((ii) * num_layers + num_layers), :, : + ] = (Yss[num_scans - 1 - ii] + vol_shifts[ii]) else: - grain_map_stitched[((ii)*num_layers):((ii)*num_layers+num_layers),:,:]=grain_maps[ii] - confidence_stitched[((ii)*num_layers):((ii)*num_layers+num_layers),:,:]=confidence_maps[ii] - Xs_stitched[((ii)*num_layers):((ii)*num_layers+num_layers),:,:]=Xss[ii] - Zs_stitched[((ii)*num_layers):((ii)*num_layers+num_layers),:,:]=Zss[ii] - Ys_stitched[((ii)*num_layers):((ii)*num_layers+num_layers),:,:]=Yss[ii]+vol_shifts[ii] + grain_map_stitched[ + ((ii) * num_layers) : ((ii) * num_layers + num_layers), :, : + ] = grain_maps[ii] + confidence_stitched[ + ((ii) * num_layers) : ((ii) * num_layers + num_layers), :, : + ] = confidence_maps[ii] + Xs_stitched[ + ((ii) * num_layers) : ((ii) * num_layers + num_layers), :, : + ] = Xss[ii] + Zs_stitched[ + ((ii) * num_layers) : ((ii) * num_layers + num_layers), :, : + ] = Zss[ii] + Ys_stitched[ + ((ii) * num_layers) : ((ii) * num_layers + num_layers), :, : + ] = (Yss[ii] + vol_shifts[ii]) for ii in np.arange(len(save_type)): @@ -1660,31 +1930,34 @@ def output_grain_map(data_location,data_stems,output_stem,vol_spacing,top_down=T hf.create_dataset('Ys', data=Ys_stitched) hf.create_dataset('Zs', data=Zs_stitched) - elif save_type[ii]=='npz': + elif save_type[ii] == 'npz': print('Writing NPZ data...') - np.savez(output_stem + '_assembled.npz',\ - grain_map=grain_map_stitched,confidence=confidence_stitched, - Xs=Xs_stitched,Ys=Ys_stitched,Zs=Zs_stitched) - - elif save_type[ii]=='vtk': + np.savez( + output_stem + '_assembled.npz', + grain_map=grain_map_stitched, + confidence=confidence_stitched, + 
Xs=Xs_stitched, + Ys=Ys_stitched, + Zs=Zs_stitched, + ) + elif save_type[ii] == 'vtk': print('Writing VTK data...') # VTK Dump - Xslist=Xs_stitched[:,:,:].ravel() - Yslist=Ys_stitched[:,:,:].ravel() - Zslist=Zs_stitched[:,:,:].ravel() - - grainlist=grain_map_stitched[:,:,:].ravel() - conflist=confidence_stitched[:,:,:].ravel() + Xslist = Xs_stitched[:, :, :].ravel() + Yslist = Ys_stitched[:, :, :].ravel() + Zslist = Zs_stitched[:, :, :].ravel() - num_pts=Xslist.shape[0] - num_cells=(total_layers-1)*(num_rows-1)*(num_cols-1) + grainlist = grain_map_stitched[:, :, :].ravel() + conflist = confidence_stitched[:, :, :].ravel() - f = open(os.path.join(output_stem +'_assembled.vtk'), 'w') + num_pts = Xslist.shape[0] + num_cells = (total_layers - 1) * (num_rows - 1) * (num_cols - 1) + f = open(os.path.join(output_stem + '_assembled.vtk'), 'w') f.write('# vtk DataFile Version 3.0\n') f.write('grainmap Data\n') @@ -1693,28 +1966,29 @@ def output_grain_map(data_location,data_stems,output_stem,vol_spacing,top_down=T f.write('POINTS %d double\n' % (num_pts)) for i in np.arange(num_pts): - f.write('%e %e %e \n' %(Xslist[i],Yslist[i],Zslist[i])) - - scale2=num_cols*num_rows - scale1=num_cols - - f.write('CELLS %d %d\n' % (num_cells, 9*num_cells)) - for k in np.arange(Xs_stitched.shape[0]-1): - for j in np.arange(Xs_stitched.shape[1]-1): - for i in np.arange(Xs_stitched.shape[2]-1): - base=scale2*k+scale1*j+i - p1=base - p2=base+1 - p3=base+1+scale1 - p4=base+scale1 - p5=base+scale2 - p6=base+scale2+1 - p7=base+scale2+scale1+1 - p8=base+scale2+scale1 - - f.write('8 %d %d %d %d %d %d %d %d \n' \ - %(p1,p2,p3,p4,p5,p6,p7,p8)) - + f.write('%e %e %e \n' % (Xslist[i], Yslist[i], Zslist[i])) + + scale2 = num_cols * num_rows + scale1 = num_cols + + f.write('CELLS %d %d\n' % (num_cells, 9 * num_cells)) + for k in np.arange(Xs_stitched.shape[0] - 1): + for j in np.arange(Xs_stitched.shape[1] - 1): + for i in np.arange(Xs_stitched.shape[2] - 1): + base = scale2 * k + scale1 * j + i + p1 = 
base + p2 = base + 1 + p3 = base + 1 + scale1 + p4 = base + scale1 + p5 = base + scale2 + p6 = base + scale2 + 1 + p7 = base + scale2 + scale1 + 1 + p8 = base + scale2 + scale1 + + f.write( + '8 %d %d %d %d %d %d %d %d \n' + % (p1, p2, p3, p4, p5, p6, p7, p8) + ) f.write('CELL_TYPES %d \n' % (num_cells)) for i in np.arange(num_cells): @@ -1724,21 +1998,25 @@ def output_grain_map(data_location,data_stems,output_stem,vol_spacing,top_down=T f.write('SCALARS grain_id int \n') f.write('LOOKUP_TABLE default \n') for i in np.arange(num_pts): - f.write('%d \n' %(grainlist[i])) + f.write('%d \n' % (grainlist[i])) - f.write('FIELD FieldData 1 \n' ) + f.write('FIELD FieldData 1 \n') f.write('confidence 1 %d float \n' % (num_pts)) for i in np.arange(num_pts): - f.write('%e \n' %(conflist[i])) - + f.write('%e \n' % (conflist[i])) f.close() else: print('Not a valid save option, npz, vtk, or hdf5 allowed.') - return grain_map_stitched, confidence_stitched, Xs_stitched, Ys_stitched, \ - Zs_stitched + return ( + grain_map_stitched, + confidence_stitched, + Xs_stitched, + Ys_stitched, + Zs_stitched, + ) # # assume that if os has fork, it will be used by multiprocessing. 
@@ -1759,7 +2037,7 @@ def output_grain_map(data_location,data_stems,output_stem,vol_spacing,top_down=T # args = parse_args() # if len(args.inst_profile) > 0: -# from hexrd.utils import profiler +# from hexrd.core.utils import profiler # logging.debug("Instrumenting functions") # profiler.instrument_all(args.inst_profile) diff --git a/hexrd/hedm/grainmap/tomoutil.py b/hexrd/hedm/grainmap/tomoutil.py new file mode 100644 index 000000000..93c83a5ed --- /dev/null +++ b/hexrd/hedm/grainmap/tomoutil.py @@ -0,0 +1,263 @@ +# %% + +import numpy as np + +# import scipy as sp + +import scipy.ndimage as img + +try: + import imageio as imgio +except ImportError: + from skimage import io as imgio + + +import skimage.transform as xformimg + + +# %% + + +def gen_bright_field( + tbf_data_folder, + tbf_img_start, + tbf_num_imgs, + nrows, + ncols, + stem='nf_', + num_digits=5, + ext='.tif', +): + + tbf_img_nums = np.arange(tbf_img_start, tbf_img_start + tbf_num_imgs, 1) + + tbf_stack = np.zeros([tbf_num_imgs, nrows, ncols]) + + print('Loading data for median bright field...') + for ii in np.arange(tbf_num_imgs): + print('Image #: ' + str(ii)) + tbf_stack[ii, :, :] = imgio.imread( + tbf_data_folder + + '%s' % (stem) + + str(tbf_img_nums[ii]).zfill(num_digits) + + ext + ) + # image_stack[ii,:,:]=np.flipud(tmp_img>threshold) + print('making median...') + + tbf = np.median(tbf_stack, axis=0) + + return tbf + + +def gen_median_image( + data_folder, + img_start, + num_imgs, + nrows, + ncols, + stem='nf_', + num_digits=5, + ext='.tif', +): + + img_nums = np.arange(img_start, img_start + num_imgs, 1) + + stack = np.zeros([num_imgs, nrows, ncols]) + + print('Loading data for median image...') + for ii in np.arange(num_imgs): + print('Image #: ' + str(ii)) + stack[ii, :, :] = imgio.imread( + data_folder + + '%s' % (stem) + + str(img_nums[ii]).zfill(num_digits) + + ext + ) + # image_stack[ii,:,:]=np.flipud(tmp_img>threshold) + print('making median...') + + med = np.median(stack, axis=0) + + 
return med + + +def gen_attenuation_rads( + tomo_data_folder, + tbf, + tomo_img_start, + tomo_num_imgs, + nrows, + ncols, + stem='nf_', + num_digits=5, + ext='.tif', + tdf=None, +): + + # Reconstructs a single tompgrahy layer to find the extent of the sample + tomo_img_nums = np.arange( + tomo_img_start, tomo_img_start + tomo_num_imgs, 1 + ) + + # if tdf==None: + if len(tdf) == None: + tdf = np.zeros([nrows, ncols]) + + rad_stack = np.zeros([tomo_num_imgs, nrows, ncols]) + + print('Loading and Calculating Absorption Radiographs ...') + for ii in np.arange(tomo_num_imgs): + print('Image #: ' + str(ii)) + tmp_img = imgio.imread( + tomo_data_folder + + '%s' % (stem) + + str(tomo_img_nums[ii]).zfill(num_digits) + + ext + ) + + rad_stack[ii, :, :] = -np.log( + (tmp_img.astype(float) - tdf) / (tbf.astype(float) - tdf) + ) + + return rad_stack + + +def tomo_reconstruct_layer( + rad_stack, + cross_sectional_dim, + layer_row=1024, + start_tomo_ang=0.0, + end_tomo_ang=360.0, + tomo_num_imgs=360, + center=0.0, + pixel_size=0.00148, +): + sinogram = np.squeeze(rad_stack[:, layer_row, :]) + + rotation_axis_pos = -int(np.round(center / pixel_size)) + # rotation_axis_pos=13 + + theta = np.linspace( + start_tomo_ang, end_tomo_ang, tomo_num_imgs, endpoint=False + ) + + max_rad = int( + cross_sectional_dim / pixel_size / 2.0 * 1.1 + ) # 10% slack to avoid edge effects + + if rotation_axis_pos >= 0: + sinogram_cut = sinogram[:, 2 * rotation_axis_pos :] + else: + sinogram_cut = sinogram[:, : (2 * rotation_axis_pos)] + + dist_from_edge = ( + np.round(sinogram_cut.shape[1] / 2.0).astype(int) - max_rad + ) + + sinogram_cut = sinogram_cut[:, dist_from_edge:-dist_from_edge] + + print('Inverting Sinogram....') + reconstruction_fbp = xformimg.iradon( + sinogram_cut.T, theta=theta, circle=True + ) + + reconstruction_fbp = np.rot90( + reconstruction_fbp, 3 + ) # Rotation to get the result consistent with hexrd, needs to be checked + + return reconstruction_fbp + + +def 
threshold_and_clean_tomo_layer( + reconstruction_fbp, + recon_thresh, + noise_obj_size, + min_hole_size, + edge_cleaning_iter=None, + erosion_iter=1, + dilation_iter=4, +): + binary_recon = reconstruction_fbp > recon_thresh + + # hard coded cleaning, grinding sausage... + binary_recon = img.morphology.binary_dilation( + binary_recon, iterations=dilation_iter + ) + binary_recon = img.morphology.binary_erosion( + binary_recon, iterations=erosion_iter + ) + + labeled_img, num_labels = img.label(binary_recon) + + print('Cleaning...') + print('Removing Noise...') + for ii in np.arange(1, num_labels): + obj1 = np.where(labeled_img == ii) + if obj1[0].shape[0] < noise_obj_size: + binary_recon[obj1[0], obj1[1]] = 0 + + labeled_img, num_labels = img.label(binary_recon != 1) + + print('Closing Holes...') + for ii in np.arange(1, num_labels): + + obj1 = np.where(labeled_img == ii) + if obj1[0].shape[0] >= 1 and obj1[0].shape[0] < min_hole_size: + binary_recon[obj1[0], obj1[1]] = 1 + + if edge_cleaning_iter is not None: + binary_recon = img.morphology.binary_erosion( + binary_recon, iterations=edge_cleaning_iter + ) + binary_recon = img.morphology.binary_dilation( + binary_recon, iterations=edge_cleaning_iter + ) + + return binary_recon + + +def crop_and_rebin_tomo_layer( + binary_recon, + recon_thresh, + voxel_spacing, + pixel_size, + cross_sectional_dim, + circular_mask_rad=None, +): + scaling = voxel_spacing / pixel_size + + rows = binary_recon.shape[0] + cols = binary_recon.shape[1] + + new_rows = np.round(rows / scaling).astype(int) + new_cols = np.round(cols / scaling).astype(int) + + tmp_resize = xformimg.resize( + binary_recon, [new_rows, new_cols], preserve_range=True + ) + # tmp_resize_norm=tmp_resize/255 + tmp_resize_norm_force = np.floor(tmp_resize) + + binary_recon_bin = tmp_resize_norm_force.astype(bool) + + cut_edge = int( + np.round( + (binary_recon_bin.shape[0] * voxel_spacing - cross_sectional_dim) + / 2.0 + / voxel_spacing + ) + ) + + binary_recon_bin = 
binary_recon_bin[cut_edge:-cut_edge, cut_edge:-cut_edge] + + if circular_mask_rad is not None: + center = binary_recon_bin.shape[0] / 2 + radius = np.round(circular_mask_rad / voxel_spacing) + nx, ny = binary_recon_bin.shape + y, x = np.ogrid[-center : nx - center, -center : ny - center] + mask = x * x + y * y > radius * radius + + binary_recon_bin[mask] = 0 + + return binary_recon_bin diff --git a/hexrd/hedm/grainmap/vtkutil.py b/hexrd/hedm/grainmap/vtkutil.py new file mode 100644 index 000000000..50c6ba426 --- /dev/null +++ b/hexrd/hedm/grainmap/vtkutil.py @@ -0,0 +1,142 @@ +import numpy as np + +import os + + +# %% + + +def output_grain_map_vtk( + data_location, data_stems, output_stem, vol_spacing, top_down=True +): + + num_scans = len(data_stems) + + confidence_maps = [None] * num_scans + grain_maps = [None] * num_scans + Xss = [None] * num_scans + Yss = [None] * num_scans + Zss = [None] * num_scans + + for ii in np.arange(num_scans): + print('Loading Volume %d ....' % (ii)) + conf_data = np.load( + os.path.join(data_location, data_stems[ii] + '_grain_map_data.npz') + ) + + confidence_maps[ii] = conf_data['confidence_map'] + grain_maps[ii] = conf_data['grain_map'] + Xss[ii] = conf_data['Xs'] + Yss[ii] = conf_data['Ys'] + Zss[ii] = conf_data['Zs'] + + # assumes all volumes to be the same size + num_layers = grain_maps[0].shape[0] + + total_layers = num_layers * num_scans + + num_rows = grain_maps[0].shape[1] + num_cols = grain_maps[0].shape[2] + + grain_map_stitched = np.zeros((total_layers, num_rows, num_cols)) + confidence_stitched = np.zeros((total_layers, num_rows, num_cols)) + Xs_stitched = np.zeros((total_layers, num_rows, num_cols)) + Ys_stitched = np.zeros((total_layers, num_rows, num_cols)) + Zs_stitched = np.zeros((total_layers, num_rows, num_cols)) + + for i in np.arange(num_scans): + if top_down == True: + grain_map_stitched[ + ((i) * num_layers) : ((i) * num_layers + num_layers), :, : + ] = grain_maps[num_scans - 1 - i] + confidence_stitched[ + 
((i) * num_layers) : ((i) * num_layers + num_layers), :, : + ] = confidence_maps[num_scans - 1 - i] + Xs_stitched[ + ((i) * num_layers) : ((i) * num_layers + num_layers), :, : + ] = Xss[num_scans - 1 - i] + Zs_stitched[ + ((i) * num_layers) : ((i) * num_layers + num_layers), :, : + ] = Zss[num_scans - 1 - i] + Ys_stitched[ + ((i) * num_layers) : ((i) * num_layers + num_layers), :, : + ] = (Yss[num_scans - 1 - i] + vol_spacing * i) + else: + + grain_map_stitched[ + ((i) * num_layers) : ((i) * num_layers + num_layers), :, : + ] = grain_maps[i] + confidence_stitched[ + ((i) * num_layers) : ((i) * num_layers + num_layers), :, : + ] = confidence_maps[i] + Xs_stitched[ + ((i) * num_layers) : ((i) * num_layers + num_layers), :, : + ] = Xss[i] + Zs_stitched[ + ((i) * num_layers) : ((i) * num_layers + num_layers), :, : + ] = Zss[i] + Ys_stitched[ + ((i) * num_layers) : ((i) * num_layers + num_layers), :, : + ] = (Yss[i] + vol_spacing * i) + + print('Writing VTK data...') + # VTK Dump + Xslist = Xs_stitched[:, :, :].ravel() + Yslist = Ys_stitched[:, :, :].ravel() + Zslist = Zs_stitched[:, :, :].ravel() + + grainlist = grain_map_stitched[:, :, :].ravel() + conflist = confidence_stitched[:, :, :].ravel() + + num_pts = Xslist.shape[0] + num_cells = (total_layers - 1) * (num_rows - 1) * (num_cols - 1) + + f = open(os.path.join(data_location, output_stem + '_stitch.vtk'), 'w') + + f.write('# vtk DataFile Version 3.0\n') + f.write('grainmap Data\n') + f.write('ASCII\n') + f.write('DATASET UNSTRUCTURED_GRID\n') + f.write('POINTS %d double\n' % (num_pts)) + + for i in np.arange(num_pts): + f.write('%e %e %e \n' % (Xslist[i], Yslist[i], Zslist[i])) + + scale2 = num_cols * num_rows + scale1 = num_cols + + f.write('CELLS %d %d\n' % (num_cells, 9 * num_cells)) + for k in np.arange(Xs_stitched.shape[0] - 1): + for j in np.arange(Xs_stitched.shape[1] - 1): + for i in np.arange(Xs_stitched.shape[2] - 1): + base = scale2 * k + scale1 * j + i + p1 = base + p2 = base + 1 + p3 = base + 1 + 
scale1 + p4 = base + scale1 + p5 = base + scale2 + p6 = base + scale2 + 1 + p7 = base + scale2 + scale1 + 1 + p8 = base + scale2 + scale1 + + f.write( + '8 %d %d %d %d %d %d %d %d \n' + % (p1, p2, p3, p4, p5, p6, p7, p8) + ) + + f.write('CELL_TYPES %d \n' % (num_cells)) + for i in np.arange(num_cells): + f.write('12 \n') + + f.write('POINT_DATA %d \n' % (num_pts)) + f.write('SCALARS grain_id int \n') + f.write('LOOKUP_TABLE default \n') + for i in np.arange(num_pts): + f.write('%d \n' % (grainlist[i])) + + f.write('FIELD FieldData 1 \n') + f.write('confidence 1 %d float \n' % (num_pts)) + for i in np.arange(num_pts): + f.write('%e \n' % (conflist[i])) + + f.close() diff --git a/hexrd/indexer.py b/hexrd/hedm/indexer.py similarity index 99% rename from hexrd/indexer.py rename to hexrd/hedm/indexer.py index 8e388e1e9..1c9419a3f 100644 --- a/hexrd/indexer.py +++ b/hexrd/hedm/indexer.py @@ -36,9 +36,9 @@ import timeit -from hexrd import constants -from hexrd import rotations -from hexrd.transforms import xfcapi +from hexrd.core import constants +from hexrd.core import rotations +from hexrd.core.transforms import xfcapi # ============================================================================= @@ -80,7 +80,7 @@ def paintGrid( quats : (4, N) ndarray hstacked array of trial orientations in the form of unit quaternions. etaOmeMaps : object - an spherical map object of type `hexrd.instrument.GenerateEtaOmeMaps`. + an spherical map object of type `hexrd.hedm.instrument.GenerateEtaOmeMaps`. threshold : float, optional threshold value on the etaOmeMaps. 
bMat : (3, 3) ndarray, optional diff --git a/hexrd/hedm/instrument/__init__.py b/hexrd/hedm/instrument/__init__.py new file mode 100644 index 000000000..024003040 --- /dev/null +++ b/hexrd/hedm/instrument/__init__.py @@ -0,0 +1,13 @@ +from hexrd.core.instrument.hedm_instrument import ( + calc_angles_from_beam_vec, + calc_beam_vec, + centers_of_edge_vec, + GenerateEtaOmeMaps, + GrainDataWriter, + HEDMInstrument, + max_tth, + switch_xray_source, + unwrap_dict_to_h5, + unwrap_h5_to_dict, +) +from hexrd.core.instrument.detector import Detector diff --git a/hexrd/resources/__init__.py b/hexrd/hedm/ipfcolor/__init__.py similarity index 100% rename from hexrd/resources/__init__.py rename to hexrd/hedm/ipfcolor/__init__.py diff --git a/hexrd/ipfcolor/colorspace.py b/hexrd/hedm/ipfcolor/colorspace.py similarity index 79% rename from hexrd/ipfcolor/colorspace.py rename to hexrd/hedm/ipfcolor/colorspace.py index 615d04b03..b48a165c6 100644 --- a/hexrd/ipfcolor/colorspace.py +++ b/hexrd/hedm/ipfcolor/colorspace.py @@ -26,7 +26,7 @@ # Boston, MA 02111-1307 USA or visit . # ============================================================================= -from hexrd import constants +from hexrd.core import constants import numpy as np eps = constants.sqrt_epsf @@ -46,13 +46,15 @@ def hsl2rgb(hsl): different components ''' hsl = np.atleast_2d(hsl) - hsl[np.abs(hsl) < eps] = 0. - hsl[np.abs(hsl - np.ones(hsl.shape)) < eps] = 1. + hsl[np.abs(hsl) < eps] = 0.0 + hsl[np.abs(hsl - np.ones(hsl.shape)) < eps] = 1.0 - if( (hsl.min() < 0.) or (hsl.max() > 1.)): - raise RuntimeError("value of not in range [0,1]. normalizing before conversion") + if (hsl.min() < 0.0) or (hsl.max() > 1.0): + raise RuntimeError( + "value of not in range [0,1]. 
normalizing before conversion" + ) - if(hsl.ndim != 2): + if hsl.ndim != 2: raise RuntimeError("hsl_rgb: shape of hsl array is invalid.") rgb = np.zeros(hsl.shape) @@ -63,19 +65,19 @@ def hsl2rgb(hsl): S = hsl[:, 1] L = hsl[:, 2] - C = (1.0 - np.abs(2.*L - 1.)) * S - X = (1.0 - np.abs(np.mod(6*H, 2) - 1.0)) * C - m = L - C/2. + C = (1.0 - np.abs(2.0 * L - 1.0)) * S + X = (1.0 - np.abs(np.mod(6 * H, 2) - 1.0)) * C + m = L - C / 2.0 - case = np.floor(6.*H).astype(np.int32) + case = np.floor(6.0 * H).astype(np.int32) ''' depending on the range of H, the rgb definition changes. see https://www.rapidtables.com/convert/color/hsl-to-rgb.html for the detailed formula ''' - Cp = np.atleast_2d(C+m).T - Xp = np.atleast_2d(X+m).T + Cp = np.atleast_2d(C + m).T + Xp = np.atleast_2d(X + m).T Zp = np.atleast_2d(m).T mask = np.logical_or((case == 0), (case == 6)) @@ -99,8 +101,8 @@ def hsl2rgb(hsl): ''' catch all cases where rgb values are out of [0,1] bounds ''' - rgb[rgb < 0.] = 0. - rgb[rgb > 1.] = 1. + rgb[rgb < 0.0] = 0.0 + rgb[rgb > 1.0] = 1.0 return rgb @@ -117,7 +119,7 @@ def rgb2hsl(rgb): different components ''' rgb = np.atleast_2d(rgb) - if(rgb.ndim != 2): + if rgb.ndim != 2: raise RuntimeError("hsl_rgb: shape of hsl array is invalid.") hsl = np.zeros(rgb.shape) @@ -139,27 +141,31 @@ def rgb2hsl(rgb): rmask = rgb[:, 0] == Cmax rmask = np.logical_and(rmask, np.logical_not(zmask)) - hsl[rmask, 0] = np.mod( - (rgb[rmask, 1] - rgb[rmask, 2])/delta[rmask], 6) / 6. + hsl[rmask, 0] = ( + np.mod((rgb[rmask, 1] - rgb[rmask, 2]) / delta[rmask], 6) / 6.0 + ) gmask = rgb[:, 1] == Cmax gmask = np.logical_and(gmask, np.logical_not(zmask)) - hsl[gmask, 0] = np.mod( - (rgb[gmask, 2] - rgb[gmask, 0])/delta[gmask] + 2., 6) / 6. + hsl[gmask, 0] = ( + np.mod((rgb[gmask, 2] - rgb[gmask, 0]) / delta[gmask] + 2.0, 6) / 6.0 + ) bmask = rgb[:, 2] == Cmax bmask = np.logical_and(bmask, np.logical_not(zmask)) - hsl[bmask, 0] = np.mod( - (rgb[bmask, 0] - rgb[bmask, 1])/delta[bmask] + 4., 6) / 6. 
+ hsl[bmask, 0] = ( + np.mod((rgb[bmask, 0] - rgb[bmask, 1]) / delta[bmask] + 4.0, 6) / 6.0 + ) - hsl[np.logical_not(zmask), 1] = delta[np.logical_not( - zmask)] / (1. - np.abs(2 * L[np.logical_not(zmask)] - 1.)) + hsl[np.logical_not(zmask), 1] = delta[np.logical_not(zmask)] / ( + 1.0 - np.abs(2 * L[np.logical_not(zmask)] - 1.0) + ) - hsl[:,2] = L + hsl[:, 2] = L ''' catch cases where hsl is out of [0,1] bounds ''' - hsl[hsl < 0.] = 0. - hsl[hsl > 1.] = 1. + hsl[hsl < 0.0] = 0.0 + hsl[hsl > 1.0] = 1.0 return hsl diff --git a/hexrd/ipfcolor/sphere_sector.py b/hexrd/hedm/ipfcolor/sphere_sector.py similarity index 83% rename from hexrd/ipfcolor/sphere_sector.py rename to hexrd/hedm/ipfcolor/sphere_sector.py index 8c3366379..c1ab718e8 100644 --- a/hexrd/ipfcolor/sphere_sector.py +++ b/hexrd/hedm/ipfcolor/sphere_sector.py @@ -25,9 +25,9 @@ # the Free Software Foundation, Inc., 59 Temple Place, Suite 330, # Boston, MA 02111-1307 USA or visit . # ============================================================================= -from hexrd import constants +from hexrd.core import constants import numpy as np -from hexrd.ipfcolor import colorspace +from hexrd.hedm.ipfcolor import colorspace eps = constants.sqrt_epsf @@ -62,6 +62,7 @@ there are no triangles for the triclininc cases and needs to be handles differently ''' +# fmt: off pg2vertex = { 'c1': [3, np.array([[0., 0., 1.], [1., 0., 0.], @@ -281,6 +282,7 @@ np.atleast_2d(np.array([0, 1, 2])).T, 'upper'] } +# fmt: on class sector: @@ -290,7 +292,7 @@ class sector: @DETAIL this class is used to store spherical patch for a given point group. the class also has methods to compute the color of a direction by computing the hue, saturation and lightness values in [0,1]. these - values can be converted to rgb for display with the well known + values can be converted to rgb for display with the well known conversion formula. 
@@ -306,7 +308,7 @@ def __init__(self, pgsym, lauesym, supergroupsym, supergrouplauesym): 11/12/2020 SS 1.1 added lauesym as additional input parameter 11/23/2020 SS 1.2 added supergroupsym as additional parameter - @detail: this routine initializes the data needed for reducing a + @detail: this routine initializes the data needed for reducing a direction to the stereographic fundamental zone (standard stereographic triangle) for the pointgroup/lauegroup symmetry of the crystal. @@ -341,37 +343,37 @@ def __init__(self, pgsym, lauesym, supergroupsym, supergrouplauesym): self.connectivity['superlaue'] = data[2] self.hemisphere['superlaue'] = data[3] - if(self.ntriangle['pg'] != 0): + if self.ntriangle['pg'] != 0: # compute the barycenter or the centroid of point group b = np.mean(self.vertices['pg'], axis=1) - b = b/np.linalg.norm(b) + b = b / np.linalg.norm(b) self.barycenter['pg'] = b else: - self.barycenter['pg'] = np.array([0., 0., 1.]) + self.barycenter['pg'] = np.array([0.0, 0.0, 1.0]) - if(self.ntriangle['laue'] != 0): + if self.ntriangle['laue'] != 0: # compute the barycenter or the centroid of the laue group triangle b = np.mean(self.vertices['laue'], axis=1) - b = b/np.linalg.norm(b) + b = b / np.linalg.norm(b) self.barycenter['laue'] = b else: - self.barycenter['laue'] = np.array([0., 0., 1.]) + self.barycenter['laue'] = np.array([0.0, 0.0, 1.0]) - if(self.ntriangle['super'] != 0): + if self.ntriangle['super'] != 0: # compute the barycenter or the centroid of the supergroup group triangle b = np.mean(self.vertices['super'], axis=1) - b = b/np.linalg.norm(b) + b = b / np.linalg.norm(b) self.barycenter['super'] = b else: - self.barycenter['super'] = np.array([0., 0., 1.]) + self.barycenter['super'] = np.array([0.0, 0.0, 1.0]) - if(self.ntriangle['superlaue'] != 0): + if self.ntriangle['superlaue'] != 0: # compute the barycenter or the centroid of the supergroup group triangle b = np.mean(self.vertices['superlaue'], axis=1) - b = b/np.linalg.norm(b) + b = b / 
np.linalg.norm(b) self.barycenter['superlaue'] = b else: - self.barycenter['superlaue'] = np.array([0., 0., 1.]) + self.barycenter['superlaue'] = np.array([0.0, 0.0, 1.0]) def check_norm(self, dir3): ''' @@ -384,43 +386,49 @@ def check_norm(self, dir3): n = np.linalg.norm(dir3, axis=1) mask = n > eps n = n[mask] - dir3[mask, :] = dir3[mask, :]/np.tile(n, [3, 1]).T + dir3[mask, :] = dir3[mask, :] / np.tile(n, [3, 1]).T def check_hemisphere(self): zcoord = np.array([self.vx[2], self.vy[2], self.vz[2]]) - if(np.logical_or(np.all(zcoord >= 0.), np.all(zcoord <= 0.))): + if np.logical_or(np.all(zcoord >= 0.0), np.all(zcoord <= 0.0)): pass else: - raise RuntimeError("sphere_sector: the vertices of the stereographic \ - triangle are not in the same hemisphere") + raise RuntimeError( + "sphere_sector: the vertices of the stereographic \ + triangle are not in the same hemisphere" + ) def inside_sphericalpatch(self, vertex, dir3): ''' - @AUTHOR Saransh Singh, Lawrence Livermore National Lab, saransh1@llnl.gov - @DATE 12/09/2020 SS 1.0 original - @PARAM vertex vertices of the spherical triangle - dir3 normalized direction vectors - switch which group to check. acceptable arguments are 'pg', 'laue', 'supergroup' - and 'supergroup_laue' - @DETAIL check if direction is inside a spherical patch - the logic used as follows: - if determinant of [x A B], [x B C] and [x C A] are - all same sign, then the sphere is inside the traingle - formed by A, B and C - returns a mask with inside as True and outside as False + @AUTHOR Saransh Singh, Lawrence Livermore National Lab, saransh1@llnl.gov + @DATE 12/09/2020 SS 1.0 original + @PARAM vertex vertices of the spherical triangle + dir3 normalized direction vectors + switch which group to check. 
acceptable arguments are 'pg', 'laue', 'supergroup' + and 'supergroup_laue' + @DETAIL check if direction is inside a spherical patch + the logic used as follows: + if determinant of [x A B], [x B C] and [x C A] are + all same sign, then the sphere is inside the traingle + formed by A, B and C + returns a mask with inside as True and outside as False ''' nn = vertex.shape[1] mask = [] - d = np.zeros([nn, ]) + d = np.zeros( + [ + nn, + ] + ) for x in dir3: x2 = np.atleast_2d(x).T for ii in range(nn): A = np.atleast_2d(vertex[:, np.mod(ii, nn)]).T - B = np.atleast_2d(vertex[:, np.mod(ii+1, nn)]).T + B = np.atleast_2d(vertex[:, np.mod(ii + 1, nn)]).T d[ii] = np.linalg.det(np.hstack((x2, A, B))) ''' @@ -428,11 +436,11 @@ def inside_sphericalpatch(self, vertex, dir3): determinant can be very small positive or negative number ''' - if(np.abs(d[ii]) < eps): - d[ii] = 0. + if np.abs(d[ii]) < eps: + d[ii] = 0.0 ss = np.unique(np.sign(d)) - if(np.all(ss >= 0.)): + if np.all(ss >= 0.0): mask.append(True) else: mask.append(False) @@ -455,7 +463,7 @@ def fillet_region(self, dir3, switch): returns 1 if its barycenter, vertex 1 and vertex 2 returns 2 if its barycenter, vertex 2 and vertex 3 - it is implicitly assumed that the point lies inside the + it is implicitly assumed that the point lies inside the spherical triangle. 
behavior is unknown if it is not the case @@ -463,40 +471,45 @@ def fillet_region(self, dir3, switch): ''' vertex = np.copy(self.vertices[switch]) - fregion = -np.ones([dir3.shape[0], ]).astype(np.int32) + fregion = -np.ones( + [ + dir3.shape[0], + ] + ).astype(np.int32) bar_cen = self.barycenter[switch] # if barycenter matches one of the vertices, then remove that vertex - mask = np.all(bar_cen == vertex.T,axis=1) - vertex = vertex[:,~mask] + mask = np.all(bar_cen == vertex.T, axis=1) + vertex = vertex[:, ~mask] nn = vertex.shape[1] f = np.zeros([nn, 3, 3]) for i in range(nn): idx1 = np.mod(i, nn) - idx2 = np.mod(i+1, nn) + idx2 = np.mod(i + 1, nn) A = np.atleast_2d(vertex[:, idx1]).T B = np.atleast_2d(vertex[:, idx2]).T f[i, :, :] = np.hstack((np.atleast_2d(bar_cen).T, A, B)) for i in range(nn): - inside = np.logical_and(self.inside_sphericalpatch( - np.squeeze(f[i, :, :]), dir3), - fregion == -1) + inside = np.logical_and( + self.inside_sphericalpatch(np.squeeze(f[i, :, :]), dir3), + fregion == -1, + ) fregion[inside] = i return fregion def point_on_boundary(self, dir3, switch): ''' - @AUTHOR Saransh Singh, Lawrence Livermore National Lab, saransh1@llnl.gov - @DATE 12/09/2020 SS 1.0 original - @PARAM dir3 direction in fundamental sector. size is nx3 - switch color using pg or laue group - @DETAIL this function figures out the equivalent point on the boundary - given that the point is inside the spherical triangle + @AUTHOR Saransh Singh, Lawrence Livermore National Lab, saransh1@llnl.gov + @DATE 12/09/2020 SS 1.0 original + @PARAM dir3 direction in fundamental sector. 
size is nx3 + switch color using pg or laue group + @DETAIL this function figures out the equivalent point on the boundary + given that the point is inside the spherical triangle ''' vertex = self.vertices[switch] fregion = self.fillet_region(dir3, switch) @@ -509,18 +522,18 @@ def point_on_boundary(self, dir3, switch): d = dir3[i, :] A = vertex[:, np.mod(f, nn)] - B = vertex[:, np.mod(f+1, nn)] + B = vertex[:, np.mod(f + 1, nn)] nhat = np.cross(B, A) - nhat = nhat/np.linalg.norm(nhat) + nhat = nhat / np.linalg.norm(nhat) lam = np.dot(nhat, d) - deldir = lam*nhat + deldir = lam * nhat dp = d - deldir ndp = np.linalg.norm(dp) - if(ndp > 0.): - dp = dp/ndp + if ndp > 0.0: + dp = dp / ndp else: dp = d @@ -535,16 +548,20 @@ def calculate_rho(self, dir3, switch): @PARAM dir3 direction in fundamental sector. size is nx3 switch color using pg or laue group @DETAIL this function is used to calculate the azimuthal angle - of a bunch of directions. it is assumed all directions + of a bunch of directions. 
it is assumed all directions are indide the SST ''' vertex = self.vertices[switch] bar_cen = self.barycenter[switch] - rho = np.zeros([dir3.shape[0], ]) + rho = np.zeros( + [ + dir3.shape[0], + ] + ) # handle triclinic and monoclinic cases a little differently - if(np.all(bar_cen == np.array([0., 0., 1.]))): - rho = np.arctan2(dir3[:,1], dir3[:,0]) + np.pi + if np.all(bar_cen == np.array([0.0, 0.0, 1.0])): + rho = np.arctan2(dir3[:, 1], dir3[:, 0]) + np.pi else: dir3_b, fregion = self.point_on_boundary(dir3, switch) @@ -555,25 +572,27 @@ def calculate_rho(self, dir3, switch): d = dir3_b[i, :] A = vertex[:, np.mod(f, nn)] - B = vertex[:, np.mod(f+1, nn)] + B = vertex[:, np.mod(f + 1, nn)] # angle between A and B omega = np.dot(A, B) - if(np.abs(omega) > 1.): + if np.abs(omega) > 1.0: omega = np.sign(omega) # angle between point and A omegap = np.dot(A, d) - if(np.abs(omegap) > 1.): + if np.abs(omegap) > 1.0: omegap = np.sign(omega) omega = np.arccos(omega) omegap = np.arccos(omegap) - if(omegap != 0.): - rho[i] = 2*np.pi*omegap/omega/nn + f*2.*np.pi/nn + if omegap != 0.0: + rho[i] = ( + 2 * np.pi * omegap / omega / nn + f * 2.0 * np.pi / nn + ) else: - rho[i] = f*2.*np.pi/nn + rho[i] = f * 2.0 * np.pi / nn return rho @@ -583,26 +602,30 @@ def calculate_theta(self, dir3, switch): @DATE 12/09/2020 SS 1.0 original @PARAM dir3 direction in fundamental sector. size is nx3 switch color using pg or laue group - @DETAIL this function is used to calculate the polar angle + @DETAIL this function is used to calculate the polar angle of direction vectors. 
it is assumed that the direction vector lies inside the SST ''' vertex = self.vertices[switch] dir3_b, fregion = self.point_on_boundary(dir3, switch) - theta = np.zeros([dir3.shape[0], ]) + theta = np.zeros( + [ + dir3.shape[0], + ] + ) bar_cen = self.barycenter[switch] # handle triclinic and monoclinic cases a little differently - if(np.all(bar_cen == np.array([0., 0., 1.]))): - dp = np.dot(np.array([0., 0., 1.]), dir3.T) + if np.all(bar_cen == np.array([0.0, 0.0, 1.0])): + dp = np.dot(np.array([0.0, 0.0, 1.0]), dir3.T) # catch some cases where dot product is 1+/-epsilon - mask = np.abs(dp) > 1. + mask = np.abs(dp) > 1.0 dp[mask] = np.sign(dp[mask]) theta = np.arccos(dp) else: - # first calculate the angle the point makes with the barycenter + # first calculate the angle the point makes with the barycenter omega = np.dot(bar_cen, dir3.T) mask = np.abs(omega) > 1.0 omega[mask] = np.sign(omega[mask]) @@ -615,25 +638,28 @@ def calculate_theta(self, dir3, switch): omega = np.arccos(omega) omegap = np.arccos(omegap) - zmask = omegap == 0. 
+ zmask = omegap == 0.0 - theta[~zmask] = np.pi*omega[~zmask]/omegap[~zmask]/2.0 + theta[~zmask] = np.pi * omega[~zmask] / omegap[~zmask] / 2.0 theta[zmask] = 0.0 return theta def hue_speed(self, rho): ''' - @AUTHOR Saransh Singh, Lawrence Livermore National Lab, saransh1@llnl.gov - @DATE 12/09/2020 SS 1.0 original - @PARAM rho azimuthal angle - @DETAIL calculate the hue speed for a vector of azimuthal angles - this is utilized in increasing the area of the red, blue and - green regions + @AUTHOR Saransh Singh, Lawrence Livermore National Lab, saransh1@llnl.gov + @DATE 12/09/2020 SS 1.0 original + @PARAM rho azimuthal angle + @DETAIL calculate the hue speed for a vector of azimuthal angles + this is utilized in increasing the area of the red, blue and + green regions ''' rho = rho - np.pi - v = 0.5 + np.exp(-(4./7.)*rho**2) + \ - np.exp(-(4./7.)*(rho - 2.*np.pi/3.)**2) + \ - np.exp(-(4./7.)*(rho + 2.*np.pi/3.)**2) + v = ( + 0.5 + + np.exp(-(4.0 / 7.0) * rho**2) + + np.exp(-(4.0 / 7.0) * (rho - 2.0 * np.pi / 3.0) ** 2) + + np.exp(-(4.0 / 7.0) * (rho + 2.0 * np.pi / 3.0) ** 2) + ) return v @@ -660,16 +686,16 @@ def calc_hue(self, dir3, switch): ''' rho = self.calculate_rho(dir3, switch) - r = np.linspace(0., 2*np.pi, 1000) + r = np.linspace(0.0, 2 * np.pi, 1000) v = self.hue_speed(r) cons = np.trapz(v, r) h = np.zeros(rho.shape) for i in range(rho.shape[0]): - r = np.linspace(0., rho[i], 1000) + r = np.linspace(0.0, rho[i], 1000) v = self.hue_speed(r) - h[i] = np.trapz(v, r)/cons + h[i] = np.trapz(v, r) / cons return h @@ -689,7 +715,7 @@ def calc_saturation(self, l): @DETAIL calculate saturation. this is always set to 1. ''' - s = 1. - 2.*0.25*np.abs(l - 0.5) + s = 1.0 - 2.0 * 0.25 * np.abs(l - 0.5) return s def calc_lightness(self, dir3, mask, switch): @@ -714,10 +740,10 @@ def calc_lightness(self, dir3, mask, switch): ''' theta = np.pi - self.calculate_theta(dir3, switch) - f1 = theta/np.pi - f2 = np.sin(theta/2.)**2 - l = 0.35*f1 + 0.65*f2 - l[~mask] = 1. 
- l[~mask] + f1 = theta / np.pi + f2 = np.sin(theta / 2.0) ** 2 + l = 0.35 * f1 + 0.65 * f2 + l[~mask] = 1.0 - l[~mask] return l @@ -729,7 +755,7 @@ def get_color(self, dir3, mask, switch): 11/23/2020 SS 1.2 added mask argument which tell the directions for which the supergroup reductions dont match the point or laue group reductions. mask has size dir3.shape[0] - + @PARAM dir3 direction in fundamental sector. behavior is undefined if mask True if symmetry reduction of dir3 using point group does not match the super group and False otherwise diff --git a/hexrd/resources/detector_templates/__init__.py b/hexrd/hedm/preprocess/__init__.py similarity index 100% rename from hexrd/resources/detector_templates/__init__.py rename to hexrd/hedm/preprocess/__init__.py diff --git a/hexrd/preprocess/argument_classes_factory.py b/hexrd/hedm/preprocess/argument_classes_factory.py similarity index 90% rename from hexrd/preprocess/argument_classes_factory.py rename to hexrd/hedm/preprocess/argument_classes_factory.py index a62dd583a..727d29ae8 100644 --- a/hexrd/preprocess/argument_classes_factory.py +++ b/hexrd/hedm/preprocess/argument_classes_factory.py @@ -1,5 +1,7 @@ -import hexrd.preprocess.profiles as profiles -from typing import Type +from typing import Type, TYPE_CHECKING + +if TYPE_CHECKING: + import hexrd.hedm.preprocess.profiles as profiles class ArgumentClassesFactory: diff --git a/hexrd/preprocess/preprocessors.py b/hexrd/hedm/preprocess/preprocessors.py similarity index 96% rename from hexrd/preprocess/preprocessors.py rename to hexrd/hedm/preprocess/preprocessors.py index 4c96297b1..f934048ed 100644 --- a/hexrd/preprocess/preprocessors.py +++ b/hexrd/hedm/preprocess/preprocessors.py @@ -1,12 +1,12 @@ -from hexrd.imageseries.baseclass import ImageSeries -from hexrd.imageseries.omega import OmegaWedges -from hexrd.preprocess.profiles import ( +from hexrd.core.imageseries.baseclass import ImageSeries +from hexrd.core.imageseries.omega import OmegaWedges +from 
hexrd.hedm.preprocess.profiles import ( Eiger_Arguments, Dexelas_Arguments, HexrdPPScript_Arguments, ) -from hexrd import imageseries -from hexrd.imageseries.process import ProcessedImageSeries +from hexrd.core import imageseries +from hexrd.core.imageseries.process import ProcessedImageSeries import os import time from typing import Any, Optional, Union, Sequence, cast diff --git a/hexrd/preprocess/profiles.py b/hexrd/hedm/preprocess/profiles.py similarity index 97% rename from hexrd/preprocess/profiles.py rename to hexrd/hedm/preprocess/profiles.py index d36e3a012..dff82ca3c 100644 --- a/hexrd/preprocess/profiles.py +++ b/hexrd/hedm/preprocess/profiles.py @@ -2,13 +2,11 @@ import glob import os import yaml -from hexrd.preprocess.argument_classes_factory import ( +from hexrd.hedm.preprocess.argument_classes_factory import ( ArgumentClassesFactory, autoregister, ) -from hexrd.preprocess.yaml_internals import ( - HexrdPPScriptArgumentsDumper, -) +from hexrd.hedm.preprocess.yaml_internals import HexrdPPScriptArgumentsDumper from typing import Any, Union, Optional, cast diff --git a/hexrd/preprocess/yaml_internals.py b/hexrd/hedm/preprocess/yaml_internals.py similarity index 100% rename from hexrd/preprocess/yaml_internals.py rename to hexrd/hedm/preprocess/yaml_internals.py diff --git a/hexrd/hedm/sampleOrientations/__init__.py b/hexrd/hedm/sampleOrientations/__init__.py new file mode 100644 index 000000000..a4f6e5491 --- /dev/null +++ b/hexrd/hedm/sampleOrientations/__init__.py @@ -0,0 +1 @@ +from hexrd.hedm.sampleOrientations.sampleRFZ import sampleRFZ diff --git a/hexrd/sampleOrientations/conversions.py b/hexrd/hedm/sampleOrientations/conversions.py similarity index 75% rename from hexrd/sampleOrientations/conversions.py rename to hexrd/hedm/sampleOrientations/conversions.py index 2694fe6cc..98cc1212b 100644 --- a/hexrd/sampleOrientations/conversions.py +++ b/hexrd/hedm/sampleOrientations/conversions.py @@ -1,6 +1,6 @@ import numpy as np from numba import njit 
-from hexrd import constants +from hexrd.core import constants ap_2 = constants.cuA_2 sc = constants.sc @@ -51,33 +51,33 @@ def cu2ho(cu): xyz = sXYZ * sc ma = np.max(np.abs(xyz)) - if ma < 1E-8: + if ma < 1e-8: return np.array([0.0, 0.0, 0.0]) ma2 = np.max(np.abs(xyz[0:2])) - if ma2 < 1E-8: + if ma2 < 1e-8: LamXYZ = np.array([0.0, 0.0, constants.pref * xyz[2]]) else: if np.abs(xyz[1]) <= np.abs(xyz[0]): - q = (np.pi/12.0) * xyz[1]/xyz[0] + q = (np.pi / 12.0) * xyz[1] / xyz[0] c = np.cos(q) s = np.sin(q) - q = constants.prek * xyz[0] / np.sqrt(np.sqrt(2.0)-c) + q = constants.prek * xyz[0] / np.sqrt(np.sqrt(2.0) - c) T1 = (np.sqrt(2.0) * c - 1.0) * q T2 = np.sqrt(2.0) * s * q else: - q = (np.pi/12.0) * xyz[0]/xyz[1] + q = (np.pi / 12.0) * xyz[0] / xyz[1] c = np.cos(q) s = np.sin(q) - q = constants.prek * xyz[1] / np.sqrt(np.sqrt(2.0)-c) + q = constants.prek * xyz[1] / np.sqrt(np.sqrt(2.0) - c) T1 = np.sqrt(2.0) * s * q T2 = (np.sqrt(2.0) * c - 1.0) * q c = T1**2 + T2**2 - s = np.pi * c / (24.0 * xyz[2]**2) + s = np.pi * c / (24.0 * xyz[2] ** 2) c = np.sqrt(np.pi) * c / np.sqrt(24.0) / xyz[2] - q = np.sqrt( 1.0 - s ) + q = np.sqrt(1.0 - s) LamXYZ = np.array([T1 * q, T2 * q, constants.pref * xyz[2] - c]) if pyd == 1 or pyd == 2: @@ -96,18 +96,18 @@ def ho2ro(ho): @njit(cache=True, nogil=True) def ho2ax(ho): - hmag = np.linalg.norm(ho[:])**2 - if hmag < 1E-8: + hmag = np.linalg.norm(ho[:]) ** 2 + if hmag < 1e-8: return np.array([0.0, 0.0, 1.0, 0.0]) hm = hmag - hn = ho/np.sqrt(hmag) + hn = ho / np.sqrt(hmag) s = constants.tfit[0] + constants.tfit[1] * hmag for ii in range(2, 21): - hm = hm*hmag + hm = hm * hmag s = s + constants.tfit[ii] * hm s = 2.0 * np.arccos(s) diff = np.abs(s - np.pi) - if diff < 1E-8: + if diff < 1e-8: return np.array([hn[0], hn[1], hn[2], np.pi]) else: return np.array([hn[0], hn[1], hn[2], s]) @@ -115,14 +115,14 @@ def ho2ax(ho): @njit(cache=True, nogil=True) def ax2ro(ax): - if np.abs(ax[3]) < 1E-8: + if np.abs(ax[3]) < 1e-8: return 
np.array([0.0, 0.0, 1.0, 0.0]) - elif np.abs(ax[3] - np.pi) < 1E-8: + elif np.abs(ax[3] - np.pi) < 1e-8: return np.array([ax[0], ax[1], ax[2], np.inf]) else: - return np.array([ax[0], ax[1], ax[2], np.tan(ax[3]*0.5)]) + return np.array([ax[0], ax[1], ax[2], np.tan(ax[3] * 0.5)]) @njit(cache=True, nogil=True) @@ -133,21 +133,21 @@ def ro2qu(ro): @njit(cache=True, nogil=True) def ro2ax(ro): - if np.abs(ro[3]) < 1E-8: + if np.abs(ro[3]) < 1e-8: return np.array([0.0, 0.0, 1.0, 0.0]) elif ro[3] == np.inf: return np.array([ro[0], ro[1], ro[2], np.pi]) else: - ang = 2.0*np.arctan(ro[3]) - mag = 1.0/np.linalg.norm(ro[0:3]) - return np.array([ro[0]*mag, ro[1]*mag, ro[2]*mag, ang]) + ang = 2.0 * np.arctan(ro[3]) + mag = 1.0 / np.linalg.norm(ro[0:3]) + return np.array([ro[0] * mag, ro[1] * mag, ro[2] * mag, ang]) @njit(cache=True, nogil=True) def ax2qu(ro): - if np.abs(ro[3]) < 1E-8: + if np.abs(ro[3]) < 1e-8: return np.array([1.0, 0.0, 0.0, 0.0]) else: - c = np.cos(ro[3]*0.5) - s = np.sin(ro[3]*0.5) - return np.array([c, ro[0]*s, ro[1]*s, ro[2]*s]) + c = np.cos(ro[3] * 0.5) + s = np.sin(ro[3] * 0.5) + return np.array([c, ro[0] * s, ro[1] * s, ro[2] * s]) diff --git a/hexrd/sampleOrientations/rfz.py b/hexrd/hedm/sampleOrientations/rfz.py similarity index 51% rename from hexrd/sampleOrientations/rfz.py rename to hexrd/hedm/sampleOrientations/rfz.py index 36556ca4e..04f1df338 100644 --- a/hexrd/sampleOrientations/rfz.py +++ b/hexrd/hedm/sampleOrientations/rfz.py @@ -1,14 +1,14 @@ import numpy as np import numba -from hexrd.constants import FZtypeArray, FZorderArray -from hexrd import constants +from hexrd.core.constants import FZtypeArray, FZorderArray +from hexrd.core import constants @numba.njit(cache=True, nogil=True) def getFZtypeandOrder(pgnum): - FZtype = FZtypeArray[pgnum-1] - FZorder = FZorderArray[pgnum-1] + FZtype = FZtypeArray[pgnum - 1] + FZorder = FZorderArray[pgnum - 1] return np.array([FZtype, FZorder]) @@ -24,9 +24,9 @@ def insideCyclicFZ(ro, FZorder): res = 
True else: if FZorder == 2: - res = np.abs(ro[1]*ro[3]) <= constants.BP[FZorder-1] + res = np.abs(ro[1] * ro[3]) <= constants.BP[FZorder - 1] else: - res = np.abs(ro[2]*ro[3]) <= constants.BP[FZorder-1] + res = np.abs(ro[2] * ro[3]) <= constants.BP[FZorder - 1] return res @@ -38,41 +38,38 @@ def insideDihedralFZ(ro, FZorder): else: rod = ro[0:3] * ro[3] - c1 = np.abs(rod[2]) <= constants.BP[FZorder-1] + c1 = np.abs(rod[2]) <= constants.BP[FZorder - 1] if c1: - if FZorder == 2: - c2 = np.logical_and(np.abs(rod[0]) <= 1.0, - np.abs(rod[1]) <= 1.0) + if FZorder == 2: + c2 = np.logical_and(np.abs(rod[0]) <= 1.0, np.abs(rod[1]) <= 1.0) elif FZorder == 3: - srt = np.sqrt(3.0)/2.0 - c2 = np.abs(srt*rod[0] + 0.5*rod[1]) <= 1.0 - c3 = np.abs(srt*rod[0] - 0.5*rod[1]) <= 1.0 + srt = np.sqrt(3.0) / 2.0 + c2 = np.abs(srt * rod[0] + 0.5 * rod[1]) <= 1.0 + c3 = np.abs(srt * rod[0] - 0.5 * rod[1]) <= 1.0 c4 = np.abs(rod[1]) <= 1.0 - return np.logical_and(c2, - np.logical_and(c3, c4)) + return np.logical_and(c2, np.logical_and(c3, c4)) elif FZorder == 4: - r22 = 1.0/np.sqrt(2.0) - c2 = np.logical_and(np.abs(rod[0]) <= 1.0, - np.abs(rod[1]) <= 1.0) - c3 = np.logical_and(r22*np.abs(rod[0]+rod[1]) <= 1.0, - r22*np.abs(rod[0]-rod[1]) <= 1.0) + r22 = 1.0 / np.sqrt(2.0) + c2 = np.logical_and(np.abs(rod[0]) <= 1.0, np.abs(rod[1]) <= 1.0) + c3 = np.logical_and( + r22 * np.abs(rod[0] + rod[1]) <= 1.0, + r22 * np.abs(rod[0] - rod[1]) <= 1.0, + ) return np.logical_and(c2, c3) elif FZorder == 6: - srt = np.sqrt(3.0)/2.0 - c2 = np.abs(0.5*rod[0] + srt*rod[1]) < 1.0 - c2 = np.logical_and(c2, - np.abs(srt*rod[0] + 0.5*rod[1]) < 1.0) - c2 = np.logical_and(c2, - np.abs(0.5*rod[0] - srt*rod[1]) < 1.0) - c2 = np.logical_and(c2, - np.abs(srt*rod[0] - 0.5*rod[1]) < 1.0) - c2 = np.logical_and(c2, - np.logical_and(np.abs(rod[0]) <= 1.0, - np.abs(rod[1]) <= 1.0)) + srt = np.sqrt(3.0) / 2.0 + c2 = np.abs(0.5 * rod[0] + srt * rod[1]) < 1.0 + c2 = np.logical_and(c2, np.abs(srt * rod[0] + 0.5 * rod[1]) < 1.0) 
+ c2 = np.logical_and(c2, np.abs(0.5 * rod[0] - srt * rod[1]) < 1.0) + c2 = np.logical_and(c2, np.abs(srt * rod[0] - 0.5 * rod[1]) < 1.0) + c2 = np.logical_and( + c2, + np.logical_and(np.abs(rod[0]) <= 1.0, np.abs(rod[1]) <= 1.0), + ) return c2 else: return False @@ -83,11 +80,11 @@ def insideCubicFZ(ro, kwrd): rod = np.abs(ro[0:3] * ro[3]) if kwrd == 'oct': - c1 = (np.max(rod) - constants.BP[3]) <= 1E-8 + c1 = (np.max(rod) - constants.BP[3]) <= 1e-8 else: c1 = True - c2 = (rod[0]+rod[1]+rod[2] - 1.0) <= 1E-8 + c2 = (rod[0] + rod[1] + rod[2] - 1.0) <= 1e-8 res = np.logical_and(c1, c2) return res @@ -95,7 +92,7 @@ def insideCubicFZ(ro, kwrd): @numba.njit(cache=True, nogil=True) def insideFZ(ro, pgnum): res = getFZtypeandOrder(pgnum) - FZtype = res[0] + FZtype = res[0] FZorder = res[1] if FZtype == 0: diff --git a/hexrd/sampleOrientations/sampleRFZ.py b/hexrd/hedm/sampleOrientations/sampleRFZ.py similarity index 72% rename from hexrd/sampleOrientations/sampleRFZ.py rename to hexrd/hedm/sampleOrientations/sampleRFZ.py index 27b50fd4b..0cd7dbf70 100644 --- a/hexrd/sampleOrientations/sampleRFZ.py +++ b/hexrd/hedm/sampleOrientations/sampleRFZ.py @@ -2,26 +2,22 @@ import numba from numba import prange -from hexrd.sampleOrientations.conversions import cu2ro, ro2qu -from hexrd.sampleOrientations.rfz import insideFZ -from hexrd import constants +from hexrd.hedm.sampleOrientations.conversions import cu2ro, ro2qu +from hexrd.hedm.sampleOrientations.rfz import insideFZ +from hexrd.core import constants @numba.njit(cache=True, nogil=True, parallel=True) -def _sample(pgnum, - N, - delta, - shift, - ap_2): +def _sample(pgnum, N, delta, shift, ap_2): - N3 = (2*N+1)**3 + N3 = (2 * N + 1) ** 3 res = np.full((N3, 4), np.nan, dtype=np.float64) - for ii in prange(-N, N+1): + for ii in prange(-N, N + 1): xx = (ii + shift) * delta - for jj in prange(-N, N+1): + for jj in prange(-N, N + 1): yy = (jj + shift) * delta - for kk in prange(-N, N+1): + for kk in prange(-N, N + 1): zz = (kk + 
shift) * delta cu = np.array([xx, yy, zz]) ma = np.max(np.abs(cu)) @@ -29,13 +25,17 @@ def _sample(pgnum, if ma <= ap_2: ro = cu2ro(cu) if insideFZ(ro, pgnum): - idx = (ii+N)*(2*N+1)**2 + (jj+N)*(2*N+1) + (kk+N) - res[idx,:] = ro2qu(ro) + idx = ( + (ii + N) * (2 * N + 1) ** 2 + + (jj + N) * (2 * N + 1) + + (kk + N) + ) + res[idx, :] = ro2qu(ro) return res -class sampleRFZ: +class sampleRFZ: """This class samples the rodrigues fundamental zone of a point group uniformly in the density sense and returns a list of orientations which are spaced, @@ -46,8 +46,8 @@ class sampleRFZ: Note ---- Details can be found in: - S. Singh and M. De Graef, "Orientation sampling for - dictionary-based diffraction pattern indexing methods". + S. Singh and M. De Graef, "Orientation sampling for + dictionary-based diffraction pattern indexing methods". MSMSE 24, 085013 (2016) Attributes @@ -59,10 +59,9 @@ class sampleRFZ: """ - def __init__(self, - pgnum, - sampling_type='default', - average_angular_spacing=3.0): + def __init__( + self, pgnum, sampling_type='default', average_angular_spacing=3.0 + ): """__init__ method of the sampleRFZ class. 
@@ -92,19 +91,20 @@ def sampling_N(self): """ if self.sampling_type.lower() == 'default': - return np.rint(131.97049 / (self.avg_ang_spacing - 0.03732)).astype(np.int32) + return np.rint( + 131.97049 / (self.avg_ang_spacing - 0.03732) + ).astype(np.int32) elif self.sampling_type.lower() == 'special': - return np.rint(125.70471 / (self.avg_ang_spacing - 0.07127)).astype(np.int32) + return np.rint( + 125.70471 / (self.avg_ang_spacing - 0.07127) + ).astype(np.int32) def sample(self): - res = _sample(self.pgnum, - self.cubN, - self.delta, - self.shift, - self.ap_2) - mask = ~np.isnan(res[:,0]) - res = res[mask,:] + res = _sample(self.pgnum, self.cubN, self.delta, self.shift, self.ap_2) + mask = ~np.isnan(res[:, 0]) + res = res[mask, :] self.orientations = res + def sample_if_possible(self): required_attributes = ('pgnum', 'avg_ang_spacing', 'sampling_type') if not all(hasattr(self, x) for x in required_attributes): @@ -134,7 +134,6 @@ def sampling_type(self, stype): def avg_ang_spacing(self): return self._avg_ang_spacing - @avg_ang_spacing.setter def avg_ang_spacing(self, ang): self._avg_ang_spacing = ang diff --git a/hexrd/hedm/xrdutil/__init__.py b/hexrd/hedm/xrdutil/__init__.py new file mode 100644 index 000000000..d42777b8d --- /dev/null +++ b/hexrd/hedm/xrdutil/__init__.py @@ -0,0 +1,7 @@ +from .utils import * +from .utils import _fetch_hkls_from_planedata +from .utils import _filter_hkls_eta_ome + +#TODO: Fully separate out the utils.py scripts +from hexrd.hed.xrdutil.utils import * +from hexrd.laue.xrdutil.utils import * \ No newline at end of file diff --git a/hexrd/xrdutil/utils.py b/hexrd/hedm/xrdutil/utils.py similarity index 69% rename from hexrd/xrdutil/utils.py rename to hexrd/hedm/xrdutil/utils.py index bf89283b3..0f7a4d606 100644 --- a/hexrd/xrdutil/utils.py +++ b/hexrd/hedm/xrdutil/utils.py @@ -28,29 +28,30 @@ from typing import Optional, Union, Any, Generator -from hexrd.material.crystallography import PlaneData -from hexrd.distortion.distortionabc 
import DistortionABC +from hexrd.core.material.crystallography import PlaneData +from hexrd.core.distortion.distortionabc import DistortionABC import numba import numpy as np import numba -from hexrd import constants -from hexrd import matrixutil as mutil -from hexrd import rotations as rot -from hexrd import gridutil as gutil +from hexrd.core import constants +from hexrd.core import matrixutil as mutil +from hexrd.core import rotations as rot +from hexrd.core import gridutil as gutil -from hexrd.material.crystallography import processWavelength, PlaneData +from hexrd.hed.xrdutil.utils import _project_on_detector_plane +from hexrd.core.material.crystallography import processWavelength, PlaneData -from hexrd.transforms import xfcapi -from hexrd.valunits import valWUnit +from hexrd.core.transforms import xfcapi +from hexrd.core.valunits import valWUnit -from hexrd import distortion as distortion_pkg +from hexrd.core import distortion as distortion_pkg -from hexrd.deprecation import deprecated +from hexrd.core.deprecation import deprecated -simlp = 'hexrd.instrument.hedm_instrument.HEDMInstrument.simulate_laue_pattern' +simlp = 'hexrd.hedm.instrument.hedm_instrument.HEDMInstrument.simulate_laue_pattern' # ============================================================================= # PARAMETERS @@ -508,9 +509,8 @@ def _filter_hkls_eta_ome( angMask_eta = np.zeros(len(angles), dtype=bool) for etas in eta_range: angMask_eta = np.logical_or( - angMask_eta, xfcapi.validate_angle_ranges( - angles[:, 1], etas[0], etas[1] - ) + angMask_eta, + xfcapi.validate_angle_ranges(angles[:, 1], etas[0], etas[1]), ) ccw = True @@ -536,369 +536,6 @@ def _filter_hkls_eta_ome( return allAngs, allHKLs -def _project_on_detector_plane( - allAngs: np.ndarray, - rMat_d: np.ndarray, - rMat_c: np.ndarray, - chi: float, - tVec_d: np.ndarray, - tVec_c: np.ndarray, - tVec_s: np.ndarray, - distortion: DistortionABC, - beamVec: np.ndarray = constants.beam_vec, -) -> tuple[np.ndarray, np.ndarray, 
np.ndarray]: - """ - utility routine for projecting a list of (tth, eta, ome) onto the - detector plane parameterized by the args - """ - gVec_cs = xfcapi.angles_to_gvec( - allAngs, chi=chi, rmat_c=rMat_c, beam_vec=beamVec - ) - - rMat_ss = xfcapi.make_sample_rmat(chi, allAngs[:, 2]) - - tmp_xys = xfcapi.gvec_to_xy( - gVec_cs, - rMat_d, - rMat_ss, - rMat_c, - tVec_d, - tVec_s, - tVec_c, - beam_vec=beamVec, - ) - - valid_mask = ~(np.isnan(tmp_xys[:, 0]) | np.isnan(tmp_xys[:, 1])) - - det_xy = np.atleast_2d(tmp_xys[valid_mask, :]) - - # apply distortion if specified - if distortion is not None: - det_xy = distortion.apply_inverse(det_xy) - - return det_xy, rMat_ss, valid_mask - - -def _project_on_detector_cylinder( - allAngs: np.ndarray, - chi: float, - tVec_d: np.ndarray, - caxis: np.ndarray, - paxis: np.ndarray, - radius: float, - physical_size: np.ndarray, - angle_extent: float, - distortion: DistortionABC = None, - beamVec: np.ndarray = constants.beam_vec, - etaVec: np.ndarray = constants.eta_vec, - tVec_s: np.ndarray = constants.zeros_3x1, - rmat_s: np.ndarray = constants.identity_3x3, - tVec_c: np.ndarray = constants.zeros_3x1, -) -> tuple[np.ndarray, np.ndarray, np.ndarray]: - """ - utility routine for projecting a list of (tth, eta, ome) onto the - detector plane parameterized by the args. 
this function does the - computation for a cylindrical detector - """ - dVec_cs = xfcapi.angles_to_dvec( - allAngs, chi=chi, rmat_c=np.eye(3), beam_vec=beamVec, eta_vec=etaVec - ) - - rMat_ss = np.tile(rmat_s, [allAngs.shape[0], 1, 1]) - - tmp_xys, valid_mask = _dvecToDetectorXYcylinder( - dVec_cs, - tVec_d, - caxis, - paxis, - radius, - physical_size, - angle_extent, - tVec_s=tVec_s, - rmat_s=rmat_s, - tVec_c=tVec_c, - ) - - det_xy = np.atleast_2d(tmp_xys[valid_mask, :]) - - # apply distortion if specified - if distortion is not None: - det_xy = distortion.apply_inverse(det_xy) - - return det_xy, rMat_ss, valid_mask - - -def _dvecToDetectorXYcylinder( - dVec_cs: np.ndarray, - tVec_d: np.ndarray, - caxis: np.ndarray, - paxis: np.ndarray, - radius: float, - physical_size: np.ndarray, - angle_extent: float, - tVec_s: np.ndarray = constants.zeros_3x1, - tVec_c: np.ndarray = constants.zeros_3x1, - rmat_s: np.ndarray = constants.identity_3x3, -) -> tuple[np.ndarray, np.ndarray]: - - cvec = _unitvec_to_cylinder( - dVec_cs, - caxis, - paxis, - radius, - tVec_d, - tVec_s=tVec_s, - tVec_c=tVec_c, - rmat_s=rmat_s, - ) - - cvec_det, valid_mask = _clip_to_cylindrical_detector( - cvec, - tVec_d, - caxis, - paxis, - radius, - physical_size, - angle_extent, - tVec_s=tVec_s, - tVec_c=tVec_c, - rmat_s=rmat_s, - ) - - xy_det = _dewarp_from_cylinder( - cvec_det, - tVec_d, - caxis, - paxis, - radius, - tVec_s=tVec_s, - tVec_c=tVec_c, - rmat_s=rmat_s, - ) - - return xy_det, valid_mask - - -def _unitvec_to_cylinder( - uvw: np.ndarray, - caxis: np.ndarray, - paxis: np.ndarray, - radius: float, - tvec: np.ndarray, - tVec_s: np.ndarray = constants.zeros_3x1, - tVec_c: np.ndarray = constants.zeros_3x1, - rmat_s: np.ndarray = constants.identity_3x3, -) -> np.ndarray: - """ - get point where unitvector uvw - intersect the cylindrical detector. - this will give points which are - outside the actual panel. 
the points - will be clipped to the panel later - - Parameters - ---------- - uvw : numpy.ndarray - unit vectors stacked row wise (nx3) shape - - Returns - ------- - numpy.ndarray - (x,y,z) vectors point which intersect with - the cylinder with (nx3) shape - """ - naxis = np.cross(caxis, paxis) - naxis = naxis / np.linalg.norm(naxis) - - tvec_c_l = np.dot(rmat_s, tVec_c) - - delta = tvec - (radius * naxis + np.squeeze(tVec_s) + np.squeeze(tvec_c_l)) - num = uvw.shape[0] - cx = np.atleast_2d(caxis).T - - delta_t = np.tile(delta, [num, 1]) - - t1 = np.dot(uvw, delta.T) - t2 = np.squeeze(np.dot(uvw, cx)) - t3 = np.squeeze(np.dot(delta, cx)) - t4 = np.dot(uvw, cx) - - A = np.squeeze(1 - t4**2) - B = t1 - t2 * t3 - C = radius**2 - np.linalg.norm(delta) ** 2 + t3**2 - - mask = np.abs(A) < 1e-10 - beta = np.zeros( - [ - num, - ] - ) - - beta[~mask] = (B[~mask] + np.sqrt(B[~mask] ** 2 + A[~mask] * C)) / A[~mask] - - beta[mask] = np.nan - return np.tile(beta, [3, 1]).T * uvw - - -def _clip_to_cylindrical_detector( - uvw: np.ndarray, - tVec_d: np.ndarray, - caxis: np.ndarray, - paxis: np.ndarray, - radius: float, - physical_size: np.ndarray, - angle_extent: float, - tVec_s: np.ndarray = constants.zeros_3x1, - tVec_c: np.ndarray = constants.zeros_3x1, - rmat_s: np.ndarray = constants.identity_3x3, -) -> tuple[np.ndarray, np.ndarray]: - """ - takes in the intersection points uvw - with the cylindrical detector and - prunes out points which don't actually - hit the actual panel - - Parameters - ---------- - uvw : numpy.ndarray - unit vectors stacked row wise (nx3) shape - - Returns - ------- - numpy.ndarray - (x,y,z) vectors point which fall on panel - with (mx3) shape - """ - # first get rid of points which are above - # or below the detector - naxis = np.cross(caxis, paxis) - num = uvw.shape[0] - - cx = np.atleast_2d(caxis).T - nx = np.atleast_2d(naxis).T - - tvec_c_l = np.dot(rmat_s, tVec_c) - - delta = tVec_d - ( - radius * naxis + np.squeeze(tVec_s) + np.squeeze(tvec_c_l) 
- ) - - delta_t = np.tile(delta, [num, 1]) - - uvwp = uvw - delta_t - dp = np.dot(uvwp, cx) - - uvwpxy = uvwp - np.tile(dp, [1, 3]) * np.tile(cx, [1, num]).T - - size = physical_size - tvec = np.atleast_2d(tVec_d).T - - # ycomp = uvwp - np.tile(tVec_d,[num, 1]) - mask1 = np.squeeze(np.abs(dp) > size[0] * 0.5) - uvwp[mask1, :] = np.nan - - # next get rid of points that fall outside - # the polar angle range - - ang = np.dot(uvwpxy, nx) / radius - ang[np.abs(ang) > 1.0] = np.sign(ang[np.abs(ang) > 1.0]) - - ang = np.arccos(ang) - mask2 = np.squeeze(ang >= angle_extent) - mask = np.logical_or(mask1, mask2) - res = uvw.copy() - res[mask, :] = np.nan - - return res, ~mask - - -def _dewarp_from_cylinder( - uvw: np.ndarray, - tVec_d: np.ndarray, - caxis: np.ndarray, - paxis: np.ndarray, - radius: float, - tVec_s: np.ndarray = constants.zeros_3x1, - tVec_c: np.ndarray = constants.zeros_3x1, - rmat_s: np.ndarray = constants.identity_3x3, -): - """ - routine to convert cylindrical coordinates - to cartesian coordinates in image frame - """ - naxis = np.cross(caxis, paxis) - naxis = naxis / np.linalg.norm(naxis) - - cx = np.atleast_2d(caxis).T - px = np.atleast_2d(paxis).T - nx = np.atleast_2d(naxis).T - num = uvw.shape[0] - - tvec_c_l = np.dot(rmat_s, tVec_c) - - delta = tVec_d - ( - radius * naxis + np.squeeze(tVec_s) + np.squeeze(tvec_c_l) - ) - - delta_t = np.tile(delta, [num, 1]) - - uvwp = uvw - delta_t - - uvwpxy = uvwp - np.tile(np.dot(uvwp, cx), [1, 3]) * np.tile(cx, [1, num]).T - - sgn = np.sign(np.dot(uvwpxy, px)) - sgn[sgn == 0.0] = 1.0 - ang = np.dot(uvwpxy, nx) / radius - ang[np.abs(ang) > 1.0] = np.sign(ang[np.abs(ang) > 1.0]) - ang = np.arccos(ang) - xcrd = np.squeeze(radius * ang * sgn) - ycrd = np.squeeze(np.dot(uvwp, cx)) - return np.vstack((xcrd, ycrd)).T - - -def _warp_to_cylinder( - cart: np.ndarray, - tVec_d: np.ndarray, - radius: float, - caxis: np.ndarray, - paxis: np.ndarray, - tVec_s: np.ndarray = constants.zeros_3x1, - rmat_s: np.ndarray = 
constants.identity_3x3, - tVec_c: np.ndarray = constants.zeros_3x1, - normalize: bool = True, -) -> np.ndarray: - """ - routine to convert cartesian coordinates - in image frame to cylindrical coordinates - """ - tvec = np.atleast_2d(tVec_d).T - if tVec_s.ndim == 1: - tVec_s = np.atleast_2d(tVec_s).T - if tVec_c.ndim == 1: - tVec_c = np.atleast_2d(tVec_c).T - num = cart.shape[0] - naxis = np.cross(paxis, caxis) - x = cart[:, 0] - y = cart[:, 1] - th = x / radius - xp = radius * np.sin(th) - xn = radius * (1 - np.cos(th)) - - ccomp = np.tile(y, [3, 1]).T * np.tile(caxis, [num, 1]) - pcomp = np.tile(xp, [3, 1]).T * np.tile(paxis, [num, 1]) - ncomp = np.tile(xn, [3, 1]).T * np.tile(naxis, [num, 1]) - cart3d = pcomp + ccomp + ncomp - - tVec_c_l = np.dot(rmat_s, tVec_c) - - res = cart3d + np.tile(tvec - tVec_s - tVec_c_l, [1, num]).T - - if normalize: - return res / np.tile(np.linalg.norm(res, axis=1), [3, 1]).T - else: - return res - - def _dvec_to_angs( dvecs: np.ndarray, bvec: np.ndarray, evec: np.ndarray ) -> tuple[np.ndarray, np.ndarray]: @@ -1017,8 +654,15 @@ def simulateGVecs( else: # ??? preallocate for speed? 
det_xy, rMat_ss, _ = _project_on_detector_plane( - allAngs, rMat_d, rMat_c, chi, tVec_d, tVec_c, tVec_s, distortion, - beamVec=beam_vector + allAngs, + rMat_d, + rMat_c, + chi, + tVec_d, + tVec_c, + tVec_s, + distortion, + beamVec=beam_vector, ) on_panel = np.logical_and( @@ -1056,143 +700,6 @@ def simulateGVecs( return valid_ids, valid_hkl, valid_ang, valid_xy, ang_ps -@deprecated(new_func=simlp, removal_date='2025-01-01') -def simulateLauePattern( - hkls, - bMat, - rmat_d, - tvec_d, - panel_dims, - panel_buffer=5, - minEnergy=8, - maxEnergy=24, - rmat_s=np.eye(3), - grain_params=None, - distortion=None, - beamVec=None, -): - - if beamVec is None: - beamVec = constants.beam_vec - - # parse energy ranges - multipleEnergyRanges = False - if hasattr(maxEnergy, '__len__'): - assert len(maxEnergy) == len( - minEnergy - ), 'energy cutoff ranges must have the same length' - multipleEnergyRanges = True - lmin = [processWavelength(e) for e in maxEnergy] - lmax = [processWavelength(e) for e in minEnergy] - else: - lmin = processWavelength(maxEnergy) - lmax = processWavelength(minEnergy) - - # process crystal rmats and inverse stretches - if grain_params is None: - grain_params = np.atleast_2d( - [0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 1.0, 1.0, 1.0, 0.0, 0.0, 0.0] - ) - - n_grains = len(grain_params) - - # dummy translation vector... 
make input - tvec_s = np.zeros((3, 1)) - - # number of hkls - nhkls_tot = hkls.shape[1] - - # unit G-vectors in crystal frame - ghat_c = mutil.unitVector(np.dot(bMat, hkls)) - - # pre-allocate output arrays - xy_det = np.nan * np.ones((n_grains, nhkls_tot, 2)) - hkls_in = np.nan * np.ones((n_grains, 3, nhkls_tot)) - angles = np.nan * np.ones((n_grains, nhkls_tot, 2)) - dspacing = np.nan * np.ones((n_grains, nhkls_tot)) - energy = np.nan * np.ones((n_grains, nhkls_tot)) - - """ - LOOP OVER GRAINS - """ - - for iG, gp in enumerate(grain_params): - rmat_c = xfcapi.make_rmat_of_expmap(gp[:3]) - tvec_c = gp[3:6].reshape(3, 1) - vInv_s = mutil.vecMVToSymm(gp[6:].reshape(6, 1)) - - # stretch them: V^(-1) * R * Gc - ghat_s_str = mutil.unitVector(np.dot(vInv_s, np.dot(rmat_c, ghat_c))) - ghat_c_str = np.dot(rmat_c.T, ghat_s_str) - - # project - dpts = xfcapi.gvec_to_xy( - ghat_c_str.T, - rmat_d, - rmat_s, - rmat_c, - tvec_d, - tvec_s, - tvec_c, - beam_vec=beamVec, - ).T - - # check intersections with detector plane - canIntersect = ~np.isnan(dpts[0, :]) - npts_in = sum(canIntersect) - - if np.any(canIntersect): - dpts = dpts[:, canIntersect].reshape(2, npts_in) - dhkl = hkls[:, canIntersect].reshape(3, npts_in) - - rmat_b = xfcapi.make_beam_rmat(beamVec, constants.eta_vec) - - # back to angles - tth_eta, gvec_l = xfcapi.xy_to_gvec( - dpts.T, rmat_d, rmat_s, tvec_d, tvec_s, tvec_c, rmat_b=rmat_b - ) - tth_eta = np.vstack(tth_eta).T - - # warp measured points - if distortion is not None: - dpts = distortion.apply_inverse(dpts) - - # plane spacings and energies - dsp = 1.0 / mutil.columnNorm(np.dot(bMat, dhkl)) - wlen = 2 * dsp * np.sin(0.5 * tth_eta[:, 0]) - - # find on spatial extent of detector - xTest = np.logical_and( - dpts[0, :] >= -0.5 * panel_dims[1] + panel_buffer, - dpts[0, :] <= 0.5 * panel_dims[1] - panel_buffer, - ) - yTest = np.logical_and( - dpts[1, :] >= -0.5 * panel_dims[0] + panel_buffer, - dpts[1, :] <= 0.5 * panel_dims[0] - panel_buffer, - ) - - onDetector 
= np.logical_and(xTest, yTest) - if multipleEnergyRanges: - validEnergy = np.zeros(len(wlen), dtype=bool) - for i in range(len(lmin)): - validEnergy = validEnergy | np.logical_and( - wlen >= lmin[i], wlen <= lmax[i] - ) - else: - validEnergy = np.logical_and(wlen >= lmin, wlen <= lmax) - - # index for valid reflections - keepers = np.where(np.logical_and(onDetector, validEnergy))[0] - - # assign output arrays - xy_det[iG][keepers, :] = dpts[:, keepers].T - hkls_in[iG][:, keepers] = dhkl[:, keepers] - angles[iG][keepers, :] = tth_eta[keepers, :] - dspacing[iG, keepers] = dsp[keepers] - energy[iG, keepers] = processWavelength(wlen[keepers]) - return xy_det, hkls_in, angles, dspacing, energy - - @numba.njit(nogil=True, cache=True) def _expand_pixels( original: np.ndarray, w: float, h: float, result: np.ndarray @@ -1481,7 +988,7 @@ def make_reflection_patches( def extract_detector_transformation( - detector_params: Union[dict[str, Any], np.ndarray] + detector_params: Union[dict[str, Any], np.ndarray], ) -> tuple[np.ndarray, np.ndarray, float, np.ndarray]: """ Construct arrays from detector parameters. 
diff --git a/hexrd/laue/fitting/calibration/laue.py b/hexrd/laue/fitting/calibration/laue.py new file mode 100644 index 000000000..3b0b9645e --- /dev/null +++ b/hexrd/laue/fitting/calibration/laue.py @@ -0,0 +1,600 @@ +import copy +from typing import Optional + +import numpy as np +from scipy import ndimage +from scipy.integrate import nquad +from scipy.optimize import leastsq +from skimage import filters +from skimage.feature import blob_log + +# TODO: Resolve extra-workflow-dependency +from hexrd.hedm import xrdutil +from hexrd.core.constants import fwhm_to_sigma +from hexrd.core.instrument import switch_xray_source +from hexrd.core.rotations import angleAxisOfRotMat, RotMatEuler +from hexrd.core.transforms import xfcapi +from hexrd.core.utils.hkl import hkl_to_str, str_to_hkl + +# TODO: Resolve extra-workflow-dependency +from ....core.fitting.calibration.calibrator import Calibrator +from ....core.fitting.calibration.abstract_grain import AbstractGrainCalibrator +from ....core.fitting.calibration.lmfit_param_handling import ( + create_grain_params, + DEFAULT_EULER_CONVENTION, + rename_to_avoid_collision, +) + + +class LaueCalibrator(AbstractGrainCalibrator): + """A Laue calibrator "is-a" specific case for a grain calibrator. + + Just like a grain calibrator, a Laue calibrator is calibrating + grain parameters. + + There are some unique properties for Laue, though, such as having a + varying energy range rather than a constant energy value. Also, we + do not utilize any omega periods. 
+ """ + + type = 'laue' + + def __init__( + self, + instr, + material, + grain_params, + default_refinements=None, + min_energy=5, + max_energy=25, + tth_distortion=None, + calibration_picks=None, + euler_convention=DEFAULT_EULER_CONVENTION, + xray_source: Optional[str] = None, + ): + super().__init__( + instr, + material, + grain_params, + default_refinements, + calibration_picks, + euler_convention, + ) + self.energy_cutoffs = [min_energy, max_energy] + self.xray_source = xray_source + + self._tth_distortion = tth_distortion + self._update_tth_distortion_panels() + + @property + def name(self): + return self.material.name + + @property + def tth_distortion(self): + return self._tth_distortion + + @tth_distortion.setter + def tth_distortion(self, v): + self._tth_distortion = v + self._update_tth_distortion_panels() + + def _update_tth_distortion_panels(self): + # Make sure the panels in the tth distortion are the same + # as those on the instrument, so their beam vectors get modified + # accordingly. + if self._tth_distortion is None: + return + + self._tth_distortion = copy.deepcopy(self._tth_distortion) + for det_key, obj in self._tth_distortion.items(): + obj.panel = self.instr.detectors[det_key] + + @property + def energy_cutoffs(self): + return self._energy_cutoffs + + @energy_cutoffs.setter + def energy_cutoffs(self, x): + assert len(x) == 2, "input must have 2 elements" + assert x[1] > x[0], "first element must be < than second" + self._energy_cutoffs = x + self.plane_data.wavelength = self.energy_cutoffs[-1] + self.plane_data.exclusions = None + + def autopick_points( + self, + raw_img_dict, + tth_tol=5.0, + eta_tol=5.0, + npdiv=2, + do_smoothing=True, + smoothing_sigma=2, + use_blob_detection=True, + blob_threshold=0.25, + fit_peaks=True, + min_peak_int=1.0, + fit_tth_tol=0.1, + ): + """ + Parameters + ---------- + raw_img_dict : TYPE + DESCRIPTION. + tth_tol : TYPE, optional + DESCRIPTION. The default is 5.. + eta_tol : TYPE, optional + DESCRIPTION. 
The default is 5.. + npdiv : TYPE, optional + DESCRIPTION. The default is 2. + do_smoothing : TYPE, optional + DESCRIPTION. The default is True. + smoothing_sigma : TYPE, optional + DESCRIPTION. The default is 2. + use_blob_detection : TYPE, optional + DESCRIPTION. The default is True. + blob_threshold : TYPE, optional + DESCRIPTION. The default is 0.25. + fit_peaks : TYPE, optional + DESCRIPTION. The default is True. + + Returns + ------- + None. + + """ + + with switch_xray_source(self.instr, self.xray_source): + return self._autopick_points( + raw_img_dict=raw_img_dict, + tth_tol=tth_tol, + eta_tol=eta_tol, + npdiv=npdiv, + do_smoothing=do_smoothing, + smoothing_sigma=smoothing_sigma, + use_blob_detection=use_blob_detection, + blob_threshold=blob_threshold, + fit_peaks=fit_peaks, + min_peak_int=min_peak_int, + fit_tth_tol=fit_tth_tol, + ) + + def _autopick_points( + self, + raw_img_dict, + tth_tol=5.0, + eta_tol=5.0, + npdiv=2, + do_smoothing=True, + smoothing_sigma=2, + use_blob_detection=True, + blob_threshold=0.25, + fit_peaks=True, + min_peak_int=1.0, + fit_tth_tol=0.1, + ): + labelStructure = ndimage.generate_binary_structure(2, 1) + rmat_s = np.eye(3) # !!! forcing to identity + omega = 0.0 # !!! same ^^^ + + rmat_c = xfcapi.make_rmat_of_expmap(self.grain_params[:3]) + tvec_c = self.grain_params[3:6] + # vinv_s = self.grain_params[6:12] # !!!: patches don't take this yet + + # run simulation + # ???: could we get this from overlays? 
+ laue_sim = self.instr.simulate_laue_pattern( + self.plane_data, + minEnergy=self.energy_cutoffs[0], + maxEnergy=self.energy_cutoffs[1], + rmat_s=None, + grain_params=np.atleast_2d(self.grain_params), + ) + + # loop over detectors for results + refl_dict = dict.fromkeys(self.instr.detectors) + for det_key, det in self.instr.detectors.items(): + det_config = det.config_dict( + chi=self.instr.chi, + tvec=self.instr.tvec, + beam_vector=self.instr.beam_vector, + ) + + xy_det, hkls, angles, dspacing, energy = laue_sim[det_key] + ''' + valid_xy = [] + valid_hkls = [] + valid_angs = [] + valid_energy = [] + ''' + # !!! not necessary to loop over grains since we can only handle 1 + # for gid in range(len(xy_det)): + gid = 0 + # find valid reflections + valid_refl = ~np.isnan(xy_det[gid][:, 0]) + valid_xy = xy_det[gid][valid_refl, :] + valid_hkls = hkls[gid][:, valid_refl] + valid_angs = angles[gid][valid_refl, :] + valid_energy = energy[gid][valid_refl] + + # make patches + refl_patches = xrdutil.make_reflection_patches( + det_config, + valid_angs, + det.angularPixelSize(valid_xy), + rmat_c=rmat_c, + tvec_c=tvec_c, + tth_tol=tth_tol, + eta_tol=eta_tol, + npdiv=npdiv, + quiet=True, + ) + + reflInfoList = [] + img = raw_img_dict[det_key] + native_area = det.pixel_area + num_patches = len(valid_angs) + meas_xy = np.nan * np.ones((num_patches, 2)) + meas_angs = np.nan * np.ones((num_patches, 2)) + for iRefl, patch in enumerate(refl_patches): + # check for overrun + irow = patch[-1][0] + jcol = patch[-1][1] + if np.any( + [irow < 0, irow >= det.rows, jcol < 0, jcol >= det.cols] + ): + continue + if not np.all( + det.clip_to_panel( + np.vstack( + [patch[1][0].flatten(), patch[1][1].flatten()] + ).T + )[1] + ): + continue + # use nearest interpolation + spot_data = img[irow, jcol] * patch[3] * npdiv**2 / native_area + spot_data -= np.amin(spot_data) + patch_size = spot_data.shape + + sigmax = 0.25 * np.min(spot_data.shape) * fwhm_to_sigma + + # optional gaussian smoothing + if 
do_smoothing: + spot_data = filters.gaussian(spot_data, smoothing_sigma) + + if use_blob_detection: + spot_data_scl = 2.0 * spot_data / np.max(spot_data) - 1.0 + + # Compute radii in the 3rd column. + blobs_log = blob_log( + spot_data_scl, + min_sigma=2, + max_sigma=min(sigmax, 20), + num_sigma=10, + threshold=blob_threshold, + overlap=0.1, + ) + numPeaks = len(blobs_log) + else: + labels, numPeaks = ndimage.label( + spot_data > np.percentile(spot_data, 99), + structure=labelStructure, + ) + slabels = np.arange(1, numPeaks + 1) + tth_edges = patch[0][0][0, :] + eta_edges = patch[0][1][:, 0] + delta_tth = tth_edges[1] - tth_edges[0] + delta_eta = eta_edges[1] - eta_edges[0] + if numPeaks > 0: + peakId = iRefl + if use_blob_detection: + coms = blobs_log[:, :2] + else: + coms = np.array( + ndimage.center_of_mass( + spot_data, labels=labels, index=slabels + ) + ) + if numPeaks > 1: + # + center = np.r_[spot_data.shape] * 0.5 + com_diff = coms - np.tile(center, (numPeaks, 1)) + closest_peak_idx = np.argmin( + np.sum(com_diff**2, axis=1) + ) + # + else: + closest_peak_idx = 0 + # + coms = coms[closest_peak_idx] + # + if fit_peaks: + sigm = 0.2 * np.min(spot_data.shape) + if use_blob_detection: + sigm = min(blobs_log[closest_peak_idx, 2], sigm) + y0, x0 = coms.flatten() + ampl = float(spot_data[int(y0), int(x0)]) + # y0, x0 = 0.5*np.array(spot_data.shape) + # ampl = np.max(spot_data) + a_par = c_par = 0.5 / float(sigm**2) + b_par = 0.0 + bgx = bgy = 0.0 + bkg = np.min(spot_data) + params = [ + ampl, + a_par, + b_par, + c_par, + x0, + y0, + bgx, + bgy, + bkg, + ] + # + result = leastsq(gaussian_2d, params, args=(spot_data)) + # + fit_par = result[0] + # + coms = np.array([fit_par[5], fit_par[4]]) + ''' + print("%s, %d, (%.2f, %.2f), (%d, %d)" + % (det_key, iRefl, coms[0], coms[1], + patch_size[0], patch_size[1])) + ''' + row_cen = fit_tth_tol * patch_size[0] + col_cen = fit_tth_tol * patch_size[1] + if np.any( + [ + coms[0] < row_cen, + coms[0] >= patch_size[0] - row_cen, 
+ coms[1] < col_cen, + coms[1] >= patch_size[1] - col_cen, + ] + ): + continue + if fit_par[0] < min_peak_int: + continue + + # intensities + spot_intensity, int_err = nquad( + gaussian_2d_int, + [[0.0, 2.0 * y0], [0.0, 2.0 * x0]], + args=fit_par, + ) + com_angs = np.hstack( + [ + tth_edges[0] + (0.5 + coms[1]) * delta_tth, + eta_edges[0] + (0.5 + coms[0]) * delta_eta, + ] + ) + + # grab intensities + if not fit_peaks: + if use_blob_detection: + spot_intensity = 10 + max_intensity = 10 + else: + spot_intensity = np.sum( + spot_data[labels == slabels[closest_peak_idx]] + ) + max_intensity = np.max( + spot_data[labels == slabels[closest_peak_idx]] + ) + else: + max_intensity = np.max(spot_data) + # need xy coords + # !!! forcing ome = 0. -- could be inconsistent with rmat_s + cmv = np.atleast_2d(np.hstack([com_angs, omega])) + gvec_c = xfcapi.angles_to_gvec( + cmv, + chi=self.instr.chi, + rmat_c=rmat_c, + beam_vec=self.instr.beam_vector, + ) + new_xy = xfcapi.gvec_to_xy( + gvec_c, + det.rmat, + rmat_s, + rmat_c, + det.tvec, + self.instr.tvec, + tvec_c, + beam_vec=self.instr.beam_vector, + ) + meas_xy[iRefl, :] = new_xy + if det.distortion is not None: + meas_xy[iRefl, :] = det.distortion.apply_inverse( + meas_xy[iRefl, :] + ) + meas_angs[iRefl, :] = com_angs + else: + peakId = -999 + # + spot_intensity = np.nan + max_intensity = np.nan + reflInfoList.append( + [ + peakId, + valid_hkls[:, iRefl], + (spot_intensity, max_intensity), + valid_energy[iRefl], + valid_angs[iRefl, :], + meas_angs[iRefl, :], + meas_xy[iRefl, :], + ] + ) + reflInfo = np.array( + [tuple(i) for i in reflInfoList], dtype=reflInfo_dtype + ) + refl_dict[det_key] = reflInfo + + # Convert to our data_dict format + data_dict = { + 'pick_xys': {}, + 'hkls': {}, + } + for det, det_picks in refl_dict.items(): + data_dict['pick_xys'].setdefault(det, []) + data_dict['hkls'].setdefault(det, []) + for entry in det_picks: + hkl = entry[1].astype(int).tolist() + cart = entry[6] + 
data_dict['hkls'][det].append(hkl) + data_dict['pick_xys'][det].append(cart) + + self.data_dict = data_dict + return data_dict + + def _evaluate(self): + data_dict = self.data_dict + + # grab reflection data from picks input + pick_hkls_dict = {} + pick_xys_dict = {} + for det_key in self.instr.detectors: + # find valid reflections and recast hkls to int + xys = np.asarray(data_dict['pick_xys'][det_key], dtype=float) + hkls = np.asarray(data_dict['hkls'][det_key], dtype=int) + + valid_idx = ~np.isnan(xys[:, 0]) + + # fill local dicts + pick_hkls_dict[det_key] = np.atleast_2d(hkls[valid_idx, :]).T + pick_xys_dict[det_key] = np.atleast_2d(xys[valid_idx, :]) + + return pick_hkls_dict, pick_xys_dict + + def residual(self): + with switch_xray_source(self.instr, self.xray_source): + return self._residual() + + def _residual(self): + # need this for laue obj + pick_hkls_dict, pick_xys_dict = self._evaluate() + + # munge energy cutoffs + energy_cutoffs = np.r_[0.5, 1.5] * np.asarray(self.energy_cutoffs) + + return sxcal_obj_func( + [self.grain_params], + self.instr, + pick_xys_dict, + pick_hkls_dict, + self.bmatx, + energy_cutoffs, + ) + + def model(self): + with switch_xray_source(self.instr, self.xray_source): + return self._model() + + def _model(self): + # need this for laue obj + pick_hkls_dict, pick_xys_dict = self._evaluate() + + return sxcal_obj_func( + [self.grain_params], + self.instr, + pick_xys_dict, + pick_hkls_dict, + self.bmatx, + self.energy_cutoffs, + sim_only=True, + ) + + +# Objective function for Laue fitting +def sxcal_obj_func( + grain_params, + instr, + meas_xy, + hkls_idx, + bmat, + energy_cutoffs, + sim_only=False, +): + """ + Objective function for Laue-based fitting. + + + energy_cutoffs are [minEnergy, maxEnergy] where min/maxEnergy can be lists + + """ + # right now just stuck on the end and assumed + # to all be the same length... 
FIX THIS + calc_xy = {} + calc_ang = {} + for det_key, panel in instr.detectors.items(): + # Simulate Laue pattern: + # returns xy_det, hkls_in, angles, dspacing, energy + sim_results = panel.simulate_laue_pattern( + [hkls_idx[det_key], bmat], + minEnergy=energy_cutoffs[0], + maxEnergy=energy_cutoffs[1], + grain_params=grain_params, + beam_vec=instr.beam_vector, + ) + + calc_xy_tmp = sim_results[0][0] + + idx = ~np.isnan(calc_xy_tmp[:, 0]) + calc_xy[det_key] = calc_xy_tmp[idx, :] + + if sim_only: + # Grab angles too. We dont use them otherwise. + # FIXME: might need tth correction if there is a distortion. + calc_angs_tmp = sim_results[2][0] + calc_ang[det_key] = calc_angs_tmp[idx, :] + + # return values + if sim_only: + return {k: [calc_xy[k], calc_ang[k]] for k in calc_xy} + + meas_xy_all = np.vstack(list(meas_xy.values())) + calc_xy_all = np.vstack(list(calc_xy.values())) + + diff_vecs_xy = calc_xy_all - meas_xy_all + return diff_vecs_xy.flatten() + + +def gaussian_2d(p, data): + shape = data.shape + x, y = np.meshgrid(range(shape[1]), range(shape[0])) + func = ( + p[0] + * np.exp( + -( + p[1] * (x - p[4]) * (x - p[4]) + + p[2] * (x - p[4]) * (y - p[5]) + + p[3] * (y - p[5]) * (y - p[5]) + ) + ) + + p[6] * (x - p[4]) + + p[7] * (y - p[5]) + + p[8] + ) + return func.flatten() - data.flatten() + + +def gaussian_2d_int(y, x, *p): + func = p[0] * np.exp( + -( + p[1] * (x - p[4]) * (x - p[4]) + + p[2] * (x - p[4]) * (y - p[5]) + + p[3] * (y - p[5]) * (y - p[5]) + ) + ) + return func.flatten() + + +reflInfo_dtype = [ + ('iRefl', int), + ('hkl', (int, 3)), + ('intensity', (float, 2)), + ('energy', float), + ('predAngles', (float, 2)), + ('measAngles', (float, 2)), + ('measXY', (float, 2)), +] diff --git a/hexrd/laue/instrument/__init__.py b/hexrd/laue/instrument/__init__.py new file mode 100644 index 000000000..024003040 --- /dev/null +++ b/hexrd/laue/instrument/__init__.py @@ -0,0 +1,13 @@ +from hexrd.core.instrument.hedm_instrument import ( + 
calc_angles_from_beam_vec, + calc_beam_vec, + centers_of_edge_vec, + GenerateEtaOmeMaps, + GrainDataWriter, + HEDMInstrument, + max_tth, + switch_xray_source, + unwrap_dict_to_h5, + unwrap_h5_to_dict, +) +from hexrd.core.instrument.detector import Detector diff --git a/hexrd/laue/xrdutil/utils.py b/hexrd/laue/xrdutil/utils.py new file mode 100644 index 000000000..f66e9ca2c --- /dev/null +++ b/hexrd/laue/xrdutil/utils.py @@ -0,0 +1,198 @@ +#! /usr/bin/env python3 +# ============================================================ +# Copyright (c) 2012, Lawrence Livermore National Security, LLC. +# Produced at the Lawrence Livermore National Laboratory. +# Written by Joel Bernier and others. +# LLNL-CODE-529294. +# All rights reserved. +# +# This file is part of HEXRD. For details on dowloading the source, +# see the file COPYING. +# +# Please also see the file LICENSE. +# +# This program is free software; you can redistribute it and/or modify it under +# the terms of the GNU Lesser General Public License (as published by the Free +# Software Foundation) version 2.1 dated February 1999. +# +# This program is distributed in the hope that it will be useful, but +# WITHOUT ANY WARRANTY; without even the IMPLIED WARRANTY OF MERCHANTABILITY +# or FITNESS FOR A PARTICULAR PURPOSE. See the terms and conditions of the +# GNU General Public License for more details. +# +# You should have received a copy of the GNU Lesser General Public +# License along with this program (see file LICENSE); if not, write to +# the Free Software Foundation, Inc., 59 Temple Place, Suite 330, +# Boston, MA 02111-1307 USA or visit . 
+# ============================================================ + + +import numpy as np + +from hexrd.core import constants +from hexrd.core import matrixutil as mutil + +from hexrd.core.material.crystallography import processWavelength + +from hexrd.core.transforms import xfcapi + +from hexrd.core.deprecation import deprecated + + +simlp = 'hexrd.hedm.instrument.hedm_instrument.HEDMInstrument.simulate_laue_pattern' + +# ============================================================================= +# PARAMETERS +# ============================================================================= + +distortion_key = 'distortion' + +d2r = piby180 = constants.d2r +r2d = constants.r2d + +epsf = constants.epsf # ~2.2e-16 +ten_epsf = 10 * epsf # ~2.2e-15 +sqrt_epsf = constants.sqrt_epsf # ~1.5e-8 + +bHat_l_DFLT = constants.beam_vec.flatten() +eHat_l_DFLT = constants.eta_vec.flatten() + +nans_1x2 = np.nan * np.ones((1, 2)) + +validateAngleRanges = xfcapi.validate_angle_ranges + +@deprecated(new_func=simlp, removal_date='2026-01-01') +def simulateLauePattern( + hkls, + bMat, + rmat_d, + tvec_d, + panel_dims, + panel_buffer=5, + minEnergy=8, + maxEnergy=24, + rmat_s=np.eye(3), + grain_params=None, + distortion=None, + beamVec=None, +): + + if beamVec is None: + beamVec = constants.beam_vec + + # parse energy ranges + multipleEnergyRanges = False + if hasattr(maxEnergy, '__len__'): + assert len(maxEnergy) == len( + minEnergy + ), 'energy cutoff ranges must have the same length' + multipleEnergyRanges = True + lmin = [processWavelength(e) for e in maxEnergy] + lmax = [processWavelength(e) for e in minEnergy] + else: + lmin = processWavelength(maxEnergy) + lmax = processWavelength(minEnergy) + + # process crystal rmats and inverse stretches + if grain_params is None: + grain_params = np.atleast_2d( + [0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 1.0, 1.0, 1.0, 0.0, 0.0, 0.0] + ) + + n_grains = len(grain_params) + + # dummy translation vector... 
make input + tvec_s = np.zeros((3, 1)) + + # number of hkls + nhkls_tot = hkls.shape[1] + + # unit G-vectors in crystal frame + ghat_c = mutil.unitVector(np.dot(bMat, hkls)) + + # pre-allocate output arrays + xy_det = np.nan * np.ones((n_grains, nhkls_tot, 2)) + hkls_in = np.nan * np.ones((n_grains, 3, nhkls_tot)) + angles = np.nan * np.ones((n_grains, nhkls_tot, 2)) + dspacing = np.nan * np.ones((n_grains, nhkls_tot)) + energy = np.nan * np.ones((n_grains, nhkls_tot)) + + """ + LOOP OVER GRAINS + """ + + for iG, gp in enumerate(grain_params): + rmat_c = xfcapi.make_rmat_of_expmap(gp[:3]) + tvec_c = gp[3:6].reshape(3, 1) + vInv_s = mutil.vecMVToSymm(gp[6:].reshape(6, 1)) + + # stretch them: V^(-1) * R * Gc + ghat_s_str = mutil.unitVector(np.dot(vInv_s, np.dot(rmat_c, ghat_c))) + ghat_c_str = np.dot(rmat_c.T, ghat_s_str) + + # project + dpts = xfcapi.gvec_to_xy( + ghat_c_str.T, + rmat_d, + rmat_s, + rmat_c, + tvec_d, + tvec_s, + tvec_c, + beam_vec=beamVec, + ).T + + # check intersections with detector plane + canIntersect = ~np.isnan(dpts[0, :]) + npts_in = sum(canIntersect) + + if np.any(canIntersect): + dpts = dpts[:, canIntersect].reshape(2, npts_in) + dhkl = hkls[:, canIntersect].reshape(3, npts_in) + + rmat_b = xfcapi.make_beam_rmat(beamVec, constants.eta_vec) + + # back to angles + tth_eta, gvec_l = xfcapi.xy_to_gvec( + dpts.T, rmat_d, rmat_s, tvec_d, tvec_s, tvec_c, rmat_b=rmat_b + ) + tth_eta = np.vstack(tth_eta).T + + # warp measured points + if distortion is not None: + dpts = distortion.apply_inverse(dpts) + + # plane spacings and energies + dsp = 1.0 / mutil.columnNorm(np.dot(bMat, dhkl)) + wlen = 2 * dsp * np.sin(0.5 * tth_eta[:, 0]) + + # find on spatial extent of detector + xTest = np.logical_and( + dpts[0, :] >= -0.5 * panel_dims[1] + panel_buffer, + dpts[0, :] <= 0.5 * panel_dims[1] - panel_buffer, + ) + yTest = np.logical_and( + dpts[1, :] >= -0.5 * panel_dims[0] + panel_buffer, + dpts[1, :] <= 0.5 * panel_dims[0] - panel_buffer, + ) + + onDetector 
= np.logical_and(xTest, yTest) + if multipleEnergyRanges: + validEnergy = np.zeros(len(wlen), dtype=bool) + for i in range(len(lmin)): + validEnergy = validEnergy | np.logical_and( + wlen >= lmin[i], wlen <= lmax[i] + ) + else: + validEnergy = np.logical_and(wlen >= lmin, wlen <= lmax) + + # index for valid reflections + keepers = np.where(np.logical_and(onDetector, validEnergy))[0] + + # assign output arrays + xy_det[iG][keepers, :] = dpts[:, keepers].T + hkls_in[iG][:, keepers] = dhkl[:, keepers] + angles[iG][keepers, :] = tth_eta[keepers, :] + dspacing[iG, keepers] = dsp[keepers] + energy[iG, keepers] = processWavelength(wlen[keepers]) + return xy_det, hkls_in, angles, dspacing, energy diff --git a/hexrd/module_map.py b/hexrd/module_map.py new file mode 100644 index 000000000..1a1b43b88 --- /dev/null +++ b/hexrd/module_map.py @@ -0,0 +1,197 @@ +# The following dynamically generates aliases for the remapped modules based +# on the file_map +from collections import defaultdict +import importlib +import importlib.abc +import importlib.machinery +from pathlib import Path +import sys +from typing import Union + + +def path_to_module(path: Path) -> str: + """ + Convert a path to a module name. + + e.g. 
+ * "package_remapper/remapper.py" -> "package_remapper.remapper" + * "package_remapper/__init__.py" -> "package_remapper" + """ + if path.suffix not in (".py", ""): + raise ValueError(f"Expected a .py file, got {path}") + + path = path.with_suffix("") + if path.parts[-1] == "__init__": + path = path.parent + return path.as_posix().replace("/", ".") + + +HEXRD_PACKAGE_PATH = Path(__file__).parent +file_map: dict[Path, list[Path]] = defaultdict(list) +with open(HEXRD_PACKAGE_PATH / "file_table.tsv", "r") as f: + for line in f: + if not line.strip(): + continue + kv = line.strip().split() + if len(kv) != 2: + continue + k, v = line.strip().split() + file_map[Path(k)].append(Path(v)) + +module_map: dict[str, tuple[str, Path]] = {} + +for old_path, new_paths in file_map.items(): + if old_path.suffix not in ("", ".py") or not "hexrd" in old_path.parts: + continue + old_module_path = path_to_module(old_path) + # Default to pick the core module if it exists. Otherwise pick the first one. + selected_path = new_paths[0] + for new_path in new_paths: + if 'core' in new_path.parts: + selected_path = new_path + break + module_map[old_module_path] = ( + path_to_module(selected_path), + HEXRD_PACKAGE_PATH.parent / selected_path, + ) + + +class ModuleAlias: + def __init__(self, current_path: list[str]): + self.current_path = current_path + + def __getattr__(self, name): + full_path = self.current_path + [name] + full_name = ".".join(full_path) + if full_name in module_map: + module, _fp = module_map[full_name] + if isinstance(module, ModuleAlias): + return module + else: + return importlib.import_module(module) + current_module = ".".join(self.current_path) + raise AttributeError( + f"Module `{current_module}` has no attribute {name}" + ) + + +flattened_module_map: dict[str, Union[ModuleAlias, str]] = {} + +for key, (mapped_module, _mapped_fp) in module_map.items(): + parts = mapped_module.split(".") + for i in range(len(parts) - 1): + module = ".".join(parts[: i + 1]) + if module 
 not in flattened_module_map: + flattened_module_map[module] = ModuleAlias(parts[:i]) + flattened_module_map[key] = mapped_module + +def get(alias: str) -> Union[ModuleAlias, str, None]: + """ + Returns the module or an alias to it if it exists. + """ + if alias in flattened_module_map: + return flattened_module_map[alias] + return None + + +class ModuleSpecWithParent(importlib.machinery.ModuleSpec): + def __init__( + self, name, loader, *, origin=None, parent=None, is_package=False + ): + super().__init__(name, loader, origin=origin, is_package=is_package) + self._parent = parent + + @property + def parent(self): + return self._parent + + +class ModuleAliasImporter(importlib.abc.MetaPathFinder, importlib.abc.Loader): + def find_spec(self, fullname, path, target=None): + if fullname in module_map: + mapped_module, mapped_fp = module_map[fullname] + # We only want to remap modules that go somewhere else. + # If we are already trying to import something that exists, let + # the other importers take care of it so we don't just loop forever. + if fullname == mapped_module: + return None + + return importlib.machinery.ModuleSpec( + fullname, + self, + origin=mapped_fp.as_posix(), + is_package=mapped_fp.name == "__init__.py", + ) + return None + + def load_module(self, fullname): + """ + This is a deprecated implementation path, but it is a lot easier to override it this way + than to override it with create and exec_module. 
+ """ + if fullname not in module_map: + raise ImportError(f"Module {fullname} not found in module_map") + + mapped_module, _mapped_fp = module_map[fullname] + base_mod = importlib.import_module(mapped_module) + + extra_candidates: list[str] = [] + for old_path, new_paths in file_map.items(): + if old_path.suffix not in ("", ".py") or not "hexrd" in old_path.parts: + continue + try: + old_mod = path_to_module(old_path) + except ValueError: + continue + + if old_mod == fullname or old_mod.startswith(fullname + "."): + for p in new_paths: + candidate = path_to_module(p) + if candidate != mapped_module: + extra_candidates.append(candidate) + + if extra_candidates: + seen = set() + deduped: list[str] = [] + for c in extra_candidates: + if c not in seen: + seen.add(c) + deduped.append(c) + for candidate in deduped: + try: + cand_mod = importlib.import_module(candidate) + except Exception: + continue + + if hasattr(base_mod, "__path__") and hasattr(cand_mod, "__path__"): + try: + for p in list(cand_mod.__path__): + if p not in base_mod.__path__: + base_mod.__path__.append(p) + except Exception: + pass + + base_all = getattr(base_mod, "__all__", None) + cand_all = getattr(cand_mod, "__all__", None) + if cand_all: + if base_all is None: + base_mod.__all__ = list(cand_all) + else: + for name in cand_all: + if name not in base_all: + base_all.append(name) + base_mod.__all__ = base_all + + for name, val in cand_mod.__dict__.items(): + if name in ("__name__", "__file__", "__package__", "__path__", "__loader__", "__spec__"): + continue + if name not in base_mod.__dict__: + base_mod.__dict__[name] = val + + sys.modules[fullname] = base_mod + return sys.modules[fullname] + + +# We need to redirect __all__ attempts to import hexrd things into our own +# handler. 
+sys.meta_path.insert(0, ModuleAliasImporter()) diff --git a/hexrd/powder/fitting/calibration/instrument.py b/hexrd/powder/fitting/calibration/instrument.py new file mode 100644 index 000000000..a9326ac20 --- /dev/null +++ b/hexrd/powder/fitting/calibration/instrument.py @@ -0,0 +1,228 @@ +import logging +from typing import Optional + +import lmfit +import numpy as np + +from ....core.fitting.calibration.lmfit_param_handling import ( + add_engineering_constraints, + create_instr_params, + DEFAULT_EULER_CONVENTION, + update_instrument_from_params, + validate_params_list, +) +from ....core.fitting.calibration.relative_constraints import ( + create_relative_constraints, + RelativeConstraints, + RelativeConstraintsType, +) + +logger = logging.getLogger() +logger.setLevel('INFO') + + +def _normalized_ssqr(resd): + return np.sum(resd * resd) / len(resd) + + +class InstrumentCalibrator: + def __init__( + self, + *args, + engineering_constraints=None, + euler_convention=DEFAULT_EULER_CONVENTION, + relative_constraints_type=RelativeConstraintsType.none, + ): + """ + Model for instrument calibration class as a function of + + Parameters + ---------- + *args : TYPE + DESCRIPTION. + + Returns + ------- + None. 
+ + Notes + ----- + Flags are set on calibrators + """ + assert len(args) > 0, "must have at least one calibrator" + self.calibrators = args + for calib in self.calibrators: + assert ( + calib.instr is self.instr + ), "all calibrators must refer to the same instrument" + self._engineering_constraints = engineering_constraints + self._relative_constraints = create_relative_constraints( + relative_constraints_type, self.instr + ) + self.euler_convention = euler_convention + + self.params = self.make_lmfit_params() + self.fitter = lmfit.Minimizer( + self.minimizer_function, self.params, nan_policy='omit' + ) + + def make_lmfit_params(self): + params = create_instr_params( + self.instr, + euler_convention=self.euler_convention, + relative_constraints=self.relative_constraints, + ) + + for calibrator in self.calibrators: + # We pass the params to the calibrator so it can ensure it + # creates unique parameter names. The calibrator will keep + # track of the names it chooses itself. + params += calibrator.create_lmfit_params(params) + + # Perform validation on the params before proceeding + validate_params_list(params) + + params_dict = lmfit.Parameters() + params_dict.add_many(*params) + + add_engineering_constraints(params_dict, self.engineering_constraints) + return params_dict + + def update_all_from_params(self, params): + # Update instrument and material from the lmfit parameters + update_instrument_from_params( + self.instr, + params, + self.euler_convention, + self.relative_constraints, + ) + + for calibrator in self.calibrators: + calibrator.update_from_lmfit_params(params) + + @property + def instr(self): + return self.calibrators[0].instr + + @property + def tth_distortion(self): + return self.calibrators[0].tth_distortion + + @tth_distortion.setter + def tth_distortion(self, v): + for calibrator in self.calibrators: + calibrator.tth_distortion = v + + def minimizer_function(self, params): + self.update_all_from_params(params) + return self.residual() + + def 
 residual(self): + return np.hstack([x.residual() for x in self.calibrators]) + + def minimize(self, method='least_squares', odict=None): + if odict is None: + odict = {} + + if method == 'least_squares': + # Set defaults to the odict, if they are missing + odict = { + "ftol": 1e-8, + "xtol": 1e-8, + "gtol": 1e-8, + "verbose": 2, + "max_nfev": 1000, + "x_scale": "jac", + "method": "trf", + "jac": "3-point", + **odict, + } + + result = self.fitter.least_squares(self.params, **odict) + else: + result = self.fitter.scalar_minimize( + method=method, params=self.params, max_nfev=50000, **odict + ) + + return result + + @property + def engineering_constraints(self): + return self._engineering_constraints + + @engineering_constraints.setter + def engineering_constraints(self, v): + if v == self._engineering_constraints: + return + + valid_settings = [ + None, + 'None', + 'TARDIS', + ] + if v not in valid_settings: + valid_str = ', '.join(map(str, valid_settings)) + msg = ( + f'Invalid engineering constraint "{v}". 
Valid constraints ' + f'are: "{valid_str}"' + ) + raise Exception(msg) + + self._engineering_constraints = v + self.params = self.make_lmfit_params() + + @property + def relative_constraints_type(self): + return self._relative_constraints.type + + @relative_constraints_type.setter + def relative_constraints_type(self, v: Optional[RelativeConstraintsType]): + v = v if v is not None else RelativeConstraintsType.none + + current = getattr(self, '_relative_constraints', None) + if current is None or current.type != v: + self.relative_constraints = create_relative_constraints( + v, self.instr + ) + + @property + def relative_constraints(self) -> RelativeConstraints: + return self._relative_constraints + + @relative_constraints.setter + def relative_constraints(self, v: RelativeConstraints): + self._relative_constraints = v + self.params = self.make_lmfit_params() + + def reset_lmfit_params(self): + self.params = self.make_lmfit_params() + + def reset_relative_constraint_params(self): + # Set them back to zero. 
+ self.relative_constraints.reset() + + def run_calibration(self, odict): + resd0 = self.residual() + nrm_ssr_0 = _normalized_ssqr(resd0) + + result = self.minimize(odict=odict) + + resd1 = self.residual() + + nrm_ssr_1 = _normalized_ssqr(resd1) + + delta_r = 1.0 - nrm_ssr_1 / nrm_ssr_0 + + if delta_r > 0: + logger.info('OPTIMIZATION SUCCESSFUL') + else: + logger.warning('no improvement in residual') + + logger.info('normalized initial ssr: %.4e' % nrm_ssr_0) + logger.info('normalized final ssr: %.4e' % nrm_ssr_1) + logger.info('change in resdiual: %.4e' % delta_r) + + self.params = result.params + self.update_all_from_params(self.params) + + return result diff --git a/hexrd/powder/fitting/calibration/powder.py b/hexrd/powder/fitting/calibration/powder.py new file mode 100644 index 000000000..ed061be17 --- /dev/null +++ b/hexrd/powder/fitting/calibration/powder.py @@ -0,0 +1,391 @@ +import copy +from typing import Optional + +import numpy as np + +from hexrd.core import matrixutil as mutil +from hexrd.core.instrument import calc_angles_from_beam_vec, switch_xray_source +from hexrd.core.utils.hkl import hkl_to_str, str_to_hkl + +from ....core.fitting.calibration.calibrator import Calibrator +from ....core.fitting.calibration.lmfit_param_handling import ( + create_material_params, + update_material_from_params, +) + +nfields_powder_data = 8 + + +class PowderCalibrator(Calibrator): + type = 'powder' + + def __init__( + self, + instr, + material, + img_dict, + default_refinements=None, + tth_tol=None, + eta_tol=0.25, + fwhm_estimate=None, + min_pk_sep=1e-3, + min_ampl=0.0, + pktype='pvoigt', + bgtype='linear', + tth_distortion=None, + calibration_picks=None, + xray_source: Optional[str] = None, + ): + assert list(instr.detectors.keys()) == list( + img_dict.keys() + ), "instrument and image dict must have the same keys" + + self.instr = instr + self.material = material + self.img_dict = img_dict + self.default_refinements = default_refinements + self.xray_source = 
xray_source + + # for polar interpolation + if tth_tol is not None: + # This modifies the width on the plane data. Default to whatever + # is on the plane data, so only set it if it is not None. + self.tth_tol = tth_tol + + self.eta_tol = eta_tol + self.fwhm_estimate = fwhm_estimate + self.min_pk_sep = min_pk_sep + self.min_ampl = min_ampl + self.pktype = pktype + self.bgtype = bgtype + + self._tth_distortion = tth_distortion + self._update_tth_distortion_panels() + + self.plane_data.wavelength = instr.xrs_beam_energy(xray_source) + + self.param_names = [] + + self.data_dict = None + if calibration_picks is not None: + # container for calibration data + self.calibration_picks = calibration_picks + + @property + def tth_distortion(self): + return self._tth_distortion + + @tth_distortion.setter + def tth_distortion(self, v): + self._tth_distortion = v + self._update_tth_distortion_panels() + + def _update_tth_distortion_panels(self): + # Make sure the panels in the tth distortion are the same + # as those on the instrument, so their beam vectors get modified + # accordingly. + if self._tth_distortion is None: + return + + self._tth_distortion = copy.deepcopy(self._tth_distortion) + for det_key, obj in self._tth_distortion.items(): + obj.panel = self.instr.detectors[det_key] + + def create_lmfit_params(self, current_params): + # There shouldn't be more than one calibrator for a given material, so + # just assume we have a unique name... + params = create_material_params( + self.material, self.default_refinements + ) + + # If multiple powder calibrators were used for the same material (such + # as in 2XRS), then don't add params again. 
+ param_names = [x[0] for x in current_params] + params = [x for x in params if x[0] not in param_names] + + self.param_names = [x[0] for x in params] + return params + + def update_from_lmfit_params(self, params_dict): + if self.param_names: + update_material_from_params(params_dict, self.material) + + @property + def plane_data(self): + return self.material.planeData + + @property + def tth_tol(self): + tth_tol = self.plane_data.tThWidth + return np.degrees(tth_tol) if tth_tol is not None else tth_tol + + @tth_tol.setter + def tth_tol(self, x): + assert np.isscalar(x), "tth_tol must be a scalar value" + self.plane_data.tThWidth = np.radians(x) + + @property + def spectrum_kwargs(self): + return dict( + pktype=self.pktype, + bgtype=self.bgtype, + fwhm_init=self.fwhm_estimate, + min_ampl=self.min_ampl, + min_pk_sep=self.min_pk_sep, + ) + + @property + def calibration_picks(self): + # Convert this from our internal data dict format + picks = {} + for det_key, data in self.data_dict.items(): + picks[det_key] = {} + for ringset in data: + for row in ringset: + # Rows 3, 4, and 5 are the hkl + hkl_str = hkl_to_str(row[3:6].astype(int)) + picks[det_key].setdefault(hkl_str, []) + # Rows 0 and 1 are the xy coordinates + picks[det_key][hkl_str].append(row[:2].tolist()) + + return picks + + @calibration_picks.setter + def calibration_picks(self, v): + # Convert this to our internal data dict format + data_dict = {} + for det_key, hkl_picks in v.items(): + data_dict[det_key] = [] + for hkl_str, picks in hkl_picks.items(): + if len(picks) == 0: + # Just skip over it + continue + + data = np.zeros((len(picks), 8), dtype=np.float64) + # Rows 0 and 1 are the xy coordinates + data[:, :2] = np.asarray(picks) + # Rows 3, 4, and 5 are the hkl + data[:, 3:6] = str_to_hkl(hkl_str) + data_dict[det_key].append(data) + + self.data_dict = data_dict + + def autopick_points(self, fit_tth_tol=5.0, int_cutoff=1e-4): + """ + return the RHS for the instrument DOF and image dict + + 
The format is a dict over detectors, each containing + + [index over ring sets] + [index over azimuthal patch] + [xy_meas, tth_meas, hkl, dsp_ref, eta_ref] + + FIXME: can not yet handle tth ranges with multiple peaks! + """ + # If needed, change the x-ray source before proceeding. + # This does nothing for single x-ray sources. + with switch_xray_source(self.instr, self.xray_source): + return self._autopick_points(fit_tth_tol, int_cutoff) + + def _autopick_points(self, fit_tth_tol=5.0, int_cutoff=1e-4): + # ideal tth + dsp_ideal = np.atleast_1d(self.plane_data.getPlaneSpacings()) + hkls_ref = self.plane_data.hkls.T + dsp0 = [] + hkls = [] + for idx in self.plane_data.getMergedRanges()[0]: + if len(idx) > 1: + eqv, uidx = mutil.findDuplicateVectors( + np.atleast_2d(dsp_ideal[idx]) + ) + if len(uidx) < len(idx): + # if here, at least one peak is degenerate + uidx = np.asarray(idx)[uidx] + else: + uidx = np.asarray(idx) + else: + uidx = np.asarray(idx) + dsp0.append(dsp_ideal[uidx]) + hkls.append(hkls_ref[uidx]) + + # Perform interpolation and fitting + fitting_kwargs = { + 'int_cutoff': int_cutoff, + 'fit_tth_tol': fit_tth_tol, + 'spectrum_kwargs': self.spectrum_kwargs, + } + kwargs = { + 'plane_data': self.plane_data, + 'imgser_dict': self.img_dict, + 'tth_tol': self.tth_tol, + 'eta_tol': self.eta_tol, + 'npdiv': 2, + 'collapse_eta': True, + 'collapse_tth': False, + 'do_interpolation': True, + 'do_fitting': True, + 'fitting_kwargs': fitting_kwargs, + 'tth_distortion': self.tth_distortion, + } + powder_lines = self.instr.extract_line_positions(**kwargs) + + # Now loop over the ringsets and convert to the calibration format + rhs = {} + for det_key, panel in self.instr.detectors.items(): + rhs[det_key] = [] + for i_ring, ringset in enumerate(powder_lines[det_key]): + this_dsp0 = dsp0[i_ring] + this_hkl = hkls[i_ring] + npeaks = len(this_dsp0) + + ret = [] + for angs, intensities, tth_meas in ringset: + if len(intensities) == 0: + continue + + # We only run this on one 
image. Grab that one. + tth_meas = tth_meas[0] + if tth_meas is None: + continue + + # Convert to radians + tth_meas = np.radians(tth_meas) + + # reference eta + eta_ref_tile = np.tile(angs[1], npeaks) + + # push back through mapping to cartesian (x, y) + xy_meas = panel.angles_to_cart( + np.vstack([tth_meas, eta_ref_tile]).T, + tvec_s=self.instr.tvec, + apply_distortion=True, + ) + + # cat results + output = np.hstack( + [ + xy_meas, + tth_meas.reshape(npeaks, 1), + this_hkl, + this_dsp0.reshape(npeaks, 1), + eta_ref_tile.reshape(npeaks, 1), + ] + ) + ret.append(output) + + if not ret: + ret.append(np.empty((0, nfields_powder_data))) + + rhs[det_key].append(np.vstack(ret)) + + self.data_dict = rhs + return rhs + + def _evaluate(self, output='residual'): + """ + Evaluate the powder diffraction model. + + Parameters + ---------- + output : TYPE, optional + DESCRIPTION. The default is 'residual'. + + Raises + ------ + RuntimeError + DESCRIPTION. + + Returns + ------- + TYPE + DESCRIPTION. + + """ + # In case the beam energy was modified, ensure it is updated + # on the plane data as well. + self.plane_data.wavelength = self.instr.beam_energy + + # need this for dsp + bmat = self.plane_data.latVecOps['B'] + wlen = self.instr.beam_wavelength + + # build residual + retval = np.array([], dtype=float) + for det_key, panel in self.instr.detectors.items(): + if len(self.data_dict[det_key]) == 0: + continue + else: + # recast as array + pdata = np.vstack(self.data_dict[det_key]) + + """ + Here is the strategy: + 1. remap the feature points from raw cartesian to + (tth, eta) under the current mapping + 2. use the lattice and hkls to calculate the ideal tth0 + 3. push the (tth0, eta) values back through the mapping to + raw cartesian coordinates + 4. 
build residual on the measured and recalculated (x, y) + """ + # push measured (x, y) ring points through current mapping + # to (tth, eta) + meas_xy = pdata[:, :2] + updated_angles, _ = panel.cart_to_angles( + meas_xy, tvec_s=self.instr.tvec, apply_distortion=True + ) + + # derive ideal tth positions from additional ring point info + hkls = pdata[:, 3:6] + gvecs = np.dot(hkls, bmat.T) + dsp0 = 1.0 / np.sqrt(np.sum(gvecs * gvecs, axis=1)) + + # updated reference Bragg angles + tth0 = 2.0 * np.arcsin(0.5 * wlen / dsp0) + + # !!! get eta from mapped markers rather than ref + # eta0 = pdata[:, -1] + eta0 = updated_angles[:, 1] + + # apply tth distortion + if self.tth_distortion is not None: + # !!! sd has ref to detector so is updated + sd = self.tth_distortion[det_key] + tmp = sd.apply(meas_xy, return_nominal=False) + corr_angs = tmp + np.vstack([tth0, np.zeros_like(tth0)]).T + tth0, eta0 = corr_angs.T + + # map updated (tth0, eta0) back to cartesian coordinates + tth_eta = np.vstack([tth0, eta0]).T + + # output + if output == 'residual': + # retval = np.append( + # retval, + # meas_xy.flatten() - calc_xy.flatten() + # ) + retval = np.append( + retval, updated_angles[:, 0].flatten() - tth0.flatten() + ) + elif output == 'model': + calc_xy = panel.angles_to_cart( + tth_eta, tvec_s=self.instr.tvec, apply_distortion=True + ) + retval = np.append(retval, calc_xy.flatten()) + else: + raise RuntimeError( + "unrecognized output flag '%s'" % output + ) + + return retval + + def residual(self): + # If needed, change the x-ray source before proceeding. + # This does nothing for single x-ray sources. + with switch_xray_source(self.instr, self.xray_source): + return self._evaluate(output='residual') + + def model(self): + # If needed, change the x-ray source before proceeding. + # This does nothing for single x-ray sources. 
+ with switch_xray_source(self.instr, self.xray_source): + return self._evaluate(output='model') diff --git a/hexrd/powder/fitting/calibration/structureless.py b/hexrd/powder/fitting/calibration/structureless.py new file mode 100644 index 000000000..a64ae70a9 --- /dev/null +++ b/hexrd/powder/fitting/calibration/structureless.py @@ -0,0 +1,295 @@ +import copy +from typing import Optional + +import lmfit +import numpy as np + +from hexrd.core.instrument import switch_xray_source + +from ....core.fitting.calibration.lmfit_param_handling import ( + add_engineering_constraints, + create_instr_params, + create_tth_parameters, + DEFAULT_EULER_CONVENTION, + tth_parameter_prefixes, + update_instrument_from_params, +) +from ....core.fitting.calibration.relative_constraints import ( + create_relative_constraints, + RelativeConstraints, + RelativeConstraintsType, +) + + +class StructurelessCalibrator: + """ + this class implements the equivalent of the + powder calibrator but without constraining + the optimization to a structure. in this + implementation, the location of the constant + two theta line that a set of points lie on + is also an optimization parameter. 
+ + unlike the previous implementations, this routine + is based on the lmfit module to implement the + more complicated constraints for the TARDIS box + + if TARDIS_constraints are set to True, then the following + additional linear constraint is added to the calibration + + 22.83 mm <= |IMAGE-PLATE-2 tvec[1]| + |IMAGE-PLATE-2 tvec[1]| <= 23.43 mm + + """ + + def __init__( + self, + instr, + data, + tth_distortion=None, + engineering_constraints=None, + relative_constraints_type=RelativeConstraintsType.none, + euler_convention=DEFAULT_EULER_CONVENTION, + ): + + self._instr = instr + self._data = data + self._tth_distortion = tth_distortion + self._engineering_constraints = engineering_constraints + self._relative_constraints = create_relative_constraints( + relative_constraints_type, self.instr + ) + self.euler_convention = euler_convention + self._update_tth_distortion_panels() + self.make_lmfit_params() + self.set_minimizer() + + def make_lmfit_params(self): + params = [] + params += create_instr_params( + self.instr, + self.euler_convention, + self.relative_constraints, + ) + params += create_tth_parameters(self.instr, self.meas_angles) + + params_dict = lmfit.Parameters() + params_dict.add_many(*params) + + add_engineering_constraints(params_dict, self.engineering_constraints) + self.params = params_dict + return params_dict + + def calc_residual(self, params): + update_instrument_from_params( + self.instr, + params, + self.euler_convention, + self.relative_constraints, + ) + + # Store these in variables so they are only computed once. 
+ meas_angles = self.meas_angles + tth_correction = self.tth_correction + + residual = [] + prefixes = tth_parameter_prefixes(self.instr) + for xray_source in self.data: + prefix = prefixes[xray_source] + for ii, (rng, corr_rng) in enumerate( + zip(meas_angles[xray_source], tth_correction[xray_source]) + ): + for det_name, panel in self.instr.detectors.items(): + if rng[det_name] is None or rng[det_name].size == 0: + continue + + tth_rng = params[f'{prefix}{ii}'].value + tth_updated = np.degrees(rng[det_name][:, 0]) + delta_tth = tth_updated - tth_rng + if corr_rng[det_name] is not None: + delta_tth -= np.degrees(corr_rng[det_name]) + residual.append(delta_tth) + + return np.hstack(residual) + + def set_minimizer(self): + self.fitter = lmfit.Minimizer( + self.calc_residual, self.params, nan_policy='omit' + ) + + def run_calibration(self, method='least_squares', odict=None): + """ + odict is the options dictionary + """ + if odict is None: + odict = {} + + if method == 'least_squares': + fdict = { + "ftol": 1e-8, + "xtol": 1e-8, + "gtol": 1e-8, + "verbose": 2, + "max_nfev": 1000, + "x_scale": "jac", + "method": "trf", + "jac": "3-point", + } + fdict.update(odict) + + self.res = self.fitter.least_squares(self.params, **fdict) + else: + fdict = odict + self.res = self.fitter.scalar_minimize( + method=method, params=self.params, max_nfev=50000, **fdict + ) + + self.params = self.res.params + # res = self.fitter.least_squares(**fdict) + return self.res + + @property + def tth_distortion(self): + return self._tth_distortion + + @tth_distortion.setter + def tth_distortion(self, v): + self._tth_distortion = v + self._update_tth_distortion_panels() + # No need to update lmfit parameters + + def _update_tth_distortion_panels(self): + # Make sure the panels in the tth distortion are the same + # as those on the instrument, so their beam vectors get modified + # accordingly. 
 if self._tth_distortion is None: + return + + self._tth_distortion = copy.deepcopy(self._tth_distortion) + for det_key, obj in self.tth_distortion.items(): + obj.panel = self.instr.detectors[det_key] + + @property + def relative_constraints_type(self): + return self._relative_constraints.type + + @relative_constraints_type.setter + def relative_constraints_type(self, v: Optional[RelativeConstraintsType]): + v = v if v is not None else RelativeConstraintsType.none + + current = getattr(self, '_relative_constraints', None) + if current is None or current.type != v: + self.relative_constraints = create_relative_constraints( + v, self.instr + ) + + @property + def relative_constraints(self) -> RelativeConstraints: + return self._relative_constraints + + @relative_constraints.setter + def relative_constraints(self, v: RelativeConstraints): + self._relative_constraints = v + self.params = self.make_lmfit_params() + + @property + def engineering_constraints(self): + return self._engineering_constraints + + @engineering_constraints.setter + def engineering_constraints(self, v): + if v == self._engineering_constraints: + return + + valid_settings = [ + None, + 'None', + 'TARDIS', + ] + if v not in valid_settings: + valid_str = ', '.join(map(str, valid_settings)) + msg = ( + f'Invalid engineering constraint "{v}". 
Valid constraints ' + f'are: "{valid_str}"' + ) + raise Exception(msg) + + self._engineering_constraints = v + self.make_lmfit_params() + + @property + def instr(self): + return self._instr + + @instr.setter + def instr(self, ins): + self._instr = ins + self.make_lmfit_params() + self._update_tth_distortion_panels() + + @property + def data(self): + return self._data + + @data.setter + def data(self, dat): + self._data = dat + self.make_lmfit_params() + + @property + def residual(self): + return self.calc_residual(self.params) + + @property + def meas_angles(self) -> dict: + """ + this property will return a dictionary + of angles based on current instrument + parameters. + """ + angles_dict = {} + for xray_source, rings in self.data.items(): + with switch_xray_source(self.instr, xray_source): + ang_list = [] + for rng in rings: + ang_dict = dict.fromkeys(self.instr.detectors) + for det_name, meas_xy in rng.items(): + + panel = self.instr.detectors[det_name] + angles, _ = panel.cart_to_angles( + meas_xy, + tvec_s=self.instr.tvec, + apply_distortion=True, + ) + ang_dict[det_name] = angles + ang_list.append(ang_dict) + + angles_dict[xray_source] = ang_list + + return angles_dict + + @property + def tth_correction(self) -> dict: + ret = {} + for xray_source, rings in self.data.items(): + with switch_xray_source(self.instr, xray_source): + corr_list = [] + for rng in rings: + corr_dict = dict.fromkeys(self.instr.detectors) + if self.tth_distortion is not None: + for det_name, meas_xy in rng.items(): + # !!! 
sd has ref to detector so is updated + sd = self.tth_distortion[det_name] + tth_corr = sd.apply( + meas_xy, + return_nominal=False, + )[:, 0] + corr_dict[det_name] = tth_corr + corr_list.append(corr_dict) + + ret[xray_source] = corr_list + + return ret + + @property + def two_XRS(self): + return self.instr.has_multi_beam diff --git a/hexrd/powder/instrument/__init__.py b/hexrd/powder/instrument/__init__.py new file mode 100644 index 000000000..396a0d078 --- /dev/null +++ b/hexrd/powder/instrument/__init__.py @@ -0,0 +1,13 @@ +from .hedm_instrument import ( + calc_angles_from_beam_vec, + calc_beam_vec, + centers_of_edge_vec, + GenerateEtaOmeMaps, + GrainDataWriter, + HEDMInstrument, + max_tth, + switch_xray_source, + unwrap_dict_to_h5, + unwrap_h5_to_dict, +) +from hexrd.core.instrument.detector import Detector diff --git a/hexrd/wppf/LeBailCalibration.py b/hexrd/powder/wppf/LeBailCalibration.py similarity index 65% rename from hexrd/wppf/LeBailCalibration.py rename to hexrd/powder/wppf/LeBailCalibration.py index 428a71122..1a1be7e5b 100644 --- a/hexrd/wppf/LeBailCalibration.py +++ b/hexrd/powder/wppf/LeBailCalibration.py @@ -3,27 +3,28 @@ from numpy.polynomial.chebyshev import Chebyshev import lmfit import warnings -from hexrd.wppf.peakfunctions import \ +from hexrd.powder.wppf.peakfunctions import \ calc_rwp, computespectrum_pvfcj, \ computespectrum_pvtch,\ computespectrum_pvpink,\ calc_Iobs_pvfcj,\ calc_Iobs_pvtch,\ calc_Iobs_pvpink -from hexrd.wppf.spectrum import Spectrum -from hexrd.wppf import wppfsupport, LeBail -from hexrd.wppf.phase import Phases_LeBail, Material_LeBail -from hexrd.imageutil import snip1d, snip1d_quad -from hexrd.material import Material -from hexrd.valunits import valWUnit -from hexrd.constants import keVToAngstrom - -from hexrd import instrument -from hexrd import imageseries -from hexrd.imageseries import omega -from hexrd.projections.polar import PolarView +from hexrd.powder.wppf.spectrum import Spectrum +from hexrd.powder.wppf import 
wppfsupport, LeBail +from hexrd.powder.wppf.phase import Phases_LeBail, Material_LeBail +from hexrd.core.imageutil import snip1d, snip1d_quad +from hexrd.core.material import Material +from hexrd.core.valunits import valWUnit +from hexrd.core.constants import keVToAngstrom + +from hexrd.core import instrument +from hexrd.core import imageseries +from hexrd.core.imageseries import omega +from hexrd.core.projections.polar import PolarView import time + class LeBailCalibrator: """ ====================================================================== @@ -47,20 +48,23 @@ class LeBailCalibrator: ====================================================================== ====================================================================== """ - def __init__(self, - instrument, - img_dict, - extent=(0.,90.,0.,360.), - pixel_size=(0.1, 1.0), - params=None, - phases=None, - azimuthal_step=5.0, - bkgmethod={'chebyshev': 3}, - peakshape="pvtch", - intensity_init=None, - apply_solid_angle_correction=False, - apply_lp_correction=False, - polarization=None): + + def __init__( + self, + instrument, + img_dict, + extent=(0.0, 90.0, 0.0, 360.0), + pixel_size=(0.1, 1.0), + params=None, + phases=None, + azimuthal_step=5.0, + bkgmethod={'chebyshev': 3}, + peakshape="pvtch", + intensity_init=None, + apply_solid_angle_correction=False, + apply_lp_correction=False, + polarization=None, + ): self.bkgmethod = bkgmethod self.peakshape = peakshape @@ -115,8 +119,7 @@ def calctth(self): dsp = self.phases[p].dsp[allowed] tth_min = self.tth_min tth_max = self.tth_max - limit = np.logical_and(t >= tth_min, - t <= tth_max) + limit = np.logical_and(t >= tth_min, t <= tth_max) self.tth[p][k] = t[limit] self.hkls[p][k] = hkl[limit, :] self.dsp[p][k] = dsp[limit] @@ -134,8 +137,8 @@ def initialize_Icalc(self): Icalc = {} g = {} prefix = f"azpos{ii}" - lo = self.lineouts[prefix].data[:,1] - if(self.intensity_init is None): + lo = self.lineouts[prefix].data[:, 1] + if self.intensity_init is None: if 
np.nanmax(lo) > 0: n10 = np.floor(np.log10(np.nanmax(lo))) - 2 else: @@ -144,7 +147,7 @@ def initialize_Icalc(self): for p in self.phases: Icalc[p] = {} for k, l in self.phases.wavelength.items(): - Icalc[p][k] = (10**n10)*np.ones(self.tth[p][k].shape) + Icalc[p][k] = (10**n10) * np.ones(self.tth[p][k].shape) self.Icalc[prefix] = Icalc @@ -152,41 +155,39 @@ def initialize_Icalc(self): self.refine_instrument = False def prepare_polarview(self): - self.masked = self.pv.warp_image(self.img_dict, \ - pad_with_nans=True, \ - do_interpolation=True) + self.masked = self.pv.warp_image( + self.img_dict, pad_with_nans=True, do_interpolation=True + ) lo = self.masked.sum(axis=0) / np.sum(~self.masked.mask, axis=0) - self.fulllineout = np.vstack((self.tth_list,lo)).T + self.fulllineout = np.vstack((self.tth_list, lo)).T self.prepare_lineouts() def prepare_lineouts(self): self.lineouts = {} if hasattr(self, 'masked'): - azch = self.azimuthal_chunks - tth = self.tth_list - for ii in range(azch.shape[0]-1): + azch = self.azimuthal_chunks + tth = self.tth_list + for ii in range(azch.shape[0] - 1): istr = azch[ii] - istp = azch[ii+1] - lo = self.masked[istr:istp,:].sum(axis=0) / \ - np.sum(~self.masked[istr:istp,:].mask, axis=0) - data = np.ma.vstack((tth,lo)).T + istp = azch[ii + 1] + lo = self.masked[istr:istp, :].sum(axis=0) / np.sum( + ~self.masked[istr:istp, :].mask, axis=0 + ) + data = np.ma.vstack((tth, lo)).T key = f"azpos{ii}" self.lineouts[key] = data - - def computespectrum(self, - instr_updated, - lp_updated): + def computespectrum(self, instr_updated, lp_updated): """ this function calls the computespectrum function in the lebaillight class for all the azimuthal positions and accumulates the error vector from each of those lineouts. 
this is more or less a book keeping function rather """ - errvec = np.empty([0,]) + errvec = np.empty([0]) rwp = [] - for k,v in self.lineouts_sim.items(): + for k, v in self.lineouts_sim.items(): v.params = self.params if instr_updated: v.lineout = self.lineouts[k] @@ -199,17 +200,16 @@ def computespectrum(self, v.computespectrum() ww = v.weights - evec = ww*(v.spectrum_expt._y - - v.spectrum_sim._y)**2 + evec = ww * (v.spectrum_expt._y - v.spectrum_sim._y) ** 2 evec = np.sqrt(evec) evec = np.nan_to_num(evec) - errvec = np.concatenate((errvec,evec)) + errvec = np.concatenate((errvec, evec)) - weighted_expt = np.nan_to_num(ww*v.spectrum_expt._y**2) + weighted_expt = np.nan_to_num(ww * v.spectrum_expt._y**2) wss = np.trapz(evec, v.tth_list) den = np.trapz(weighted_expt, v.tth_list) - r = np.sqrt(wss/den)*100. + r = np.sqrt(wss / den) * 100.0 if ~np.isnan(r): rwp.append(r) @@ -228,15 +228,16 @@ def calcrwp(self, params): lp_updated = self.update_param_vals(params) self.update_shkl(params) instr_updated = self.update_instrument(params) - errvec, rwp = self.computespectrum(instr_updated, - lp_updated) + errvec, rwp = self.computespectrum(instr_updated, lp_updated) self.Rwp = np.mean(rwp) self.nfev += 1 self.Rwplist = np.append(self.Rwplist, self.Rwp) if np.mod(self.nfev, 10) == 0: - msg = (f"refinement ongoing... \n weighted residual at " - f"iteration # {self.nfev} = {self.Rwp}\n") + msg = ( + f"refinement ongoing... \n weighted residual at " + f"iteration # {self.nfev} = {self.Rwp}\n" + ) print(msg) return errvec @@ -248,8 +249,10 @@ def initialize_lmfit_parameters(self): for p in self.params: par = self.params[p] - if(par.vary): - params.add(p, value=par.value, min=par.min, max=par.max, vary=True) + if par.vary: + params.add( + p, value=par.value, min=par.min, max=par.max, vary=True + ) return params def Refine(self): @@ -271,16 +274,19 @@ def Refine(self): self.res = res if self.res.success: - msg = (f"\n \n optimization successful: {self.res.message}. 
\n" - f"weighted residual error = {self.Rwp}") + msg = ( + f"\n \n optimization successful: {self.res.message}. \n" + f"weighted residual error = {self.Rwp}" + ) else: - msg = (f"\n \n optimization unsuccessful: {self.res.message}. \n" - f"weighted residual error = {self.Rwp}") + msg = ( + f"\n \n optimization unsuccessful: {self.res.message}. \n" + f"weighted residual error = {self.Rwp}" + ) print(msg) - def update_param_vals(self, - params): + def update_param_vals(self, params): """ @date 03/12/2021 SS 1.0 original take values in parameters and set the @@ -313,7 +319,7 @@ def update_param_vals(self, elif nn in self.params: lp.append(self.params[nn].value) - if(not lpvary): + if not lpvary: pass else: lp = self.phases[p].Required_lp(lp) @@ -339,18 +345,17 @@ def update_shkl(self, params): eq_const = self.phases[p].eq_constraints mname = self.phases[p].name key = [f"{mname}_{s}" for s in shkl_name] - for s,k in zip(shkl_name,key): + for s, k in zip(shkl_name, key): if k in params: shkl_dict[s] = params[k].value else: shkl_dict[s] = self.params[k].value - self.phases[p].shkl = wppfsupport._fill_shkl(\ - shkl_dict, eq_const) + self.phases[p].shkl = wppfsupport._fill_shkl(shkl_dict, eq_const) def update_instrument(self, params): instr_updated = False - for key,det in self._instrument.detectors.items(): + for key, det in self._instrument.detectors.items(): for ii in range(3): pname = f"{key}_tvec{ii}" if pname in params: @@ -372,7 +377,6 @@ def bkgdegree(self): if "chebyshev" in self.bkgmethod.keys(): return self.bkgmethod["chebyshev"] - @property def instrument(self): return self._instrument @@ -381,11 +385,13 @@ def instrument(self): def instrument(self, ins): if isinstance(ins, instrument.HEDMInstrument): self._instrument = ins - self.pv = PolarView(self.extent[0:2], - ins, - eta_min=self.extent[2], - eta_max=self.extent[3], - pixel_size=self.pixel_size) + self.pv = PolarView( + self.extent[0:2], + ins, + eta_min=self.extent[2], + eta_max=self.extent[3], + 
pixel_size=self.pixel_size, + ) self.prepare_polarview() else: @@ -418,17 +424,20 @@ def extent(self, ext): if hasattr(self, "instrument"): if hasattr(self, "pixel_size"): - self.pv = PolarView(ext[0:2], - self.instrument, - eta_min=ext[2], - eta_max=ext[3], - pixel_size=self.pixel_size) + self.pv = PolarView( + ext[0:2], + self.instrument, + eta_min=ext[2], + eta_max=ext[3], + pixel_size=self.pixel_size, + ) self.prepare_polarview() """ this property returns a azimuthal range over which the summation is performed to get the lineouts """ + @property def azimuthal_chunks(self): extent = self.extent @@ -436,21 +445,20 @@ def azimuthal_chunks(self): azlim = extent[2:] pxsz = self.pixel_size[1] shp = self.masked.shape[0] - npix = int(np.round(step/pxsz)) - return np.r_[np.arange(0,shp,npix),shp] + npix = int(np.round(step / pxsz)) + return np.r_[np.arange(0, shp, npix), shp] @property def tth_list(self): - return np.squeeze(np.degrees(self.pv.angular_grid[1][0,:])) + return np.squeeze(np.degrees(self.pv.angular_grid[1][0, :])) @property def wavelength(self): lam = keVToAngstrom(self.instrument.beam_energy) - return {"lam1": - [valWUnit('lp', 'length', lam, 'angstrom'),1.0]} + return {"lam1": [valWUnit('lp', 'length', lam, 'angstrom'), 1.0]} def striphkl(self, g): - return str(g)[1:-1].replace(" ","") + return str(g)[1:-1].replace(" ", "") @property def refine_background(self): @@ -463,7 +471,10 @@ def refine_background(self, val): self._refine_background = val prefix = "azpos" for ii in range(len(self.lineouts)): - pname = [f"{prefix}{ii}_bkg_C{jj}" for jj in range(self.bkgdegree)] + pname = [ + f"{prefix}{ii}_bkg_C{jj}" + for jj in range(self.bkgdegree) + ] for p in pname: self.params[p].vary = val else: @@ -484,7 +495,7 @@ def refine_instrument(self, val): for key in self.instrument.detectors: pnametvec = [f"{key}_tvec{i}" for i in range(3)] pnametilt = [f"{key}_tilt{i}" for i in range(3)] - for ptv,pti in zip(pnametvec,pnametilt): + for ptv, pti in zip(pnametvec, 
pnametilt): self.params[ptv].vary = val self.params[pti].vary = val else: @@ -533,11 +544,13 @@ def pixel_size(self, px_sz): if hasattr(self, "instrument"): if hasattr(self, "extent"): - self.pv = PolarView(self.extent[0:2], - ins, - eta_min=self.extent[2], - eta_max=self.extent[3], - pixel_size=px_sz) + self.pv = PolarView( + self.extent[0:2], + ins, + eta_min=self.extent[2], + eta_max=self.extent[3], + pixel_size=px_sz, + ) self.prepare_polarview() @property @@ -568,7 +581,6 @@ def lpcorrection(self, val): msg = "only boolean values accepted" raise ValueError(msg) - @property def img_dict(self): @@ -582,8 +594,7 @@ def img_dict(self): if self.lpcorrection: hpol, vpol = self.polarization for dname, det in self.instrument.detectors.items(): - lp = det.polarization_factor(hpol, vpol) *\ - det.lorentz_factor() + lp = det.polarization_factor(hpol, vpol) * det.lorentz_factor() imd[dname] = imd[dname] / lp return imd @@ -616,11 +627,11 @@ def azimuthal_step(self, val): @property def tth_min(self): - return self.extent[0]+self.pixel_size[0]*0.5 + return self.extent[0] + self.pixel_size[0] * 0.5 @property def tth_max(self): - return self.extent[1]-+self.pixel_size[0]*0.5 + return self.extent[1] - +self.pixel_size[0] * 0.5 @property def peakshape(self): @@ -640,21 +651,25 @@ def peakshape(self, val): elif val == "pvpink": self._peakshape = 2 else: - msg = (f"invalid peak shape string. " + msg = ( + f"invalid peak shape string. " f"must be: \n" f"1. pvfcj: pseudo voight (Finger, Cox, Jephcoat)\n" f"2. pvtch: pseudo voight (Thompson, Cox, Hastings)\n" - f"3. pvpink: Pink beam (Von Dreele)") + f"3. pvpink: Pink beam (Von Dreele)" + ) raise ValueError(msg) elif isinstance(val, int): - if val >=0 and val <=2: + if val >= 0 and val <= 2: self._peakshape = val else: - msg = (f"invalid peak shape int. " + msg = ( + f"invalid peak shape int. " f"must be: \n" f"1. 0: pseudo voight (Finger, Cox, Jephcoat)\n" f"2. 1: pseudo voight (Thompson, Cox, Hastings)\n" - f"3. 
2: Pink beam (Von Dreele)") + f"3. 2: Pink beam (Von Dreele)" + ) raise ValueError(msg) """ @@ -662,13 +677,13 @@ def peakshape(self, val): """ if hasattr(self, 'params'): params = wppfsupport._generate_default_parameters_Rietveld( - self.phases, self.peakshape) + self.phases, self.peakshape + ) for p in params: if p in self.params: params[p] = self.params[p] self._params = params - @property def phases(self): return self._phases @@ -684,8 +699,8 @@ def phases(self, phase_info): >> @DETAILS: load the phases for the LeBail fits """ - if(phase_info is not None): - if(isinstance(phase_info, Phases_LeBail)): + if phase_info is not None: + if isinstance(phase_info, Phases_LeBail): """ directly passing the phase class """ @@ -693,62 +708,62 @@ def phases(self, phase_info): else: - if(hasattr(self, 'wavelength')): - if(self.wavelength is not None): + if hasattr(self, 'wavelength'): + if self.wavelength is not None: p = Phases_LeBail(wavelength=self.wavelength) else: p = Phases_LeBail() - if(isinstance(phase_info, dict)): + if isinstance(phase_info, dict): """ initialize class using a dictionary with key as material file and values as the name of each phase """ for material_file in phase_info: material_names = phase_info[material_file] - if(not isinstance(material_names, list)): + if not isinstance(material_names, list): material_names = [material_names] p.add_many(material_file, material_names) - elif(isinstance(phase_info, str)): + elif isinstance(phase_info, str): """ load from a yaml file """ - if(path.exists(phase_info)): + if path.exists(phase_info): p.load(phase_info) else: raise FileError('phase file doesn\'t exist.') - elif(isinstance(phase_info, Material)): + elif isinstance(phase_info, Material): p[phase_info.name] = Material_LeBail( fhdf=None, xtal=None, dmin=None, - material_obj=phase_info) + material_obj=phase_info, + ) - elif(isinstance(phase_info, list)): + elif isinstance(phase_info, list): for mat in phase_info: p[mat.name] = Material_LeBail( - fhdf=None, 
- xtal=None, - dmin=None, - material_obj=mat) + fhdf=None, xtal=None, dmin=None, material_obj=mat + ) p.num_phases += 1 for mat in p: - p[mat].pf = 1.0/p.num_phases + p[mat].pf = 1.0 / p.num_phases self._phases = p self.calctth() for p in self.phases: - self.phases[p].valid_shkl, \ - self.phases[p].eq_constraints, \ - self.phases[p].rqd_index, \ - self.phases[p].trig_ptype = \ - wppfsupport._required_shkl_names(self.phases[p]) + ( + self.phases[p].valid_shkl, + self.phases[p].eq_constraints, + self.phases[p].rqd_index, + self.phases[p].trig_ptype, + ) = wppfsupport._required_shkl_names(self.phases[p]) @property def params(self): @@ -765,11 +780,12 @@ def params(self, param_info): to some default values (lattice constants are for CeO2) """ from scipy.special import roots_legendre + xn, wn = roots_legendre(16) self.xn = xn[8:] self.wn = wn[8:] - if(param_info is not None): + if param_info is not None: pl = wppfsupport._generate_default_parameters_LeBail( self.phases, self.peakshape) self.lebail_param_list = [p for p in pl] @@ -782,21 +798,25 @@ def params(self, param_info): else: params = lmfit.Parameters() - if(isinstance(param_info, dict)): + if isinstance(param_info, dict): """ initialize class using dictionary read from the yaml file """ for k in param_info: v = param_info[k] - params.add(k, value=float(v[0]), - min=float(v[1]), max=float(v[2]), - vary=bool(v[3])) - - elif(isinstance(param_info, str)): + params.add( + k, + value=float(v[0]), + min=float(v[1]), + max=float(v[2]), + vary=bool(v[3]), + ) + + elif isinstance(param_info, str): """ load from a yaml file """ - if(path.exists(param_info)): + if path.exists(param_info): params.load(param_info) else: raise FileError('input spectrum file doesn\'t exist.') @@ -805,8 +825,7 @@ def params(self, param_info): this part initializes the lattice parameters in the """ for p in self.phases: - wppfsupport._add_lp_to_params( - params, self.phases[p]) + wppfsupport._add_lp_to_params(params, self.phases[p]) self._params 
= params else: @@ -816,7 +835,9 @@ def params(self, param_info): self.lebail_param_list = [p for p in params] wppfsupport._add_detector_geometry(params, self.instrument) if "chebyshev" in self.bkgmethod.keys(): - wppfsupport._add_background(params, self.lineouts, self.bkgdegree) + wppfsupport._add_background( + params, self.lineouts, self.bkgdegree + ) self._params = params @property @@ -826,21 +847,23 @@ def shkl(self): shkl[p] = self.phases[p].shkl return shkl - def calc_simulated(self): self.lineouts_sim = {} for key, lo in self.lineouts.items(): - self.lineouts_sim[key] = LeBaillight(key, - lo, - self.Icalc[key], - self.tth, - self.hkls, - self.dsp, - self.shkl, - self.lebail_param_list, - self.params, - self.peakshape, - self.bkgmethod) + self.lineouts_sim[key] = LeBaillight( + key, + lo, + self.Icalc[key], + self.tth, + self.hkls, + self.dsp, + self.shkl, + self.lebail_param_list, + self.params, + self.peakshape, + self.bkgmethod, + ) + class LeBaillight: """ @@ -848,18 +871,21 @@ class LeBaillight: simple computation of diffraction spectrum given the parameters and intensity values """ - def __init__(self, - name, - lineout, - Icalc, - tth, - hkls, - dsp, - shkl, - lebail_param_list, - params, - peakshape, - bkgmethod): + + def __init__( + self, + name, + lineout, + Icalc, + tth, + hkls, + dsp, + shkl, + lebail_param_list, + params, + peakshape, + bkgmethod, + ): self.name = name self.lebail_param_list = lebail_param_list @@ -887,12 +913,20 @@ def computespectrum(self): Ic = self.Icalc[p][k] - shft_c = np.cos(0.5*np.radians(self.tth[p][k]))*self.params["shft"].value - trns_c = np.sin(np.radians(self.tth[p][k]))*self.params["trns"].value - tth = self.tth[p][k] + \ - self.params["zero_error"].value + \ - shft_c + \ - trns_c + shft_c = ( + np.cos(0.5 * np.radians(self.tth[p][k])) + * self.params["shft"].value + ) + trns_c = ( + np.sin(np.radians(self.tth[p][k])) + * self.params["trns"].value + ) + tth = ( + self.tth[p][k] + + self.params["zero_error"].value + + 
shft_c + + trns_c + ) dsp = self.dsp[p][k] hkls = self.hkls[p][k] @@ -901,74 +935,97 @@ def computespectrum(self): name = p eta_n = f"{name}_eta_fwhm" eta_fwhm = self.params[eta_n].value - strain_direction_dot_product = 0. + strain_direction_dot_product = 0.0 is_in_sublattice = False - cag = np.array([self.params["U"].value, - self.params["V"].value, - self.params["W"].value]) + cag = np.array( + [ + self.params["U"].value, + self.params["V"].value, + self.params["W"].value, + ] + ) gaussschrerr = self.params["P"].value - lorbroad = np.array([self.params["X"].value, - self.params["Y"].value]) - anisbroad = np.array([self.params["Xe"].value, - self.params["Ye"].value, - self.params["Xs"].value]) + lorbroad = np.array( + [self.params["X"].value, self.params["Y"].value] + ) + anisbroad = np.array( + [ + self.params["Xe"].value, + self.params["Ye"].value, + self.params["Xs"].value, + ] + ) if self.peakshape == 0: HL = self.params["HL"].value SL = self.params["SL"].value - args = (cag, - gaussschrerr, - lorbroad, - anisbroad, - shkl, - eta_fwhm, - HL, - SL, - tth, - dsp, - hkls, - strain_direction_dot_product, - is_in_sublattice, - tth_list, - Ic, - self.xn, - self.wn) + args = ( + cag, + gaussschrerr, + lorbroad, + anisbroad, + shkl, + eta_fwhm, + HL, + SL, + tth, + dsp, + hkls, + strain_direction_dot_product, + is_in_sublattice, + tth_list, + Ic, + self.xn, + self.wn, + ) elif self.peakshape == 1: - args = (cag, - gaussschrerr, - lorbroad, - anisbroad, - shkl, - eta_fwhm, - tth, - dsp, - hkls, - strain_direction_dot_product, - is_in_sublattice, - tth_list, - Ic) + args = ( + cag, + gaussschrerr, + lorbroad, + anisbroad, + shkl, + eta_fwhm, + tth, + dsp, + hkls, + strain_direction_dot_product, + is_in_sublattice, + tth_list, + Ic, + ) elif self.peakshape == 2: - alpha = np.array([self.params["alpha0"].value, - self.params["alpha1"].value]) - beta = np.array([self.params["beta0"].value, - self.params["beta1"].value]) - args = (alpha, - beta, - cag, - gaussschrerr, - 
lorbroad, - anisbroad, - shkl, - eta_fwhm, - tth, - dsp, - hkls, - strain_direction_dot_product, - is_in_sublattice, - tth_list, - Ic) + alpha = np.array( + [ + self.params["alpha0"].value, + self.params["alpha1"].value, + ] + ) + beta = np.array( + [ + self.params["beta0"].value, + self.params["beta1"].value, + ] + ) + args = ( + alpha, + beta, + cag, + gaussschrerr, + lorbroad, + anisbroad, + shkl, + eta_fwhm, + tth, + dsp, + hkls, + strain_direction_dot_product, + is_in_sublattice, + tth_list, + Ic, + ) y += self.computespectrum_fcn(*args) @@ -984,7 +1041,7 @@ def CalcIobs(self): self.Iobs = {} spec_expt = self.spectrum_expt.data_array - spec_sim = self.spectrum_sim.data_array + spec_sim = self.spectrum_sim.data_array tth_list = np.ascontiguousarray(self.tth_list) for p in self.tth: @@ -993,12 +1050,20 @@ def CalcIobs(self): Ic = self.Icalc[p][k] - shft_c = np.cos(0.5*np.radians(self.tth[p][k]))*self.params["shft"].value - trns_c = np.sin(np.radians(self.tth[p][k]))*self.params["trns"].value - tth = self.tth[p][k] + \ - self.params["zero_error"].value + \ - shft_c + \ - trns_c + shft_c = ( + np.cos(0.5 * np.radians(self.tth[p][k])) + * self.params["shft"].value + ) + trns_c = ( + np.sin(np.radians(self.tth[p][k])) + * self.params["trns"].value + ) + tth = ( + self.tth[p][k] + + self.params["zero_error"].value + + shft_c + + trns_c + ) dsp = self.dsp[p][k] hkls = self.hkls[p][k] @@ -1007,81 +1072,104 @@ def CalcIobs(self): name = p eta_n = f"{name}_eta_fwhm" eta_fwhm = self.params[eta_n].value - strain_direction_dot_product = 0. 
+ strain_direction_dot_product = 0.0 is_in_sublattice = False - cag = np.array([self.params["U"].value, - self.params["V"].value, - self.params["W"].value]) + cag = np.array( + [ + self.params["U"].value, + self.params["V"].value, + self.params["W"].value, + ] + ) gaussschrerr = self.params["P"].value - lorbroad = np.array([self.params["X"].value, - self.params["Y"].value]) - anisbroad = np.array([self.params["Xe"].value, - self.params["Ye"].value, - self.params["Xs"].value]) + lorbroad = np.array( + [self.params["X"].value, self.params["Y"].value] + ) + anisbroad = np.array( + [ + self.params["Xe"].value, + self.params["Ye"].value, + self.params["Xs"].value, + ] + ) if self.peakshape == 0: HL = self.params["HL"].value SL = self.params["SL"].value - args = (cag, - gaussschrerr, - lorbroad, - anisbroad, - shkl, - eta_fwhm, - HL, - SL, - self.xn, - self.wn, - tth, - dsp, - hkls, - strain_direction_dot_product, - is_in_sublattice, - tth_list, - Ic, - spec_expt, - spec_sim) + args = ( + cag, + gaussschrerr, + lorbroad, + anisbroad, + shkl, + eta_fwhm, + HL, + SL, + self.xn, + self.wn, + tth, + dsp, + hkls, + strain_direction_dot_product, + is_in_sublattice, + tth_list, + Ic, + spec_expt, + spec_sim, + ) elif self.peakshape == 1: - args = (cag, - gaussschrerr, - lorbroad, - anisbroad, - shkl, - eta_fwhm, - tth, - dsp, - hkls, - strain_direction_dot_product, - is_in_sublattice, - tth_list, - Ic, - spec_expt, - spec_sim) + args = ( + cag, + gaussschrerr, + lorbroad, + anisbroad, + shkl, + eta_fwhm, + tth, + dsp, + hkls, + strain_direction_dot_product, + is_in_sublattice, + tth_list, + Ic, + spec_expt, + spec_sim, + ) elif self.peakshape == 2: - alpha = np.array([self.params["alpha0"].value, - self.params["alpha1"].value]) - beta = np.array([self.params["beta0"].value, - self.params["beta1"].value]) - args = (alpha, - beta, - cag, - gaussschrerr, - lorbroad, - anisbroad, - shkl, - eta_fwhm, - tth, - dsp, - hkls, - strain_direction_dot_product, - is_in_sublattice, - 
tth_list, - Ic, - spec_expt, - spec_sim) + alpha = np.array( + [ + self.params["alpha0"].value, + self.params["alpha1"].value, + ] + ) + beta = np.array( + [ + self.params["beta0"].value, + self.params["beta1"].value, + ] + ) + args = ( + alpha, + beta, + cag, + gaussschrerr, + lorbroad, + anisbroad, + shkl, + eta_fwhm, + tth, + dsp, + hkls, + strain_direction_dot_product, + is_in_sublattice, + tth_list, + Ic, + spec_expt, + spec_sim, + ) self.Iobs[p][k] = self.calc_Iobs_fcn(*args) self.Icalc = self.Iobs @@ -1089,12 +1177,11 @@ def CalcIobs(self): @property def weights(self): lo = self.lineout - weights = np.divide(1., np.sqrt(lo.data[:,1])) + weights = np.divide(1.0, np.sqrt(lo.data[:, 1])) weights[np.isinf(weights)] = 0.0 return weights - @property def bkgdegree(self): if "chebyshev" in self.bkgmethod.keys(): @@ -1102,37 +1189,38 @@ def bkgdegree(self): @property def tth_step(self): - return (self.lineout.data[1,0]-self.lineout.data[0,0]) + return self.lineout.data[1, 0] - self.lineout.data[0, 0] @property def background(self): tth, I = self.spectrum_expt.data - mask = self.mask[:,1] + mask = self.mask[:, 1] if "chebyshev" in self.bkgmethod.keys(): - pname = [f"{self.name}_bkg_C{ii}" - for ii in range(self.bkgdegree)] + pname = [f"{self.name}_bkg_C{ii}" for ii in range(self.bkgdegree)] coef = [self.params[p].value for p in pname] - c = Chebyshev(coef,domain=[tth[0],tth[-1]]) + c = Chebyshev(coef, domain=[tth[0], tth[-1]]) bkg = c(tth) bkg[mask] = np.nan elif 'snip1d' in self.bkgmethod.keys(): - ww = np.rint(self.bkgmethod["snip1d"][0] / - self.tth_step).astype(np.int32) + ww = np.rint(self.bkgmethod["snip1d"][0] / self.tth_step).astype( + np.int32 + ) numiter = self.bkgmethod["snip1d"][1] - bkg = np.squeeze(snip1d_quad(np.atleast_2d(I), - w=ww, numiter=numiter)) + bkg = np.squeeze( + snip1d_quad(np.atleast_2d(I), w=ww, numiter=numiter) + ) bkg[mask] = np.nan return bkg @property def spectrum_sim(self): tth, I = self._spectrum_sim.data - mask = self.mask[:,1] + 
mask = self.mask[:, 1] # I[mask] = np.nan I += self.background @@ -1141,9 +1229,9 @@ def spectrum_sim(self): @property def spectrum_expt(self): d = self.lineout.data - mask = self.mask[:,1] + mask = self.mask[:, 1] # d[mask,1] = np.nan - return Spectrum(x=d[:,0], y=d[:,1]) + return Spectrum(x=d[:, 0], y=d[:, 1]) @property def params(self): @@ -1165,17 +1253,20 @@ def params(self, params): self._params[p].vary = params[p].vary else: from scipy.special import roots_legendre + xn, wn = roots_legendre(16) self.xn = xn[8:] self.wn = wn[8:] self._params = lmfit.Parameters() for p in params: if (p in self.lebail_param_list) or (self.name in p): - self._params.add(name=p, - value=params[p].value, - max=params[p].max, - min=params[p].min, - vary=params[p].vary) + self._params.add( + name=p, + value=params[p].value, + max=params[p].max, + min=params[p].min, + vary=params[p].vary, + ) # if hasattr(self, "tth") and \ # hasattr(self, "dsp") and \ @@ -1190,8 +1281,8 @@ def lineout(self): return self._lineout @lineout.setter - def lineout(self,lo): - if isinstance(lo,np.ma.MaskedArray): + def lineout(self, lo): + if isinstance(lo, np.ma.MaskedArray): self._lineout = lo else: msg = f"only masked arrays input is allowed." 
@@ -1206,14 +1297,12 @@ def mask(self): @property def tth_list(self): - return self.lineout[:,0].data - + return self.lineout[:, 0].data @property def tth(self): return self._tth - @tth.setter def tth(self, val): if isinstance(val, dict): @@ -1221,13 +1310,13 @@ def tth(self, val): # if hasattr(self,"dsp"): # self.computespectrum() else: - msg = (f"two theta vallues need " - f"to be in a dictionary") + msg = f"two theta vallues need " f"to be in a dictionary" raise ValueError(msg) @property def hkls(self): return self._hkls + @hkls.setter def hkls(self, val): if isinstance(val, dict): @@ -1237,8 +1326,7 @@ def hkls(self, val): # hasattr(self,"lineout"): # self.computespectrum() else: - msg = (f"two theta vallues need " - f"to be in a dictionary") + msg = f"two theta vallues need " f"to be in a dictionary" raise ValueError(msg) @property @@ -1251,8 +1339,7 @@ def dsp(self, val): self._dsp = val # self.computespectrum() else: - msg = (f"two theta vallues need " - f"to be in a dictionary") + msg = f"two theta vallues need " f"to be in a dictionary" raise ValueError(msg) @property @@ -1261,8 +1348,7 @@ def mask(self): @property def tth_list(self): - return self.lineout[:,0].data - + return self.lineout[:, 0].data @property def Icalc(self): diff --git a/hexrd/wppf/RietveldHEDM.py b/hexrd/powder/wppf/RietveldHEDM.py similarity index 100% rename from hexrd/wppf/RietveldHEDM.py rename to hexrd/powder/wppf/RietveldHEDM.py diff --git a/hexrd/wppf/WPPF.py b/hexrd/powder/wppf/WPPF.py similarity index 98% rename from hexrd/wppf/WPPF.py rename to hexrd/powder/wppf/WPPF.py index df027ae44..e691c816a 100644 --- a/hexrd/wppf/WPPF.py +++ b/hexrd/powder/wppf/WPPF.py @@ -18,12 +18,12 @@ # hexrd imports # ------------- -from hexrd import constants -from hexrd.imageutil import snip1d_quad -from hexrd.material import Material -from hexrd.transforms.xfcapi import angles_to_gvec -from hexrd.valunits import valWUnit -from hexrd.wppf.peakfunctions import ( +from hexrd.core import constants 
+from hexrd.core.imageutil import snip1d_quad +from hexrd.core.material import Material +from hexrd.core.transforms.xfcapi import angles_to_gvec +from hexrd.core.valunits import valWUnit +from hexrd.powder.wppf.peakfunctions import ( calc_rwp, computespectrum_pvfcj, computespectrum_pvtch, @@ -32,9 +32,9 @@ calc_Iobs_pvtch, calc_Iobs_pvpink, ) -from hexrd.wppf import wppfsupport -from hexrd.wppf.spectrum import Spectrum -from hexrd.wppf.phase import ( +from hexrd.powder.wppf import wppfsupport +from hexrd.powder.wppf.spectrum import Spectrum +from hexrd.powder.wppf.phase import ( Phases_LeBail, Phases_Rietveld, Material_LeBail, @@ -132,7 +132,7 @@ def bkgmethod(self, v): # In case the degree has changed, slice off any extra at the end, # and in case it is less, pad with zeros. if len(self.bkg_coef) > degree + 1: - self.bkg_coef = self.bkg_coef[:degree + 1] + self.bkg_coef = self.bkg_coef[: degree + 1] elif len(self.bkg_coef) < degree + 1: pad_width = (0, degree + 1 - len(self.bkg_coef)) self.bkg_coef = np.pad(self.bkg_coef, pad_width) @@ -173,8 +173,7 @@ def cheb_coef(self): @property def cheb_polynomial(self): return np.polynomial.Chebyshev( - self.cheb_coef, - domain=[self.tth_list[0], self.tth_list[-1]] + self.cheb_coef, domain=[self.tth_list[0], self.tth_list[-1]] ) @property @@ -374,9 +373,7 @@ def spectrum_expt(self, expt_spectrum): for s in expt_spec_list: self._spectrum_expt.append( Spectrum( - x=s[:, 0], - y=s[:, 1], - name="expt_spectrum" + x=s[:, 0], y=s[:, 1], name="expt_spectrum" ) ) @@ -1145,8 +1142,13 @@ def computespectrum(self): self.sf_lfactor[p][k] * lam / self.phases[p].lparms[0]) ) - tth = self.tth[p][k] + self.zero_error + \ - shft_c + trns_c + sf_shift + tth = ( + self.tth[p][k] + + self.zero_error + + shft_c + + trns_c + + sf_shift + ) dsp = self.dsp[p][k] hkls = self.hkls[p][k] @@ -2042,8 +2044,10 @@ def Refine(self): self.Rwplist = np.append(self.Rwplist, self.Rwp) self.gofFlist = np.append(self.gofFlist, self.gofF) - msg = (f"Finished 
iteration. Rwp: " - f"{self.Rwp*100.0:.2f} % and chi^2: {self.gofF:.2f}") + msg = ( + f"Finished iteration. Rwp: " + f"{self.Rwp*100.0:.2f} % and chi^2: {self.gofF:.2f}" + ) print(msg) else: print("Nothing to refine...") @@ -2353,7 +2357,7 @@ def separate_regions(masked_spec_array): m0 = np.concatenate(([False], mask, [False])) idx = np.flatnonzero(m0[1:] != m0[:-1]) gidx = [(idx[i], idx[i + 1]) for i in range(0, len(idx), 2)] - return [array[idx[i]: idx[i + 1], :] for i in range(0, len(idx), 2)], gidx + return [array[idx[i] : idx[i + 1], :] for i in range(0, len(idx), 2)], gidx def join_regions(vector_list, global_index, global_shape): @@ -2370,7 +2374,7 @@ def join_regions(vector_list, global_index, global_shape): ) out_vector[:] = np.nan for s, ids in zip(vector_list, global_index): - out_vector[ids[0]: ids[1]] = s + out_vector[ids[0] : ids[1]] = s # out_array = np.ma.masked_array(out_array, mask = np.isnan(out_array)) return out_vector diff --git a/hexrd/powder/wppf/__init__.py b/hexrd/powder/wppf/__init__.py new file mode 100644 index 000000000..fb501c91e --- /dev/null +++ b/hexrd/powder/wppf/__init__.py @@ -0,0 +1,2 @@ +from hexrd.powder.wppf.WPPF import LeBail +from hexrd.powder.wppf.WPPF import Rietveld diff --git a/hexrd/wppf/amorphous.py b/hexrd/powder/wppf/amorphous.py similarity index 99% rename from hexrd/wppf/amorphous.py rename to hexrd/powder/wppf/amorphous.py index 5ba3e278a..57d270d66 100644 --- a/hexrd/wppf/amorphous.py +++ b/hexrd/powder/wppf/amorphous.py @@ -2,7 +2,7 @@ import warnings from scipy.interpolate import CubicSpline from scipy.ndimage import gaussian_filter -from hexrd.wppf.peakfunctions import ( +from hexrd.powder.wppf.peakfunctions import ( _split_unit_gaussian as sp_gauss, _split_unit_pv as sp_pv) @@ -349,4 +349,4 @@ def peak_model(self): if self.model_type == "split_gaussian": return sp_gauss elif self.model_type == "split_pv": - return sp_pv + return sp_pv \ No newline at end of file diff --git a/hexrd/wppf/derivatives.py 
b/hexrd/powder/wppf/derivatives.py similarity index 96% rename from hexrd/wppf/derivatives.py rename to hexrd/powder/wppf/derivatives.py index d1e4cce33..9ea96ef57 100644 --- a/hexrd/wppf/derivatives.py +++ b/hexrd/powder/wppf/derivatives.py @@ -1,6 +1,6 @@ import numpy as np from numba import njit -from hexrd.wppf.peakfunctions import _unit_gaussian, _unit_lorentzian +from hexrd.powder.wppf.peakfunctions import _unit_gaussian, _unit_lorentzian """ naming convention for the derivative is as follows: diff --git a/hexrd/wppf/peakfunctions.py b/hexrd/powder/wppf/peakfunctions.py similarity index 99% rename from hexrd/wppf/peakfunctions.py rename to hexrd/powder/wppf/peakfunctions.py index d05131e71..8119656ee 100644 --- a/hexrd/wppf/peakfunctions.py +++ b/hexrd/powder/wppf/peakfunctions.py @@ -27,9 +27,9 @@ import numpy as np import copy -from hexrd import constants +from hexrd.core import constants from numba import vectorize, float64, njit, prange -from hexrd.fitting.peakfunctions import erfc, exp1exp +from hexrd.core.fitting.peakfunctions import erfc, exp1exp # from scipy.special import erfc, exp1 diff --git a/hexrd/wppf/phase.py b/hexrd/powder/wppf/phase.py similarity index 87% rename from hexrd/wppf/phase.py rename to hexrd/powder/wppf/phase.py index afc9e901f..27e7c6a10 100644 --- a/hexrd/wppf/phase.py +++ b/hexrd/powder/wppf/phase.py @@ -8,16 +8,16 @@ import numpy as np import yaml -from hexrd import constants -from hexrd.material import Material, symmetry, symbols -from hexrd.material.spacegroup import Allowed_HKLs, SpaceGroup -from hexrd.material.unitcell import _calcstar, _rqpDict -from hexrd.valunits import valWUnit -from hexrd.wppf.xtal import ( +from hexrd.core import constants +from hexrd.core.material import Material, symmetry, symbols +from hexrd.core.material.spacegroup import Allowed_HKLs, SpaceGroup +from hexrd.core.material.unitcell import _calcstar, _rqpDict +from hexrd.core.valunits import valWUnit +from hexrd.powder.wppf.xtal import ( 
_calc_dspacing, _get_tth, _calcxrsf, _calc_extinction_factor, _calc_absorption_factor, _get_sf_hkl_factors, ) -import hexrd.resources +import hexrd.core.resources def _kev(x): @@ -207,12 +207,16 @@ def _calcrmt(self): """ direct metric tensor """ - self.dmt = np.array([[a**2, a*b*cg, a*c*cb], - [a*b*cg, b**2, b*c*ca], - [a*c*cb, b*c*ca, c**2]]) + self.dmt = np.array( + [ + [a**2, a * b * cg, a * c * cb], + [a * b * cg, b**2, b * c * ca], + [a * c * cb, b * c * ca, c**2], + ] + ) self.vol = np.sqrt(np.linalg.det(self.dmt)) - if(self.vol < 1e-5): + if self.vol < 1e-5: warnings.warn('unitcell volume is suspiciously small') """ @@ -288,10 +292,10 @@ def get_sf_hkl_factors(self): def sf_and_twin_probability(self): self.sf_alpha = None - self.twin_beta = None + self.twin_beta = None if self.sgnum == 225: self.sf_alpha = 0.0 - self.twin_beta = 0.0 + self.twin_beta = 0.0 def GenerateRecipPGSym(self): @@ -317,9 +321,11 @@ def GenerateRecipPGSym(self): def CalcMaxGIndex(self): self.ih = 1 - while (1.0 / self.CalcLength( - np.array([self.ih, 0, 0], dtype=np.float64), 'r') - > self.dmin): + while ( + 1.0 + / self.CalcLength(np.array([self.ih, 0, 0], dtype=np.float64), 'r') + > self.dmin + ): self.ih = self.ih + 1 self.ik = 1 while (1.0 / self.CalcLength( @@ -355,11 +361,11 @@ def removeinversion(self, ksym): """ klist = [] for i in range(ksym.shape[0]): - k = ksym[i,:] + k = ksym[i, :] kk = list(k) nkk = list(-k) if not klist: - if(np.sum(k) > np.sum(-k)): + if np.sum(k) > np.sum(-k): klist.append(kk) else: klist.append(nkk) @@ -382,9 +388,9 @@ def ChooseSymmetric(self, hkllist, InversionSymmetry=True): mask = np.ones(hkllist.shape[0], dtype=bool) laue = InversionSymmetry for i, g in enumerate(hkllist): - if(mask[i]): + if mask[i]: geqv = self.CalcStar(g, 'r', applyLaue=laue) - for r in geqv[1:, ]: + for r in geqv[1:,]: rid = np.where(np.all(r == hkllist, axis=1)) mask[rid] = False hkl = hkllist[mask, :].astype(np.int32) @@ -407,8 +413,14 @@ def SortHKL(self, hkllist): for g 
in hkllist: glen.append(np.round(self.CalcLength(g, 'r'), 8)) # glen = np.atleast_2d(np.array(glen,dtype=float)).T - dtype = [('glen', float), ('max', int), ('sum', int), - ('h', int), ('k', int), ('l', int)] + dtype = [ + ('glen', float), + ('max', int), + ('sum', int), + ('h', int), + ('k', int), + ('l', int), + ] a = [] for i, gl in enumerate(glen): g = hkllist[i, :] @@ -432,23 +444,28 @@ def getHKLs(self, dmin): are sampled for unique hkls. By convention we will ignore all l < 0 """ - hmin = -self.ih-1 + hmin = -self.ih - 1 hmax = self.ih - kmin = -self.ik-1 + kmin = -self.ik - 1 kmax = self.ik lmin = -1 lmax = self.il - hkllist = np.array([[ih, ik, il] for ih in np.arange(hmax, hmin, -1) - for ik in np.arange(kmax, kmin, -1) - for il in np.arange(lmax, lmin, -1)]) + hkllist = np.array( + [ + [ih, ik, il] + for ih in np.arange(hmax, hmin, -1) + for ik in np.arange(kmax, kmin, -1) + for il in np.arange(lmax, lmin, -1) + ] + ) hkl_allowed = Allowed_HKLs(self.sgnum, hkllist) hkl = [] hkl_dsp = [] for g in hkl_allowed: # ignore [0 0 0] as it is the direct beam - if(np.sum(np.abs(g)) != 0): - dspace = 1./self.CalcLength(g, 'r') - if(dspace >= dmin): + if np.sum(np.abs(g)) != 0: + dspace = 1.0 / self.CalcLength(g, 'r') + if dspace >= dmin: hkl_dsp.append(g) """ we now have a list of g vectors which are all within dmin range @@ -537,7 +554,7 @@ def _init_from_materials(self, material_obj): # Now grab Rietveld-specific stuff # inverse of absorption length - self.abs_fact = 1e-4 * (1./material_obj.absorption_length) + self.abs_fact = 1e-4 * (1.0 / material_obj.absorption_length) # acceleration voltage and wavelength self.voltage = material_obj.unitcell.voltage @@ -549,7 +566,7 @@ def _init_from_materials(self, material_obj): # Debye-Waller factors including anisotropic ones self.U = material_obj.unitcell.U self.aniU = False - if(self.U.ndim > 1): + if self.U.ndim > 1: self.aniU = True self.betaij = material_obj.unitcell.betaij @@ -592,26 +609,30 @@ def calcBetaij(self): 
self.betaij = np.zeros([3, 3, self.atom_ntype]) for i in range(self.U.shape[0]): U = self.U[i, :] - self.betaij[:, :, i] = np.array([[U[0], U[3], U[4]], - [U[3], U[1], U[5]], - [U[4], U[5], U[2]]]) + self.betaij[:, :, i] = np.array( + [[U[0], U[3], U[4]], [U[3], U[1], U[5]], [U[4], U[5], U[2]]] + ) - self.betaij[:, :, i] *= 2. * np.pi**2 * self.aij + self.betaij[:, :, i] *= 2.0 * np.pi**2 * self.aij def CalcWavelength(self): # wavelength in nm - self.wavelength = constants.cPlanck * \ - constants.cLight / \ - constants.cCharge / \ - self.voltage + self.wavelength = ( + constants.cPlanck + * constants.cLight + / constants.cCharge + / self.voltage + ) self.wavelength *= 1e9 # self.CalcAnomalous() def CalcKeV(self): - self.kev = constants.cPlanck * \ - constants.cLight / \ - constants.cCharge / \ - self.wavelength + self.kev = ( + constants.cPlanck + * constants.cLight + / constants.cCharge + / self.wavelength + ) self.kev *= 1e-3 @@ -621,9 +642,13 @@ def _calcrmt(self): bst = self.CalcLength([0, 1, 0], 'r') cst = self.CalcLength([0, 0, 1], 'r') - self.aij = np.array([[ast**2, ast*bst, ast*cst], - [bst*ast, bst**2, bst*cst], - [cst*ast, cst*bst, cst**2]]) + self.aij = np.array( + [ + [ast**2, ast * bst, ast * cst], + [bst * ast, bst**2, bst * cst], + [cst * ast, cst * bst, cst**2], + ] + ) def _calchkls(self): super()._calc_hkls() @@ -633,30 +658,33 @@ def _calchkls(self): choices are 'd' (direct), 'r' (reciprocal) and 'c' (cartesian)''' def TransSpace(self, v_in, inspace, outspace): - if(inspace == 'd'): - if(outspace == 'r'): + if inspace == 'd': + if outspace == 'r': v_out = np.dot(v_in, self.dmt) - elif(outspace == 'c'): + elif outspace == 'c': v_out = np.dot(self.dsm, v_in) else: raise ValueError( - 'inspace in ''d'' but outspace can''t be identified') - elif(inspace == 'r'): - if(outspace == 'd'): + 'inspace in ' 'd' ' but outspace can' 't be identified' + ) + elif inspace == 'r': + if outspace == 'd': v_out = np.dot(v_in, self.rmt) - elif(outspace == 'c'): + 
elif outspace == 'c': v_out = np.dot(self.rsm, v_in) else: raise ValueError( - 'inspace in ''r'' but outspace can''t be identified') - elif(inspace == 'c'): - if(outspace == 'r'): + 'inspace in ' 'r' ' but outspace can' 't be identified' + ) + elif inspace == 'c': + if outspace == 'r': v_out = np.dot(v_in, self.rsm) - elif(outspace == 'd'): + elif outspace == 'd': v_out = np.dot(v_in, self.dsm) else: raise ValueError( - 'inspace in ''c'' but outspace can''t be identified') + 'inspace in ' 'c' ' but outspace can' 't be identified' + ) else: raise ValueError('incorrect inspace argument') return v_out @@ -679,7 +707,7 @@ def CalcPositions(self): n = 1 r = self.atom_pos[i, 0:3] - r = np.hstack((r, 1.)) + r = np.hstack((r, 1.0)) asym_pos.append(np.broadcast_to(r[0:3], [1, 3])) @@ -691,18 +719,18 @@ def CalcPositions(self): # coordinates between 0-1 rr = rnew[0:3] rr = np.modf(rr)[0] - rr[rr < 0.] += 1. - rr[np.abs(rr) < 1.0E-6] = 0. + rr[rr < 0.0] += 1.0 + rr[np.abs(rr) < 1.0e-6] = 0.0 # check if this is new isnew = True for j in range(n): - if(np.sum(np.abs(rr - asym_pos[i][j, :])) < 1E-4): + if np.sum(np.abs(rr - asym_pos[i][j, :])) < 1e-4: isnew = False break # if its new add this to the list - if(isnew): + if isnew: asym_pos[i] = np.vstack((asym_pos[i], rr)) n += 1 @@ -714,15 +742,17 @@ def CalcPositions(self): def InitializeInterpTable(self): f_anomalous_data = [] - data = importlib.resources.open_binary(hexrd.resources, 'Anomalous.h5') + data = importlib.resources.open_binary( + hexrd.core.resources, 'Anomalous.h5' + ) with h5py.File(data, 'r') as fid: for i in range(0, self.atom_ntype): Z = self.atom_type[i] elem = constants.ptableinverse[Z] - gid = fid.get('/'+elem) + gid = fid.get('/' + elem) data = np.array(gid.get('data')) - data = data[:,[7,1,2]] + data = data[:, [7, 1, 2]] f_anomalous_data.append(data) n = max([x.shape[0] for x in f_anomalous_data]) @@ -735,38 +765,38 @@ def InitializeInterpTable(self): for i in range(self.atom_ntype): nd = 
f_anomalous_data[i].shape[0] self.f_anomalous_data_sizes[i] = nd - self.f_anomalous_data[i,:nd,:] = f_anomalous_data[i] + self.f_anomalous_data[i, :nd, :] = f_anomalous_data[i] def CalcXRSF(self, wavelength, w_int): """ the 1E-2 is to convert to A^-2 since the fitting is done in those units """ - fNT = np.zeros([self.atom_ntype,]) - frel = np.zeros([self.atom_ntype,]) - scatfac = np.zeros([self.atom_ntype,11]) + fNT = np.zeros([self.atom_ntype]) + frel = np.zeros([self.atom_ntype]) + scatfac = np.zeros([self.atom_ntype, 11]) f_anomalous_data = self.f_anomalous_data aniU = self.aniU - occ = self.atom_pos[:,3] + occ = self.atom_pos[:, 3] if aniU: betaij = self.betaij else: betaij = self.U - self.numat = np.zeros(self.atom_ntype,dtype=np.int32) + self.numat = np.zeros(self.atom_ntype, dtype=np.int32) for i in range(0, self.atom_ntype): self.numat[i] = self.asym_pos[i].shape[0] Z = self.atom_type[i] elem = constants.ptableinverse[Z] - scatfac[i,:] = constants.scatfac[elem] + scatfac[i, :] = constants.scatfac[elem] frel[i] = constants.frel[elem] fNT[i] = constants.fNT[elem] - self.asym_pos_arr = np.zeros([self.numat.max(),self.atom_ntype, 3]) + self.asym_pos_arr = np.zeros([self.numat.max(), self.atom_ntype, 3]) for i in range(0, self.atom_ntype): nn = self.numat[i] - self.asym_pos_arr[:nn,i,:] = self.asym_pos[i] + self.asym_pos_arr[:nn, i, :] = self.asym_pos[i] nref = self.hkls.shape[0] @@ -806,15 +836,9 @@ def calc_extinction(self, particle_size_D, ) - def calc_absorption(self, - tth, - phi, - wavelength): + def calc_absorption(self, tth, phi, wavelength): abs_fact = self.abs_fact - absorption = _calc_absorption_factor(abs_fact, - tth, - phi, - wavelength) + absorption = _calc_absorption_factor(abs_fact, tth, phi, wavelength) return absorption @@ -859,9 +883,11 @@ def __init__(self, material_file=None, """ wavelength_nm = {} for k, v in wavelength.items(): - if(v[0].unit == 'angstrom'): + if v[0].unit == 'angstrom': wavelength_nm[k] = [ - valWUnit('lp', 'length', 
v[0].getVal("nm"), 'nm'), v[1]] + valWUnit('lp', 'length', v[0].getVal("nm"), 'nm'), + v[1], + ] else: wavelength_nm[k] = v @@ -918,9 +944,9 @@ def add_many(self, material_file, material_keys): def load(self, fname): """ - >> @AUTHOR: Saransh Singh, Lawrence Livermore National Lab, saransh1@llnl.gov - >> @DATE: 06/08/2020 SS 1.0 original - >> @DETAILS: load parameters from yaml file + >> @AUTHOR: Saransh Singh, Lawrence Livermore National Lab, saransh1@llnl.gov + >> @DATE: 06/08/2020 SS 1.0 original + >> @DETAILS: load parameters from yaml file """ with open(fname) as file: dic = yaml.load(file, Loader=yaml.SafeLoader) @@ -931,9 +957,9 @@ def load(self, fname): def dump(self, fname): """ - >> @AUTHOR: Saransh Singh, Lawrence Livermore National Lab, saransh1@llnl.gov - >> @DATE: 06/08/2020 SS 1.0 original - >> @DETAILS: dump parameters to yaml file + >> @AUTHOR: Saransh Singh, Lawrence Livermore National Lab, saransh1@llnl.gov + >> @DATE: 06/08/2020 SS 1.0 original + >> @DETAILS: dump parameters to yaml file """ dic = {self.material_file: [m for m in self]} with open(fname, 'w') as f: diff --git a/hexrd/wppf/spectrum.py b/hexrd/powder/wppf/spectrum.py similarity index 77% rename from hexrd/wppf/spectrum.py rename to hexrd/powder/wppf/spectrum.py index c87960e01..84e702875 100644 --- a/hexrd/wppf/spectrum.py +++ b/hexrd/powder/wppf/spectrum.py @@ -2,6 +2,7 @@ import h5py from os import path + class Spectrum: """ ================================================================================== @@ -18,11 +19,11 @@ class Spectrum: def __init__(self, x=None, y=None, name=''): if x is None: - self._x = np.linspace(10., 100., 500) + self._x = np.linspace(10.0, 100.0, 500) else: self._x = x if y is None: - self._y = np.log(self._x ** 2) - (self._x * 0.2) ** 2 + self._y = np.log(self._x**2) - (self._x * 0.2) ** 2 else: self._y = y self.name = name @@ -69,51 +70,51 @@ def rebin(self, bin_size): new_x = np.arange(x_min, x_max + 0.1 * bin_size, bin_size) bins = 
np.hstack((x_min - bin_size * 0.5, new_x + bin_size * 0.5)) - new_y = (np.histogram(x, bins, weights=y) - [0] / np.histogram(x, bins)[0]) + new_y = np.histogram(x, bins, weights=y)[0] / np.histogram(x, bins)[0] return Spectrum(new_x, new_y) def dump_hdf5(self, file, name): """ - >> @AUTHOR: Saransh Singh, Lawrence Livermore National Lab, saransh1@llnl.gov - >> @DATE: 01/15/2021 SS 1.0 original - >> @DETAILS: dump the class to a hdf5 file. the file argument could either be a - string or a h5.File instance. If it is a filename, then HDF5 file - is created, a Spectrum group is created and data is written out. - Else data written to Spectrum group in existing file object - >> @PARAMS file file name string or h5py.File object - name name ID of the spectrum e.g. experimental or simulated or background + >> @AUTHOR: Saransh Singh, Lawrence Livermore National Lab, saransh1@llnl.gov + >> @DATE: 01/15/2021 SS 1.0 original + >> @DETAILS: dump the class to a hdf5 file. the file argument could either be a + string or a h5.File instance. If it is a filename, then HDF5 file + is created, a Spectrum group is created and data is written out. + Else data written to Spectrum group in existing file object + >> @PARAMS file file name string or h5py.File object + name name ID of the spectrum e.g. 
experimental or simulated or background """ - if(isinstance(file, str)): + if isinstance(file, str): fexist = path.isfile(file) - if(fexist): + if fexist: fid = h5py.File(file, 'r+') else: fid = h5py.File(file, 'x') - elif(isinstance(file, h5py.File)): + elif isinstance(file, h5py.File): fid = file else: raise RuntimeError( 'Parameters: dump_hdf5 Pass in a filename \ - string or h5py.File object') + string or h5py.File object' + ) - name_spectrum = 'Spectrum/'+name - if(name_spectrum in fid): - del(fid[name_spectrum]) + name_spectrum = 'Spectrum/' + name + if name_spectrum in fid: + del fid[name_spectrum] gid = fid.create_group(name_spectrum) tth, I = self.data # make sure these arrays are not zero sized - if(tth.shape[0] > 0): + if tth.shape[0] > 0: did = gid.create_dataset("tth", tth.shape, dtype=np.float64) did.write_direct(tth.astype(np.float64)) - if(I.shape[0] > 0): + if I.shape[0] > 0: did = gid.create_dataset("intensity", I.shape, dtype=np.float64) did.write_direct(I.astype(np.float64)) @@ -128,22 +129,22 @@ def data(self): f_bkg = interp1d(x_bkg, y_bkg, kind='linear') # find overlapping x and y values: - ind = np.where((self._x <= np.max(x_bkg)) & - (self._x >= np.min(x_bkg))) + ind = np.where( + (self._x <= np.max(x_bkg)) & (self._x >= np.min(x_bkg)) + ) x = self._x[ind] y = self._y[ind] if len(x) == 0: - """ if there is no overlapping between background - and Spectrum, raise an error """ + """if there is no overlapping between background + and Spectrum, raise an error""" raise BkgNotInRangeError(self.name) y = y * self._scaling + self.offset - f_bkg(x) else: - """ if Spectrum and bkg have the same - x basis we just delete y-y_bkg""" - x, y = self._x, self._y * \ - self._scaling + self.offset - y_bkg + """if Spectrum and bkg have the same + x basis we just delete y-y_bkg""" + x, y = self._x, self._y * self._scaling + self.offset - y_bkg else: x, y = self.original_data @@ -171,8 +172,7 @@ def data_array(self): @property def original_data(self): - return 
self._x, self._y * self._scaling +\ - self.offset + return self._x, self._y * self._scaling + self.offset @property def x(self): @@ -203,13 +203,15 @@ def scaling(self, value): def limit(self, x_min, x_max): x, y = self.data - return Spectrum(x[np.where((x_min < x) & (x < x_max))], - y[np.where((x_min < x) & (x < x_max))]) + return Spectrum( + x[np.where((x_min < x) & (x < x_max))], + y[np.where((x_min < x) & (x < x_max))], + ) def extend_to(self, x_value, y_value): """ - Extends the current Spectrum to a specific x_value by filling it - with the y_value. Does not modify inplace but returns a new filled + Extends the current Spectrum to a specific x_value by filling it + with the y_value. Does not modify inplace but returns a new filled Spectrum :param x_value: Point to which extend the Spectrum should be smaller than the lowest x-value in the Spectrum or vice versa @@ -220,15 +222,16 @@ def extend_to(self, x_value, y_value): x_min = np.min(self.x) x_max = np.max(self.x) if x_value < x_min: - x_fill = np.arange(x_min - x_step, x_value - - x_step*0.5, -x_step)[::-1] + x_fill = np.arange( + x_min - x_step, x_value - x_step * 0.5, -x_step + )[::-1] y_fill = np.zeros(x_fill.shape) y_fill.fill(y_value) new_x = np.concatenate((x_fill, self.x)) new_y = np.concatenate((y_fill, self.y)) elif x_value > x_max: - x_fill = np.arange(x_max + x_step, x_value+x_step*0.5, x_step) + x_fill = np.arange(x_max + x_step, x_value + x_step * 0.5, x_step) y_fill = np.zeros(x_fill.shape) y_fill.fill(y_value) @@ -241,6 +244,7 @@ def extend_to(self, x_value, y_value): def plot(self, show=False, *args, **kwargs): import matplotlib.pyplot as plt + plt.plot(self.x, self.y, *args, **kwargs) if show: plt.show() @@ -265,8 +269,9 @@ def __sub__(self, other): other_fcn = interp1d(other_x, other_x, kind='linear') # find overlapping x and y values: - ind = np.where((orig_x <= np.max(other_x)) & - (orig_x >= np.min(other_x))) + ind = np.where( + (orig_x <= np.max(other_x)) & (orig_x >= np.min(other_x)) 
+ ) x = orig_x[ind] y = orig_y[ind] @@ -287,8 +292,9 @@ def __add__(self, other): other_fcn = interp1d(other_x, other_x, kind='linear') # find overlapping x and y values: - ind = np.where((orig_x <= np.max(other_x)) & - (orig_x >= np.min(other_x))) + ind = np.where( + (orig_x <= np.max(other_x)) & (orig_x >= np.min(other_x)) + ) x = orig_x[ind] y = orig_y[ind] diff --git a/hexrd/wppf/texture.py b/hexrd/powder/wppf/texture.py similarity index 97% rename from hexrd/wppf/texture.py rename to hexrd/powder/wppf/texture.py index fc56e2f1a..fbf65099f 100644 --- a/hexrd/wppf/texture.py +++ b/hexrd/powder/wppf/texture.py @@ -5,10 +5,10 @@ # hexrd imports # ------------- -from hexrd.rotations import rotMatOfExpMap -from hexrd.transforms.xfcapi import anglesToGVec -from hexrd import constants -from hexrd.wppf.phase import Material_Rietveld +from hexrd.core.rotations import rotMatOfExpMap +from hexrd.core.transforms.xfcapi import anglesToGVec +from hexrd.core import constants +from hexrd.powder.wppf.phase import Material_Rietveld # 3rd party imports # ----------------- @@ -940,7 +940,7 @@ def write_data(self, prefix): fname = f"{prefix}_{k}.txt" angs = np.degrees(self.angs[k]) intensities = np.atleast_2d(self.intensities[k]).T - data = np.hstack((angs,intensities)) + data = np.hstack((angs, intensities)) np.savetxt(fname, data, delimiter="\t") def stereographic_radius(self, @@ -1430,7 +1430,7 @@ def calculate_harmonic_coefficients(self, params, hkls=None): @property def num_pfs(self): - """ number of pole figures (read only) """ + """number of pole figures (read only)""" return len(self.pfdata) """ @@ -1438,6 +1438,7 @@ def num_pfs(self): in the form of a dictionary with keys as the hkl values and the value as the (tth, eta, omega) array. """ + @property def pfdata(self): return self._pfdata @@ -1525,10 +1526,8 @@ class InversePoleFigures: this class deals with everything related to inverse pole figures. 
""" - def __init__(self, - sample_dir, - sampling="equiangular", - resolution=5.0): + + def __init__(self, sample_dir, sampling="equiangular", resolution=5.0): """ this is the initialization of the class. the inputs are 1. laue_sym for laue symmetry @@ -1544,9 +1543,7 @@ def __init__(self, self.resolution = resolution self.sampling = sampling - def initialize_crystal_dir(self, - samplingtype, - resolution=5.0): + def initialize_crystal_dir(self, samplingtype, resolution=5.0): """ this function prepares the unit vectors of the stereogram @@ -1559,14 +1556,14 @@ def initialize_crystal_dir(self, if tth == 0: break angs = np.array(angs) - self.crystal_dir = np.zeros([angs.shape[0],3]) - for i,a in enumerate(angs): + self.crystal_dir = np.zeros([angs.shape[0], 3]) + for i, a in enumerate(angs): t, r = a st = np.sin(t) ct = np.cos(t) sr = np.sin(r) cr = np.cos(r) - self.crystal_dir[i,:] = np.array([st*cr,st*sr,ct]) + self.crystal_dir[i, :] = np.array([st * cr, st * sr, ct]) if samplingtype.lower() == "fem": msg = "sampling type FEM not implemented yet." @@ -1574,7 +1571,7 @@ def initialize_crystal_dir(self, @property def sample_dir(self): - """ sample direction for IPF """ + """sample direction for IPF""" return self._sample_dir @sample_dir.setter @@ -1583,13 +1580,13 @@ def sample_dir(self, val): # sample_dir size = nx3 if isinstance(val, str): if val.upper() == "RD": - self._sample_dir = np.atleast_2d([1.,0.,0.]) + self._sample_dir = np.atleast_2d([1.0, 0.0, 0.0]) self._sample_dir_name = "RD" elif val.upper() == "TD": - self._sample_dir = np.atleast_2d([0.,1.,0.]) + self._sample_dir = np.atleast_2d([0.0, 1.0, 0.0]) self._sample_dir_name = "TD" elif val.upper() == "ND": - self._sample_dir = np.atleast_2d([0.,0.,1.]) + self._sample_dir = np.atleast_2d([0.0, 0.0, 1.0]) self._sample_dir_name = "ND" else: msg = f"unknown direction." 
@@ -1597,8 +1594,10 @@ def sample_dir(self, val): elif isinstance(val, np.array): v = np.atleast_2d(val) if v.shape[1] != 3: - msg = (f"incorrect shape for sample_dir input.\n" - f"expected nx3, got {val.shape[0]}x{val.shape[1]}") + msg = ( + f"incorrect shape for sample_dir input.\n" + f"expected nx3, got {val.shape[0]}x{val.shape[1]}" + ) raise ValueError(msg) self._sample_dir = v self._sample_dir_name = "array" @@ -1610,8 +1609,10 @@ def resolution(self): @resolution.setter def resolution(self, val): if val < 1.0: - msg = (f"the resolution appears to be very fine.\n" - f"Are you sure the value is in degrees?") + msg = ( + f"the resolution appears to be very fine.\n" + f"Are you sure the value is in degrees?" + ) warn(msg) self._resolution = val @@ -1622,17 +1623,19 @@ def sampling(self): @sampling.setter def sampling(self, val): if val.lower() == "equiangular": - self.initialize_crystal_dir("equiangular", - resolution=self.resolution) + self.initialize_crystal_dir( + "equiangular", resolution=self.resolution + ) elif val.lower() == "fem": self.initialize_crystal_dir("FEM") @property def angs(self): - polar = np.arccos(self.crystal_dir[:,2]) - az = np.arctan2(self.crystal_dir[:,1],self.crystal_dir[:,0]) - return np.degrees(np.vstack((polar,az)).T) + polar = np.arccos(self.crystal_dir[:, 2]) + az = np.arctan2(self.crystal_dir[:, 1], self.crystal_dir[:, 0]) + return np.degrees(np.vstack((polar, az)).T) + # These are here only for backward-compatibility diff --git a/hexrd/wppf/wppfsupport.py b/hexrd/powder/wppf/wppfsupport.py similarity index 86% rename from hexrd/wppf/wppfsupport.py rename to hexrd/powder/wppf/wppfsupport.py index 80e2f0ba1..a0caa01c3 100644 --- a/hexrd/wppf/wppfsupport.py +++ b/hexrd/powder/wppf/wppfsupport.py @@ -37,13 +37,23 @@ import lmfit -from hexrd.material.symbols import pstr_spacegroup -from hexrd.wppf.phase import Phases_LeBail, Phases_Rietveld -from hexrd.material import Material -from hexrd.material.unitcell import _rqpDict +from 
hexrd.core.material.symbols import pstr_spacegroup +from hexrd.powder.wppf.phase import Phases_LeBail, Phases_Rietveld +from hexrd.core.material import Material +from hexrd.core.material.unitcell import _rqpDict import hexrd import numpy as np from hexrd import constants +from hexrd.core.material.symbols import pstr_spacegroup +from lmfit import Parameters as Parameters_lmfit +from hexrd.powder.wppf.phase import Phases_LeBail, Phases_Rietveld +from hexrd.core.material import Material +from hexrd.core.material.unitcell import _rqpDict +import hexrd.core +import numpy as np +from hexrd.core import constants +import warnings + def _generate_default_parameters_pseudovoight(params): """ @@ -53,13 +63,14 @@ def _generate_default_parameters_pseudovoight(params): following: 3 -> cagliotti (instrumental broadening) """ - p = {"zero_error":[0., -1., 1., False], - "trns":[0.0, -1.0, 1.0, False], - "shft":[0.0,-1.0,1.0,False], - "U": [81.5, 0., np.inf, False], - "V": [1.0337, 0., np.inf, False], - "W": [5.18275, 0., np.inf, False] - } + p = { + "zero_error": [0.0, -1.0, 1.0, False], + "trns": [0.0, -1.0, 1.0, False], + "shft": [0.0, -1.0, 1.0, False], + "U": [81.5, 0.0, np.inf, False], + "V": [1.0337, 0.0, np.inf, False], + "W": [5.18275, 0.0, np.inf, False], + } for k, v in p.items(): params.add( @@ -70,8 +81,8 @@ def _generate_default_parameters_pseudovoight(params): vary=v[3], ) -def _add_phase_dependent_parameters_pseudovoight(params, - mat): + +def _add_phase_dependent_parameters_pseudovoight(params, mat): """ add the particle size broadening term P : Gaussian scherrer broadening @@ -79,10 +90,11 @@ def _add_phase_dependent_parameters_pseudovoight(params, Y : Lorentzian microstrain broadening """ name = mat.name - p = {"P": [0., 0., np.inf, False], - "X": [0.5665, 0., np.inf, False], - "Y": [1.90994, 0., np.inf, False] - } + p = { + "P": [0.0, 0.0, np.inf, False], + "X": [0.5665, 0.0, np.inf, False], + "Y": [1.90994, 0.0, np.inf, False], + } for k, v in p.items(): pname = 
f"{name}_{k}" @@ -95,9 +107,7 @@ def _add_phase_dependent_parameters_pseudovoight(params, ) def _add_pvfcj_parameters(params): - p = {"HL":[1e-3,1e-7,1e-1,False], - "SL":[1e-3,1e-7,1e-1,False] - } + p = {"HL": [1e-3, 1e-7, 1e-1, False], "SL": [1e-3, 1e-7, 1e-1, False]} for k, v in p.items(): params.add( name=k, @@ -108,11 +118,12 @@ def _add_pvfcj_parameters(params): ) def _add_pvpink_parameters(params): - p = {"alpha0":[14.4, -100., 100., False], - "alpha1":[0., -100., 100., False], - "beta0":[3.016, -100., 100., False], - "beta1":[-2.0, -100., 100., False] - } + p = { + "alpha0": [14.4, -100.0, 100.0, False], + "alpha1": [0.0, -100.0, 100.0, False], + "beta0": [3.016, -100.0, 100.0, False], + "beta1": [-2.0, -100.0, 100.0, False], + } for k, v in p.items(): params.add( name=k, @@ -122,16 +133,15 @@ def _add_pvpink_parameters(params): vary=v[3], ) -def _add_chebyshev_background(params, - degree, - init_val): + +def _add_chebyshev_background(params, degree, init_val): """ add coefficients for chebyshev background polynomial. The initial values will be the same as determined by WPPF.chebyshevfit routine """ - for d in range(degree+1): + for d in range(degree + 1): n = f"bkg_{d}" params.add( name=n, @@ -141,8 +151,8 @@ def _add_chebyshev_background(params, vary=False, ) -def _add_stacking_fault_parameters(params, - mat): + +def _add_stacking_fault_parameters(params, mat): """ add stacking fault parameters for cubic systems only """ @@ -155,9 +165,8 @@ def _add_stacking_fault_parameters(params, params.add(twin_beta_name, value=0., min=0., max=1., vary=False) -def _add_Shkl_terms(params, - mat, - return_dict=None): + +def _add_Shkl_terms(params, mat, return_dict=None): """ add the SHKL terms in the anisotropic peak broadening contribution. 
this depends on the @@ -196,8 +205,8 @@ def _add_Shkl_terms(params, res[s] = 0.0 return res, trig_ptype -def _add_lp_to_params(params, - mat): + +def _add_lp_to_params(params, mat): """ 03/12/2021 SS 1.0 original given a material, add the required @@ -209,7 +218,7 @@ def _add_lp_to_params(params, name = [_lpname[i] for i in rid] phase_name = mat.name for n, l in zip(name, lp): - nn = phase_name+'_'+n + nn = phase_name + '_' + n """ is n is a,b,c, it is one of the length units else it is an angle @@ -379,25 +388,24 @@ def _generate_default_parameters_LeBail(mat, elif peakshape == 2: _add_pvpink_parameters(params) else: - msg = (f"_generate_default_parameters_LeBail: " - f"unknown peak shape.") + msg = f"_generate_default_parameters_LeBail: " f"unknown peak shape." raise ValueError(msg) if "chebyshev" in bkgmethod: deg = bkgmethod["chebyshev"] if not (init_val is None): - if len(init_val) < deg+1: - msg = (f"size of init_val and degree " - f"of polynomial are not consistent. " - f"setting initial guess to zero.") + if len(init_val) < deg + 1: + msg = ( + f"size of init_val and degree " + f"of polynomial are not consistent. " + f"setting initial guess to zero." 
+ ) warnings.warn(msg) - init_val = np.zeros([deg+1,]) + init_val = np.zeros([deg + 1]) else: - init_val = np.zeros([deg+1,]) + init_val = np.zeros([deg + 1]) - _add_chebyshev_background(params, - deg, - init_val) + _add_chebyshev_background(params, deg, init_val) for m in _mat_list(mat): _add_phase_dependent_parameters_pseudovoight(params, m) @@ -413,10 +421,10 @@ def _generate_default_parameters_LeBail(mat, def _add_phase_fractions(mat, params): """ - @author: Saransh Singh, Lawrence Livermore National Lab - @date: 04/01/2021 SS 1.0 original - @details: ass phase fraction to params class - given a list/dict/single instance of material class + @author: Saransh Singh, Lawrence Livermore National Lab + @date: 04/01/2021 SS 1.0 original + @details: ass phase fraction to params class + given a list/dict/single instance of material class """ pf_list = [] mat_list = _mat_list(mat) @@ -508,12 +516,14 @@ def _generate_default_parameters_Rietveld(mat, return params + +# fmt: off _shkl_name = ["s400", "s040", "s004", "s220", "s202", "s022", "s310", "s103", "s031", "s130", "s301", "s013", "s211", "s121", "s112"] _lpname = ['a', 'b', 'c', 'alpha', 'beta', 'gamma'] _nameU = ['U11', 'U22', 'U33', 'U12', 'U13', 'U23'] - +# fmt: on """ function to take care of equality constraints """ @@ -524,8 +534,8 @@ def _fill_shkl(x, eq_const): fill all values of shkl when only reduced set is passed """ - x_ret = np.zeros([15,]) - for ii,n in enumerate(_shkl_name): + x_ret = np.zeros([15]) + for ii, n in enumerate(_shkl_name): if n in x: x_ret[ii] = x[n] else: @@ -534,7 +544,7 @@ def _fill_shkl(x, eq_const): pass else: for c in eq_const: - x_ret[c[1]] = c[2]*x_ret[c[0]] + x_ret[c[1]] = c[2] * x_ret[c[0]] return x_ret @@ -543,7 +553,7 @@ def _required_shkl_names(mat): latticetype = mat.latticeType sgnum = mat.sgnum mname = mat.name - hmsym = pstr_spacegroup[sgnum-1].strip() + hmsym = pstr_spacegroup[sgnum - 1].strip() trig_ptype = False if latticetype == "trigonal" and hmsym[0] == "P": @@ 
-561,6 +571,7 @@ def _required_shkl_names(mat): return valid_shkl, eq_constraints, rqd_index, trig_ptype + """ this dictionary structure holds information for the shkl coefficeints needed for anisotropic broadening of peaks @@ -589,7 +600,7 @@ def _getnumber(arr): res = np.ones(arr.shape) for i in range(arr.shape[0]): - res[i] = np.sum(arr[0:i+1] == arr[i]) + res[i] = np.sum(arr[0 : i + 1] == arr[i]) res = res.astype(np.int32) return res @@ -601,8 +612,8 @@ def _add_detector_geometry(params, instr): detector as a parameter to the LeBail class such that those can be refined as well """ - if isinstance(instr, hexrd.instrument.HEDMInstrument): - for key,det in instr.detectors.items(): + if isinstance(instr, hexrd.core.instrument.HEDMInstrument): + for key, det in instr.detectors.items(): tvec = det.tvec tilt = det.tilt pnametvec = [f"{key}_tvec{i}" for i in range(3)] @@ -622,7 +633,7 @@ def _add_background(params,lineouts,bkgdegree): def striphkl(g): - return str(g)[1:-1].replace(" ","") + return str(g)[1:-1].replace(" ", "") def _add_intensity_parameters(params,hkls,Icalc,prefix): @@ -682,7 +693,6 @@ def _mat_list(mat): background_methods = { 'spline': None, - 'chebyshev': [ { 'label': 'Chebyshev Polynomial Degree', @@ -690,26 +700,25 @@ def _mat_list(mat): 'min': 0, 'max': 99, 'value': 3, - 'tooltip': 'The polynomial degree used ' - 'for polynomial fit.', + 'tooltip': 'The polynomial degree used ' 'for polynomial fit.', } ], 'snip1d': [ { 'label': 'Snip Width', 'type': float, - 'min': 0., + 'min': 0.0, 'value': 1.0, 'tooltip': 'Maximum width of peak to retain for ' - 'background estimation (in degrees).' + 'background estimation (in degrees).', }, { 'label': 'Snip Num Iterations', 'type': int, 'min': 1, 'max': 99, - 'value':2, - 'tooltip': 'number of snip iterations.' 
- } + 'value': 2, + 'tooltip': 'number of snip iterations.', + }, ], } diff --git a/hexrd/wppf/xtal.py b/hexrd/powder/wppf/xtal.py similarity index 98% rename from hexrd/wppf/xtal.py rename to hexrd/powder/wppf/xtal.py index af39716f5..ce2aac0ba 100644 --- a/hexrd/wppf/xtal.py +++ b/hexrd/powder/wppf/xtal.py @@ -1,8 +1,8 @@ import numpy as np from numba import njit, prange -from hexrd import constants -from hexrd.material.unitcell import _calcstar +from hexrd.core import constants +from hexrd.core.material.unitcell import _calcstar @njit(cache=True, nogil=True) @@ -86,7 +86,7 @@ def _calcxrayformfactor( f1 and f2 have been tabulated as a function of energy in Anomalous.h5 in hexrd folder - overall f = (f0 + f' + if" +fNT) + overall f = (f0 + f' + if" +fNT) """ f_anomalous = _calcanomalousformfactor( diff --git a/hexrd/resources/instrument_templates/__init__.py b/hexrd/resources/instrument_templates/__init__.py deleted file mode 100644 index e69de29bb..000000000 diff --git a/hexrd/sampleOrientations/__init__.py b/hexrd/sampleOrientations/__init__.py deleted file mode 100644 index 29d998782..000000000 --- a/hexrd/sampleOrientations/__init__.py +++ /dev/null @@ -1 +0,0 @@ -from hexrd.sampleOrientations.sampleRFZ import sampleRFZ \ No newline at end of file diff --git a/hexrd/transforms/xfcapi.py b/hexrd/transforms/xfcapi.py deleted file mode 100644 index af0ca3257..000000000 --- a/hexrd/transforms/xfcapi.py +++ /dev/null @@ -1,47 +0,0 @@ -# We will replace these functions with the new versions as we -# add and test them. -# NOTE: we are only importing what is currently being used in hexrd -# and hexrdgui. This is so that we can see clearly what is in use. 
-from .old_xfcapi import ( - # Old transform functions still in use - anglesToDVec, # new version provided below - anglesToGVec, # new version provided below - detectorXYToGvec, # new version provided below - gvecToDetectorXY, # new version provided below - gvecToDetectorXYArray, # new version provided below - oscillAnglesOfHKLs, - # Utility functions - angularDifference, - makeDetectorRotMat, # New version provided below - makeEtaFrameRotMat, # new version provided below - makeOscillRotMat, # new version provided below - makeOscillRotMatArray, # new version provided below - makeRotMatOfExpMap, - makeRotMatOfQuat, # Use rotations.rotMatOfQuat instead - mapAngle, # Use rotations.mapAngle instead - rowNorm, # use numpy.linalg.norm(..., axis=1) instead - unitRowVector, # new version below - # Constants, - bVec_ref, - eta_ref, - Xl, - Yl, -) - - -from .new_capi.xf_new_capi import ( - # New transform functions - angles_to_dvec, - angles_to_gvec, - gvec_to_xy, # this is gvecToDetectorXY and gvecToDetectorXYArray - make_beam_rmat, # this is makeEtaFrameRotMat - make_detector_rmat, - make_rmat_of_expmap, - make_sample_rmat, # this is makeOscillRotMat and makeOscillRotMatArray - oscill_angles_of_hkls, - quat_distance, - rotate_vecs_about_axis, - unit_vector, # this is unitRowVector - validate_angle_ranges, - xy_to_gvec, -) diff --git a/hexrd/wppf/__init__.py b/hexrd/wppf/__init__.py deleted file mode 100644 index 40887f145..000000000 --- a/hexrd/wppf/__init__.py +++ /dev/null @@ -1,2 +0,0 @@ -from hexrd.wppf.WPPF import LeBail -from hexrd.wppf.WPPF import Rietveld \ No newline at end of file diff --git a/hexrd/xrdutil/__init__.py b/hexrd/xrdutil/__init__.py deleted file mode 100644 index da7dfa681..000000000 --- a/hexrd/xrdutil/__init__.py +++ /dev/null @@ -1,7 +0,0 @@ -from .utils import * - -# For now import these private members! 
-from .utils import _project_on_detector_plane -from .utils import _project_on_detector_cylinder -from .utils import _fetch_hkls_from_planedata -from .utils import _filter_hkls_eta_ome diff --git a/pyproject.toml b/pyproject.toml index 6b2e323bd..4de0e1784 100644 --- a/pyproject.toml +++ b/pyproject.toml @@ -1,6 +1,27 @@ +[project] +name = "hexrd" +dynamic = [ + "version", + "authors", + "description", + "license", + "dependencies", + "scripts", + "classifiers", + "readme", + "requires-python" +] + + [build-system] requires = ["setuptools", "wheel", "numpy<2.0", "setuptools_scm[toml]", "pybind11>=2.11.0"] [tool.black] line-length = 79 skip-string-normalization = true + +[project.optional-dependencies] +test = [ + "pytest", + "coloredlogs", +] \ No newline at end of file diff --git a/scripts/install/install_build_dependencies.py b/scripts/install/install_build_dependencies.py index abc5299f0..3da27d7f3 100755 --- a/scripts/install/install_build_dependencies.py +++ b/scripts/install/install_build_dependencies.py @@ -49,8 +49,10 @@ def download_xtensor(path): shutil.rmtree(target_path) os.makedirs(path, exist_ok=True) - shutil.move(str(Path(temp_dir) / out_dir_name / 'include/xtensor'), - str(Path(path) / 'xtensor/xtensor')) + shutil.move( + str(Path(temp_dir) / out_dir_name / 'include/xtensor'), + str(Path(path) / 'xtensor/xtensor'), + ) return str(target_path) @@ -70,7 +72,8 @@ def download_xtensor_python(path): os.makedirs(path, exist_ok=True) shutil.move( str(Path(temp_dir) / out_dir_name / 'include/xtensor-python'), - str(Path(path) / 'xtensor-python/xtensor-python')) + str(Path(path) / 'xtensor-python/xtensor-python'), + ) return str(target_path) @@ -88,8 +91,10 @@ def download_xtl(path): shutil.rmtree(target_path) os.makedirs(path, exist_ok=True) - shutil.move(str(Path(temp_dir) / out_dir_name / 'include/xtl'), - str(Path(path) / 'xtl/xtl')) + shutil.move( + str(Path(temp_dir) / out_dir_name / 'include/xtl'), + str(Path(path) / 'xtl/xtl'), + ) return 
str(target_path) @@ -107,8 +112,10 @@ def download_xsimd(path): shutil.rmtree(target_path) os.makedirs(path, exist_ok=True) - shutil.move(str(Path(temp_dir) / out_dir_name / 'include/xsimd'), - str(Path(path) / 'xsimd/xsimd')) + shutil.move( + str(Path(temp_dir) / out_dir_name / 'include/xsimd'), + str(Path(path) / 'xsimd/xsimd'), + ) return str(target_path) @@ -142,8 +149,10 @@ def download_pybind11(path): shutil.rmtree(target_path) os.makedirs(path, exist_ok=True) - shutil.move(str(Path(temp_dir) / out_dir_name / 'include/pybind11'), - str(Path(path) / 'pybind11/pybind11')) + shutil.move( + str(Path(temp_dir) / out_dir_name / 'include/pybind11'), + str(Path(path) / 'pybind11/pybind11'), + ) return str(target_path) diff --git a/setup.py b/setup.py index e80b3c5f4..dd567034c 100644 --- a/setup.py +++ b/setup.py @@ -17,7 +17,7 @@ 'fabio>=0.11', 'fast-histogram', 'h5py<3.12', # Currently, h5py 3.12 on Windows fails to import. - # We can remove this version pin when that is fixed. + # We can remove this version pin when that is fixed. 
'hdf5plugin', 'lmfit', 'matplotlib', @@ -49,9 +49,10 @@ else: compiler_flags = [] + # Extension for convolution from astropy def get_convolution_extensions(): - c_convolve_pkgdir = Path('hexrd') / 'convolution' + c_convolve_pkgdir = Path('hexrd') / 'core/convolution' src_files = [str(c_convolve_pkgdir / 'src/convolve.c')] @@ -59,15 +60,16 @@ def get_convolution_extensions(): # Add '-Rpass-missed=.*' to ``extra_compile_args`` when compiling with # clang to report missed optimizations _convolve_ext = Extension( - name='hexrd.convolution._convolve', + name='hexrd.core.convolution._convolve', sources=src_files, extra_compile_args=extra_compile_args, include_dirs=[numpy.get_include()], - language='c' + language='c', ) return [_convolve_ext] + def get_include_path(library_name): env_var_hint = os.getenv(f"{library_name.upper()}_INCLUDE_DIR") if env_var_hint is not None and os.path.exists(env_var_hint): @@ -100,6 +102,7 @@ def get_include_path(library_name): # It should exist now return full_path + def get_pybind11_include_path(): # If we can import pybind11, use that include path try: @@ -112,8 +115,9 @@ def get_pybind11_include_path(): # Otherwise, we will download the source and include that return get_include_path('pybind11') + def get_cpp_extensions(): - cpp_transform_pkgdir = Path('hexrd') / 'transforms/cpp_sublibrary' + cpp_transform_pkgdir = Path('hexrd') / 'core/transforms/cpp_sublibrary' extra_compile_args = [ '-O3', @@ -134,7 +138,7 @@ def get_cpp_extensions(): ] transforms_ext = Extension( - name='hexrd.extensions.transforms', + name='hexrd.core.extensions.transforms', sources=[str(cpp_transform_pkgdir / 'src/transforms.cpp')], extra_compile_args=extra_compile_args, include_dirs=include_dirs, @@ -142,7 +146,7 @@ def get_cpp_extensions(): ) inverse_distortion_ext = Extension( - name='hexrd.extensions.inverse_distortion', + name='hexrd.core.extensions.inverse_distortion', sources=[str(cpp_transform_pkgdir / 'src/inverse_distortion.cpp')], 
extra_compile_args=extra_compile_args, include_dirs=include_dirs, @@ -151,12 +155,13 @@ def get_cpp_extensions(): return [transforms_ext, inverse_distortion_ext] + def get_old_xfcapi_extension_modules(): # for transforms srclist = ['transforms_CAPI.c', 'transforms_CFUNC.c'] - srclist = [os.path.join('hexrd/transforms', f) for f in srclist] + srclist = [os.path.join('hexrd/core/transforms', f) for f in srclist] transforms_mod = Extension( - 'hexrd.extensions._transforms_CAPI', + 'hexrd.core.extensions._transforms_CAPI', sources=srclist, include_dirs=[np_include_dir], extra_compile_args=compiler_flags, @@ -164,16 +169,18 @@ def get_old_xfcapi_extension_modules(): return [transforms_mod] + def get_new_xfcapi_extension_modules(): transforms_mod = Extension( - 'hexrd.extensions._new_transforms_capi', - sources=['hexrd/transforms/new_capi/module.c'], + 'hexrd.core.extensions._new_transforms_capi', + sources=['hexrd/core/transforms/new_capi/module.c'], include_dirs=[np_include_dir], extra_compile_args=compiler_flags, ) return [transforms_mod] + def get_extension_modules(): # Flatten the lists return [ @@ -187,10 +194,11 @@ def get_extension_modules(): for item in sublist ] + ext_modules = get_extension_modules() # use entry_points, not scripts: -entry_points = {'console_scripts': ["hexrd = hexrd.cli.main:main"]} +entry_points = {'console_scripts': ["hexrd = hexrd.hedm.cli.main:main"]} setup( name='hexrd', @@ -217,7 +225,7 @@ def get_extension_modules(): ext_modules=ext_modules, packages=find_packages(), include_package_data=True, - package_data={'': ['Anomalous.h5']}, + package_data={'': ['Anomalous.h5', 'file_table.tsv']}, python_requires='>=3.9', install_requires=install_reqs, ) diff --git a/tests/calibration/test_2xrs_calibration.py b/tests/calibration/test_2xrs_calibration.py index b9e4ba852..431a4c260 100644 --- a/tests/calibration/test_2xrs_calibration.py +++ b/tests/calibration/test_2xrs_calibration.py @@ -4,10 +4,10 @@ import pytest -from hexrd.material.material 
import load_materials_hdf5 -from hexrd.instrument.hedm_instrument import HEDMInstrument +from hexrd.core.material.material import load_materials_hdf5 +from hexrd.core.instrument.hedm_instrument import HEDMInstrument -from hexrd.fitting.calibration import ( +from hexrd.core.fitting.calibration import ( InstrumentCalibrator, PowderCalibrator, ) @@ -28,7 +28,8 @@ def test_2xrs_calibration(tardis_2xrs_examples_dir, test_data_dir): # Load the picks with open( - tardis_2xrs_examples_dir / 'tardis_2xrs_example.yml', 'r', + tardis_2xrs_examples_dir / 'tardis_2xrs_example.yml', + 'r', encoding='utf-8', ) as rf: conf = yaml.safe_load(rf) diff --git a/tests/calibration/test_calibration.py b/tests/calibration/test_calibration.py index cae0cf81b..ee2fee6da 100644 --- a/tests/calibration/test_calibration.py +++ b/tests/calibration/test_calibration.py @@ -6,10 +6,10 @@ import pytest -from hexrd.material.material import load_materials_hdf5 -from hexrd.instrument.hedm_instrument import HEDMInstrument +from hexrd.core.material.material import load_materials_hdf5 +from hexrd.core.instrument.hedm_instrument import HEDMInstrument -from hexrd.fitting.calibration import ( +from hexrd.core.fitting.calibration import ( InstrumentCalibrator, LaueCalibrator, PowderCalibrator, @@ -113,8 +113,12 @@ def test_calibration(calibration_dir, test_data_dir): ] tvecs = { - 'old': [np.array([x0[k] for k in vec_names]) for vec_names in tvec_names], - 'new': [np.array([x1[k] for k in vec_names]) for vec_names in tvec_names], + 'old': [ + np.array([x0[k] for k in vec_names]) for vec_names in tvec_names + ], + 'new': [ + np.array([x1[k] for k in vec_names]) for vec_names in tvec_names + ], } grain_param_names = [f'LiF_grain_param_{n}' for n in range(12)] @@ -137,9 +141,7 @@ def test_calibration(calibration_dir, test_data_dir): ) -def assert_errors_are_better( - tvecs, grain_params, diamond_a_vals, expected -): +def assert_errors_are_better(tvecs, grain_params, diamond_a_vals, expected): """ Make sure error 
has decreased during fitting """ diff --git a/tests/calibration/test_group_relative_constraints.py b/tests/calibration/test_group_relative_constraints.py index d6ed60a51..a238ca228 100644 --- a/tests/calibration/test_group_relative_constraints.py +++ b/tests/calibration/test_group_relative_constraints.py @@ -49,9 +49,7 @@ def ceria_example_data(ceria_examples_path: Path) -> dict[str, np.ndarray]: @pytest.fixture def dexelas_composite_instrument(ceria_examples_path: Path) -> HEDMInstrument: - instr_path = ( - ceria_examples_path / 'dexelas.yml' - ) + instr_path = ceria_examples_path / 'dexelas.yml' with open(instr_path, 'r') as rf: config = yaml.safe_load(rf) diff --git a/tests/calibration/test_hedm_calibration.py b/tests/calibration/test_hedm_calibration.py index 1d14f13f4..b14bc0852 100644 --- a/tests/calibration/test_hedm_calibration.py +++ b/tests/calibration/test_hedm_calibration.py @@ -41,10 +41,12 @@ def pull_spots_picks(calibration_dir): path = calibration_dir picks = [] for i in range(3): - picks.append({ - 'pick_xys': np.load(path / f'grain{i + 1}_picks.npz'), - 'hkls': np.load(path / f'grain{i + 1}_pick_hkls.npz'), - }) + picks.append( + { + 'pick_xys': np.load(path / f'grain{i + 1}_picks.npz'), + 'hkls': np.load(path / f'grain{i + 1}_pick_hkls.npz'), + } + ) return picks @@ -53,8 +55,9 @@ def grain_params(calibration_dir): return np.load(calibration_dir / 'grain_params.npy') -def test_calibration(dexelas_instrument, ruby_material, pull_spots_picks, - grain_params): +def test_calibration( + dexelas_instrument, ruby_material, pull_spots_picks, grain_params +): instr = dexelas_instrument diff --git a/tests/calibration/test_instrument_relative_constraints.py b/tests/calibration/test_instrument_relative_constraints.py index 208551100..c4affad38 100644 --- a/tests/calibration/test_instrument_relative_constraints.py +++ b/tests/calibration/test_instrument_relative_constraints.py @@ -5,19 +5,19 @@ import numpy as np import pytest -from hexrd import imageseries 
-from hexrd.fitting.calibration import ( +from hexrd.core import imageseries +from hexrd.core.fitting.calibration import ( InstrumentCalibrator, PowderCalibrator, ) -from hexrd.fitting.calibration.relative_constraints import ( +from hexrd.core.fitting.calibration.relative_constraints import ( RelativeConstraintsType, ) -from hexrd.imageseries.process import ProcessedImageSeries -from hexrd.instrument import HEDMInstrument -from hexrd.material import load_materials_hdf5, Material -from hexrd.rotations import rotMatOfExpMap -from hexrd.utils.hdf5 import unwrap_h5_to_dict +from hexrd.core.imageseries.process import ProcessedImageSeries +from hexrd.core.instrument import HEDMInstrument +from hexrd.core.material import load_materials_hdf5, Material +from hexrd.core.rotations import rotMatOfExpMap +from hexrd.core.utils.hdf5 import unwrap_h5_to_dict @pytest.fixture diff --git a/tests/calibration/test_laue_auto_pick.py b/tests/calibration/test_laue_auto_pick.py index 875d7da5a..0f51279d5 100644 --- a/tests/calibration/test_laue_auto_pick.py +++ b/tests/calibration/test_laue_auto_pick.py @@ -5,10 +5,9 @@ import pytest -from hexrd.fitting.calibration import LaueCalibrator -from hexrd.material.material import load_materials_hdf5, Material -from hexrd.instrument.hedm_instrument import HEDMInstrument -from collections import defaultdict +from hexrd.core.fitting.calibration import LaueCalibrator +from hexrd.core.material import load_materials_hdf5, Material +from hexrd.core.instrument.hedm_instrument import HEDMInstrument @pytest.fixture diff --git a/tests/calibration/test_powder_auto_pick.py b/tests/calibration/test_powder_auto_pick.py index 64058c4a6..d3a75015b 100644 --- a/tests/calibration/test_powder_auto_pick.py +++ b/tests/calibration/test_powder_auto_pick.py @@ -6,10 +6,10 @@ import pytest from hexrd import imageseries -from hexrd.fitting.calibration import PowderCalibrator -from hexrd.material.material import load_materials_hdf5, Material -from 
hexrd.imageseries.process import ProcessedImageSeries -from hexrd.instrument.hedm_instrument import HEDMInstrument +from hexrd.core.fitting.calibration import PowderCalibrator +from hexrd.core.material.material import load_materials_hdf5, Material +from hexrd.core.imageseries.process import ProcessedImageSeries +from hexrd.core.instrument.hedm_instrument import HEDMInstrument @pytest.fixture @@ -24,9 +24,7 @@ def ceria_examples_path(eiger_examples_path: Path) -> Path: @pytest.fixture def eiger_instrument(ceria_examples_path: Path) -> HEDMInstrument: - instr_path = ( - ceria_examples_path / 'eiger_ceria_calibrated_composite.hexrd' - ) + instr_path = ceria_examples_path / 'eiger_ceria_calibrated_composite.hexrd' with h5py.File(instr_path, 'r') as rf: return HEDMInstrument(rf) @@ -92,7 +90,7 @@ def hkl_idx(hkl: tuple | list) -> int | None: ) calibrator.autopick_points( - fit_tth_tol=1., + fit_tth_tol=1.0, int_cutoff=1e-4, ) diff --git a/tests/common.py b/tests/common.py index a56163ca9..0da121375 100644 --- a/tests/common.py +++ b/tests/common.py @@ -2,7 +2,7 @@ import numpy as np -import hexrd.constants as ct +import hexrd.core.constants as ct def convert_axis_angle_to_rmat(axis, angle): diff --git a/tests/config/common.py b/tests/config/common.py index de8b25878..1431c3fdd 100644 --- a/tests/config/common.py +++ b/tests/config/common.py @@ -4,7 +4,7 @@ import logging import unittest -from hexrd import config +from hexrd.hedm import config test_data = { @@ -15,7 +15,7 @@ 'file_stem': 'test_%%05d.dat', 'tempdir': tempfile.gettempdir(), 'pathsep': os.path.sep, - } +} class TestConfig(unittest.TestCase): @@ -37,10 +37,8 @@ def tearDownClass(cls): def setUp(self): self.cfgs = config.open(self.file_name) - def tearDown(self): - del(self.cfgs) - + del self.cfgs @classmethod def get_reference_data(cls): diff --git a/tests/config/test_find_orientations.py b/tests/config/test_find_orientations.py index adf0cfaa1..a6c1b4aab 100644 --- a/tests/config/test_find_orientations.py 
+++ b/tests/config/test_find_orientations.py @@ -6,8 +6,8 @@ from .common import TestConfig, test_data -reference_data = \ -""" +reference_data = ( + """ analysis_name: analysis working_dir: %(tempdir)s material: @@ -63,189 +63,136 @@ find_orientations: orientation_maps: file: null -""" % test_data - +""" + % test_data +) class TestFindOrientationsConfig(TestConfig): - @classmethod def get_reference_data(cls): return reference_data - def test_gvecs(self): self.assertFalse( self.cfgs[0].find_orientations.extract_measured_g_vectors - ) + ) self.assertTrue( self.cfgs[1].find_orientations.extract_measured_g_vectors - ) + ) self.assertTrue( self.cfgs[2].find_orientations.extract_measured_g_vectors - ) - + ) def test_threshold(self): - self.assertEqual( - self.cfgs[0].find_orientations.threshold, - 1 - ) - self.assertEqual( - self.cfgs[1].find_orientations.threshold, - 5 - ) - self.assertEqual( - self.cfgs[2].find_orientations.threshold, - 5 - ) - + self.assertEqual(self.cfgs[0].find_orientations.threshold, 1) + self.assertEqual(self.cfgs[1].find_orientations.threshold, 5) + self.assertEqual(self.cfgs[2].find_orientations.threshold, 5) def test_use_quaternion_grid(self): self.assertEqual( - self.cfgs[0].find_orientations.use_quaternion_grid, - None - ) + self.cfgs[0].find_orientations.use_quaternion_grid, None + ) self.assertRaises( IOError, - getattr, self.cfgs[1].find_orientations, 'use_quaternion_grid' - ) + getattr, + self.cfgs[1].find_orientations, + 'use_quaternion_grid', + ) self.assertEqual( self.cfgs[2].find_orientations.use_quaternion_grid, - test_data['existing_file'] - ) - + test_data['existing_file'], + ) class TestClusteringConfig(TestConfig): - @classmethod def get_reference_data(cls): return reference_data - def test_algorithm(self): self.assertEqual( - self.cfgs[0].find_orientations.clustering.algorithm, - 'dbscan' - ) + self.cfgs[0].find_orientations.clustering.algorithm, 'dbscan' + ) self.assertEqual( - 
self.cfgs[1].find_orientations.clustering.algorithm, - 'sph-dbscan' - ) + self.cfgs[1].find_orientations.clustering.algorithm, 'sph-dbscan' + ) self.assertEqual( - self.cfgs[2].find_orientations.clustering.algorithm, - 'fclusterdata' - ) + self.cfgs[2].find_orientations.clustering.algorithm, 'fclusterdata' + ) self.assertRaises( RuntimeError, - getattr, self.cfgs[3].find_orientations.clustering, 'algorithm', - ) - + getattr, + self.cfgs[3].find_orientations.clustering, + 'algorithm', + ) def test_completeness(self): self.assertRaises( RuntimeError, - getattr, self.cfgs[0].find_orientations.clustering, 'completeness', - ) + getattr, + self.cfgs[0].find_orientations.clustering, + 'completeness', + ) self.assertEqual( - self.cfgs[1].find_orientations.clustering.completeness, - 0.35 - ) - + self.cfgs[1].find_orientations.clustering.completeness, 0.35 + ) def test_radius(self): self.assertRaises( RuntimeError, - getattr, self.cfgs[0].find_orientations.clustering, 'radius', - ) - self.assertEqual( - self.cfgs[1].find_orientations.clustering.radius, - 1.0 - ) - - + getattr, + self.cfgs[0].find_orientations.clustering, + 'radius', + ) + self.assertEqual(self.cfgs[1].find_orientations.clustering.radius, 1.0) class TestOmegaConfig(TestConfig): - @classmethod def get_reference_data(cls): return reference_data - def test_period(self): self.assertEqual( - self.cfgs[0].find_orientations.omega.period, - [-180, 180] - ) - self.assertEqual( - self.cfgs[1].find_orientations.omega.period, - [0, 360] - ) + self.cfgs[0].find_orientations.omega.period, [-180, 180] + ) + self.assertEqual(self.cfgs[1].find_orientations.omega.period, [0, 360]) ## Do we allow ranges going backwards? 
- #self.assertEqual( + # self.assertEqual( # self.cfgs[2].find_orientations.omega.period, # [0, -360] # ) self.assertRaises( RuntimeError, - getattr, self.cfgs[3].find_orientations.omega, 'period' - ) - + getattr, + self.cfgs[3].find_orientations.omega, + 'period', + ) def test_tolerance(self): - self.assertEqual( - self.cfgs[0].find_orientations.omega.tolerance, - 0.5 - ) - self.assertEqual( - self.cfgs[1].find_orientations.omega.tolerance, - 1.0 - ) - self.assertEqual( - self.cfgs[2].find_orientations.omega.tolerance, - 3.0 - ) - + self.assertEqual(self.cfgs[0].find_orientations.omega.tolerance, 0.5) + self.assertEqual(self.cfgs[1].find_orientations.omega.tolerance, 1.0) + self.assertEqual(self.cfgs[2].find_orientations.omega.tolerance, 3.0) class TestEtaConfig(TestConfig): - @classmethod def get_reference_data(cls): return reference_data - def test_tolerance(self): - self.assertEqual( - self.cfgs[0].find_orientations.eta.tolerance, - 0.5 - ) - self.assertEqual( - self.cfgs[1].find_orientations.eta.tolerance, - 2.0 - ) - + self.assertEqual(self.cfgs[0].find_orientations.eta.tolerance, 0.5) + self.assertEqual(self.cfgs[1].find_orientations.eta.tolerance, 2.0) def test_mask(self): - self.assertEqual( - self.cfgs[0].find_orientations.eta.mask, - 5 - ) - self.assertEqual( - self.cfgs[1].find_orientations.eta.mask, - 10 - ) - self.assertEqual( - self.cfgs[2].find_orientations.eta.mask, - 10 - ) - + self.assertEqual(self.cfgs[0].find_orientations.eta.mask, 5) + self.assertEqual(self.cfgs[1].find_orientations.eta.mask, 10) + self.assertEqual(self.cfgs[2].find_orientations.eta.mask, 10) def test_range(self): @@ -258,100 +205,83 @@ def test_range(self): class TestSeedSearchConfig(TestConfig): - @classmethod def get_reference_data(cls): return reference_data - def test_hkl_seeds(self): self.assertRaises( RuntimeError, - getattr, self.cfgs[0].find_orientations.seed_search, 'hkl_seeds' - ) + getattr, + self.cfgs[0].find_orientations.seed_search, + 'hkl_seeds', + ) 
self.assertEqual( - self.cfgs[2].find_orientations.seed_search.hkl_seeds, - [1] - ) + self.cfgs[2].find_orientations.seed_search.hkl_seeds, [1] + ) self.assertEqual( - self.cfgs[3].find_orientations.seed_search.hkl_seeds, - [1, 2] - ) - + self.cfgs[3].find_orientations.seed_search.hkl_seeds, [1, 2] + ) def test_fiber_step(self): self.assertEqual( - self.cfgs[0].find_orientations.seed_search.fiber_step, - 0.5 - ) + self.cfgs[0].find_orientations.seed_search.fiber_step, 0.5 + ) self.assertEqual( - self.cfgs[1].find_orientations.seed_search.fiber_step, - 1.0 - ) + self.cfgs[1].find_orientations.seed_search.fiber_step, 1.0 + ) self.assertEqual( - self.cfgs[2].find_orientations.seed_search.fiber_step, - 2.0 - ) - + self.cfgs[2].find_orientations.seed_search.fiber_step, 2.0 + ) class TestOrientationMapsConfig(TestConfig): - @classmethod def get_reference_data(cls): return reference_data - def test_active_hkls(self): self.assertEqual( - self.cfgs[0].find_orientations.orientation_maps.active_hkls, - None - ) + self.cfgs[0].find_orientations.orientation_maps.active_hkls, None + ) self.assertEqual( - self.cfgs[1].find_orientations.orientation_maps.active_hkls, - [1] - ) + self.cfgs[1].find_orientations.orientation_maps.active_hkls, [1] + ) self.assertEqual( - self.cfgs[2].find_orientations.orientation_maps.active_hkls, - [1, 2] - ) - + self.cfgs[2].find_orientations.orientation_maps.active_hkls, [1, 2] + ) def test_bin_frames(self): self.assertEqual( - self.cfgs[0].find_orientations.orientation_maps.bin_frames, - 1 - ) + self.cfgs[0].find_orientations.orientation_maps.bin_frames, 1 + ) self.assertEqual( - self.cfgs[1].find_orientations.orientation_maps.bin_frames, - 2 - ) - + self.cfgs[1].find_orientations.orientation_maps.bin_frames, 2 + ) def test_file(self): self.assertEqual( self.cfgs[0].find_orientations.orientation_maps.file, - Path(test_data['tempdir']) / "analysis_actmat_eta-ome_maps.npz" + Path(test_data['tempdir']) / "analysis_actmat_eta-ome_maps.npz", ) 
self.assertEqual( self.cfgs[1].find_orientations.orientation_maps.file, - Path(test_data['tempdir']) / test_data['nonexistent_file'] - ) + Path(test_data['tempdir']) / test_data['nonexistent_file'], + ) self.assertEqual( str(self.cfgs[2].find_orientations.orientation_maps.file), - test_data['existing_file'] - ) + test_data['existing_file'], + ) def test_threshold(self): self.assertRaises( RuntimeError, getattr, self.cfgs[0].find_orientations.orientation_maps, - 'threshold' - ) + 'threshold', + ) self.assertEqual( - self.cfgs[1].find_orientations.orientation_maps.threshold, - 100 - ) + self.cfgs[1].find_orientations.orientation_maps.threshold, 100 + ) diff --git a/tests/config/test_fit_grains.py b/tests/config/test_fit_grains.py index 4d95a3cbf..ae3394c0b 100644 --- a/tests/config/test_fit_grains.py +++ b/tests/config/test_fit_grains.py @@ -3,8 +3,8 @@ from .common import TestConfig, test_data -reference_data = \ -""" +reference_data = ( + """ analysis_name: analysis --- fit_grains: @@ -48,12 +48,13 @@ tthmax: 1.2 sfacmax: 1.3 pintmax: 1.4 -""" % test_data +""" + % test_data +) class TestFitGrainsConfig(TestConfig): - @classmethod def get_reference_data(cls): return reference_data @@ -65,14 +66,10 @@ def test_do_fit(self): def test_estimate(self): self.assertEqual(self.cfgs[0].fit_grains.estimate, None) # nonexistent file needs to return None + self.assertEqual(self.cfgs[1].fit_grains.estimate, None) self.assertEqual( - self.cfgs[1].fit_grains.estimate, - None - ) - self.assertEqual( - self.cfgs[2].fit_grains.estimate, - test_data['existing_file'] - ) + self.cfgs[2].fit_grains.estimate, test_data['existing_file'] + ) def test_npdiv(self): self.assertEqual(self.cfgs[0].fit_grains.npdiv, 2) @@ -80,9 +77,8 @@ def test_npdiv(self): def test_threshold(self): self.assertRaises( - RuntimeError, - getattr, self.cfgs[0].fit_grains, 'threshold' - ) + RuntimeError, getattr, self.cfgs[0].fit_grains, 'threshold' + ) self.assertEqual(self.cfgs[1].fit_grains.threshold, 1850) def 
test_tth_max(self): @@ -90,9 +86,8 @@ def test_tth_max(self): self.assertFalse(self.cfgs[1].fit_grains.tth_max) self.assertEqual(self.cfgs[2].fit_grains.tth_max, 15) self.assertRaises( - RuntimeError, - getattr, self.cfgs[3].fit_grains, 'tth_max' - ) + RuntimeError, getattr, self.cfgs[3].fit_grains, 'tth_max' + ) class TestToleranceConfig(TestConfig): @@ -103,45 +98,24 @@ def get_reference_data(cls): def test_eta(self): self.assertRaises( - RuntimeError, - getattr, self.cfgs[0].fit_grains.tolerance, 'eta' - ) - self.assertEqual( - self.cfgs[1].fit_grains.tolerance.eta, - [1, 1] - ) - self.assertEqual( - self.cfgs[2].fit_grains.tolerance.eta, - [1, 2] - ) + RuntimeError, getattr, self.cfgs[0].fit_grains.tolerance, 'eta' + ) + self.assertEqual(self.cfgs[1].fit_grains.tolerance.eta, [1, 1]) + self.assertEqual(self.cfgs[2].fit_grains.tolerance.eta, [1, 2]) def test_omega(self): self.assertRaises( - RuntimeError, - getattr, self.cfgs[0].fit_grains.tolerance, 'omega' - ) - self.assertEqual( - self.cfgs[1].fit_grains.tolerance.omega, - [2, 2] - ) - self.assertEqual( - self.cfgs[2].fit_grains.tolerance.omega, - [3, 4] - ) + RuntimeError, getattr, self.cfgs[0].fit_grains.tolerance, 'omega' + ) + self.assertEqual(self.cfgs[1].fit_grains.tolerance.omega, [2, 2]) + self.assertEqual(self.cfgs[2].fit_grains.tolerance.omega, [3, 4]) def test_tth(self): self.assertRaises( - RuntimeError, - getattr, self.cfgs[0].fit_grains.tolerance, 'tth' - ) - self.assertEqual( - self.cfgs[1].fit_grains.tolerance.tth, - [3, 3] - ) - self.assertEqual( - self.cfgs[2].fit_grains.tolerance.tth, - [5, 6] - ) + RuntimeError, getattr, self.cfgs[0].fit_grains.tolerance, 'tth' + ) + self.assertEqual(self.cfgs[1].fit_grains.tolerance.tth, [3, 3]) + self.assertEqual(self.cfgs[2].fit_grains.tolerance.tth, [5, 6]) class TestExclusions(TestConfig): diff --git a/tests/config/test_image_series.py b/tests/config/test_image_series.py index ece0e333b..5a9d7c683 100644 --- a/tests/config/test_image_series.py +++ 
b/tests/config/test_image_series.py @@ -3,8 +3,8 @@ from .common import TestConfig, test_data -reference_data = \ -""" +reference_data = ( + """ image_series: format: array data: @@ -12,23 +12,20 @@ args: a1 - filename: f2 args: a2 -""" % test_data +""" + % test_data +) class TestImageSeries(TestConfig): - @classmethod def get_reference_data(cls): return reference_data - def test_format(self): - self.assertEqual( - 'array', - self.cfgs[0].get('image_series:format') - ) + self.assertEqual('array', self.cfgs[0].get('image_series:format')) def test_data(self): diff --git a/tests/config/test_instrument.py b/tests/config/test_instrument.py index 6bd4d0072..3afcf906c 100644 --- a/tests/config/test_instrument.py +++ b/tests/config/test_instrument.py @@ -1,22 +1,26 @@ import os -import hexrd.instrument +import hexrd.core.instrument from .common import TestConfig, test_data + try: - from hexrd.config.instrument import (Instrument, Beam, OscillationStage, - Detector) + from hexrd.hedm.config.instrument import ( + Instrument, + Beam, + OscillationStage, + Detector, + ) except: pass import pytest pytest.skip( - "This module needs updating--skipping for now", - allow_module_level=True + "This module needs updating--skipping for now", allow_module_level=True ) -reference_data = \ -""" +reference_data = ( + """ beam: {} --- beam: @@ -56,7 +60,9 @@ tilt_angles: [0.00044459111576242654, 0.003958638944891969, -0.47488346109306645] --- instrument: instrument.yaml -""" % test_data +""" + % test_data +) class TestInstrument(TestConfig): @@ -68,30 +74,44 @@ def get_reference_data(cls): def test_beam(self): icfg = Instrument(self.cfgs[1]) b = icfg.beam - self.assertTrue(isinstance(b, hexrd.instrument.beam.Beam), "Failed to produce a Beam instance") + self.assertTrue( + isinstance(b, hexrd.core.instrument.beam.Beam), + "Failed to produce a Beam instance", + ) def test_oscillation_stage(self): icfg = Instrument(self.cfgs[2]) ostage = icfg.oscillation_stage - 
self.assertTrue(isinstance(ostage, hexrd.instrument.oscillation_stage.OscillationStage), - "Failed to produce an OscillationStage instance") + self.assertTrue( + isinstance( + ostage, + hexrd.core.instrument.oscillation_stage.OscillationStage, + ), + "Failed to produce an OscillationStage instance", + ) def test_detector(self): cfg = self.cfgs[3] icfg = Detector(cfg, 'GE1') det = icfg.detector(Beam(cfg).beam) - self.assertTrue(isinstance(det, hexrd.instrument.PlanarDetector), - "Failed to produce an Detector instance") + self.assertTrue( + isinstance(det, hexrd.core.instrument.PlanarDetector), + "Failed to produce an Detector instance", + ) def test_detector_dict(self): icfg = Instrument(self.cfgs[3]) dd = icfg.detector_dict - self.assertTrue(isinstance(dd, dict), - "Failed to produce an Detector Dictionary instance") + self.assertTrue( + isinstance(dd, dict), + "Failed to produce an Detector Dictionary instance", + ) for k in dd: d = dd[k] - self.assertTrue(isinstance(d, hexrd.instrument.PlanarDetector), - "Detector dictionary values are not detector instances") + self.assertTrue( + isinstance(d, hexrd.core.instrument.PlanarDetector), + "Detector dictionary values are not detector instances", + ) class TestBeam(TestConfig): @@ -103,7 +123,9 @@ def get_reference_data(cls): def test_beam_energy_dflt(self): bcfg = Beam(self.cfgs[0]) energy = bcfg._energy - self.assertEqual(energy, Beam.beam_energy_DFLT, "Incorrect default beam energy") + self.assertEqual( + energy, Beam.beam_energy_DFLT, "Incorrect default beam energy" + ) def test_beam_energy(self): bcfg = Beam(self.cfgs[1]) @@ -136,25 +158,37 @@ def get_reference_data(cls): def test_chi_dflt(self): oscfg = OscillationStage(self.cfgs[0]) - self.assertEqual(oscfg._chi, OscillationStage.chi_DFLT, "Incorrect default chi for oscillation stage") + self.assertEqual( + oscfg._chi, + OscillationStage.chi_DFLT, + "Incorrect default chi for oscillation stage", + ) def test_chi(self): oscfg = OscillationStage(self.cfgs[2]) - 
self.assertEqual(oscfg._chi, 0.05, "Incorrect default chi for oscillation stage") + self.assertEqual( + oscfg._chi, 0.05, "Incorrect default chi for oscillation stage" + ) def test_tvec_dflt(self): oscfg = OscillationStage(self.cfgs[0]) tvec_dflt = OscillationStage.tvec_DFLT tvec = oscfg._tvec - self.assertEqual(tvec[0], tvec_dflt[0], "Incorrect default translation vector") - self.assertEqual(tvec[1], tvec_dflt[1], "Incorrect default translation vector") - self.assertEqual(tvec[2], tvec_dflt[2], "Incorrect default translation vector") + self.assertEqual( + tvec[0], tvec_dflt[0], "Incorrect default translation vector" + ) + self.assertEqual( + tvec[1], tvec_dflt[1], "Incorrect default translation vector" + ) + self.assertEqual( + tvec[2], tvec_dflt[2], "Incorrect default translation vector" + ) def test_tvec(self): oscfg = OscillationStage(self.cfgs[2]) tvec = oscfg._tvec - self.assertEqual(tvec[0], 1., "Incorrect translation vector") - self.assertEqual(tvec[1], 2., "Incorrect translation vector") - self.assertEqual(tvec[2], 3., "Incorrect translation vector") + self.assertEqual(tvec[0], 1.0, "Incorrect translation vector") + self.assertEqual(tvec[1], 2.0, "Incorrect translation vector") + self.assertEqual(tvec[2], 3.0, "Incorrect translation vector") diff --git a/tests/config/test_material.py b/tests/config/test_material.py index b3d6e77bd..c425e9f58 100644 --- a/tests/config/test_material.py +++ b/tests/config/test_material.py @@ -1,10 +1,9 @@ from .common import TestConfig, test_data -from hexrd.config.material import TTHW_DFLT, DMIN_DFLT -from hexrd.config.utils import get_exclusion_parameters +from hexrd.core.config.material import TTHW_DFLT, DMIN_DFLT +from hexrd.core.config.utils import get_exclusion_parameters -reference_data = \ -""" +reference_data = """ material: definitions: %(existing_file)s # active: # not set to test error @@ -48,28 +47,22 @@ class TestMaterialConfig(TestConfig): - @classmethod def get_reference_data(cls): return reference_data % 
test_data - def test_definitions(self): self.assertEqual( - self.cfgs[0].material.definitions, - test_data['existing_file'] - ) + self.cfgs[0].material.definitions, test_data['existing_file'] + ) self.assertRaises( - IOError, - getattr, self.cfgs[1].material, 'definitions' - ) - + IOError, getattr, self.cfgs[1].material, 'definitions' + ) def test_active(self): self.assertRaises( - RuntimeError, - getattr, self.cfgs[0].material, 'active' - ) + RuntimeError, getattr, self.cfgs[0].material, 'active' + ) self.assertEqual(self.cfgs[1].material.active, 'ruby') self.assertEqual(self.cfgs[2].material.active, 'CeO2') diff --git a/tests/config/test_root.py b/tests/config/test_root.py index db829dde1..3e73887dc 100644 --- a/tests/config/test_root.py +++ b/tests/config/test_root.py @@ -4,11 +4,11 @@ from unittest import skipIf from .common import TestConfig, test_data -from hexrd import config +from hexrd.hedm import config -reference_data = \ -""" +reference_data = ( + """ analysis_name: analysis #working_dir: # not set to test defaulting to cwd --- @@ -29,7 +29,9 @@ multiprocessing: -1000 --- multiprocessing: foo -""" % test_data +""" + % test_data +) class TestRootConfig(TestConfig): @@ -41,8 +43,8 @@ def get_reference_data(cls): def test_analysis_dir(self): self.assertEqual( str(self.cfgs[0].analysis_dir), - os.path.join(os.getcwd(), 'analysis') - ) + os.path.join(os.getcwd(), 'analysis'), + ) def test_analysis_name(self): self.assertEqual(self.cfgs[0].analysis_name, 'analysis') @@ -66,9 +68,12 @@ def test_working_dir(self): str(self.cfgs[7].working_dir), test_data['existing_path'] ) self.assertRaises( - IOError, setattr, self.cfgs[7], 'working_dir', - test_data['nonexistent_path'] - ) + IOError, + setattr, + self.cfgs[7], + 'working_dir', + test_data['nonexistent_path'], + ) @skipIf(mp.cpu_count() < 2, 'test requires at least two cores') def test_multiprocessing(self): @@ -76,11 +81,11 @@ def test_multiprocessing(self): self.assertEqual(self.cfgs[0].multiprocessing, 
ncpus - 1) self.assertEqual(self.cfgs[1].multiprocessing, ncpus - 1) self.assertEqual(self.cfgs[2].multiprocessing, ncpus) - self.assertEqual(self.cfgs[3].multiprocessing, ncpus//2) + self.assertEqual(self.cfgs[3].multiprocessing, ncpus // 2) self.assertEqual(self.cfgs[4].multiprocessing, 2) self.assertEqual(self.cfgs[5].multiprocessing, ncpus) self.assertEqual(self.cfgs[6].multiprocessing, 1) - self.assertEqual(self.cfgs[7].multiprocessing, ncpus-1) + self.assertEqual(self.cfgs[7].multiprocessing, ncpus - 1) self.cfgs[7].multiprocessing = 1 self.assertEqual(self.cfgs[7].multiprocessing, 1) self.cfgs[7].multiprocessing = 'all' @@ -89,11 +94,10 @@ def test_multiprocessing(self): self.assertEqual(self.cfgs[7].multiprocessing, 2) self.assertRaises( RuntimeError, setattr, self.cfgs[7], 'multiprocessing', 'foo' - ) + ) self.assertRaises( RuntimeError, setattr, self.cfgs[7], 'multiprocessing', -2 - ) - + ) class TestSingleConfig(TestConfig): diff --git a/tests/find_orientations_testing.py b/tests/find_orientations_testing.py index d8a0907f3..6e3042c7a 100755 --- a/tests/find_orientations_testing.py +++ b/tests/find_orientations_testing.py @@ -11,8 +11,10 @@ import numpy as np -from hexrd.material.crystallography import PlaneData -from hexrd.rotations import misorientation + +# TODO: Check that this test is still sensible after PlaneData change. 
+from hexrd.core.material.crystallography import PlaneData +from hexrd.core.rotations import misorientation # ============================================================================= @@ -65,12 +67,21 @@ def compare_quaternion_lists(new_quats, ref_quats, tol=0.05): + "is greater than threshold" ) + # ============================================================================= # ETA-OMEGA MAPS # ============================================================================= -EOMap = namedtuple('EOMap', - ['data', 'eta', 'eta_edges', 'omega', 'omega_edges', - 'hkl_indices', 'plane_data'] +EOMap = namedtuple( + 'EOMap', + [ + 'data', + 'eta', + 'eta_edges', + 'omega', + 'omega_edges', + 'hkl_indices', + 'plane_data', + ], ) _keys = [ @@ -81,7 +92,7 @@ def compare_quaternion_lists(new_quats, ref_quats, tol=0.05): 'omegas', 'omeEdges', 'planeData_args', - 'planeData_hkls' + 'planeData_hkls', ] @@ -95,7 +106,7 @@ def load(npz): e['omegas'], e['omeEdges'], e['iHKLList'], - plane_data(e) + plane_data(e), ) @@ -119,103 +130,110 @@ def plane_data(e): class Comparison: - def __init__(self, e1, e2): - self.e1 = e1 - self.e2 = e2 - self.tol = 1.0e-6 - - def compare(self): - """Compare whether maps are same or not""" - same = self.eta()[0] and self.omega()[0] and self.data()[0] - return same - - def eta(self): - """compare etas""" - eta1 = self.e1.eta - eta2 = self.e2.eta - l1, l2 = len(eta1), len(eta2) - if l1 != l2: - msg = "eta: lengths differ: %d and %d" % (l1, l2) + def __init__(self, e1, e2): + self.e1 = e1 + self.e2 = e2 + self.tol = 1.0e-6 + + def compare(self): + """Compare whether maps are same or not""" + same = self.eta()[0] and self.omega()[0] and self.data()[0] + return same + + def eta(self): + """compare etas""" + eta1 = self.e1.eta + eta2 = self.e2.eta + l1, l2 = len(eta1), len(eta2) + if l1 != l2: + msg = "eta: lengths differ: %d and %d" % (l1, l2) + logging.info(msg) + return False, msg + + nrmdiff = np.linalg.norm(eta1 - eta2) + if nrmdiff < self.tol: + 
return True, "eta: same" + else: + msg = "eta: norm of difference: %s" % nrmdiff + logging.info(msg) + return False, msg + + def omega(self): + """compare omegas""" + omega1 = self.e1.omega + omega2 = self.e2.omega + l1, l2 = len(omega1), len(omega2) + if l1 != l2: + msg = "omega: lengths differ: %d and %d" % (l1, l2) + logging.info(msg) + return False, msg + + nrmdiff = np.linalg.norm(omega1 - omega2) + if nrmdiff < self.tol: + return True, "omega: same" + else: + msg = "omega: norm of difference: %s" % nrmdiff + logging.info(msg) + return False, msg + + def hkl_indices(self): + hkl1, hkl2 = self.e1.hkl_indices, self.e2.hkl_indices + n1, n2 = len(hkl1), len(hkl2) + if n1 != n2: + return False, "hkl: lengths differ: %d and %d" % (n1, n2) + for i in range(n1): + if hkl1[i] != hkl2[i]: + return False, "hkl: indices not the same" + + return True, "hkl: same" + + def data(self): + d1, d2 = self.e1.data, self.e2.data + if d1.shape != d2.shape: + msg = "data shapes do not match: " % (d1.shape, d2.shape) + logging.info(msg) + return False, msg + + for ind in range(d1.shape[0]): + d1i, d2i = d1[ind], d2[ind] + nnan1 = np.count_nonzero(np.isnan(d1i)) + nnan2 = np.count_nonzero(np.isnan(d2i)) + # print("number nans: ", nnan1, nnan2) + if nnan1 > 0: + d1i = np.nan_to_num(d1i) + if nnan2 > 0: + d2i = np.nan_to_num(d1i) + + nnz1 = np.count_nonzero(d1i) + nnz2 = np.count_nonzero(d2i) + if nnz1 != nnz2: + msg = "data: map %d: number nonzero differ: %d, %d" % ( + ind, + nnz1, + nnz2, + ) logging.info(msg) return False, msg - nrmdiff = np.linalg.norm(eta1 - eta2) - if nrmdiff < self.tol: - return True, "eta: same" - else: - msg = "eta: norm of difference: %s" % nrmdiff + overlapping = d1i.astype(bool) | d2i.astype(bool) + nnz = np.count_nonzero(overlapping) + if nnz != nnz1: + msg = "data: map %d: overlaps differ: %d, %d" % ( + ind, + nnz1, + nnz, + ) logging.info(msg) return False, msg - def omega(self): - """compare omegas""" - omega1 = self.e1.omega - omega2 = self.e2.omega - 
l1, l2 = len(omega1), len(omega2) - if l1 != l2: - msg = "omega: lengths differ: %d and %d" % (l1, l2) - logging.info(msg) - return False, msg - - nrmdiff = np.linalg.norm(omega1 - omega2) - if nrmdiff < self.tol: - return True, "omega: same" + d1over = d1i[overlapping] + d2over = d2i[overlapping] + diff = np.linalg.norm(d1over - d2over) + if diff < self.tol: + return True, "data: same" else: - msg = "omega: norm of difference: %s" % nrmdiff - logging.info(msg) - return False, msg - - def hkl_indices(self): - hkl1, hkl2 = self.e1.hkl_indices, self.e2.hkl_indices - n1, n2 = len(hkl1), len(hkl2) - if n1 != n2: - return False, "hkl: lengths differ: %d and %d" % (n1, n2) - for i in range(n1): - if hkl1[i] != hkl2[i]: - return False, "hkl: indices not the same" - - return True, "hkl: same" - - def data(self): - d1, d2 = self.e1.data, self.e2.data - if d1.shape != d2.shape: - msg = "data shapes do not match: " % (d1.shape, d2.shape) + msg = "data: map %s: map values differ" % (ind) logging.info(msg) return False, msg - for ind in range(d1.shape[0]): - d1i, d2i = d1[ind], d2[ind] - nnan1 = np.count_nonzero(np.isnan(d1i)) - nnan2 = np.count_nonzero(np.isnan(d2i)) - # print("number nans: ", nnan1, nnan2) - if nnan1 > 0: - d1i = np.nan_to_num(d1i) - if nnan2 > 0: - d2i = np.nan_to_num(d1i) - - nnz1 = np.count_nonzero(d1i) - nnz2 = np.count_nonzero(d2i) - if nnz1 != nnz2: - msg = "data: map %d: number nonzero differ: %d, %d" % (ind, nnz1, nnz2) - logging.info(msg) - return False, msg - - overlapping = d1i.astype(bool) | d2i.astype(bool) - nnz = np.count_nonzero(overlapping) - if nnz != nnz1: - msg = "data: map %d: overlaps differ: %d, %d" % (ind, nnz1, nnz) - logging.info(msg) - return False, msg - - d1over = d1i[overlapping] - d2over = d2i[overlapping] - diff = np.linalg.norm(d1over - d2over) - if diff < self.tol: - return True, "data: same" - else: - msg = "data: map %s: map values differ" % (ind) - logging.info(msg) - return False, msg - - - return True, "data: same" + 
return True, "data: same" diff --git a/tests/fit_grains_check.py b/tests/fit_grains_check.py index 0a89d62ed..085a82db7 100755 --- a/tests/fit_grains_check.py +++ b/tests/fit_grains_check.py @@ -9,14 +9,15 @@ import numpy as np -from hexrd import config -from hexrd.fitgrains import fit_grains -from hexrd import matrixutil as mutil -from hexrd import rotations as rot +from hexrd.hedm import config +from hexrd.hedm.fitgrains import fit_grains +from hexrd.core import matrixutil as mutil +from hexrd.core import rotations as rot -def compare_grain_fits(fit_grain_params, ref_grain_params, - mtol=1.e-4, ctol=1.e-3, vtol=1.e-4): +def compare_grain_fits( + fit_grain_params, ref_grain_params, mtol=1.0e-4, ctol=1.0e-3, vtol=1.0e-4 +): """ Executes comparison between reference and fit grain parameters for ff-HEDM for the same initial parameters. @@ -47,11 +48,10 @@ def compare_grain_fits(fit_grain_params, ref_grain_params, ii = 0 for fg, rg in zip(fit_grain_params, ref_grain_params): # test_orientation - quats = rot.quatOfExpMap( - np.vstack([fg[:3], rg[:3]]).T + quats = rot.quatOfExpMap(np.vstack([fg[:3], rg[:3]]).T) + ang, mis = rot.misorientation( + quats[:, 0].reshape(4, 1), quats[:, 1].reshape(4, 1) ) - ang, mis = rot.misorientation(quats[:, 0].reshape(4, 1), - quats[:, 1].reshape(4, 1)) if ang <= mtol: cresult = True else: @@ -64,10 +64,12 @@ def compare_grain_fits(fit_grain_params, ref_grain_params, return False # test strain - vmat_fit = mutil.symmToVecMV(np.linalg.inv(mutil.vecMVToSymm(fg[6:])), - scale=False) - vmat_ref = mutil.symmToVecMV(np.linalg.inv(mutil.vecMVToSymm(rg[6:])), - scale=False) + vmat_fit = mutil.symmToVecMV( + np.linalg.inv(mutil.vecMVToSymm(fg[6:])), scale=False + ) + vmat_ref = mutil.symmToVecMV( + np.linalg.inv(mutil.vecMVToSymm(rg[6:])), scale=False + ) if np.linalg.norm(vmat_fit - vmat_ref, ord=1) > vtol: print("stretch components for grain %d do not agree." 
% ii) return False @@ -79,26 +81,33 @@ def compare_grain_fits(fit_grain_params, ref_grain_params, if __name__ == '__main__': parser = argparse.ArgumentParser( - description="Montage of spot data for a specifed G-vector family") + description="Montage of spot data for a specifed G-vector family" + ) - parser.add_argument('cfg_file', - help="yaml HEDM config filename", - type=str) - parser.add_argument('gt_ref', - help="reference grain table filename", - type=str) + parser.add_argument('cfg_file', help="yaml HEDM config filename", type=str) + parser.add_argument( + 'gt_ref', help="reference grain table filename", type=str + ) - parser.add_argument('-m', '--misorientation', - help="misorientation threshold", - type=float, default=1.e-4) + parser.add_argument( + '-m', + '--misorientation', + help="misorientation threshold", + type=float, + default=1.0e-4, + ) - parser.add_argument('-c', '--centroid', - help="centroid threshold", - type=float, default=1.e-3) + parser.add_argument( + '-c', + '--centroid', + help="centroid threshold", + type=float, + default=1.0e-3, + ) - parser.add_argument('-v', '--stretch', - help="stretch threshold", - type=float, default=1.e-4) + parser.add_argument( + '-v', '--stretch', help="stretch threshold", type=float, default=1.0e-4 + ) args = parser.parse_args() @@ -112,14 +121,19 @@ def compare_grain_fits(fit_grain_params, ref_grain_params, cfg = config.open(cfg_file)[0] grains_table = np.loadtxt(gt_ref, ndmin=2) ref_grain_params = grains_table[:, 3:15] - gresults = fit_grains(cfg, - grains_table, - show_progress=False, - ids_to_refine=None, - write_spots_files=False) + gresults = fit_grains( + cfg, + grains_table, + show_progress=False, + ids_to_refine=None, + write_spots_files=False, + ) cresult = compare_grain_fits( - np.vstack([i[-1] for i in gresults]), ref_grain_params, - mtol=mtol, ctol=ctol, vtol=vtol + np.vstack([i[-1] for i in gresults]), + ref_grain_params, + mtol=mtol, + ctol=ctol, + vtol=vtol, ) if cresult: print("test passed") 
diff --git a/tests/imageseries/common.py b/tests/imageseries/common.py index 1cd81d5a7..c5c56d05a 100644 --- a/tests/imageseries/common.py +++ b/tests/imageseries/common.py @@ -1,7 +1,7 @@ import numpy as np import unittest -from hexrd import imageseries +from hexrd.core import imageseries _NFXY = (3, 7, 5) @@ -13,31 +13,35 @@ class ImageSeriesTest(unittest.TestCase): # random array from randint # a = np.random.randint(20, size=(3, 5, 7)) random_array = np.array( - [[[2, 4, 5, 0, 14, 16, 17], - [18, 17, 5, 19, 2, 8, 17], - [0, 16, 10, 18, 13, 16, 9], - [2, 15, 13, 14, 12, 19, 9], - [0, 3, 4, 11, 8, 8, 3]], - - [[8, 17, 15, 0, 0, 5, 17], - [7, 4, 8, 17, 2, 5, 3], - [14, 1, 12, 4, 6, 19, 2], - [13, 7, 5, 6, 17, 17, 6], - [16, 4, 10, 3, 6, 0, 14]], - - [[17, 3, 8, 3, 15, 6, 18], - [13, 1, 3, 5, 9, 11, 15], - [1, 11, 15, 1, 19, 2, 0], - [5, 0, 12, 11, 12, 10, 11], - [6, 4, 16, 2, 16, 9, 18]]] + [ + [ + [2, 4, 5, 0, 14, 16, 17], + [18, 17, 5, 19, 2, 8, 17], + [0, 16, 10, 18, 13, 16, 9], + [2, 15, 13, 14, 12, 19, 9], + [0, 3, 4, 11, 8, 8, 3], + ], + [ + [8, 17, 15, 0, 0, 5, 17], + [7, 4, 8, 17, 2, 5, 3], + [14, 1, 12, 4, 6, 19, 2], + [13, 7, 5, 6, 17, 17, 6], + [16, 4, 10, 3, 6, 0, 14], + ], + [ + [17, 3, 8, 3, 15, 6, 18], + [13, 1, 3, 5, 9, 11, 15], + [1, 11, 15, 1, 19, 2, 0], + [5, 0, 12, 11, 12, 10, 11], + [6, 4, 16, 2, 16, 9, 18], + ], + ] ) def make_array_ims(): """returns both the array and the array imageseries""" - is_a = imageseries.open( - None, 'array', data=random_array, meta=make_meta() - ) + is_a = imageseries.open(None, 'array', data=random_array, meta=make_meta()) return random_array, is_a @@ -47,7 +51,7 @@ def make_meta(): def make_omega_meta(n): - return np.linspace((0,0), (1, 1), n) + return np.linspace((0, 0), (1, 1), n) def compare(ims1, ims2): @@ -57,8 +61,8 @@ def compare(ims1, ims2): if ims1.dtype != ims2.dtype: raise ValueError( - "types do not match: %s is not %s" % - (repr(ims1.dtype), repr(ims2.dtype)) + "types do not match: %s is not %s" + % 
(repr(ims1.dtype), repr(ims2.dtype)) ) maxdiff = 0.0 diff --git a/tests/imageseries/test_formats.py b/tests/imageseries/test_formats.py index 6d03c6453..a0a8cf4cb 100644 --- a/tests/imageseries/test_formats.py +++ b/tests/imageseries/test_formats.py @@ -7,7 +7,7 @@ from .common import ImageSeriesTest from .common import make_array_ims, compare, compare_meta -from hexrd import imageseries +from hexrd.core import imageseries class ImageSeriesFormatTest(ImageSeriesTest): @@ -37,56 +37,59 @@ def test_fmth5(self): is_h = imageseries.open(self.h5file, self.fmt, path=self.h5path) diff = compare(self.is_a, is_h) - self.assertAlmostEqual(diff, 0., "h5 reconstruction failed") + self.assertAlmostEqual(diff, 0.0, "h5 reconstruction failed") self.assertTrue(compare_meta(self.is_a, is_h)) def test_fmth5_nparray(self): """HDF5 format with numpy array metadata""" key = 'np-array' - npa = np.array([0,2.0,1.3]) + npa = np.array([0, 2.0, 1.3]) self.is_a.metadata[key] = npa imageseries.write(self.is_a, self.h5file, self.fmt, path=self.h5path) is_h = imageseries.open(self.h5file, self.fmt, path=self.h5path) meta = is_h.metadata diff = np.linalg.norm(meta[key] - npa) - self.assertAlmostEqual(diff, 0., "h5 numpy array metadata failed") + self.assertAlmostEqual(diff, 0.0, "h5 numpy array metadata failed") def test_fmth5_nocompress(self): """HDF5 options: no compression""" - imageseries.write(self.is_a, self.h5file, self.fmt, - path=self.h5path, gzip=0) + imageseries.write( + self.is_a, self.h5file, self.fmt, path=self.h5path, gzip=0 + ) is_h = imageseries.open(self.h5file, self.fmt, path=self.h5path) diff = compare(self.is_a, is_h) - self.assertAlmostEqual(diff, 0., "h5 reconstruction failed") + self.assertAlmostEqual(diff, 0.0, "h5 reconstruction failed") self.assertTrue(compare_meta(self.is_a, is_h)) def test_fmth5_compress_err(self): """HDF5 options: compression level out of range""" with self.assertRaises(ValueError): - imageseries.write(self.is_a, self.h5file, self.fmt, - 
path=self.h5path, gzip=10) + imageseries.write( + self.is_a, self.h5file, self.fmt, path=self.h5path, gzip=10 + ) def test_fmth5_chunk(self): """HDF5 options: chunk size""" - imageseries.write(self.is_a, self.h5file, self.fmt, - path=self.h5path, chunk_rows=0) + imageseries.write( + self.is_a, self.h5file, self.fmt, path=self.h5path, chunk_rows=0 + ) is_h = imageseries.open(self.h5file, self.fmt, path=self.h5path) diff = compare(self.is_a, is_h) - self.assertAlmostEqual(diff, 0., "h5 reconstruction failed") + self.assertAlmostEqual(diff, 0.0, "h5 reconstruction failed") self.assertTrue(compare_meta(self.is_a, is_h)) class TestFormatFrameCache(ImageSeriesFormatTest): def setUp(self): - self.fcfile = os.path.join(self.tmpdir, 'frame-cache.npz') + self.fcfile = os.path.join(self.tmpdir, 'frame-cache.npz') self.fmt = 'frame-cache' self.thresh = 0.5 self.style = 'npz' - self.cache_file='frame-cache.npz' + self.cache_file = 'frame-cache.npz' _, self.is_a = make_array_ims() def tearDown(self): @@ -94,44 +97,59 @@ def tearDown(self): def test_fmtfc(self): """save/load frame-cache format""" - imageseries.write(self.is_a, self.fcfile, self.fmt, style=self.style, - threshold=self.thresh, cache_file=self.cache_file) + imageseries.write( + self.is_a, + self.fcfile, + self.fmt, + style=self.style, + threshold=self.thresh, + cache_file=self.cache_file, + ) is_fc = imageseries.open(self.fcfile, self.fmt, style=self.style) diff = compare(self.is_a, is_fc) - self.assertAlmostEqual(diff, 0., "frame-cache reconstruction failed") + self.assertAlmostEqual(diff, 0.0, "frame-cache reconstruction failed") self.assertTrue(compare_meta(self.is_a, is_fc)) def test_fmtfc_nocache_file(self): """save/load frame-cache format with no cache_file arg""" imageseries.write( - self.is_a, self.fcfile, self.fmt, - threshold=self.thresh, style=self.style + self.is_a, + self.fcfile, + self.fmt, + threshold=self.thresh, + style=self.style, ) is_fc = imageseries.open(self.fcfile, self.fmt, style=self.style) 
diff = compare(self.is_a, is_fc) - self.assertAlmostEqual(diff, 0., "frame-cache reconstruction failed") + self.assertAlmostEqual(diff, 0.0, "frame-cache reconstruction failed") self.assertTrue(compare_meta(self.is_a, is_fc)) def test_fmtfc_nparray(self): """frame-cache format with numpy array metadata""" key = 'np-array' - npa = np.array([0,2.0,1.3]) + npa = np.array([0, 2.0, 1.3]) self.is_a.metadata[key] = npa - imageseries.write(self.is_a, self.fcfile, self.fmt, style=self.style, - threshold=self.thresh, cache_file=self.cache_file + imageseries.write( + self.is_a, + self.fcfile, + self.fmt, + style=self.style, + threshold=self.thresh, + cache_file=self.cache_file, ) is_fc = imageseries.open(self.fcfile, self.fmt, style=self.style) meta = is_fc.metadata diff = np.linalg.norm(meta[key] - npa) - self.assertAlmostEqual(diff, 0., - "frame-cache numpy array metadata failed") + self.assertAlmostEqual( + diff, 0.0, "frame-cache numpy array metadata failed" + ) class TestFormatFrameCache_FCH5(TestFormatFrameCache): def setUp(self): - self.fcfile = os.path.join(self.tmpdir, 'frame-cache.fch5') + self.fcfile = os.path.join(self.tmpdir, 'frame-cache.fch5') self.fmt = 'frame-cache' self.style = 'fch5' self.thresh = 0.5 @@ -143,7 +161,13 @@ def test_fmtfc_nested_metadata(self): metadata = {'int': 1, 'array': np.array([1, 2, 3])} self.is_a.metadata["key"] = metadata - imageseries.write(self.is_a, self.fcfile, self.fmt, style=self.style, - threshold=self.thresh, cache_file=self.cache_file) + imageseries.write( + self.is_a, + self.fcfile, + self.fmt, + style=self.style, + threshold=self.thresh, + cache_file=self.cache_file, + ) is_fc = imageseries.open(self.fcfile, self.fmt, style=self.style) self.assertTrue(compare_meta(self.is_a, is_fc)) diff --git a/tests/imageseries/test_omega.py b/tests/imageseries/test_omega.py index 85d54e927..e8d74d0a1 100644 --- a/tests/imageseries/test_omega.py +++ b/tests/imageseries/test_omega.py @@ -2,8 +2,9 @@ from .common import ImageSeriesTest 
-from hexrd import imageseries -from hexrd.imageseries.omega import OmegaSeriesError, OmegaImageSeries +from hexrd.core import imageseries +from hexrd.core.imageseries.omega import OmegaSeriesError, OmegaImageSeries + class TestOmegaSeries(ImageSeriesTest): @@ -26,7 +27,7 @@ def test_nframes_mismatch(self): def test_negative_delta(self): om = np.zeros((3, 2)) - om[0,1] = -0.5 + om[0, 1] = -0.5 m = dict(omega=om, dtype=float) ims = self.make_ims(3, m) with self.assertRaises(OmegaSeriesError): @@ -34,10 +35,10 @@ def test_negative_delta(self): def test_one_wedge(self): nf = 5 - a = np.linspace(0, nf+1, nf+1) + a = np.linspace(0, nf + 1, nf + 1) om = np.zeros((nf, 2)) - om[:,0] = a[:-1] - om[:,1] = a[1:] + om[:, 0] = a[:-1] + om[:, 1] = a[1:] m = dict(omega=om, dtype=float) ims = self.make_ims(nf, m) oms = OmegaImageSeries(ims) @@ -45,10 +46,10 @@ def test_one_wedge(self): def test_two_wedges(self): nf = 5 - a = np.linspace(0, nf+1, nf+1) + a = np.linspace(0, nf + 1, nf + 1) om = np.zeros((nf, 2)) - om[:,0] = a[:-1] - om[:,1] = a[1:] + om[:, 0] = a[:-1] + om[:, 1] = a[1:] om[3:, :] += 0.1 m = dict(omega=om, dtype=float) ims = self.make_ims(nf, m) @@ -57,10 +58,10 @@ def test_two_wedges(self): def test_compare_omegas(self): nf = 5 - a = np.linspace(0, nf+1, nf+1) + a = np.linspace(0, nf + 1, nf + 1) om = np.zeros((nf, 2)) - om[:,0] = a[:-1] - om[:,1] = a[1:] + om[:, 0] = a[:-1] + om[:, 1] = a[1:] om[3:, :] += 0.1 m = dict(omega=om, dtype=float) ims = self.make_ims(nf, m) @@ -68,21 +69,21 @@ def test_compare_omegas(self): domega = om - oms.omegawedges.omegas dnorm = np.linalg.norm(domega) - msg='omegas from wedges do not match originals' - self.assertAlmostEqual(dnorm, 0., msg=msg) + msg = 'omegas from wedges do not match originals' + self.assertAlmostEqual(dnorm, 0.0, msg=msg) def test_wedge_delta(self): nf = 5 - a = np.linspace(0, nf+1, nf+1) + a = np.linspace(0, nf + 1, nf + 1) om = np.zeros((nf, 2)) - om[:,0] = a[:-1] - om[:,1] = a[1:] + om[:, 0] = a[:-1] + om[:, 1] 
= a[1:] om[3:, :] += 0.1 m = dict(omega=om, dtype=float) ims = self.make_ims(nf, m) oms = OmegaImageSeries(ims) - mydelta =om[nf - 1, 1] - om[nf - 1, 0] + mydelta = om[nf - 1, 1] - om[nf - 1, 0] d = oms.wedge(oms.nwedges - 1) self.assertAlmostEqual(d['delta'], mydelta) diff --git a/tests/imageseries/test_pickleable.py b/tests/imageseries/test_pickleable.py index ebaa05e28..f8f400b3c 100644 --- a/tests/imageseries/test_pickleable.py +++ b/tests/imageseries/test_pickleable.py @@ -4,9 +4,9 @@ import unittest from .common import make_array_ims -from hexrd.imageseries.load.hdf5 import HDF5ImageSeriesAdapter -from hexrd.imageseries.load.framecache import FrameCacheImageSeriesAdapter -from hexrd import imageseries +from hexrd.core.imageseries.load.hdf5 import HDF5ImageSeriesAdapter +from hexrd.core.imageseries.load.framecache import FrameCacheImageSeriesAdapter +from hexrd.core import imageseries class ImageSeriesPickleableTest(unittest.TestCase): diff --git a/tests/imageseries/test_process.py b/tests/imageseries/test_process.py index b80cfca4a..81b25e1eb 100644 --- a/tests/imageseries/test_process.py +++ b/tests/imageseries/test_process.py @@ -2,8 +2,9 @@ from .common import ImageSeriesTest, make_array_ims, make_omega_meta, compare -from hexrd import imageseries -from hexrd.imageseries import process, ImageSeries +from hexrd.core import imageseries +from hexrd.core.imageseries import process, ImageSeries + class TestImageSeriesProcess(ImageSeriesTest): @@ -14,7 +15,7 @@ def _runfliptest(self, a, flip, aflip): is_aflip = imageseries.open(None, 'array', data=aflip) diff = compare(is_aflip, is_p) msg = "flipped [%s] image series failed" % flip - self.assertAlmostEqual(diff, 0., msg=msg) + self.assertAlmostEqual(diff, 0.0, msg=msg) def test_process(self): """Processed image series""" @@ -22,7 +23,7 @@ def test_process(self): is_p = process.ProcessedImageSeries(is_a, []) diff = compare(is_a, is_p) msg = "processed image series failed to reproduce original" - 
self.assertAlmostEqual(diff, 0., msg) + self.assertAlmostEqual(diff, 0.0, msg) def test_process_flip_t(self): """Processed image series: flip transpose""" @@ -60,7 +61,7 @@ def test_process_flip_r90(self): self._runfliptest(a, flip, aflip) def test_process_flip_r270(self): - """Processed image series: flip clockwise 90 """ + """Processed image series: flip clockwise 90""" flip = 'cw90' a, _ = make_array_ims() aflip = np.transpose(a, (0, 2, 1))[:, :, ::-1] @@ -71,12 +72,12 @@ def test_process_dark(self): a, _ = make_array_ims() dark = np.ones_like(a[0]) is_a = imageseries.open(None, 'array', data=a) - apos = np.where(a >= 1, a-1, 0) + apos = np.where(a >= 1, a - 1, 0) is_a1 = imageseries.open(None, 'array', data=apos) ops = [('dark', dark)] is_p = process.ProcessedImageSeries(is_a, ops) diff = compare(is_a1, is_p) - self.assertAlmostEqual(diff, 0., msg="dark image failed") + self.assertAlmostEqual(diff, 0.0, msg="dark image failed") def test_process_framelist(self): a, _ = make_array_ims() @@ -87,10 +88,9 @@ def test_process_framelist(self): is_p = process.ProcessedImageSeries(is_a, ops, frame_list=frames) is_a2 = imageseries.open(None, 'array', data=a[tuple(frames), ...]) diff = compare(is_a2, is_p) - self.assertAlmostEqual(diff, 0., msg="frame list failed") + self.assertAlmostEqual(diff, 0.0, msg="frame list failed") self.assertEqual(len(is_p), len(is_p.metadata["omega"])) - def test_process_shape(self): a, _ = make_array_ims() is_a = imageseries.open(None, 'array', data=a) diff --git a/tests/imageseries/test_stats.py b/tests/imageseries/test_stats.py index e22254bf0..0e2b5d679 100644 --- a/tests/imageseries/test_stats.py +++ b/tests/imageseries/test_stats.py @@ -1,7 +1,7 @@ import numpy as np -from hexrd import imageseries -from hexrd.imageseries import stats +from hexrd.core import imageseries +from hexrd.core.imageseries import stats from .common import ImageSeriesTest, make_array_ims @@ -18,7 +18,7 @@ def test_stats_average(self): is_avg = stats.average(is_a) 
np_avg = np.average(a, axis=0).astype(np.float32) err = np.linalg.norm(np_avg - is_avg) - self.assertAlmostEqual(err, 0., msg="stats.average failed") + self.assertAlmostEqual(err, 0.0, msg="stats.average failed") self.assertEqual(is_avg.dtype, np.float32) def test_stats_median(self): @@ -27,7 +27,7 @@ def test_stats_median(self): ismed = stats.median(is_a) amed = np.median(a, axis=0) err = np.linalg.norm(amed - ismed) - self.assertAlmostEqual(err, 0., msg="stats.median failed") + self.assertAlmostEqual(err, 0.0, msg="stats.median failed") self.assertEqual(ismed.dtype, np.float32) def test_stats_max(self): @@ -36,27 +36,25 @@ def test_stats_max(self): ismax = stats.max(is_a) amax = np.max(a, axis=0) err = np.linalg.norm(amax - ismax) - self.assertAlmostEqual(err, 0., msg="stats.max failed") + self.assertAlmostEqual(err, 0.0, msg="stats.max failed") self.assertEqual(ismax.dtype, is_a.dtype) - def test_stats_min(self): """imageseries.stats: min""" a, is_a = make_array_ims() ismin = stats.min(is_a) amin = np.min(a, axis=0) err = np.linalg.norm(amin - ismin) - self.assertAlmostEqual(err, 0., msg="stats.min failed") + self.assertAlmostEqual(err, 0.0, msg="stats.min failed") self.assertEqual(ismin.dtype, is_a.dtype) - def test_stats_percentile(self): """imageseries.stats: percentile""" a, is_a = make_array_ims() isp90 = stats.percentile(is_a, 90) ap90 = np.percentile(a, 90, axis=0).astype(np.float32) err = np.linalg.norm(ap90 - isp90) - self.assertAlmostEqual(err, 0., msg="stats.percentile failed") + self.assertAlmostEqual(err, 0.0, msg="stats.percentile failed") self.assertEqual(isp90.dtype, np.float32) # These tests compare chunked operations (iterators) to non-chunked ops @@ -71,13 +69,13 @@ def test_stats_average_chunked(self): for ismed1 in stats.average_iter(is_a, 1): pass err = np.linalg.norm(a_avg - ismed1) - self.assertAlmostEqual(err, 0., msg="stats.average failed (1 chunk)") + self.assertAlmostEqual(err, 0.0, msg="stats.average failed (1 chunk)") # Run with 2 
chunks for ismed2 in stats.average_iter(is_a, 2): pass err = np.linalg.norm(a_avg - ismed2) - self.assertAlmostEqual(err, 0., msg="stats.average failed") + self.assertAlmostEqual(err, 0.0, msg="stats.average failed") def test_stats_median_chunked(self): """imageseries.stats: chunked median""" @@ -89,20 +87,20 @@ def test_stats_median_chunked(self): for ismed1 in stats.median_iter(is_a, 1): pass err = np.linalg.norm(a_med - ismed1) - self.assertAlmostEqual(err, 0., msg="stats.average failed (1 chunk)") + self.assertAlmostEqual(err, 0.0, msg="stats.average failed (1 chunk)") # Run with 2 chunks for ismed2 in stats.median_iter(is_a, 2): pass err = np.linalg.norm(a_med - ismed2) - self.assertAlmostEqual(err, 0., msg="stats.average failed (2 chunks)") + self.assertAlmostEqual(err, 0.0, msg="stats.average failed (2 chunks)") # Run with 3 chunks, with buffer for ismed3 in stats.median_iter(is_a, 3, use_buffer=True): pass err = np.linalg.norm(a_med - ismed3) self.assertAlmostEqual( - err, 0., msg="stats.average failed (3 chunks, buffer)" + err, 0.0, msg="stats.average failed (3 chunks, buffer)" ) # Run with 3 chunks, no buffer @@ -110,5 +108,5 @@ def test_stats_median_chunked(self): pass err = np.linalg.norm(a_med - ismed3) self.assertAlmostEqual( - err, 0., msg="stats.average failed (3 chunks, no buffer)" + err, 0.0, msg="stats.average failed (3 chunks, no buffer)" ) diff --git a/tests/matrix_util/test_norms.py b/tests/matrix_util/test_norms.py index 2a33498f4..46e4bd21c 100644 --- a/tests/matrix_util/test_norms.py +++ b/tests/matrix_util/test_norms.py @@ -1,5 +1,5 @@ import numpy as np -from hexrd import matrixutil as mu +from hexrd.core import matrixutil as mu def test_column_norm(n_dim): diff --git a/tests/matrix_util/test_strain_stress_reps.py b/tests/matrix_util/test_strain_stress_reps.py index 0755d2065..ace5f0fde 100644 --- a/tests/matrix_util/test_strain_stress_reps.py +++ b/tests/matrix_util/test_strain_stress_reps.py @@ -4,7 +4,7 @@ """ import numpy as np -from 
hexrd import matrixutil as mu +from hexrd.core import matrixutil as mu def test_stress_repr(): diff --git a/tests/matrix_util/test_vector_and_matrix_math.py b/tests/matrix_util/test_vector_and_matrix_math.py index ae2ff7ae3..a6a86a247 100644 --- a/tests/matrix_util/test_vector_and_matrix_math.py +++ b/tests/matrix_util/test_vector_and_matrix_math.py @@ -1,5 +1,5 @@ import numpy as np -from hexrd import matrixutil as mu +from hexrd.core import matrixutil as mu def test_cross(): diff --git a/tests/planedata/test_exclusion.py b/tests/planedata/test_exclusion.py index f85c0fc47..4d611c6a5 100644 --- a/tests/planedata/test_exclusion.py +++ b/tests/planedata/test_exclusion.py @@ -1,6 +1,7 @@ import numpy as np -from hexrd.material.crystallography import PlaneData +# TODO: Check that this test is still sensible after PlaneData change. +from hexrd.core.material.crystallography import PlaneData def test_exclusion(): diff --git a/tests/planedata/test_init.py b/tests/planedata/test_init.py index 531424475..d102771b9 100644 --- a/tests/planedata/test_init.py +++ b/tests/planedata/test_init.py @@ -1,7 +1,8 @@ import numpy as np import pytest -from hexrd.material.crystallography import PlaneData +# TODO: Check that this test is still sensible after PlaneData change. 
+from hexrd.core.material.crystallography import PlaneData def test_init_with_data_and_from_copy(): @@ -15,17 +16,9 @@ def test_init_with_data_and_from_copy(): pd2 = PlaneData(hkls, pd) pd3 = PlaneData(None, pd) pd4 = PlaneData( - None, - pd, - doTThSort=False, - exclusions=[False, False, False] - ) - pd5 = PlaneData( - None, - pd, - tThMax=6.0, - tThWidth=3.0 + None, pd, doTThSort=False, exclusions=[False, False, False] ) + pd5 = PlaneData(None, pd, tThMax=6.0, tThWidth=3.0) assert pd2.hkls.shape == pd3.hkls.shape and np.all(pd2.hkls == pd3.hkls) assert pd2.hkls.shape == pd4.hkls.shape and np.all(pd2.hkls == pd4.hkls) diff --git a/tests/planedata/test_misc.py b/tests/planedata/test_misc.py index 78a977039..335cfa024 100644 --- a/tests/planedata/test_misc.py +++ b/tests/planedata/test_misc.py @@ -1,9 +1,10 @@ import os import numpy as np -from hexrd.material.crystallography import PlaneData -from hexrd.rotations import quatOfLaueGroup -from hexrd.valunits import valWUnit +# TODO: Check that this test is still sensible after PlaneData change. 
+from hexrd.core.material.crystallography import PlaneData +from hexrd.core.rotations import quatOfLaueGroup +from hexrd.core.valunits import valWUnit def test_misc(): diff --git a/tests/planedata/test_with_data.py b/tests/planedata/test_with_data.py index 0b4e9d539..e9a45ea49 100644 --- a/tests/planedata/test_with_data.py +++ b/tests/planedata/test_with_data.py @@ -3,9 +3,9 @@ import pytest -from hexrd.material.crystallography import ltypeOfLaueGroup -from hexrd.material.material import Material -from hexrd.rotations import rotMatOfQuat +from hexrd.core.material.crystallography import ltypeOfLaueGroup +from hexrd.core.material.material import Material +from hexrd.core.rotations import rotMatOfQuat @pytest.fixture @@ -24,8 +24,9 @@ def materials(test_data_dir): for mat_name in material_names: # Load {test_data_dir}/materials/{mat_name}.cif mat = Material( - mat_name, str(test_data_dir) + "/materials/" + mat_name + ".cif", - sgsetting=0 + mat_name, + str(test_data_dir) + "/materials/" + mat_name + ".cif", + sgsetting=0, ) mats[mat_name] = mat.planeData return mats @@ -70,8 +71,9 @@ def test_plane_data_with_data(test_data_dir, materials): assertEqualNumpyArrays(pd.powder_intensity, obj['powder_intensity']) # With the identity symmetry, zero rotation may have some sign issues, # but the rotation matrix should be pretty much the exact same - assertEqualNumpyArrays(rotMatOfQuat(pd.getQSym()), - rotMatOfQuat(obj['q_sym'])) + assertEqualNumpyArrays( + rotMatOfQuat(pd.getQSym()), rotMatOfQuat(obj['q_sym']) + ) assert pd.nHKLs == obj['nHKLs'] assert pd.getNhklRef() == obj['nhklRef'] assertEqualNumpyArrays(pd.getMultiplicity(), obj['multiplicity']) diff --git a/tests/rotations/test_eulers.py b/tests/rotations/test_eulers.py index 48fd957b4..988ba1ed0 100644 --- a/tests/rotations/test_eulers.py +++ b/tests/rotations/test_eulers.py @@ -2,7 +2,7 @@ import numpy as np -from hexrd import rotations +from hexrd.core import rotations def random_rot_mat_euler(): diff --git 
a/tests/rotations/test_quat_math.py b/tests/rotations/test_quat_math.py index b5bb9e3f7..98751445f 100644 --- a/tests/rotations/test_quat_math.py +++ b/tests/rotations/test_quat_math.py @@ -2,7 +2,7 @@ import numpy as np -from hexrd import rotations +from hexrd.core import rotations def allclose(a, b): diff --git a/tests/rotations/test_utilities.py b/tests/rotations/test_utilities.py index c128be3af..ecb57500c 100644 --- a/tests/rotations/test_utilities.py +++ b/tests/rotations/test_utilities.py @@ -1,4 +1,4 @@ -from hexrd import rotations +from hexrd.core import rotations import numpy as np diff --git a/tests/test_absorption_correction.py b/tests/test_absorption_correction.py index 0c98b5cc0..1962ab9ff 100644 --- a/tests/test_absorption_correction.py +++ b/tests/test_absorption_correction.py @@ -2,8 +2,8 @@ import pytest import yaml -from hexrd.instrument.hedm_instrument import HEDMInstrument -from hexrd.instrument.physics_package import HEDPhysicsPackage +from hexrd.core.instrument.hedm_instrument import HEDMInstrument +from hexrd.core.instrument.physics_package import HEDPhysicsPackage @pytest.fixture @@ -66,8 +66,7 @@ def test_absorption_correction(simulated_tardis_dir, test_data_dir): transmissions = instr.calc_transmission() # Normalize so that the max transmission across all detectors is 1 - max_transmission = max( - [np.nanmax(v) for v in transmissions.values()]) + max_transmission = max([np.nanmax(v) for v in transmissions.values()]) transmissions = {k: v / max_transmission for k, v in transmissions.items()} # Now compare to our reference diff --git a/tests/test_concurrent.py b/tests/test_concurrent.py index 8a9a7508b..de69bacb2 100644 --- a/tests/test_concurrent.py +++ b/tests/test_concurrent.py @@ -1,4 +1,4 @@ -from hexrd.utils.concurrent import distribute_tasks +from hexrd.core.utils.concurrent import distribute_tasks def test_distribute_tasks(): diff --git a/tests/test_find_orientations.py b/tests/test_find_orientations.py index c6208f012..92db46458 
100644 --- a/tests/test_find_orientations.py +++ b/tests/test_find_orientations.py @@ -8,9 +8,14 @@ import coloredlogs -from hexrd.findorientations import find_orientations, generate_eta_ome_maps -from hexrd import config -from hexrd.material.crystallography import PlaneData +from hexrd.hedm.findorientations import ( + find_orientations, + generate_eta_ome_maps, +) +from hexrd.hedm import config + +# TODO: Check that this test is still sensible after PlaneData change. +from hexrd.core.material.crystallography import PlaneData import find_orientations_testing as test_utils @@ -20,7 +25,8 @@ handler = logging.StreamHandler(sys.stdout) handler.setLevel(logging.INFO) formatter = coloredlogs.ColoredFormatter( - '%(asctime)s,%(msecs)03d - %(name)s - %(levelname)s - %(message)s') + '%(asctime)s,%(msecs)03d - %(name)s - %(levelname)s - %(message)s' +) handler.setFormatter(formatter) root.addHandler(handler) @@ -69,8 +75,9 @@ def example_repo_config_with_eta_ome_maps(test_config, reference_eta_ome_maps): @pytest.fixture def reference_orientations_path(example_repo_results_path): - filename = \ + filename = ( 'accepted_orientations_results_mruby_composite_hexrd06_py27_ruby.dat' + ) return example_repo_results_path / filename @@ -94,13 +101,13 @@ def to_eomap(eta_ome_maps): eta_ome_maps.omegas, eta_ome_maps.omeEdges, eta_ome_maps.iHKLList, - plane_data(eta_ome_maps.planeData) + plane_data(eta_ome_maps.planeData), ) -def test_generate_eta_ome_maps(example_repo_include_path, - test_config, - reference_eta_ome_maps): +def test_generate_eta_ome_maps( + example_repo_include_path, test_config, reference_eta_ome_maps +): os.chdir(example_repo_include_path) eta_ome_maps = generate_eta_ome_maps(test_config, save=False) eta_ome_maps = to_eomap(eta_ome_maps) @@ -110,19 +117,20 @@ def test_generate_eta_ome_maps(example_repo_include_path, assert comparison.compare() -def test_find_orientations(example_repo_include_path, - example_repo_config_with_eta_ome_maps, - reference_orientations): 
+def test_find_orientations( + example_repo_include_path, + example_repo_config_with_eta_ome_maps, + reference_orientations, +): os.chdir(example_repo_include_path) - results = find_orientations( - example_repo_config_with_eta_ome_maps - ) + results = find_orientations(example_repo_config_with_eta_ome_maps) orientations = results['qbar'] try: - test_utils.compare_quaternion_lists(orientations.T, - reference_orientations) + test_utils.compare_quaternion_lists( + orientations.T, reference_orientations + ) except RuntimeError as err: pytest.fail(str(err)) diff --git a/tests/test_fit-grains.py b/tests/test_fit-grains.py index 307a35354..7a537f3a6 100644 --- a/tests/test_fit-grains.py +++ b/tests/test_fit-grains.py @@ -8,8 +8,8 @@ import coloredlogs -from hexrd import config -from hexrd.fitgrains import fit_grains +from hexrd.hedm import config +from hexrd.hedm.fitgrains import fit_grains from fit_grains_check import compare_grain_fits @@ -21,7 +21,8 @@ handler = logging.StreamHandler(sys.stdout) handler.setLevel(logging.INFO) formatter = coloredlogs.ColoredFormatter( - '%(asctime)s,%(msecs)03d - %(name)s - %(levelname)s - %(message)s') + '%(asctime)s,%(msecs)03d - %(name)s - %(levelname)s - %(message)s' +) handler.setFormatter(formatter) root.addHandler(handler) @@ -73,21 +74,30 @@ def test_config(single_ge_config_path, single_ge_include_path): return conf -def test_fit_grains(single_ge_include_path, test_config, grains_file_path, - grains_reference_file_path): +def test_fit_grains( + single_ge_include_path, + test_config, + grains_file_path, + grains_reference_file_path, +): os.chdir(str(single_ge_include_path)) grains_table = np.loadtxt(grains_reference_file_path, ndmin=2) ref_grain_params = grains_table[:, 3:15] - gresults = fit_grains(test_config, - grains_table, - show_progress=False, - ids_to_refine=None, - write_spots_files=False) + gresults = fit_grains( + test_config, + grains_table, + show_progress=False, + ids_to_refine=None, + write_spots_files=False, + ) 
cresult = compare_grain_fits( - np.vstack([i[-1] for i in gresults]), ref_grain_params, - mtol=1.e-4, ctol=1.e-3, vtol=1.e-4 + np.vstack([i[-1] for i in gresults]), + ref_grain_params, + mtol=1.0e-4, + ctol=1.0e-3, + vtol=1.0e-4, ) assert cresult diff --git a/tests/test_graindata.py b/tests/test_graindata.py index 80df56041..f7dc27350 100644 --- a/tests/test_graindata.py +++ b/tests/test_graindata.py @@ -1,30 +1,33 @@ """Testing GrainData class""" + from pathlib import Path import pytest import numpy as np -from hexrd.cli.fit_grains import GrainData +from hexrd.hedm.cli.fit_grains import GrainData @pytest.fixture def exp90(): - return (np.pi/2) * np.identity(3) + return (np.pi / 2) * np.identity(3) @pytest.fixture def quats90(): - c45, s45 = np.cos(np.pi / 4), np.sin(np.pi /4) + c45, s45 = np.cos(np.pi / 4), np.sin(np.pi / 4) return [[c45, s45, 0, 0], [c45, 0, s45, 0], [c45, 0, 0, s45]] @pytest.fixture def rmats90(): - return np.array([ - [[1, 0, 0], [0, 0,- 1], [0, 1, 0]], - [[0, 0, 1], [0, 1, 0], [-1, 0, 0]], - [[0, -1, 0], [1, 0, 0], [0, 0, 1]] - ]) + return np.array( + [ + [[1, 0, 0], [0, 0, -1], [0, 1, 0]], + [[0, 0, 1], [0, 1, 0], [-1, 0, 0]], + [[0, -1, 0], [1, 0, 0], [0, 0, 1]], + ] + ) @pytest.fixture diff --git a/tests/test_inverse_distortion.py b/tests/test_inverse_distortion.py index 9029d1bfa..522c638f8 100644 --- a/tests/test_inverse_distortion.py +++ b/tests/test_inverse_distortion.py @@ -1,7 +1,7 @@ import json import numpy as np -from hexrd.extensions import inverse_distortion +from hexrd.core.extensions import inverse_distortion RHO_MAX = 204.8 params = [ diff --git a/tests/test_material.py b/tests/test_material.py index f10a36629..d772684ab 100644 --- a/tests/test_material.py +++ b/tests/test_material.py @@ -2,10 +2,10 @@ import h5py import pytest -from hexrd.material import Material, load_materials_hdf5 +from hexrd.core.material import Material, load_materials_hdf5 # Tolerance for comparing floats -FLOAT_TOL = 1.e-8 +FLOAT_TOL = 1.0e-8 # Use 
consistent units to simplify testing DEFAULT_LENGTH_UNIT = 'angstrom' @@ -92,14 +92,22 @@ def test_load_materials(test_materials_file): def test_remove_duplicate_atoms(test_material_file_duplicate_atoms): mats = load_materials_hdf5(test_material_file_duplicate_atoms) - apos_xtal1 = np.array([[0., 0., 0., 1.]]) - apos_xtal2 = np.array([[0., 0., 0., 0.5], - [0., 0., 0., 0.5]]) - apos_xtal3 = np.array([[0., 0., 0., 1./3.], - [0., 0., 0., 1./3.], - [0., 0., 0., 1./3.], - [0.5, 0., 0., 1.], - [0.5, 0.5, 0.25, 1.]]) + apos_xtal1 = np.array([[0.0, 0.0, 0.0, 1.0]]) + apos_xtal2 = np.array( + [ + [0.0, 0.0, 0.0, 0.5], + [0.0, 0.0, 0.0, 0.5], + ] + ) + apos_xtal3 = np.array( + [ + [0.0, 0.0, 0.0, 1.0 / 3.0], + [0.0, 0.0, 0.0, 1.0 / 3.0], + [0.0, 0.0, 0.0, 1.0 / 3.0], + [0.5, 0.0, 0.0, 1.0], + [0.5, 0.5, 0.25, 1.0], + ] + ) mats['xtal1'].unitcell.remove_duplicate_atoms() assert np.all(np.isclose(mats['xtal1'].atom_pos, apos_xtal1)) @@ -146,11 +154,11 @@ def test_sfac(self, default_material): pd = default_material.planeData pd.exclude() sfacmax_pd = pd.structFact.max() - sfac = pd.structFact/sfacmax_pd + sfac = pd.structFact / sfacmax_pd assert (sfac.min() < sfacmin) and (sfac.max() > sfacmax) pd.exclude(sfacmin=sfacmin, sfacmax=sfacmax) - sfac = pd.structFact/sfacmax_pd + sfac = pd.structFact / sfacmax_pd assert (sfac.min() >= sfacmin) and (sfac.max() <= sfacmax) def test_pint(self, default_material): @@ -160,9 +168,9 @@ def test_pint(self, default_material): pd = default_material.planeData pd.exclude() pintmax_pd = pd.powder_intensity.max() - pint = np.array(pd.powder_intensity)/pintmax_pd + pint = np.array(pd.powder_intensity) / pintmax_pd assert (pint.min() < pintmin) and (pint.max() > pintmax) pd.exclude(pintmin=pintmin, pintmax=pintmax) - pint = np.array(pd.powder_intensity)/pintmax_pd + pint = np.array(pd.powder_intensity) / pintmax_pd assert (pint.min() >= pintmin) and (pint.max() <= pintmax) diff --git a/tests/test_memoize.py b/tests/test_memoize.py index 
fbd2f0a86..c67382afb 100644 --- a/tests/test_memoize.py +++ b/tests/test_memoize.py @@ -2,7 +2,7 @@ import numpy as np -from hexrd.utils.decorators import memoize +from hexrd.core.utils.decorators import memoize def test_memoize(): diff --git a/tests/test_pixel_solid_angles.py b/tests/test_pixel_solid_angles.py index 8e7545737..02030d729 100644 --- a/tests/test_pixel_solid_angles.py +++ b/tests/test_pixel_solid_angles.py @@ -6,13 +6,13 @@ import pytest -import hexrd.resources -from hexrd.instrument.hedm_instrument import HEDMInstrument +import hexrd.core.resources +from hexrd.core.instrument.hedm_instrument import HEDMInstrument @pytest.fixture def tardis_instrument() -> HEDMInstrument: - path = importlib.resources.files(hexrd.resources).joinpath( + path = importlib.resources.files(hexrd.core.resources).joinpath( 'tardis_reference_config.yml' ) with open(path, 'r') as rf: diff --git a/tests/test_polar_view.py b/tests/test_polar_view.py index 3203849d8..f61050f8e 100644 --- a/tests/test_polar_view.py +++ b/tests/test_polar_view.py @@ -4,10 +4,10 @@ import numpy as np import pytest -from hexrd import imageseries -from hexrd.imageseries.process import ProcessedImageSeries -from hexrd.instrument import HEDMInstrument -from hexrd.projections.polar import PolarView +from hexrd.core import imageseries +from hexrd.core.imageseries.process import ProcessedImageSeries +from hexrd.core.instrument import HEDMInstrument +from hexrd.core.projections.polar import PolarView @pytest.fixture @@ -65,8 +65,7 @@ def test_polar_view( pixel_size = (0.01, 5.0) pv = PolarView(tth_range, instr, eta_min, eta_max, pixel_size) - img = pv.warp_image(img_dict, pad_with_nans=True, - do_interpolation=True) + img = pv.warp_image(img_dict, pad_with_nans=True, do_interpolation=True) # This is a masked array. Just fill it with nans. 
img = img.filled(np.nan) @@ -76,10 +75,17 @@ def test_polar_view( assert np.allclose(img, ref, equal_nan=True) # Also generate it using the cache - pv = PolarView(tth_range, instr, eta_min, eta_max, pixel_size, - cache_coordinate_map=True) - fast_img = pv.warp_image(img_dict, pad_with_nans=True, - do_interpolation=True) + pv = PolarView( + tth_range, + instr, + eta_min, + eta_max, + pixel_size, + cache_coordinate_map=True, + ) + fast_img = pv.warp_image( + img_dict, pad_with_nans=True, do_interpolation=True + ) # This should also be identical fast_img = fast_img.filled(np.nan) diff --git a/tests/test_powder.py b/tests/test_powder.py index e9e5f1dd3..84ec59c68 100644 --- a/tests/test_powder.py +++ b/tests/test_powder.py @@ -23,9 +23,7 @@ def ceria_examples_path(eiger_examples_path: Path) -> Path: @pytest.fixture def eiger_instrument(ceria_examples_path: Path) -> HEDMInstrument: - instr_path = ( - ceria_examples_path / 'eiger_ceria_calibrated_composite.hexrd' - ) + instr_path = ceria_examples_path / 'eiger_ceria_calibrated_composite.hexrd' with h5py.File(instr_path, 'r') as rf: return HEDMInstrument(rf) @@ -164,8 +162,7 @@ def test_simulate_powder_pattern_image( eta_max = 180 pixel_size = (0.1, 0.1) pv = PolarView(tth_range, instr, eta_min, eta_max, pixel_size) - img = pv.warp_image(img_dict, pad_with_nans=True, - do_interpolation=True) + img = pv.warp_image(img_dict, pad_with_nans=True, do_interpolation=True) lineout = img.mean(axis=0).filled(np.nan) diff --git a/tests/test_preprocess.py b/tests/test_preprocess.py index 0f01f27e8..a03ad5e6d 100644 --- a/tests/test_preprocess.py +++ b/tests/test_preprocess.py @@ -1,4 +1,4 @@ -from hexrd.preprocess.profiles import ( +from hexrd.hedm.preprocess.profiles import ( Eiger_Arguments, Dexelas_Arguments, HexrdPPScript_Arguments, diff --git a/tests/test_rotations.py b/tests/test_rotations.py index 1abe5f668..be7ae4d4e 100644 --- a/tests/test_rotations.py +++ b/tests/test_rotations.py @@ -1,9 +1,10 @@ """Test rotations 
module""" + import numpy as np import pytest -from hexrd.material import symmetry -from hexrd import rotations +from hexrd.core.material import symmetry +from hexrd.core import rotations def test_misorientations(): @@ -13,8 +14,17 @@ def test_misorientations(): # their own members. # laue_groups = [ - "ci", "c2h", "d2h", "c4h", "d4h", "c3i", - "d3d", "c6h", "d6h", "th", "oh" + "ci", + "c2h", + "d2h", + "c4h", + "d4h", + "c3i", + "d3d", + "c6h", + "d6h", + "th", + "oh", ] for lg in laue_groups: print("group: ", lg) @@ -22,5 +32,5 @@ def test_misorientations(): q1 = qsym[:, -1:] ang, mis = rotations.misorientation(q1, qsym, (qsym,)) assert np.allclose(ang, 0.0) - assert np.allclose(mis[0, :], 1.) - assert np.allclose(mis[1:, :], 0.) + assert np.allclose(mis[0, :], 1.0) + assert np.allclose(mis[1:, :], 0.0) diff --git a/tests/test_snip.py b/tests/test_snip.py index 44cec86b8..a90688df0 100644 --- a/tests/test_snip.py +++ b/tests/test_snip.py @@ -55,8 +55,7 @@ def test_snip1d( pixel_size = (0.1, 1.0) pv = PolarView(tth_range, instr, eta_min, eta_max, pixel_size) - img = pv.warp_image(img_dict, pad_with_nans=True, - do_interpolation=True) + img = pv.warp_image(img_dict, pad_with_nans=True, do_interpolation=True) snip_width = 100 numiter = 2 diff --git a/tests/test_transforms.py b/tests/test_transforms.py index 79417834a..7b4b50256 100644 --- a/tests/test_transforms.py +++ b/tests/test_transforms.py @@ -3,7 +3,7 @@ import numpy as np -from hexrd.transforms.xfcapi import gvec_to_xy +from hexrd.core.transforms.xfcapi import gvec_to_xy from common import convert_axis_angle_to_rmat diff --git a/tests/test_utils_json.py b/tests/test_utils_json.py index 3824c9640..73061af74 100644 --- a/tests/test_utils_json.py +++ b/tests/test_utils_json.py @@ -2,7 +2,11 @@ import numpy as np -from hexrd.utils.json import NumpyDecoder, NumpyEncoder, NumpyToNativeEncoder +from hexrd.core.utils.json import ( + NumpyDecoder, + NumpyEncoder, + NumpyToNativeEncoder, +) def test_decode_encode(): @@ 
-46,16 +50,16 @@ def test_numpy_to_native(): output = json.loads(encoded) assert ( - isinstance(output['inside'], list) and - output['inside'] == to_test['inside'].tolist() + isinstance(output['inside'], list) + and output['inside'] == to_test['inside'].tolist() ) assert ( - isinstance(output['float'], float) and - output['float'] == to_test['float'].item() + isinstance(output['float'], float) + and output['float'] == to_test['float'].item() ) assert ( - isinstance(output['nested']['float'], list) and - output['nested']['float'] == to_test['nested']['float'].tolist() + isinstance(output['nested']['float'], list) + and output['nested']['float'] == to_test['nested']['float'].tolist() ) diff --git a/tests/test_utils_yaml.py b/tests/test_utils_yaml.py index cc8ff1c1f..c901293da 100644 --- a/tests/test_utils_yaml.py +++ b/tests/test_utils_yaml.py @@ -1,7 +1,7 @@ import numpy as np import yaml -from hexrd.utils.yaml import NumpyToNativeDumper +from hexrd.core.utils.yaml import NumpyToNativeDumper def test_numpy_to_native(): @@ -20,26 +20,24 @@ def test_numpy_to_native(): output = yaml.safe_load(encoded) assert ( - isinstance(output['inside'], list) and - output['inside'] == to_test['inside'].tolist() + isinstance(output['inside'], list) + and output['inside'] == to_test['inside'].tolist() ) assert ( - isinstance(output['nested']['float16'], list) and - output['nested']['float16'] == to_test['nested']['float16'].tolist() + isinstance(output['nested']['float16'], list) + and output['nested']['float16'] + == to_test['nested']['float16'].tolist() ) assert ( - isinstance(output['float32'], float) and - output['float32'] == to_test['float32'].item() + isinstance(output['float32'], float) + and output['float32'] == to_test['float32'].item() ) assert ( - isinstance(output['float64'], float) and - output['float64'] == to_test['float64'].item() + isinstance(output['float64'], float) + and output['float64'] == to_test['float64'].item() ) assert ( - isinstance(output['int64'], int) 
and - output['int64'] == to_test['int64'].item() - ) - assert ( - isinstance(output['str'], str) and - output['str'] == to_test['str'] + isinstance(output['int64'], int) + and output['int64'] == to_test['int64'].item() ) + assert isinstance(output['str'], str) and output['str'] == to_test['str'] diff --git a/tests/test_wppf.py b/tests/test_wppf.py index 841e9dd75..a88bc16bd 100644 --- a/tests/test_wppf.py +++ b/tests/test_wppf.py @@ -5,8 +5,8 @@ import numpy as np import pytest -from hexrd.material import _angstroms, load_materials_hdf5, Material -from hexrd.wppf import LeBail, Rietveld +from hexrd.core.material import _angstroms, load_materials_hdf5, Material +from hexrd.powder.wppf import LeBail, Rietveld @pytest.fixture diff --git a/tests/transforms/common.py b/tests/transforms/common.py index f50d7fc08..3afa7b7a0 100644 --- a/tests/transforms/common.py +++ b/tests/transforms/common.py @@ -2,8 +2,8 @@ import numpy as np -import hexrd.constants as ct -from hexrd.transforms.new_capi.xf_new_capi import unit_vector +import hexrd.core.constants as ct +from hexrd.core.transforms.new_capi.xf_new_capi import unit_vector def convert_axis_angle_to_rmat(axis, angle): @@ -30,15 +30,15 @@ def convert_axis_angle_to_rmat(axis, angle): s = math.sin(angle) t = 1.0 - c - m[0, 0] = c + axis[0]*axis[0]*t - m[0, 1] = axis[0]*axis[1]*t - axis[2]*s - m[0, 2] = axis[0]*axis[2]*t + axis[1]*s - m[1, 0] = axis[0]*axis[1]*t + axis[2]*s - m[1, 1] = c + axis[1]*axis[1]*t - m[1, 2] = axis[1]*axis[2]*t - axis[0]*s - m[2, 0] = axis[0]*axis[2]*t - axis[1]*s - m[2, 1] = axis[1]*axis[2]*t + axis[0]*s - m[2, 2] = c + axis[2]*axis[2]*t + m[0, 0] = c + axis[0] * axis[0] * t + m[0, 1] = axis[0] * axis[1] * t - axis[2] * s + m[0, 2] = axis[0] * axis[2] * t + axis[1] * s + m[1, 0] = axis[0] * axis[1] * t + axis[2] * s + m[1, 1] = c + axis[1] * axis[1] * t + m[1, 2] = axis[1] * axis[2] * t - axis[0] * s + m[2, 0] = axis[0] * axis[2] * t - axis[1] * s + m[2, 1] = axis[1] * axis[2] * t + axis[0] * s + m[2, 
2] = c + axis[2] * axis[2] * t return m @@ -68,9 +68,11 @@ def random_rotation_matrix(): r22 = 2 * (q0 * q0 + q3 * q3) - 1 # 3x3 rotation matrix + # fmt: off rot_matrix = np.array([[r00, r01, r02], [r10, r11, r12], [r20, r21, r22]]) + # fmt: on return rot_matrix diff --git a/tests/transforms/test_angles_to_dvec_from_file.py b/tests/transforms/test_angles_to_dvec_from_file.py index e716dd90b..c4f8a7c0e 100644 --- a/tests/transforms/test_angles_to_dvec_from_file.py +++ b/tests/transforms/test_angles_to_dvec_from_file.py @@ -4,15 +4,15 @@ from __future__ import absolute_import import numpy as np -from hexrd.transforms.new_capi.xf_new_capi import angles_to_dvec +from hexrd.core.transforms.new_capi.xf_new_capi import angles_to_dvec + # from common import random_rotation_matrix, random_unit_vectors def test_angles_to_dvec_from_file(test_data_dir): # Load the array from a file arr = np.load( - test_data_dir / 'test_correct_angles_to_dvec.npy', - allow_pickle=True + test_data_dir / 'test_correct_angles_to_dvec.npy', allow_pickle=True ) for obj in arr: @@ -21,7 +21,7 @@ def test_angles_to_dvec_from_file(test_data_dir): obj["beam_vec"], obj["eta_vec"], obj["chi"], - obj["rmat_c"] + obj["rmat_c"], ) assert np.allclose(result, obj["result"]) diff --git a/tests/transforms/test_angles_to_gvec_from_file.py b/tests/transforms/test_angles_to_gvec_from_file.py index 5056e90dd..0273d7a40 100644 --- a/tests/transforms/test_angles_to_gvec_from_file.py +++ b/tests/transforms/test_angles_to_gvec_from_file.py @@ -5,15 +5,15 @@ from __future__ import absolute_import import numpy as np -from hexrd.transforms.new_capi.xf_new_capi import angles_to_gvec +from hexrd.core.transforms.new_capi.xf_new_capi import angles_to_gvec + # from common import random_rotation_matrix, random_unit_vectors def test_angles_to_gvec_from_file(test_data_dir): # Load the array from a file arr = np.load( - test_data_dir / 'test_correct_angles_to_gvec.npy', - allow_pickle=True + test_data_dir / 
'test_correct_angles_to_gvec.npy', allow_pickle=True ) for obj in arr: @@ -22,7 +22,7 @@ def test_angles_to_gvec_from_file(test_data_dir): obj["beam_vec"], obj["eta_vec"], obj["chi"], - obj["rmat_c"] + obj["rmat_c"], ) assert np.allclose(result, obj["result"]) diff --git a/tests/transforms/test_gvec_to_xy.py b/tests/transforms/test_gvec_to_xy.py index 8cfc9ff62..a681ca01d 100644 --- a/tests/transforms/test_gvec_to_xy.py +++ b/tests/transforms/test_gvec_to_xy.py @@ -12,7 +12,7 @@ from common import convert_axis_angle_to_rmat -from hexrd.transforms.new_capi.xf_new_capi import gvec_to_xy +from hexrd.core.transforms.new_capi.xf_new_capi import gvec_to_xy # gvec_to_xy intersects vectors from crystal position with the detector plane. diff --git a/tests/transforms/test_gvec_to_xy_from_file.py b/tests/transforms/test_gvec_to_xy_from_file.py index 8b4dedca1..9f9bd5ebc 100644 --- a/tests/transforms/test_gvec_to_xy_from_file.py +++ b/tests/transforms/test_gvec_to_xy_from_file.py @@ -4,25 +4,27 @@ from __future__ import absolute_import import numpy as np -from hexrd.transforms.new_capi.xf_new_capi import gvec_to_xy +from hexrd.core.transforms.new_capi.xf_new_capi import gvec_to_xy + # from common import random_rotation_matrix, random_unit_vectors def test_gvec_to_xy_from_file(test_data_dir): # Load the array from a file arr = np.load( - test_data_dir / 'test_correct_gvec_to_xy.npy', - allow_pickle=True + test_data_dir / 'test_correct_gvec_to_xy.npy', allow_pickle=True ) for obj in arr: - result = gvec_to_xy(obj["gvec_c"], - obj["rmat_d"], - obj["rmat_s"], - obj["rmat_c"], - obj["tvec_d"], - obj["tvec_s"], - obj["tvec_c"], - obj["beam_vec"]) + result = gvec_to_xy( + obj["gvec_c"], + obj["rmat_d"], + obj["rmat_s"], + obj["rmat_c"], + obj["tvec_d"], + obj["tvec_s"], + obj["tvec_c"], + obj["beam_vec"], + ) assert np.allclose(result, obj["result"], equal_nan=True) diff --git a/tests/transforms/test_make_beam_rmat_from_file.py b/tests/transforms/test_make_beam_rmat_from_file.py 
index 07b4589b4..c3c8ea7bd 100644 --- a/tests/transforms/test_make_beam_rmat_from_file.py +++ b/tests/transforms/test_make_beam_rmat_from_file.py @@ -5,23 +5,20 @@ from __future__ import absolute_import import numpy as np -from hexrd.transforms.new_capi.xf_new_capi import make_beam_rmat +from hexrd.core.transforms.new_capi.xf_new_capi import make_beam_rmat + # from common import random_unit_vectors def test_make_beam_rmat_from_file(test_data_dir): # Load the array from a file arr = np.load( - test_data_dir / 'test_correct_make_beam_rmat.npy', - allow_pickle=True + test_data_dir / 'test_correct_make_beam_rmat.npy', allow_pickle=True ) for obj in arr: - result = make_beam_rmat( - obj["bvec_l"], - obj["evec_l"] - ) + result = make_beam_rmat(obj["bvec_l"], obj["evec_l"]) assert np.allclose(result.T.dot(obj['bvec_l']), [0, 0, -1]) assert np.allclose(result.T.dot(obj['evec_l'])[1], 0) assert np.allclose(result, obj["result"]) diff --git a/tests/transforms/test_make_binary_rmat.py b/tests/transforms/test_make_binary_rmat.py index a13d1860f..ff809cb12 100644 --- a/tests/transforms/test_make_binary_rmat.py +++ b/tests/transforms/test_make_binary_rmat.py @@ -19,8 +19,8 @@ def test_make_binary_rmat(): # Two binary rmats should be the identity assert np.allclose(rmat @ rmat, np.eye(3)) assert np.allclose(rmat.T @ rmat, np.eye(3)), "It is orthogonal" - assert np.all((np.abs(rmat) - 1 < 1e-10) | (np.abs(rmat) < 1e-10)), "It is binary" - rmat_expected = rotMatOfQuat( - quatOfAngleAxis(np.pi, np.c_[axis]) - ) + assert np.all( + (np.abs(rmat) - 1 < 1e-10) | (np.abs(rmat) < 1e-10) + ), "It is binary" + rmat_expected = rotMatOfQuat(quatOfAngleAxis(np.pi, np.c_[axis])) assert np.allclose(rmat, rmat_expected) diff --git a/tests/transforms/test_make_detector_rmat_from_file.py b/tests/transforms/test_make_detector_rmat_from_file.py index a0074dc7e..7d8343ee1 100644 --- a/tests/transforms/test_make_detector_rmat_from_file.py +++ b/tests/transforms/test_make_detector_rmat_from_file.py @@ 
-5,24 +5,23 @@ from __future__ import absolute_import import numpy as np -from hexrd.transforms.new_capi.xf_new_capi import make_detector_rmat +from hexrd.core.transforms.new_capi.xf_new_capi import make_detector_rmat def test_make_detector_rmat_from_file(test_data_dir): # Load the array from a file arr = np.load( test_data_dir / 'test_correct_make_detector_rmat.npy', - allow_pickle=True + allow_pickle=True, ) for obj in arr: - result = make_detector_rmat( - obj["tilt_angles"] - ) + result = make_detector_rmat(obj["tilt_angles"]) assert np.allclose(result, obj["result"]) + # def test_correct_make_detector_rmat(test_data_dir): # arr = []; diff --git a/tests/transforms/test_make_rmat_of_expmap_from_file.py b/tests/transforms/test_make_rmat_of_expmap_from_file.py index a89a2355b..678194a68 100644 --- a/tests/transforms/test_make_rmat_of_expmap_from_file.py +++ b/tests/transforms/test_make_rmat_of_expmap_from_file.py @@ -5,24 +5,23 @@ from __future__ import absolute_import import numpy as np -from hexrd.transforms.new_capi.xf_new_capi import make_rmat_of_expmap +from hexrd.core.transforms.new_capi.xf_new_capi import make_rmat_of_expmap def test_make_rmat_of_expmap_from_file(test_data_dir): # Load the array from a file arr = np.load( test_data_dir / 'test_correct_make_rmat_of_expmap.npy', - allow_pickle=True + allow_pickle=True, ) for obj in arr: - result = make_rmat_of_expmap( - obj["expmap"] - ) + result = make_rmat_of_expmap(obj["expmap"]) assert np.allclose(result, obj["result"]) + # def test_correct_make_sample_rmat(test_data_dir): # arr = []; diff --git a/tests/transforms/test_make_sample_rmat_from_file.py b/tests/transforms/test_make_sample_rmat_from_file.py index bd7c765af..49657994e 100644 --- a/tests/transforms/test_make_sample_rmat_from_file.py +++ b/tests/transforms/test_make_sample_rmat_from_file.py @@ -4,25 +4,22 @@ from __future__ import absolute_import import numpy as np -from hexrd.transforms.new_capi.xf_new_capi import make_sample_rmat +from 
hexrd.core.transforms.new_capi.xf_new_capi import make_sample_rmat def test_make_sample_rmat_from_file(test_data_dir): # Load the array from a file arr = np.load( - test_data_dir / 'test_correct_make_sample_rmat.npy', - allow_pickle=True + test_data_dir / 'test_correct_make_sample_rmat.npy', allow_pickle=True ) for obj in arr: - result = make_sample_rmat( - obj["chi"], - obj["omega"] - ) + result = make_sample_rmat(obj["chi"], obj["omega"]) assert np.allclose(result, obj["result"]) + # def test_correct_make_sample_rmat(test_data_dir): # arr = []; diff --git a/tests/transforms/test_quat_distance_from_file.py b/tests/transforms/test_quat_distance_from_file.py index f34b284d3..b7d35e655 100644 --- a/tests/transforms/test_quat_distance_from_file.py +++ b/tests/transforms/test_quat_distance_from_file.py @@ -4,25 +4,22 @@ from __future__ import absolute_import import numpy as np -from hexrd.transforms.new_capi.xf_new_capi import quat_distance +from hexrd.core.transforms.new_capi.xf_new_capi import quat_distance + # from common import random_unit_vectors -# from hexrd.rotations import quatOfLaueGroup +# from hexrd.core.rotations import quatOfLaueGroup def test_quat_distance_from_file(test_data_dir): # Load the array from a file arr = np.load( - test_data_dir / 'test_correct_quat_distance.npy', - allow_pickle=True + test_data_dir / 'test_correct_quat_distance.npy', allow_pickle=True ) for obj in arr: - result = quat_distance( - obj["q1"], - obj["q2"], - obj["q_sym"] - ) + result = quat_distance(obj["q1"], obj["q2"], obj["q_sym"]) assert np.allclose(result, obj["result"]) + # def test_correct_quat_distance(test_data_dir): # arr = []; diff --git a/tests/transforms/test_rotate_vecs_about_axis.py b/tests/transforms/test_rotate_vecs_about_axis.py index 9bb3177f7..94200285f 100644 --- a/tests/transforms/test_rotate_vecs_about_axis.py +++ b/tests/transforms/test_rotate_vecs_about_axis.py @@ -1,4 +1,4 @@ -from hexrd.transforms.new_capi.xf_new_capi import rotate_vecs_about_axis 
+from hexrd.core.transforms.new_capi.xf_new_capi import rotate_vecs_about_axis import numpy as np diff --git a/tests/transforms/test_unit_vector.py b/tests/transforms/test_unit_vector.py index 440f5885d..bd92d9591 100644 --- a/tests/transforms/test_unit_vector.py +++ b/tests/transforms/test_unit_vector.py @@ -1,4 +1,4 @@ -from hexrd.transforms.xfcapi import unit_vector +from hexrd.core.transforms.xfcapi import unit_vector import numpy as np diff --git a/tests/transforms/test_validate_angle_ranges_from_file.py b/tests/transforms/test_validate_angle_ranges_from_file.py index 289fb9318..49d42d51d 100644 --- a/tests/transforms/test_validate_angle_ranges_from_file.py +++ b/tests/transforms/test_validate_angle_ranges_from_file.py @@ -4,7 +4,7 @@ from __future__ import absolute_import import numpy as np -from hexrd.transforms.new_capi.xf_new_capi import validate_angle_ranges +from hexrd.core.transforms.new_capi.xf_new_capi import validate_angle_ranges def test_validate_angle_ranges_from_file(test_data_dir): diff --git a/tests/transforms/test_xy_to_gvec.py b/tests/transforms/test_xy_to_gvec.py index f831f7e27..54cddf1cf 100644 --- a/tests/transforms/test_xy_to_gvec.py +++ b/tests/transforms/test_xy_to_gvec.py @@ -9,7 +9,7 @@ from collections import namedtuple import pytest import numpy as np -from hexrd.transforms.new_capi.xf_new_capi import xy_to_gvec +from hexrd.core.transforms.new_capi.xf_new_capi import xy_to_gvec Experiment = namedtuple( diff --git a/tests/transforms/test_xy_to_gvec_from_file.py b/tests/transforms/test_xy_to_gvec_from_file.py index 299b638ff..1093d2981 100644 --- a/tests/transforms/test_xy_to_gvec_from_file.py +++ b/tests/transforms/test_xy_to_gvec_from_file.py @@ -4,15 +4,15 @@ from __future__ import absolute_import import numpy as np -from hexrd.transforms.xfcapi import xy_to_gvec +from hexrd.core.transforms.xfcapi import xy_to_gvec + # from common import random_rotation_matrix, random_unit_vectors def test_xy_to_gvec_from_file(test_data_dir): # 
Load the array from a file arr = np.load( - test_data_dir / 'test_correct_xy_to_gvec.npy', - allow_pickle=True + test_data_dir / 'test_correct_xy_to_gvec.npy', allow_pickle=True ) for obj in arr: @@ -23,7 +23,7 @@ def test_xy_to_gvec_from_file(test_data_dir): obj["tvec_d"], obj["tvec_s"], obj["tvec_c"], - obj["rmat_b"] + obj["rmat_b"], ) assert np.allclose(result[0], obj["result"][0]) diff --git a/tests/unitcell/test_vec_math.py b/tests/unitcell/test_vec_math.py index 794ad4da5..f2a633a17 100644 --- a/tests/unitcell/test_vec_math.py +++ b/tests/unitcell/test_vec_math.py @@ -1,5 +1,5 @@ from pytest import fixture -from hexrd.material import Material, unitcell +from hexrd.core.material import Material, unitcell import numpy as np