diff --git a/hexrd/constants.py b/hexrd/constants.py
index 3e1ba4e35..6bb312a3f 100644
--- a/hexrd/constants.py
+++ b/hexrd/constants.py
@@ -26,10 +26,19 @@
 # Boston, MA 02111-1307 USA or visit .
 # =============================================================================
 import os
+import platform
+import multiprocessing as mp
 
 import numpy as np
+
 from scipy import constants as sc
 
+# !!! for setting the mp start method
+if 'Windows' in platform.system():
+    mp_context = mp.get_context("spawn")
+else:
+    mp_context = mp.get_context("fork")
+
 # pi related
 pi = np.pi
 piby2 = 0.5 * pi
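The `mp_context` export gives downstream consumers one place to pick the start method ("spawn" on Windows, "fork" elsewhere). A minimal usage sketch, assuming only that `hexrd.constants` is importable (`ProcessPoolExecutor` has accepted an `mp_context` keyword since Python 3.7):

```python
from concurrent.futures import ProcessPoolExecutor

from hexrd import constants


def _square(n):
    return n * n


if __name__ == '__main__':
    # uses the platform-appropriate start method selected above
    with ProcessPoolExecutor(mp_context=constants.mp_context,
                             max_workers=2) as executor:
        print(list(executor.map(_square, range(4))))
```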
diff --git a/hexrd/fitting/calibration.py b/hexrd/fitting/calibration.py
index 2f2a99cb6..7fb3ad29a 100644
--- a/hexrd/fitting/calibration.py
+++ b/hexrd/fitting/calibration.py
@@ -4,14 +4,14 @@
 
 from scipy.optimize import leastsq, least_squares
 
+from tqdm import tqdm
+
 from hexrd import constants as cnst
-from hexrd.fitting import fitpeak
-from hexrd.fitting.peakfunctions import mpeak_nparams_dict
 from hexrd import matrixutil as mutil
 from hexrd.transforms import xfcapi
 
 from . import grains as grainutil
-
+from . import spectrum
 
 # =============================================================================
 # %% PARAMETERS
 # =============================================================================
@@ -31,21 +31,27 @@
     dtype=bool
 )
 
+nfields_powder_data = 8
+
+
 # =============================================================================
 # %% POWDER CALIBRATION
 # =============================================================================
 
-nfields_powder_data = 8
+
+def _normalized_ssqr(resd):
+    return np.sum(resd*resd)/len(resd)
 
 
 class PowderCalibrator(object):
     def __init__(self, instr, plane_data, img_dict, flags,
-                 tth_tol=None, eta_tol=0.25,
-                 pktype='pvoigt'):
+                 tth_tol=None, eta_tol=0.25, fwhm_estimate=None,
+                 pktype='pvoigt', bgtype='linear'):
         assert list(instr.detectors.keys()) == list(img_dict.keys()), \
             "instrument and image dict must have the same keys"
         self._instr = instr
         self._plane_data = plane_data
+        self._fwhm_estimate = fwhm_estimate
         self._plane_data.wavelength = self._instr.beam_energy  # force
         self._img_dict = img_dict
         self._params = np.asarray(plane_data.lparms, dtype=float)
@@ -72,6 +78,7 @@ def __init__(self, instr, plane_data, img_dict, flags,
         # for peak fitting
         # ??? fitting only, or do alternative peak detection?
         self._pktype = pktype
+        self._bgtype = bgtype
 
     @property
     def npi(self):
@@ -109,6 +116,16 @@ def eta_tol(self, x):
         assert np.isscalar(x), "eta_tol must be a scalar value"
         self._eta_tol = x
 
+    @property
+    def fwhm_estimate(self):
+        return self._fwhm_estimate
+
+    @fwhm_estimate.setter
+    def fwhm_estimate(self, x):
+        if x is not None:
+            assert np.isscalar(x), "fwhm_estimate must be a scalar value"
+        self._fwhm_estimate = x
+
     @property
     def params(self):
         return self._params
@@ -157,14 +174,26 @@ def pktype(self, x):
         'pvoigt', 'split_pvoigt', or 'pink_beam_dcs'
         """
-        assert x in ['gaussian',
-                     'lorentzian',
-                     'pvoigt',
-                     'split_pvoigt',
-                     'pink_beam_dcs'], \
+        assert x in spectrum._function_dict_1d, \
             "pktype '%s' not understood"
         self._pktype = x
 
+    @property
+    def bgtype(self):
+        return self._bgtype
+
+    @bgtype.setter
+    def bgtype(self, x):
+        """
+        currently any of the 1-d background functions in
+        spectrum._function_dict_1d, e.g., 'constant', 'linear',
+        'quadratic', 'cubic', 'quartic', or 'quintic'
+        """
+        assert x in spectrum._function_dict_1d, \
+            "bgtype '%s' not understood"
+        self._bgtype = x
+
     def _interpolate_images(self):
         """
         returns the interpolated powder line data from the images in img_dict
@@ -191,7 +220,7 @@ def _extract_powder_lines(self, fit_tth_tol=None, int_cutoff=1e-4):
         """
         if fit_tth_tol is None:
             fit_tth_tol = self.tth_tol/4.
-        fit_tth_tol = np.radians(fit_tth_tol)
+
         # ideal tth
         wlen = self.instr.beam_wavelength
         dsp_ideal = np.atleast_1d(self.plane_data.getPlaneSpacings())
@@ -219,7 +248,7 @@ def _extract_powder_lines(self, fit_tth_tol=None, int_cutoff=1e-4):
         rhs = dict.fromkeys(self.instr.detectors)
         for det_key, panel in self.instr.detectors.items():
             rhs[det_key] = []
-            for i_ring, ringset in enumerate(powder_lines[det_key]):
+            for i_ring, ringset in enumerate(tqdm(powder_lines[det_key])):
                 tmp = []
                 if len(ringset) == 0:
                     continue
@@ -233,102 +262,66 @@ def _extract_powder_lines(self, fit_tth_tol=None, int_cutoff=1e-4):
                     #     np.array(intensities).squeeze(),
                     #     axis=0
                     # )
-                    tth_centers = angs[0]
-                    eta_ref = angs[1]
-                    int1d = intensities[0]
+                    spec_data = np.vstack(
+                        [np.degrees(angs[0]),
+                         intensities[0]]
+                    ).T
 
                     # peak profile fitting
-                    if len(dsp0[i_ring]) == 1:
-                        p0 = fitpeak.estimate_pk_parms_1d(
-                            tth_centers, int1d, self.pktype
-                        )
-
-                        p = fitpeak.fit_pk_parms_1d(
-                            p0, tth_centers, int1d, self.pktype
-                        )
-
-                        # !!! this is where we can kick out bunk fits
-                        tth_meas = p[1]
-                        tth_pred = 2.*np.arcsin(0.5*wlen/dsp0[i_ring])
-                        center_err = abs(tth_meas - tth_pred)
-                        if p[0] < int_cutoff or center_err > fit_tth_tol:
-                            tmp.append(np.empty((0, nfields_powder_data)))
-                            continue
-
-                        # push back through mapping to cartesian (x, y)
-                        xy_meas = panel.angles_to_cart(
-                            [[tth_meas, eta_ref], ],
-                            tvec_s=self.instr.tvec,
-                            apply_distortion=True
-                        )
-
-                        # cat results
-                        tmp.append(
-                            np.hstack(
-                                [xy_meas.squeeze(),
-                                 tth_meas,
-                                 hkls[i_ring].squeeze(),
-                                 dsp0[i_ring],
-                                 eta_ref]
-                            )
-                        )
-                    else:
-                        # multiple peaks
-                        tth_pred = 2.*np.arcsin(0.5*wlen/dsp0[i_ring])
-                        npeaks = len(tth_pred)
-                        eta_ref_tile = np.tile(eta_ref, npeaks)
-
-                        # !!! these hueristics merit checking
-                        fwhm_guess = self.plane_data.tThWidth/4.
- center_bnd = self.plane_data.tThWidth/2./npeaks - - p0, bnds = fitpeak.estimate_mpk_parms_1d( - tth_pred, tth_centers, int1d, - pktype=self.pktype, bgtype='linear', - fwhm_guess=fwhm_guess, - center_bnd=center_bnd - ) - - p = fitpeak.fit_mpk_parms_1d( - p0, tth_centers, int1d, self.pktype, - npeaks, bgtype='linear', bnds=bnds - ) - - nparams_per_peak = mpeak_nparams_dict[self.pktype] - just_the_peaks = \ - p[:npeaks*nparams_per_peak].reshape( - npeaks, nparams_per_peak - ) - - # !!! this is where we can kick out bunk fits - tth_meas = just_the_peaks[:, 1] - center_err = abs(tth_meas - tth_pred) - if np.any( - np.logical_or( - just_the_peaks[:, 0] < int_cutoff, - center_err > fit_tth_tol - ) - ): - tmp.append(np.empty((0, nfields_powder_data))) - continue - - # push back through mapping to cartesian (x, y) - xy_meas = panel.angles_to_cart( - np.vstack([tth_meas, eta_ref_tile]).T, - tvec_s=self.instr.tvec, - apply_distortion=True - ) - - # cat results - tmp.append( - np.hstack( - [xy_meas, - tth_meas.reshape(npeaks, 1), - hkls[i_ring], - dsp0[i_ring].reshape(npeaks, 1), - eta_ref_tile.reshape(npeaks, 1)] - ) + tth_pred = np.degrees( + 2.*np.arcsin(0.5*wlen/dsp0[i_ring]) + ) + npeaks = len(tth_pred) + + # reference eta + eta_ref_tile = np.tile(angs[1], npeaks) + + # spectrum fitting + sm = spectrum.SpectrumModel( + spec_data, tth_pred, + pktype=self.pktype, bgtype=self.bgtype, + fwhm_init=self.fwhm_estimate + ) + fit_results = sm.fit() + if not fit_results.success: + tmp.append(np.empty((0, nfields_powder_data))) + continue + + fit_params = np.vstack([ + (fit_results.best_values['pk%d_amp' % i], + fit_results.best_values['pk%d_cen' % i]) + for i in range(npeaks) + ]).T + pk_amp, tth_meas = fit_params + + # !!! this is where we can kick out bunk fits + center_err = abs(tth_meas - tth_pred) + failed_fit_heuristic = np.logical_or( + pk_amp < int_cutoff, + center_err > fit_tth_tol + ) + if np.any(failed_fit_heuristic): + tmp.append(np.empty((0, nfields_powder_data))) + continue + + # push back through mapping to cartesian (x, y) + tth_meas = np.radians(tth_meas) + xy_meas = panel.angles_to_cart( + np.vstack([tth_meas, eta_ref_tile]).T, + tvec_s=self.instr.tvec, + apply_distortion=True + ) + + # cat results + tmp.append( + np.hstack( + [xy_meas, + tth_meas.reshape(npeaks, 1), + hkls[i_ring], + dsp0[i_ring].reshape(npeaks, 1), + eta_ref_tile.reshape(npeaks, 1)] ) + ) if len(tmp) == 0: rhs[det_key].append(np.empty((0, nfields_powder_data))) else: @@ -401,6 +394,7 @@ def _evaluate(self, reduced_params, data_dict, output='residual'): apply_distortion=True ) + ''' # !!! apply tth distortion if panel.tth_distortion is not None: eval_pts = panel.cartToPixel( @@ -411,6 +405,7 @@ def _evaluate(self, reduced_params, data_dict, output='residual'): updated_angles[:, 0] = \ updated_angles[:, 0] \ + panel.tth_distortion[eval_pts[:, 0], eval_pts[:, 1]] + ''' # derive ideal tth positions from additional ring point info hkls = pdata[:, 3:6] @@ -460,6 +455,22 @@ def model(self, reduced_params, data_dict): class InstrumentCalibrator(object): def __init__(self, *args): + """ + Model for instrument calibration class as a function of + + Parameters + ---------- + *args : TYPE + DESCRIPTION. + + Returns + ------- + None. 
+ + Notes + ----- + Flags are set on calibrators + """ assert len(args) > 0, \ "must have at least one calibrator" self._calibrators = args @@ -479,6 +490,14 @@ def instr(self): def calibrators(self): return self._calibrators + @property + def flags(self): + # additional params come next + flags = [self.instr.calibration_flags, ] + for calib_class in self.calibrators: + flags.append(calib_class.flags[calib_class.npi:]) + return np.hstack(flags) + @property def full_params(self): return self._full_params @@ -490,78 +509,165 @@ def full_params(self, x): % (len(self._full_params), len(x)) self._full_params = x + @property + def reduced_params(self): + return self.full_params[self.flags] + # ========================================================================= # METHODS # ========================================================================= + def _reduced_params_flag(self, cidx): + assert cidx >= 0 and cidx < len(self.calibrators), \ + "index must be in %s" % str(np.arange(len(self.calibrators))) + + calib_class = self.calibrators[cidx] + + # instrument params come first + npi = calib_class.npi + + # additional params come next + cparams_flags = [calib_class.flags[:npi], ] + for i, calib_class in enumerate(self.calibrators): + if i == cidx: + cparams_flags.append(calib_class.flags[npi:]) + else: + cparams_flags.append(np.zeros(calib_class.npe, dtype=bool)) + return np.hstack(cparams_flags) + + def extract_points(self, fit_tth_tol, int_cutoff=1e-4): + # !!! list in the same order as dict looping + master_data_dict_list = [] + for calib_class in self.calibrators: + master_data_dict_list.append( + calib_class._extract_powder_lines( + fit_tth_tol=fit_tth_tol, int_cutoff=int_cutoff + ) + ) + return master_data_dict_list + + def residual(self, x0, master_data_dict_list): + # !!! list in the same order as dict looping + resd = [] + for i, calib_class in enumerate(self.calibrators): + # !!! need to grab the param set + # specific to this calibrator class + fp = np.array(self.full_params) # copy full_params + fp[self.flags] = x0 # assign new global values + this_x0 = fp[self._reduced_params_flag(i)] # select these + resd.append( + calib_class.residual( + this_x0, + master_data_dict_list[i] + ) + ) + return np.hstack(resd) + def run_calibration(self, - conv_tol=1e-4, max_iter=3, fit_tth_tol=None, + fit_tth_tol=None, int_cutoff=1e-4, + conv_tol=1e-4, max_iter=5, use_robust_optimization=False): """ - FIXME: only coding serial powder case to get things going. Will - eventually figure out how to loop over multiple calibrator classes. - All will have a reference the same instrument, but some -- like single - crystal -- will have to add parameters as well as contribute to the RHS - """ - calib_class = self.calibrators[0] - obj_func = calib_class.residual - delta_r = np.inf - step_successful = True - iters = 0 - while (delta_r > conv_tol and step_successful) and iters < max_iter: - data_dict = calib_class._extract_powder_lines( - fit_tth_tol=fit_tth_tol) + Parameters + ---------- + fit_tth_tol : TYPE, optional + DESCRIPTION. The default is None. + int_cutoff : TYPE, optional + DESCRIPTION. The default is 1e-4. + conv_tol : TYPE, optional + DESCRIPTION. The default is 1e-4. + max_iter : TYPE, optional + DESCRIPTION. The default is 5. + use_robust_optimization : TYPE, optional + DESCRIPTION. The default is False. + + Returns + ------- + x1 : TYPE + DESCRIPTION. 
-        # grab reduced optimizaion parameter set
-        x0 = self._instr.calibration_parameters[
-            self._instr.calibration_flags
-        ]
+        """
-        resd0 = obj_func(x0, data_dict)
+        delta_r = np.inf
+        step_successful = True
+        iter_count = 0
+        nrm_ssr_prev = np.inf
+        rparams_prev = np.array(self.reduced_params)
+        while delta_r > conv_tol \
+            and step_successful \
+                and iter_count < max_iter:
+
+            # extract data
+            master_data_dict_list = self.extract_points(
+                fit_tth_tol=fit_tth_tol,
+                int_cutoff=int_cutoff
+            )
+
+            # grab reduced params for optimizer
+            x0 = np.array(self.reduced_params)  # !!! copy
+            resd0 = self.residual(x0, master_data_dict_list)
+            nrm_ssr_0 = _normalized_ssqr(resd0)
+            if nrm_ssr_0 > nrm_ssr_prev:
+                print('No residual improvement; exiting')
+                self.full_params = rparams_prev
+                break
 
         if use_robust_optimization:
             oresult = least_squares(
-                obj_func, x0, args=(data_dict, ),
+                self.residual,
+                x0, args=(master_data_dict_list, ),
                 method='trf', loss='soft_l1'
             )
             x1 = oresult['x']
         else:
             x1, cox_x, infodict, mesg, ierr = leastsq(
-                obj_func, x0, args=(data_dict, ),
+                self.residual,
+                x0, args=(master_data_dict_list, ),
                 full_output=True
             )
 
-            resd1 = obj_func(x1, data_dict)
-            nrm_ssr_0 = sum(resd0**2)/float(len(resd0))
-            nrm_ssr_1 = sum(resd1**2)/float(len(resd1))
+            # eval new residual
+            # !!! I thought this should update the underlying class params?
+            resd1 = self.residual(x1, master_data_dict_list)
+
+            nrm_ssr_1 = _normalized_ssqr(resd1)
 
-            delta_r = nrm_ssr_0 - nrm_ssr_1
+            delta_r = 1. - nrm_ssr_1/nrm_ssr_0
 
             if delta_r > 0:
                 print('OPTIMIZATION SUCCESSFUL')
-                print('normalized initial ssr: %f\nnormalized final ssr: %f'
-                      % (nrm_ssr_0, nrm_ssr_1))
-                print(('change in resdiual: %f' % delta_r))
-
+                print('normalized initial ssr: %.4e' % nrm_ssr_0)
+                print('normalized final ssr: %.4e' % nrm_ssr_1)
+                print('change in residual: %.4e' % delta_r)
+                # FIXME: WHY IS THIS UPDATE NECESSARY?
+                # Thought the call to self.residual below did this, but
+                # appears not to.
+                new_params = np.array(self.full_params)
+                new_params[self.flags] = x1
+                self.full_params = new_params
+
+                nrm_ssr_prev = nrm_ssr_0
+                rparams_prev = np.array(self.full_params)  # !!! careful
+                rparams_prev[self.flags] = x0
             else:
                 print('no improvement in residual!!!')
                 step_successful = False
                 break
 
-            iters += 1
+            iter_count += 1
 
         # handle exit condition in case step failed
         if not step_successful:
             x1 = x0
-            _ = obj_func(x1, data_dict)
+            _ = self.residual(x1, master_data_dict_list)
 
         # update the full_params
         # FIXME: this class is still hard-coded for one calibrator
         fp = np.array(self.full_params, dtype=float)
         fp[self.flags] = x1
         self.full_params = fp
+
+        return x1
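For context, a minimal sketch of the intended call pattern for the reworked loop above; `instr`, `plane_data`, `img_dict`, and `flags` are assumed to be prepared elsewhere, and the keyword values are illustrative only:

```python
from hexrd.fitting.calibration import PowderCalibrator, InstrumentCalibrator

# instr, plane_data, img_dict, and flags come from instrument/material setup
pc = PowderCalibrator(
    instr, plane_data, img_dict, flags,
    eta_tol=1.0, pktype='pvoigt', bgtype='linear'
)

# one or more calibrators, all referencing the same instrument
ic = InstrumentCalibrator(pc)
x1 = ic.run_calibration(
    fit_tth_tol=0.25, int_cutoff=1e-4,
    conv_tol=1e-4, max_iter=5,
    use_robust_optimization=False
)
# x1 is the optimized reduced parameter set; ic.full_params is updated in place
```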
diff --git a/hexrd/fitting/fitpeak.py b/hexrd/fitting/fitpeak.py
index bee52ce68..f6b3d81d5 100644
--- a/hexrd/fitting/fitpeak.py
+++ b/hexrd/fitting/fitpeak.py
@@ -26,6 +26,7 @@
 # ============================================================
 import numpy as np
+# from numpy.polynomial import chebyshev
 
 from scipy import integrate
 from scipy import ndimage as imgproc
@@ -48,6 +49,18 @@
 inf = np.inf
 minf = -inf
 
+# dcs param values
+# !!! converted from deg^-1 in Von Dreele's paper
+alpha0, alpha1, beta0, beta1 = np.r_[14.4, 0., 3.016, -7.94]
+
+
+def cnst_fit_obj(x, b):
+    return np.ones_like(x)*b
+
+
+def cnst_fit_jac(x, b):
+    return np.vstack([np.ones_like(x)]).T
+
 
 def lin_fit_obj(x, m, b):
     return m*np.asarray(x) + b
@@ -57,6 +70,23 @@ def lin_fit_jac(x, m, b):
     return np.vstack([x, np.ones_like(x)]).T
 
 
+def quad_fit_obj(x, a, b, c):
+    x = np.asarray(x)
+    return a*x**2 + b*x + c
+
+
+def quad_fit_jac(x, a, b, c):
+    x = np.asarray(x)
+    return np.vstack([x**2, x, np.ones_like(x)]).T
+
+
+def _amplitude_guess(x, x0, y, fwhm):
+    pt_l = np.argmin(np.abs(x - (x0 - 0.5*fwhm)))
+    pt_h = np.argmin(np.abs(x - (x0 + 0.5*fwhm)))
+    return np.max(y[pt_l:pt_h + 1])
+
+
 # =============================================================================
 # 1-D Peak Fitting
 # =============================================================================
@@ -78,6 +108,11 @@ def estimate_pk_parms_1d(x, f, pktype='pvoigt'):
     p -- (m) ndarray containing initial guesses for parameters for the input
     peaktype (see peak function help for what each parameters corresponds to)
+
+    Notes
+    -----
+    !!! LINEAR BACKGROUND ONLY
+    !!! ASSUMES ANGULAR SPECTRA IN RADIANS (DCS PARAMS)
     """
     npts = len(x)
     assert len(f) == npts, "ordinate and data must be same length!"
@@ -88,7 +123,7 @@ def estimate_pk_parms_1d(x, f, pktype='pvoigt'):
 
     # fit linear bg and grab params
     bp, _ = optimize.curve_fit(lin_fit_obj, x, bkg, jac=lin_fit_jac)
-    bg0 = bp[-1]
+    bg0 = bp[1]
     bg1 = bp[0]
 
     # set remaining params
@@ -124,6 +159,9 @@ def estimate_pk_parms_1d(x, f, pktype='pvoigt'):
         p = [A, x0, FWHM, 0.5, bg0, bg1]
     elif pktype == 'split_pvoigt':
         p = [A, x0, FWHM, FWHM, 0.5, 0.5, bg0, bg1]
+    elif pktype == 'pink_beam_dcs':
+        # A, x0, alpha0, alpha1, beta0, beta1, fwhm_g, fwhm_l
+        p = [A, x0, alpha0, alpha1, beta0, beta1, FWHM, FWHM, bg0, bg1]
     else:
         raise RuntimeError("pktype '%s' not understood" % pktype)
 
@@ -197,6 +235,8 @@ def fit_pk_parms_1d(p0, x, f, pktype='pvoigt'):
                                  ftol=ftol,
                                  xtol=xtol)
     elif pktype == 'dcs_pinkbeam':
+        # !!!: for some reason the 'trf' method was not behaving well,
+        #      so switched to 'lm'
         lb = np.array([0.0, x.min(), -100., -100., -100., -100.,
                        0., 0., -np.inf, -np.inf, -np.inf])
 
         res = optimize.least_squares(
             fit_pk_obj_1d, p0,
             jac='2-point',
-            bounds=(lb, ub),
-            method='trf',
+            # bounds=(),  # (lb, ub),
+            method='lm',
             args=fitArgs,
             ftol=ftol,
             xtol=xtol)
@@ -277,7 +317,7 @@ def fit_mpk_parms_1d(
 
 def estimate_mpk_parms_1d(
         pk_pos_0, x, f,
         pktype='pvoigt', bgtype='linear',
-        fwhm_guess=0.07, center_bnd=0.02,
+        fwhm_guess=None, center_bnd=0.02,
         amp_lim_mult=[0.1, 10.], fwhm_lim_mult=[0.5, 2.]
         ):
     """
@@ -323,6 +363,8 @@ def estimate_mpk_parms_1d(
     if(len(center_bnd) < 2):
         center_bnd = center_bnd*np.ones(num_pks)
 
+    if fwhm_guess is None:
+        fwhm_guess = (np.max(x) - np.min(x))/(20.*num_pks)
     fwhm_guess = np.atleast_1d(fwhm_guess)
     if(len(fwhm_guess) < 2):
         fwhm_guess = fwhm_guess*np.ones(num_pks)
@@ -335,71 +377,89 @@ def estimate_mpk_parms_1d(
 
     # fit linear bg and grab params
     bp, _ = optimize.curve_fit(lin_fit_obj, x, bkg, jac=lin_fit_jac)
-    bg0 = bp[-1]
+    bg0 = bp[1]
     bg1 = bp[0]
 
+    '''
+    # TODO: In case we want to switch to chebyshev
+    bkg_mod = chebyshev.Chebyshev(
+        [0., 0.], domain=(min(x), max(x))
+    )
+    fit_bkg = bkg_mod.fit(x, bkg, 1)
+    coeff = fit_bkg.coef
+    bg0, bg1 = coeff
+    '''
+
+    # make lin bkg subtracted spectrum
+    fsubtr = f - lin_fit_obj(x, *bp)
+    # !!! for chebyshev
+    # fsubtr = f - fit_bkg(x)
+
+    # number of parameters from reference dict
+    npp = pkfuncs.mpeak_nparams_dict[pktype]
+
+    p0tmp = np.zeros([num_pks, npp])
+    p0tmp_lb = np.zeros([num_pks, npp])
+    p0tmp_ub = np.zeros([num_pks, npp])
+
+    # case processing
+    # !!! used to use (f[pt] - min_val) for ampl
     if pktype == 'gaussian' or pktype == 'lorentzian':
-        p0tmp = np.zeros([num_pks, 3])
-        p0tmp_lb = np.zeros([num_pks, 3])
-        p0tmp_ub = np.zeros([num_pks, 3])
-
         # x is just 2theta values
         # make guess for the initial parameters
         for ii in np.arange(num_pks):
-            pt = np.argmin(np.abs(x - pk_pos_0[ii]))
+            amp_guess = _amplitude_guess(
+                x, pk_pos_0[ii], fsubtr, fwhm_guess[ii]
+            )
             p0tmp[ii, :] = [
-                (f[pt] - min_val),
+                amp_guess,
                 pk_pos_0[ii],
                 fwhm_guess[ii]
             ]
             p0tmp_lb[ii, :] = [
-                (f[pt] - min_val)*amp_lim_mult[0],
+                amp_guess*amp_lim_mult[0],
                 pk_pos_0[ii] - center_bnd[ii],
                 fwhm_guess[ii]*fwhm_lim_mult[0]
            ]
            p0tmp_ub[ii, :] = [
-                (f[pt] - min_val)*amp_lim_mult[1],
+                amp_guess*amp_lim_mult[1],
                 pk_pos_0[ii] + center_bnd[ii],
                 fwhm_guess[ii]*fwhm_lim_mult[1]
            ]
     elif pktype == 'pvoigt':
-        p0tmp = np.zeros([num_pks, 4])
-        p0tmp_lb = np.zeros([num_pks, 4])
-        p0tmp_ub = np.zeros([num_pks, 4])
-
         # x is just 2theta values
         # make guess for the initial parameters
         for ii in np.arange(num_pks):
-            pt = np.argmin(np.abs(x - pk_pos_0[ii]))
+            amp_guess = _amplitude_guess(
+                x, pk_pos_0[ii], fsubtr, fwhm_guess[ii]
+            )
             p0tmp[ii, :] = [
-                (f[pt] - min_val),
+                amp_guess,
                 pk_pos_0[ii],
                 fwhm_guess[ii],
                 0.5
            ]
            p0tmp_lb[ii, :] = [
-                (f[pt] - min_val)*amp_lim_mult[0],
+                amp_guess*amp_lim_mult[0],
                 pk_pos_0[ii] - center_bnd[ii],
                 fwhm_guess[ii]*fwhm_lim_mult[0],
                 0.0
            ]
            p0tmp_ub[ii, :] = [
-                (f[pt] - min_val+1.)*amp_lim_mult[1],
+                (amp_guess + 1.)*amp_lim_mult[1],
                 pk_pos_0[ii] + center_bnd[ii],
                 fwhm_guess[ii]*fwhm_lim_mult[1],
                 1.0
            ]
     elif pktype == 'split_pvoigt':
-        p0tmp = np.zeros([num_pks, 6])
-        p0tmp_lb = np.zeros([num_pks, 6])
-        p0tmp_ub = np.zeros([num_pks, 6])
-
         # x is just 2theta values
         # make guess for the initial parameters
         for ii in np.arange(num_pks):
-            pt = np.argmin(np.abs(x - pk_pos_0[ii]))
+            amp_guess = _amplitude_guess(
+                x, pk_pos_0[ii], fsubtr, fwhm_guess[ii]
+            )
             p0tmp[ii, :] = [
-                (f[pt] - min_val),
+                amp_guess,
                 pk_pos_0[ii],
                 fwhm_guess[ii],
                 fwhm_guess[ii],
                 0.5,
                 0.5
            ]
            p0tmp_lb[ii, :] = [
-                (f[pt] - min_val)*amp_lim_mult[0],
+                amp_guess*amp_lim_mult[0],
                 pk_pos_0[ii] - center_bnd[ii],
                 fwhm_guess[ii]*fwhm_lim_mult[0],
                 fwhm_guess[ii]*fwhm_lim_mult[0],
                 0.0,
                 0.0
            ]
            p0tmp_ub[ii, :] = [
-                (f[pt] - min_val)*amp_lim_mult[1],
+                amp_guess*amp_lim_mult[1],
                 pk_pos_0[ii] + center_bnd[ii],
                 fwhm_guess[ii]*fwhm_lim_mult[1],
                 fwhm_guess[ii]*fwhm_lim_mult[1],
                 1.0,
                 1.0
            ]
+    elif pktype == 'pink_beam_dcs':
+        # x is just 2theta values
+        # make guess for the initial parameters
+        for ii in np.arange(num_pks):
+            amp_guess = _amplitude_guess(
+                x, pk_pos_0[ii], fsubtr, fwhm_guess[ii]
+            )
+            p0tmp[ii, :] = [
+                amp_guess,
+                pk_pos_0[ii],
+                alpha0,
+                alpha1,
+                beta0,
+                beta1,
+                fwhm_guess[ii],
+                fwhm_guess[ii],
+            ]
+            p0tmp_lb[ii, :] = [
+                amp_guess*amp_lim_mult[0],
+                pk_pos_0[ii] - center_bnd[ii],
+                -1e5,
+                -1e5,
+                -1e5,
+                -1e5,
+                fwhm_guess[ii]*fwhm_lim_mult[0],
+                fwhm_guess[ii]*fwhm_lim_mult[0],
+            ]
+            p0tmp_ub[ii, :] = [
+                amp_guess*amp_lim_mult[1],
+                pk_pos_0[ii] + center_bnd[ii],
+                1e5,
+                1e5,
+                1e5,
+                1e5,
+                fwhm_guess[ii]*fwhm_lim_mult[1],
+                fwhm_guess[ii]*fwhm_lim_mult[1],
+            ]
 
-    if bgtype == 'linear':
-        num_pk_parms = len(p0tmp.ravel())
-        p0 = np.zeros(num_pk_parms+2)
-        lb = np.zeros(num_pk_parms+2)
-        ub = np.zeros(num_pk_parms+2)
+    num_pk_parms = len(p0tmp.ravel())
+    if bgtype == 'constant':
+        p0 = np.zeros(num_pk_parms + 1)
+        lb = np.zeros(num_pk_parms + 1)
+        ub = np.zeros(num_pk_parms + 1)
         p0[:num_pk_parms] = p0tmp.ravel()
         lb[:num_pk_parms] = p0tmp_lb.ravel()
         ub[:num_pk_parms] = p0tmp_ub.ravel()
 
-        p0[-2] = bg0
-        p0[-1] = bg1
-
-        lb[-2] = minf
+        p0[-1] = np.average(bkg)
         lb[-1] = minf
-
-        ub[-2] = inf
         ub[-1] = inf
 
-    elif bgtype == 'constant':
-        num_pk_parms = len(p0tmp.ravel())
-        p0 = np.zeros(num_pk_parms+1)
-        lb = np.zeros(num_pk_parms+1)
-        ub = np.zeros(num_pk_parms+1)
+    elif bgtype == 'linear':
+        p0 = np.zeros(num_pk_parms + 2)
+        lb = np.zeros(num_pk_parms + 2)
+        ub = np.zeros(num_pk_parms + 2)
         p0[:num_pk_parms] = p0tmp.ravel()
         lb[:num_pk_parms] = p0tmp_lb.ravel()
         ub[:num_pk_parms] = p0tmp_ub.ravel()
 
-        p0[-1] = np.average(bkg)
-        lb[-1] = minf
-        ub[-1] = inf
+        p0[-2] = bg0
+        p0[-1] = bg1
+        lb[-2:] = minf
+        ub[-2:] = inf
 
     elif bgtype == 'quadratic':
-        num_pk_parms = len(p0tmp.ravel())
-        p0 = np.zeros(num_pk_parms+3)
-        lb = np.zeros(num_pk_parms+3)
-        ub = np.zeros(num_pk_parms+3)
+        p0 = np.zeros(num_pk_parms + 3)
+        lb = np.zeros(num_pk_parms + 3)
+        ub = np.zeros(num_pk_parms + 3)
         p0[:num_pk_parms] = p0tmp.ravel()
         lb[:num_pk_parms] = p0tmp_lb.ravel()
         ub[:num_pk_parms] = p0tmp_ub.ravel()
 
         p0[-3] = bg0
         p0[-2] = bg1
-        lb[-3] = minf
-        lb[-2] = minf
-        lb[-1] = minf
-        ub[-3] = inf
-        ub[-2] = inf
-        ub[-1] = inf
+        lb[-3:] = minf
+        ub[-3:] = inf
+
+    elif bgtype == 'cubic':
+        p0 = np.zeros(num_pk_parms + 4)
+        lb = np.zeros(num_pk_parms + 4)
+        ub = np.zeros(num_pk_parms + 4)
+        p0[:num_pk_parms] = p0tmp.ravel()
+        lb[:num_pk_parms] = p0tmp_lb.ravel()
+        ub[:num_pk_parms] = p0tmp_ub.ravel()
+
+        p0[-4] = bg0
+        p0[-3] = bg1
+        lb[-4:] = minf
+        ub[-4:] = inf
 
     return p0, (lb, ub)
 
@@ -486,6 +586,30 @@ def eval_pk_deriv_1d(p, x, y0, pktype):
 
 
 def fit_pk_obj_1d(p, x, f0, pktype):
+    """
+    Return residual between specified peak function and data.
+
+    Parameters
+    ----------
+    p : array_like
+        The peak (and background) parameter values.
+    x : array_like
+        The abscissa, e.g., 2theta values.
+    f0 : array_like
+        The measured intensities.
+    pktype : str
+        The peak function type.
+
+    Returns
+    -------
+    resd : ndarray
+        The weighted residual, (f(p, x) - f0)*ww.
+
+    Notes
+    -----
+    !!! These objective functions all have a linear background added in their
+        definition in peakfunctions
+    """
     ww = np.ones(f0.shape)
 
     if pktype == 'gaussian':
@@ -503,7 +627,7 @@ def fit_pk_obj_1d(p, x, f0, pktype):
         ww = 1./np.sqrt(f0)
         ww[np.isnan(ww)] = 0.0
 
-    resd = (f-f0)*ww
+    resd = (f - f0)*ww
     return resd
 
 
@@ -516,6 +640,10 @@ def fit_pk_obj_1d_bnded(p, x, f0, pktype, weight, lb, ub):
         f = pkfuncs.pvoigt1d(p, x)
     elif pktype == 'split_pvoigt':
         f = pkfuncs.split_pvoigt1d(p, x)
+    elif pktype == 'dcs_pinkbeam':
+        f = pkfuncs.pink_beam_dcs(p, x)
+        ww = 1./np.sqrt(f0)
+        ww[np.isnan(ww)] = 0.0
 
     num_data = len(f)
     num_parm = len(p)
diff --git a/hexrd/fitting/peakfunctions.py b/hexrd/fitting/peakfunctions.py
index ff2406c64..92adc6cdc 100644
--- a/hexrd/fitting/peakfunctions.py
+++ b/hexrd/fitting/peakfunctions.py
@@ -41,7 +41,8 @@
     'gaussian': 3,
     'lorentzian': 3,
     'pvoigt': 4,
-    'split_pvoigt': 6
+    'split_pvoigt': 6,
+    'pink_beam_dcs': 8
 }
 
 """
@@ -53,6 +54,8 @@
 Abramowitz and Stegun
 Error is < 1.5e-7 for all x
 """
+
+
 @numba_njit_if_available(cache=True, nogil=True)
 def erfc(x):
     # save the sign of x
@@ -60,20 +63,23 @@ def erfc(x):
     x = np.abs(x)
 
     # constants
-    a1,a2,a3,a4,a5,p = c_erf
+    a1, a2, a3, a4, a5, p = c_erf
 
     # A&S formula 7.1.26
     t = 1.0/(1.0 + p*x)
     y = 1.
- (((((a5*t + a4)*t) + a3)*t + a2)*t + a1)*t*np.exp(-x*x) - erf = sign*y # erf(-x) = -erf(x) + erf = sign*y # erf(-x) = -erf(x) return 1. - erf + """ cutom function to compute the exponential integral based on Padé approximation of exponential integral function. coefficients found in pg. 231 Abramowitz and Stegun, eq. 5.1.53 """ + + @numba_njit_if_available(cache=True, nogil=True) def exp1exp_under1(x): f = np.zeros(x.shape).astype(np.complex128) @@ -83,6 +89,7 @@ def exp1exp_under1(x): return (f - np.log(x) - np.euler_gamma)*np.exp(x) + """ cutom function to compute the exponential integral based on Padé approximation of exponential integral @@ -90,6 +97,8 @@ def exp1exp_under1(x): special functions and their approximations, vol 2 (1969) Elsevier """ + + @numba_njit_if_available(cache=True, nogil=True) def exp1exp_over1(x): num = np.zeros(x.shape).astype(np.complex128) @@ -107,12 +116,13 @@ def exp1exp_over1(x): return (num/den)*(1./x) + @numba_njit_if_available(cache=True, nogil=True) def exp1exp(x): mask = np.sign(x.real)*np.abs(x) > 1. f = np.zeros(x.shape).astype(np.complex128) - f[mask] = exp1exp_over1(x[mask]) + f[mask] = exp1exp_over1(x[mask]) f[~mask] = exp1exp_under1(x[~mask]) return f @@ -121,6 +131,8 @@ def exp1exp(x): # 1-D Gaussian Functions # ============================================================================= # Split the unit gaussian so this can be called for 2d and 3d functions + + def _unit_gaussian(p, x): """ Required Arguments: @@ -135,7 +147,7 @@ def _unit_gaussian(p, x): FWHM = p[1] sigma = FWHM/gauss_width_fact - f = np.exp(-(x-x0)**2/(2.*sigma**2.)) + f = np.exp(-(x - x0)**2/(2.*sigma**2.)) return f @@ -167,7 +179,7 @@ def gaussian1d(p, x): bg0 = p[3] bg1 = p[4] - f = _gaussian1d_no_bg(p[:3], x)+bg0+bg1*x + f = _gaussian1d_no_bg(p[:3], x) + bg0 + bg1*x return f @@ -187,9 +199,10 @@ def _gaussian1d_no_bg_deriv(p, x): sigma = FWHM/gauss_width_fact - dydx0 = _gaussian1d_no_bg(p, x)*((x-x0)/(sigma**2.)) + dydx0 = _gaussian1d_no_bg(p, x)*((x - x0)/(sigma**2.)) dydA = _unit_gaussian(p[[1, 2]], x) - dydFWHM = _gaussian1d_no_bg(p, x)*((x-x0)**2./(sigma**3.))/gauss_width_fact + dydFWHM = _gaussian1d_no_bg(p, x) \ + * ((x - x0)**2./(sigma**3.))/gauss_width_fact d_mat = np.zeros((len(p), len(x))) @@ -323,6 +336,7 @@ def lorentzian1d_deriv(p, x): # ============================================================================= # 1-D Psuedo Voigt Functions # ============================================================================= + # Split the unit function so this can be called for 2d and 3d functions def _unit_pvoigt1d(p, x): """ @@ -368,7 +382,7 @@ def pvoigt1d(p, x): bg0 = p[4] bg1 = p[5] - f = _pvoigt1d_no_bg(p[:4], x)+bg0+bg1*x + f = _pvoigt1d_no_bg(p[:4], x) + bg0 + bg1*x return f @@ -421,10 +435,11 @@ def split_pvoigt1d(p, x): bg0 = p[6] bg1 = p[7] - f = _split_pvoigt1d_no_bg(p[:6], x)+bg0+bg1*x + f = _split_pvoigt1d_no_bg(p[:6], x) + bg0 + bg1*x return f + """ ================================================================ ================================================================ @@ -440,6 +455,8 @@ def split_pvoigt1d(p, x): ================================================================ ================================================================ """ + + @numba_njit_if_available(cache=True, nogil=True) def _calc_alpha(alpha, x0): a0, a1 = alpha @@ -451,6 +468,7 @@ def _calc_beta(beta, x0): b0, b1 = beta return b0 + b1*np.tan(np.radians(0.5*x0)) + @numba_njit_if_available(cache=True, nogil=True) def _mixing_factor_pv(fwhm_g, fwhm_l): """ @@ 
-480,6 +498,7 @@ def _mixing_factor_pv(fwhm_g, fwhm_l):
 
     return eta, fwhm
 
+
 @numba_njit_if_available(nogil=True)
 def _gaussian_pink_beam(p, x):
     """
@@ -494,7 +513,7 @@ def _gaussian_pink_beam(p, x):
     p = [A,x0,alpha0,alpha1,beta0,beta1,fwhm_g,bkg_c0,bkg_c1,bkg_c2]
     """
 
-    A,x0,alpha,beta,fwhm_g = p
+    A, x0, alpha, beta, fwhm_g = p
 
     del_tth = x - x0
     sigsqr = fwhm_g**2
@@ -515,9 +534,9 @@ def _gaussian_pink_beam(p, x):
     g = np.zeros(x.shape)
     zmask = np.abs(del_tth) > 5.0
 
-    g[~zmask] = (0.5*(alpha*beta)/(alpha + beta)) \
-        * np.exp(u[~zmask])*t1[~zmask] + \
-        np.exp(v[~zmask])*t2[~zmask]
+    g[~zmask] = \
+        (0.5*(alpha*beta)/(alpha + beta)) * np.exp(u[~zmask])*t1[~zmask] \
+        + np.exp(v[~zmask])*t2[~zmask]
 
     mask = np.isnan(g)
     g[mask] = 0.
@@ -540,7 +559,7 @@ def _lorentzian_pink_beam(p, x):
     p = [A,x0,alpha0,alpha1,beta0,beta1,fwhm_l]
     """
 
-    A,x0,alpha,beta,fwhm_l = p
+    A, x0, alpha, beta, fwhm_l = p
 
     del_tth = x - x0
 
@@ -551,7 +570,7 @@ def _lorentzian_pink_beam(p, x):
     f1 = exp1exp(p)
     f2 = exp1exp(q)
 
-    y = -(alpha*beta)/(np.pi*(alpha+beta))*(f1+f2).imag
+    y = -(alpha*beta)/(np.pi*(alpha + beta))*(f1 + f2).imag
 
     mask = np.isnan(y)
     y[mask] = 0.
@@ -559,8 +578,9 @@ def _lorentzian_pink_beam(p, x):
 
     return y
 
+
 @numba_njit_if_available(nogil=True)
-def pink_beam_dcs(p, x):
+def _pink_beam_dcs_no_bg(p, x):
     """
     @author Saransh Singh, Lawrence Livermore National Lab
     @date 10/18/2021 SS 1.0 original
@@ -568,27 +588,68 @@ def pink_beam_dcs(p, x):
     more details can be found in
     Von Dreele et. al., J. Appl. Cryst. (2021). 54, 3–6
 
-    p has the following parameters
-    p = [A,x0,alpha0,alpha1,beta0,beta1,fwhm_g,fwhm_l,bkg_c0,bkg_c1,bkg_c2]
+    p has the following 8 parameters
+    p = [A, x0, alpha0, alpha1, beta0, beta1, fwhm_g, fwhm_l]
     """
 
     alpha = _calc_alpha((p[2], p[3]), p[1])
-    beta = _calc_beta((p[4], p[5]), p[1])
+    beta = _calc_beta((p[4], p[5]), p[1])
 
-    arg1 = np.array([alpha,beta,p[6]]).astype(np.float64)
-    arg2 = np.array([alpha,beta,p[7]]).astype(np.float64)
+    arg1 = np.array([alpha, beta, p[6]]).astype(np.float64)
+    arg2 = np.array([alpha, beta, p[7]]).astype(np.float64)
 
-    p_g = np.hstack((p[0:2],arg1))
-    p_l = np.hstack((p[0:2],arg2))
+    p_g = np.hstack((p[0:2], arg1))
+    p_l = np.hstack((p[0:2], arg2))
 
-    bkg_c = p[8:11]
-    bkg = p[8] + p[9]*x + p[10]*(2.*x**2 - 1.)
+    # bkg = p[8] + p[9]*x + p[10]*(2.*x**2 - 1.)
+    # bkg = p[8] + p[9]*x
+    # !!! make like the other peak funcs here
 
     eta, fwhm = _mixing_factor_pv(p[6], p[7])
 
     G = _gaussian_pink_beam(p_g, x)
     L = _lorentzian_pink_beam(p_l, x)
 
-    return eta*L + (1.-eta)*G + bkg
+    return eta*L + (1. - eta)*G
+
+
+def pink_beam_dcs(p, x):
+    """
+    @author Saransh Singh, Lawrence Livermore National Lab
+    @date 10/18/2021 SS 1.0 original
+    @details pink beam profile for DCS data for calibration.
+    more details can be found in
+    Von Dreele et. al., J. Appl. Cryst. (2021). 54, 3–6
+
+    p has the following 10 parameters
+    p = [A, x0, alpha0, alpha1, beta0, beta1, fwhm_g, fwhm_l, bkg_c0, bkg_c1]
+    """
+    return _pink_beam_dcs_no_bg(p[:-2], x) + p[-2] + p[-1]*x
+
+
+def pink_beam_dcs_lmfit(
+        x, A, x0, alpha0, alpha1, beta0, beta1, fwhm_g, fwhm_l):
+    """
+    @author Saransh Singh, Lawrence Livermore National Lab
+    @date 10/18/2021 SS 1.0 original
+    @details pink beam profile for DCS data for calibration.
+    more details can be found in
+    Von Dreele et. al., J. Appl. Cryst. (2021).
54, 3–6 + """ + alpha = _calc_alpha((alpha0, alpha1), x0) + beta = _calc_beta((beta0, beta1), x0) + + arg1 = np.array([alpha, beta, fwhm_g], dtype=np.float64) + arg2 = np.array([alpha, beta, fwhm_l], dtype=np.float64) + + p_g = np.hstack([[A, x0], arg1]).astype(np.float64, order='C') + p_l = np.hstack([[A, x0], arg2]).astype(np.float64, order='C') + + eta, fwhm = _mixing_factor_pv(fwhm_g, fwhm_l) + + G = _gaussian_pink_beam(p_g, x) + L = _lorentzian_pink_beam(p_l, x) + + return eta*L + (1. - eta)*G + """ ================================================================ @@ -600,6 +661,7 @@ def pink_beam_dcs(p, x): # Tanh Step Down # ============================================================================= + def tanh_stepdown_nobg(p, x): """ Required Arguments: @@ -884,22 +946,21 @@ def _mpeak_1d_no_bg(p, x, pktype, num_pks): f = np.zeros(len(x)) - if pktype == 'gaussian' or pktype == 'lorentzian': - p_fit = np.reshape(p[:3*num_pks], [num_pks, 3]) - elif pktype == 'pvoigt': - p_fit = np.reshape(p[:4*num_pks], [num_pks, 4]) - elif pktype == 'split_pvoigt': - p_fit = np.reshape(p[:6*num_pks], [num_pks, 6]) + npp = mpeak_nparams_dict[pktype] + + p_fit = np.reshape(p[:npp*num_pks], [num_pks, npp]) for ii in np.arange(num_pks): if pktype == 'gaussian': - f = f+_gaussian1d_no_bg(p_fit[ii], x) + f += _gaussian1d_no_bg(p_fit[ii], x) elif pktype == 'lorentzian': - f = f+_lorentzian1d_no_bg(p_fit[ii], x) + f += _lorentzian1d_no_bg(p_fit[ii], x) elif pktype == 'pvoigt': - f = f+_pvoigt1d_no_bg(p_fit[ii], x) + f += _pvoigt1d_no_bg(p_fit[ii], x) elif pktype == 'split_pvoigt': - f = f+_split_pvoigt1d_no_bg(p_fit[ii], x) + f += _split_pvoigt1d_no_bg(p_fit[ii], x) + elif pktype == 'pink_beam_dcs': + f += _pink_beam_dcs_no_bg(p_fit[ii], x) return f diff --git a/hexrd/fitting/spectrum.py b/hexrd/fitting/spectrum.py new file mode 100644 index 000000000..d871a10eb --- /dev/null +++ b/hexrd/fitting/spectrum.py @@ -0,0 +1,529 @@ +import numpy as np +from numpy.polynomial import chebyshev + +from lmfit import Model, Parameters + +from hexrd.constants import fwhm_to_sigma +from hexrd.imageutil import snip1d + +from .utils import (_calc_alpha, _calc_beta, + _mixing_factor_pv, + _gaussian_pink_beam, + _lorentzian_pink_beam, + _parameter_arg_constructor, + _extract_parameters_by_name, + _set_bound_constraints, + _set_refinement_by_name, + _set_width_mixing_bounds, + _set_equality_constraints, + _set_peak_center_bounds) + +# ============================================================================= +# PARAMETERS +# ============================================================================= + +_function_dict_1d = { + 'gaussian': ['amp', 'cen', 'fwhm'], + 'lorentzian': ['amp', 'cen', 'fwhm'], + 'pvoigt': ['amp', 'cen', 'fwhm', 'mixing'], + 'split_pvoigt': ['amp', 'cen', 'fwhm_l', 'fwhm_h', 'mixing_l', 'mixing_h'], + 'pink_beam_dcs': ['amp', 'cen', + 'alpha0', 'alpha1', + 'beta0', 'beta1', + 'fwhm_g', 'fwhm_l'], + 'constant': ['c0'], + 'linear': ['c0', 'c1'], + 'quadratic': ['c0', 'c1', 'c2'], + 'cubic': ['c0', 'c1', 'c2', 'c3'], + 'quartic': ['c0', 'c1', 'c2', 'c3', 'c4'], + 'quintic': ['c0', 'c1', 'c2', 'c3', 'c4', 'c5'] +} + +num_func_params = dict.fromkeys(_function_dict_1d) +for key, val in _function_dict_1d.items(): + num_func_params[key] = len(val) + +pk_prefix_tmpl = "pk%d_" + +alpha0_DFLT, alpha1_DFLT, beta0_DFLT, beta1_DFLT = np.r_[ + 14.45, 0.0, 3.0162, -7.9411 +] + +param_hints_DFLT = (True, None, None, None, None) + +fwhm_min = 1e-3 +fwhm_DFLT = 0.1 +mixing_DFLT = 0.5 +pk_sep_min = 0.01 + + +# 
=============================================================================
+# SIMPLE FUNCTION DEFS
+# =============================================================================
+
+
+def constant_bkg(x, c0):
+    # return c0
+    return c0
+
+
+def linear_bkg(x, c0, c1):
+    # return c0 + c1*x
+    cheb_cls = chebyshev.Chebyshev(
+        [c0, c1], domain=(min(x), max(x))
+    )
+    return cheb_cls(x)
+
+
+def quadratic_bkg(x, c0, c1, c2):
+    # return c0 + c1*x + c2*x**2
+    cheb_cls = chebyshev.Chebyshev(
+        [c0, c1, c2], domain=(min(x), max(x))
+    )
+    return cheb_cls(x)
+
+
+def cubic_bkg(x, c0, c1, c2, c3):
+    # return c0 + c1*x + c2*x**2 + c3*x**3
+    cheb_cls = chebyshev.Chebyshev(
+        [c0, c1, c2, c3], domain=(min(x), max(x))
+    )
+    return cheb_cls(x)
+
+
+def quartic_bkg(x, c0, c1, c2, c3, c4):
+    # return c0 + c1*x + c2*x**2 + c3*x**3 + c4*x**4
+    cheb_cls = chebyshev.Chebyshev(
+        [c0, c1, c2, c3, c4], domain=(min(x), max(x))
+    )
+    return cheb_cls(x)
+
+
+def quintic_bkg(x, c0, c1, c2, c3, c4, c5):
+    # return c0 + c1*x + c2*x**2 + c3*x**3 + c4*x**4 + c5*x**5
+    cheb_cls = chebyshev.Chebyshev(
+        [c0, c1, c2, c3, c4, c5], domain=(min(x), max(x))
+    )
+    return cheb_cls(x)
+
+
+def chebyshev_bkg(x, *args):
+    cheb_cls = chebyshev.Chebyshev(args, domain=(min(x), max(x)))
+    return cheb_cls(x)
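Note that these background functions evaluate their coefficients as a Chebyshev series on the window's domain, not as raw monomials, so `c0`/`c1` in `linear_bkg` are not literally intercept/slope in the abscissa. A small sketch of the distinction:

```python
import numpy as np
from numpy.polynomial import chebyshev

x = np.linspace(5.0, 15.0, 11)  # e.g., a two-theta window in degrees

# Chebyshev series [c0, c1] mapped onto domain (5, 15) ...
cheb = chebyshev.Chebyshev([1.0, 2.0], domain=(x.min(), x.max()))

# ... equals c0 + c1*t with t the window mapped to [-1, 1],
# not c0 + c1*x in the raw abscissa
t = (2*x - (x.min() + x.max())) / (x.max() - x.min())
assert np.allclose(cheb(x), 1.0 + 2.0*t)
```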
+
+
+def gaussian_1d(x, amp, cen, fwhm):
+    return amp * np.exp(-(x - cen)**2 / (2*fwhm_to_sigma*fwhm)**2)
+
+
+def lorentzian_1d(x, amp, cen, fwhm):
+    return amp * (0.5*fwhm)**2 / ((x - cen)**2 + (0.5*fwhm)**2)
+
+
+def pvoigt_1d(x, amp, cen, fwhm, mixing):
+    return mixing*gaussian_1d(x, amp, cen, fwhm) \
+        + (1 - mixing)*lorentzian_1d(x, amp, cen, fwhm)
+
+
+def split_pvoigt_1d(x, amp, cen, fwhm_l, fwhm_h, mixing_l, mixing_h):
+    idx_l = x <= cen
+    idx_h = x > cen
+    return np.concatenate(
+        [pvoigt_1d(x[idx_l], amp, cen, fwhm_l, mixing_l),
+         pvoigt_1d(x[idx_h], amp, cen, fwhm_h, mixing_h)]
+    )
+
+
+def pink_beam_dcs(x, amp, cen, alpha0, alpha1, beta0, beta1, fwhm_g, fwhm_l):
+    """
+    @author Saransh Singh, Lawrence Livermore National Lab
+    @date 10/18/2021 SS 1.0 original
+    @details pink beam profile for DCS data for calibration.
+    more details can be found in
+    Von Dreele et. al., J. Appl. Cryst. (2021). 54, 3–6
+    """
+    alpha = _calc_alpha((alpha0, alpha1), cen)
+    beta = _calc_beta((beta0, beta1), cen)
+
+    arg1 = np.array([alpha, beta, fwhm_g], dtype=np.float64)
+    arg2 = np.array([alpha, beta, fwhm_l], dtype=np.float64)
+
+    p_g = np.hstack([[amp, cen], arg1]).astype(np.float64, order='C')
+    p_l = np.hstack([[amp, cen], arg2]).astype(np.float64, order='C')
+
+    eta, fwhm = _mixing_factor_pv(fwhm_g, fwhm_l)
+
+    G = _gaussian_pink_beam(p_g, x)
+    L = _lorentzian_pink_beam(p_l, x)
+
+    return eta*L + (1. - eta)*G
+
+
+def _amplitude_guess(x, x0, y, fwhm):
+    pt_l = np.argmin(np.abs(x - (x0 - 0.5*fwhm)))
+    pt_h = np.argmin(np.abs(x - (x0 + 0.5*fwhm)))
+    return np.max(y[pt_l:pt_h + 1])
+
+
+def _initial_guess(peak_positions, x, f,
+                   pktype='pvoigt', bgtype='linear',
+                   fwhm_guess=None):
+    """
+    Generate function-specific estimate for multi-peak parameters.
+
+    Parameters
+    ----------
+    peak_positions : array_like
+        The nominal peak positions on the abscissa.
+    x : array_like
+        The abscissa, e.g., 2theta values.
+    f : array_like
+        The intensity values; must have the same length as x.
+    pktype : str, optional
+        The peak function type. The default is 'pvoigt'.
+    bgtype : str, optional
+        The background function type. The default is 'linear'.
+    fwhm_guess : scalar or array_like, optional
+        The initial FWHM estimate(s); if None, a window-based default
+        is computed. The default is None.
+
+    Returns
+    -------
+    p0 : ndarray
+        The stacked initial peak parameters followed by the background
+        parameters.
+    """
+    npts = len(x)
+    assert len(f) == npts, "ordinate and data must be same length!"
+
+    num_pks = len(peak_positions)
+
+    if fwhm_guess is None:
+        fwhm_guess = (np.max(x) - np.min(x))/(20.*num_pks)
+    fwhm_guess = np.atleast_1d(fwhm_guess)
+    if(len(fwhm_guess) < 2):
+        fwhm_guess = fwhm_guess*np.ones(num_pks)
+
+    # estimate background with snip1d
+    # !!! using a window size based on abscissa
+    bkg = snip1d(np.atleast_2d(f),
+                 w=int(np.floor(len(f)/num_pks/2.))).flatten()
+
+    bkg_mod = chebyshev.Chebyshev(
+        [0., 0.], domain=(min(x), max(x))
+    )
+    fit_bkg = bkg_mod.fit(x, bkg, 1)
+    coeff = fit_bkg.coef
+
+    # make lin bkg subtracted spectrum
+    fsubtr = f - fit_bkg(x)
+
+    # number of parameters from reference dict
+    nparams_pk = num_func_params[pktype]
+    nparams_bg = num_func_params[bgtype]
+
+    pkparams = np.zeros((num_pks, nparams_pk))
+
+    # case processing
+    # !!! used to use (f[pt] - min_val) for ampl
+    if pktype == 'gaussian' or pktype == 'lorentzian':
+        # x is just 2theta values
+        # make guess for the initial parameters
+        for ii in np.arange(num_pks):
+            amp_guess = _amplitude_guess(
+                x, peak_positions[ii], fsubtr, fwhm_guess[ii]
+            )
+            pkparams[ii, :] = [
+                amp_guess,
+                peak_positions[ii],
+                fwhm_guess[ii]
+            ]
+    elif pktype == 'pvoigt':
+        # x is just 2theta values
+        # make guess for the initial parameters
+        for ii in np.arange(num_pks):
+            amp_guess = _amplitude_guess(
+                x, peak_positions[ii], fsubtr, fwhm_guess[ii]
+            )
+            pkparams[ii, :] = [
+                amp_guess,
+                peak_positions[ii],
+                fwhm_guess[ii],
+                0.5
+            ]
+    elif pktype == 'split_pvoigt':
+        # x is just 2theta values
+        # make guess for the initial parameters
+        for ii in np.arange(num_pks):
+            amp_guess = _amplitude_guess(
+                x, peak_positions[ii], fsubtr, fwhm_guess[ii]
+            )
+            pkparams[ii, :] = [
+                amp_guess,
+                peak_positions[ii],
+                fwhm_guess[ii],
+                fwhm_guess[ii],
+                0.5,
+                0.5
+            ]
+    elif pktype == 'pink_beam_dcs':
+        # x is just 2theta values
+        # make guess for the initial parameters
+        for ii in np.arange(num_pks):
+            amp_guess = _amplitude_guess(
+                x, peak_positions[ii], fsubtr, fwhm_guess[ii]
+            )
+            pkparams[ii, :] = [
+                amp_guess,
+                peak_positions[ii],
+                alpha0_DFLT,
+                alpha1_DFLT,
+                beta0_DFLT,
+                beta1_DFLT,
+                fwhm_guess[ii],
+                fwhm_guess[ii],
+            ]
+
+    if bgtype == 'constant':
+        bgparams = np.average(bkg)
+    else:
+        bgparams = np.hstack([coeff, np.zeros(nparams_bg - 2)])
+
+    return np.hstack([pkparams.flatten(), bgparams])
+
+# =============================================================================
+# MODELS
+# =============================================================================
+
+
+def _build_composite_model(npeaks=1, pktype='gaussian', bgtype='linear'):
+    if pktype == 'gaussian':
+        pkfunc = gaussian_1d
+    elif pktype == 'lorentzian':
+        pkfunc = lorentzian_1d
+    elif pktype == 'pvoigt':
+        pkfunc = pvoigt_1d
+    elif pktype == 'split_pvoigt':
+        pkfunc = split_pvoigt_1d
+    elif pktype == 'pink_beam_dcs':
+        pkfunc = pink_beam_dcs
+
+    spectrum_model = Model(pkfunc, prefix=pk_prefix_tmpl % 0)
+    for i in range(1, npeaks):
+        spectrum_model += Model(pkfunc, prefix=pk_prefix_tmpl % i)
+
+    if bgtype == 'constant':
+        spectrum_model += Model(constant_bkg)
+    elif bgtype == 'linear':
+        spectrum_model += Model(linear_bkg)
+    elif bgtype == 'quadratic':
+        spectrum_model += Model(quadratic_bkg)
+    elif bgtype == 'cubic':
+        spectrum_model += Model(cubic_bkg)
+    elif bgtype == 'quartic':
+        spectrum_model += Model(quartic_bkg)
+    elif bgtype == 'quintic':
+        spectrum_model += Model(quintic_bkg)
+    return spectrum_model
+
+
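A short sketch of what `_build_composite_model` assembles, assuming `lmfit` is installed and using the module-level `gaussian_1d` and `linear_bkg` defined above (parameter values are illustrative only):

```python
import numpy as np
from lmfit import Model

from hexrd.fitting.spectrum import gaussian_1d, linear_bkg

# a two-peak, linear-background composite, mirroring the builder above
model = (Model(gaussian_1d, prefix='pk0_')
         + Model(gaussian_1d, prefix='pk1_')
         + Model(linear_bkg))

x = np.linspace(0., 10., 101)
y = model.eval(x=x,
               pk0_amp=1., pk0_cen=3., pk0_fwhm=0.5,
               pk1_amp=2., pk1_cen=7., pk1_fwhm=0.5,
               c0=0.1, c1=0.0)
```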
+# =============================================================================
+# CLASSES
+# =============================================================================
+
+
+class SpectrumModel(object):
+    def __init__(self, data, peak_centers,
+                 pktype='pvoigt', bgtype='linear',
+                 fwhm_init=None):
+        """
+        Instantiates spectrum model.
+
+        Parameters
+        ----------
+        data : array_like
+            The (n, 2) array representing the spectrum as (2θ, intensity).
+            The units of tth are assumed to be in degrees.
+        peak_centers : array_like
+            The (n, ) vector of ideal peak 2θ positions in degrees.
+        pktype : str, optional
+            The peak function type; must be a key in _function_dict_1d.
+            The default is 'pvoigt'.
+        bgtype : str, optional
+            The background function type; must be a key in
+            _function_dict_1d.  The default is 'linear'.
+        fwhm_init : scalar, optional
+            The initial FWHM estimate in degrees; if None, a window-based
+            default is computed.  The default is None.
+
+        Returns
+        -------
+        None.
+
+        Notes
+        -----
+        - data abscissa and peak centers are in DEGREES.
+
+        """
+        # peak and background spec
+        assert pktype in _function_dict_1d.keys(), \
+            "peak type '%s' not recognized" % pktype
+        assert bgtype in _function_dict_1d.keys(), \
+            "background type '%s' not recognized" % bgtype
+        self._pktype = pktype
+        self._bgtype = bgtype
+
+        self._tth0 = peak_centers
+        num_peaks = len(peak_centers)
+
+        master_keys_pks = _function_dict_1d[pktype]
+        master_keys_bkg = _function_dict_1d[bgtype]
+
+        # spectrum data
+        data = np.atleast_2d(data)
+        assert data.shape[1] == 2, \
+            "data must be [[tth_0, int_0], ..., [tth_N, int_N]]"
+        assert len(data) > 10, \
+            "check your input spectrum; you provided fewer than 10 points."
+        self._data = data
+
+        xdata, ydata = data.T
+        window_range = (np.min(xdata), np.max(xdata))
+        ymax = np.max(ydata)
+
+        # model
+        spectrum_model = _build_composite_model(
+            num_peaks, pktype=pktype, bgtype=bgtype
+        )
+        self._model = spectrum_model
+
+        p0 = _initial_guess(
+            self._tth0, xdata, ydata,
+            pktype=self._pktype, bgtype=self._bgtype,
+            fwhm_guess=fwhm_init
+        )
+        psplit = num_func_params[bgtype]
+        p0_pks = np.reshape(p0[:-psplit], (num_peaks, num_func_params[pktype]))
+        p0_bkg = p0[-psplit:]
+
+        # peaks
+        initial_params_pks = Parameters()
+        for i, pi in enumerate(p0_pks):
+            new_keys = [pk_prefix_tmpl % i + k for k in master_keys_pks]
+            initial_params_pks.add_many(
+                *_parameter_arg_constructor(
+                    dict(zip(new_keys, pi)), param_hints_DFLT
+                )
+            )
+
+        # set some special constraints
+        _set_width_mixing_bounds(
+            initial_params_pks, min_w=fwhm_min, max_w=np.inf
+        )
+        _set_bound_constraints(
+            initial_params_pks, 'amp', min_val=0., max_val=ymax
+        )
+        _set_peak_center_bounds(
+            initial_params_pks, window_range, min_sep=pk_sep_min
+        )
+        if pktype == 'pink_beam_dcs':
+            # set bounds on fwhm params and mixing (where applicable)
+            # !!! important for making pseudo-Voigt behave!
+ _set_refinement_by_name(initial_params_pks, 'alpha', vary=False) + _set_refinement_by_name(initial_params_pks, 'beta', vary=False) + _set_equality_constraints( + initial_params_pks, + zip(_extract_parameters_by_name(initial_params_pks, 'fwhm_g'), + _extract_parameters_by_name(initial_params_pks, 'fwhm_l')) + ) + elif pktype == 'split_pvoigt': + mparams = _extract_parameters_by_name( + initial_params_pks, 'mixing_l' + ) + for mp in mparams[1:]: + _set_equality_constraints( + initial_params_pks, ((mp, mparams[0]), ) + ) + mparams = _extract_parameters_by_name( + initial_params_pks, 'mixing_h' + ) + for mp in mparams[1:]: + _set_equality_constraints( + initial_params_pks, ((mp, mparams[0]), ) + ) + + # background + initial_params_bkg = Parameters() + initial_params_bkg.add_many( + *_parameter_arg_constructor( + dict(zip(master_keys_bkg, p0_bkg)), + param_hints_DFLT + ) + ) + + self._peak_params = initial_params_pks + self._background_params = initial_params_bkg + + @property + def pktype(self): + return self._pktype + + @property + def bgtype(self): + return self._bgtype + + @property + def data(self): + return self._data + + @property + def tth0(self): + return self._tth0 + + @property + def num_peaks(self): + return len(self.tth0) + + @property + def model(self): + return self._model + + @property + def peak_params(self): + return self._peak_params + + @property + def background_params(self): + return self._background_params + + @property + def params(self): + return self._peak_params + self._background_params + + def fit(self): + xdata, ydata = self.data.T + window_range = (np.min(xdata), np.max(xdata)) + if self.pktype == 'pink_beam_dcs': + for pname, param in self.peak_params.items(): + if 'alpha' in pname or 'beta' in pname or 'fwhm' in pname: + param.vary = False + + res0 = self.model.fit(ydata, params=self.params, x=xdata) + + new_p = Parameters() + new_p.add_many( + *_parameter_arg_constructor(res0.best_values, param_hints_DFLT) + ) + _set_equality_constraints(new_p, 'alpha0') + _set_equality_constraints(new_p, 'beta0') + _set_refinement_by_name(new_p, 'alpha1', vary=False) + _set_refinement_by_name(new_p, 'beta1', vary=False) + _set_width_mixing_bounds(new_p, min_w=fwhm_min, max_w=np.inf) + # !!! 
not sure on this, but it seems to give more stable results
+            # with many peaks
+            _set_equality_constraints(
+                new_p,
+                zip(_extract_parameters_by_name(new_p, 'fwhm_g'),
+                    _extract_parameters_by_name(new_p, 'fwhm_l'))
+            )
+            _set_peak_center_bounds(new_p, window_range, min_sep=pk_sep_min)
+
+            # refit
+            res1 = self.model.fit(ydata, params=new_p, x=xdata)
+        else:
+            res1 = self.model.fit(ydata, params=self.params, x=xdata)
+
+        return res1
diff --git a/hexrd/fitting/utils.py b/hexrd/fitting/utils.py
new file mode 100644
index 000000000..d0abfd1ac
--- /dev/null
+++ b/hexrd/fitting/utils.py
@@ -0,0 +1,324 @@
+import numpy as np
+
+from hexrd.constants import (
+    c_erf, cnum_exp1exp, cden_exp1exp, c_coeff_exp1exp, sqrt_epsf
+)
+from hexrd.matrixutil import uniqueVectors
+from hexrd.utils.decorators import numba_njit_if_available
+
+
+# =============================================================================
+# LMFIT Parameter munging utilities
+# =============================================================================
+
+
+def _parameter_arg_constructor(pdict, pargs):
+    return [i + pargs for i in pdict.items()]
+
+
+def _extract_parameters_by_name(params, pname_root):
+    return [s for s in params.keys() if pname_root in s]
+
+
+def _set_refinement_by_name(params, pname_root, vary=True):
+    target_pnames = _extract_parameters_by_name(params, pname_root)
+    if len(target_pnames) > 0:
+        for pname in target_pnames:
+            params[pname].vary = vary
+    else:
+        raise RuntimeWarning("No matching parameters found; exiting")
+
+
+def _set_equality_constraints(params, pname_spec):
+    if isinstance(pname_spec, str):
+        target_pnames = _extract_parameters_by_name(params, pname_spec)
+        if len(target_pnames) > 0:
+            for pname in target_pnames[1:]:
+                params[pname].expr = target_pnames[0]
+        else:
+            raise RuntimeWarning("No matching parameters found; exiting")
+    else:
+        for name_pair in pname_spec:
+            assert len(name_pair) == 2, \
+                "entries in name spec must be 2-tuples"
+            params[name_pair[0]].expr = name_pair[1]
+
+
+def _set_bound_constraints(params, pname_spec,
+                           min_val=-np.inf, max_val=np.inf):
+    target_pnames = _extract_parameters_by_name(params, pname_spec)
+    for pname in target_pnames:
+        params[pname].min = min_val
+        params[pname].max = max_val
+
+
+def _set_width_mixing_bounds(params, min_w=0.01, max_w=np.inf):
+    for pname, param in params.items():
+        if 'fwhm' in pname:
+            param.min = min_w
+            param.max = max_w
+        if 'mixing' in pname:
+            param.min = 0.
+            param.max = 1.
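A small sketch of how these helpers compose on an `lmfit.Parameters` object, using the `pk%d_` prefix convention from `spectrum.py` (names and values illustrative only):

```python
from lmfit import Parameters

from hexrd.fitting.utils import (_set_equality_constraints,
                                 _set_refinement_by_name,
                                 _set_width_mixing_bounds)

p = Parameters()
p.add_many(('pk0_cen', 5.0), ('pk0_fwhm', 0.1), ('pk0_mixing', 0.5),
           ('pk1_cen', 6.0), ('pk1_fwhm', 0.1), ('pk1_mixing', 0.5))

# clamp all fwhm_* to [1e-3, inf) and mixing_* to [0, 1]
_set_width_mixing_bounds(p, min_w=1e-3)

# tie the second fwhm to the first and freeze all centers
_set_equality_constraints(p, (('pk1_fwhm', 'pk0_fwhm'), ))
_set_refinement_by_name(p, 'cen', vary=False)
```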
+
+
+def _set_peak_center_bounds(params, window_range, min_sep=0.01):
+    target_pnames = _extract_parameters_by_name(params, 'cen')
+    npks = len(target_pnames)
+    if npks > 1:
+        center_values = []
+        for pname in target_pnames:
+            # will need these to sort the peaks with increasing centers
+            center_values.append(params[pname].value)
+
+            # force peaks to be in window (with buffer)
+            params[pname].min = window_range[0] + min_sep
+            params[pname].max = window_range[1] - min_sep
+
+        # make sure peak list does not include any peaks closer than min_sep
+        uvec = uniqueVectors(
+            np.atleast_2d(center_values), tol=min_sep + sqrt_epsf
+        ).squeeze()
+        if len(uvec) < npks:
+            raise RuntimeError(
+                "Params contain peaks separated by <="
+                + " the specified min, %f" % min_sep
+            )
+
+        # get the sorted indices
+        peak_order = np.argsort(center_values)
+
+        # sort parameter names
+        sorted_pnames = np.asarray(target_pnames)[peak_order].tolist()
+
+        # add new parameter to fit peak separations
+        prev_peak = params[sorted_pnames[0]]
+
+        # add parameters to fit subsequent centers as separations
+        for ip, pname in enumerate(sorted_pnames[1:]):
+            curr_peak = params[pname]
+            new_pname = 'pksep%d' % ip
+            params.add(name=new_pname,
+                       value=curr_peak.value - prev_peak.value,
+                       min=min_sep,
+                       max=window_range[1] - window_range[0],
+                       vary=True)
+            curr_peak.expr = '+'.join([prev_peak.name, new_pname])
+            prev_peak = curr_peak
+    else:
+        msg = "Found only 1 peak; setting no bounds"
+        # print(msg)
+        # raise RuntimeWarning(msg)
+
+
+# =============================================================================
+# DCS-related utilities
+# =============================================================================
+
+"""
+custom function to compute the complementary error function
+based on rational approximation of the convergent Taylor
+series. coefficients found in
+Formula 7.1.26
+Handbook of Mathematical Functions,
+Abramowitz and Stegun
+Error is < 1.5e-7 for all x
+"""
+
+
+@numba_njit_if_available(cache=True, nogil=True)
+def erfc(x):
+    # save the sign of x
+    sign = np.sign(x)
+    x = np.abs(x)
+
+    # constants
+    a1, a2, a3, a4, a5, p = c_erf
+
+    # A&S formula 7.1.26
+    t = 1.0/(1.0 + p*x)
+    y = 1. - (((((a5*t + a4)*t) + a3)*t + a2)*t + a1)*t*np.exp(-x*x)
+    erf = sign*y  # erf(-x) = -erf(x)
+    return 1. - erf
+
+
+"""
+custom function to compute the exponential integral
+based on Padé approximation of exponential integral
+function. coefficients found in pg. 231 Abramowitz
+and Stegun, eq. 5.1.53
+"""
+
+
+@numba_njit_if_available(cache=True, nogil=True)
+def exp1exp_under1(x):
+    f = np.zeros(x.shape).astype(np.complex128)
+    for i in range(6):
+        xx = x**(i+1)
+        f += c_coeff_exp1exp[i]*xx
+
+    return (f - np.log(x) - np.euler_gamma)*np.exp(x)
+
+
+"""
+custom function to compute the exponential integral
+based on Padé approximation of exponential integral
+function. coefficients found in pg. 415 Y. Luke, The
+special functions and their approximations, vol 2
+(1969) Elsevier
+"""
+
+
+@numba_njit_if_available(cache=True, nogil=True)
+def exp1exp_over1(x):
+    num = np.zeros(x.shape).astype(np.complex128)
+    den = np.zeros(x.shape).astype(np.complex128)
+
+    for i in range(11):
+        p = 10-i
+        if p != 0:
+            xx = x**p
+            num += cnum_exp1exp[i]*xx
+            den += cden_exp1exp[i]*xx
+        else:
+            num += cnum_exp1exp[i]
+            den += cden_exp1exp[i]
+
+    return (num/den)*(1./x)
+
+
+@numba_njit_if_available(cache=True, nogil=True)
+def exp1exp(x):
+    mask = np.sign(x.real)*np.abs(x) > 1.
+
+    f = np.zeros(x.shape).astype(np.complex128)
+    f[mask] = exp1exp_over1(x[mask])
+    f[~mask] = exp1exp_under1(x[~mask])
+
+    return f
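Both Padé branches approximate the scaled exponential integral exp(x)·E1(x), so they can be spot-checked against scipy (assuming `scipy` is available; the tolerance here is a guess consistent with the quoted coefficient accuracy):

```python
import numpy as np
from scipy import special

from hexrd.fitting.utils import exp1exp

# exp1exp(x) should match exp(x)*E1(x) on both sides of |x| = 1
x = np.array([0.1, 0.5, 2.0, 10.0], dtype=np.complex128)
assert np.allclose(exp1exp(x), np.exp(x)*special.exp1(x), rtol=1e-4)
```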
+
+
+@numba_njit_if_available(cache=True, nogil=True)
+def _calc_alpha(alpha, x0):
+    a0, a1 = alpha
+    return (a0 + a1*np.tan(np.radians(0.5*x0)))
+
+
+@numba_njit_if_available(cache=True, nogil=True)
+def _calc_beta(beta, x0):
+    b0, b1 = beta
+    return b0 + b1*np.tan(np.radians(0.5*x0))
+
+
+@numba_njit_if_available(cache=True, nogil=True)
+def _mixing_factor_pv(fwhm_g, fwhm_l):
+    """
+    @AUTHOR:  Saransh Singh, Lawrence Livermore National Lab,
+              saransh1@llnl.gov
+    @DATE:    05/20/2020 SS 1.0 original
+              01/29/2021 SS 2.0 updated to depend only on fwhm of profile
+              P. Thompson, D.E. Cox & J.B. Hastings, J. Appl. Cryst.,20,79-83,
+              1987
+    @DETAILS: calculates the mixing factor eta to best approximate Voigt
+    peak shapes
+    """
+    fwhm = fwhm_g**5 + 2.69269 * fwhm_g**4 * fwhm_l + \
+        2.42843 * fwhm_g**3 * fwhm_l**2 + \
+        4.47163 * fwhm_g**2 * fwhm_l**3 +\
+        0.07842 * fwhm_g * fwhm_l**4 +\
+        fwhm_l**5
+
+    fwhm = fwhm**0.20
+    eta = 1.36603 * (fwhm_l/fwhm) - \
+        0.47719 * (fwhm_l/fwhm)**2 + \
+        0.11116 * (fwhm_l/fwhm)**3
+    if eta < 0.:
+        eta = 0.
+    elif eta > 1.:
+        eta = 1.
+
+    return eta, fwhm
+
+
+@numba_njit_if_available(nogil=True)
+def _gaussian_pink_beam(p, x):
+    """
+    @author Saransh Singh, Lawrence Livermore National Lab
+    @date 03/22/2021 SS 1.0 original
+    @details the gaussian component of the pink beam peak profile
+    obtained by convolution of gaussian with normalized back to back
+    exponentials. more details can be found in
+    Von Dreele et. al., J. Appl. Cryst. (2021). 54, 3–6
+
+    p has the following parameters
+    p = [A, x0, alpha, beta, fwhm_g]
+    """
+
+    A, x0, alpha, beta, fwhm_g = p
+
+    del_tth = x - x0
+    sigsqr = fwhm_g**2
+
+    f1 = alpha*sigsqr + 2.0*del_tth
+    f2 = beta*sigsqr - 2.0*del_tth
+    f3 = np.sqrt(2.0)*fwhm_g
+
+    u = 0.5*alpha*f1
+    v = 0.5*beta*f2
+
+    y = (f1-del_tth)/f3
+    z = (f2+del_tth)/f3
+
+    t1 = erfc(y)
+    t2 = erfc(z)
+
+    g = np.zeros(x.shape)
+    zmask = np.abs(del_tth) > 5.0
+
+    g[~zmask] = \
+        (0.5*(alpha*beta)/(alpha + beta)) * np.exp(u[~zmask])*t1[~zmask] \
+        + np.exp(v[~zmask])*t2[~zmask]
+
+    mask = np.isnan(g)
+    g[mask] = 0.
+    g *= A
+
+    return g
+
+
+@numba_njit_if_available(nogil=True)
+def _lorentzian_pink_beam(p, x):
+    """
+    @author Saransh Singh, Lawrence Livermore National Lab
+    @date 03/22/2021 SS 1.0 original
+    @details the lorentzian component of the pink beam peak profile
+    obtained by convolution of gaussian with normalized back to back
+    exponentials. more details can be found in
+    Von Dreele et. al., J. Appl. Cryst. (2021). 54, 3–6
+
+    p has the following parameters
+    p = [A, x0, alpha, beta, fwhm_l]
+    """
+
+    A, x0, alpha, beta, fwhm_l = p
+
+    del_tth = x - x0
+
+    p = -alpha*del_tth + 1j*0.5*alpha*fwhm_l
+    q = -beta*del_tth + 1j*0.5*beta*fwhm_l
+
+    y = np.zeros(x.shape)
+    f1 = exp1exp(p)
+    f2 = exp1exp(q)
+
+    y = -(alpha*beta)/(np.pi*(alpha + beta))*(f1 + f2).imag
+
+    mask = np.isnan(y)
+    y[mask] = 0.
+ y *= A + + return y + +# ============================================================================= +# pseudo-Voigt +# ============================================================================= diff --git a/hexrd/instrument.py b/hexrd/instrument.py index 48dfbe66d..3b6dda4c3 100644 --- a/hexrd/instrument.py +++ b/hexrd/instrument.py @@ -3852,7 +3852,8 @@ def _pixel_solid_angles(rows, cols, pixel_size_row, pixel_size_col, 'tvec': tvec, } func = partial(_generate_pixel_solid_angles, **kwargs) - with ProcessPoolExecutor(max_workers=max_workers) as executor: + with ProcessPoolExecutor(mp_context=constants.mp_context, + max_workers=max_workers) as executor: results = executor.map(func, tasks) # Concatenate all the results together
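Finally, a minimal end-to-end sketch of the new fitting path on synthetic data (values illustrative only); `SpectrumModel.fit()` returns an `lmfit` result whose `success` flag and `best_values` keys are exactly what `PowderCalibrator._extract_powder_lines` consumes:

```python
import numpy as np

from hexrd.fitting.spectrum import SpectrumModel

# synthetic two-peak spectrum on a 2-theta grid, in degrees
tth = np.linspace(5., 9., 400)
intensity = (
    100.*np.exp(-(tth - 6.)**2/(2*0.05**2))
    + 80.*np.exp(-(tth - 8.)**2/(2*0.05**2))
    + 5. + 0.1*tth
)
data = np.vstack([tth, intensity]).T

sm = SpectrumModel(data, [6., 8.], pktype='pvoigt', bgtype='linear')
res = sm.fit()
if res.success:
    print(res.best_values['pk0_cen'], res.best_values['pk1_cen'])
```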