#!/usr/bin/env python
'''
Diffraction functions required for fitting and analyzing data.

mkak 2017.02.06 (originally written spring 2016)
'''
##########################################################################
# IMPORT PYTHON PACKAGES

import math

import numpy as np
from scipy import optimize, signal, interpolate

from .xrd_tools import (d_from_q, d_from_twth, twth_from_d, twth_from_q,
                        q_from_d, q_from_twth)

from ..math import peak_indices

##########################################################################
# FUNCTIONS
def peakfilter(intthrsh, ipeaks, y, verbose=False):
    '''
    Returns the subset of peak indices from peakfinder() whose
    intensity in y exceeds the minimum threshold intthrsh.
    '''
    ipks = [i for i in ipeaks if y[i] > intthrsh]
    if verbose:
        print('Peaks under intensity %i filtered out.' % intthrsh)

    return ipks
def peaklocater(ipeaks, x):
    '''
    Returns the x values corresponding to the peak indices
    found by peakfinder().
    '''
    xpeaks = [x[i] for i in ipeaks]

    return np.array(xpeaks)
def peakfinder(y, method='scipy',
               widths=20, gapthrsh=5, thres=0.0, min_dist=10):
    '''
    Returns indices of the peaks found in the dataset y.
    '''
    if method.startswith('peak'):
        ## Use a local name distinct from the imported peak_indices
        ## function; reassigning the same name would make it a local
        ## variable and raise UnboundLocalError on the call.
        ipeaks = peak_indices(y, threshold=thres, min_dist=min_dist)
    else:
        ## scipy.signal.find_peaks_cwt(vector, widths, wavelet=None, max_distances=None,
        ##                             gap_thresh=None, min_length=None, min_snr=1, noise_perc=10)
        widths = np.arange(1, int(len(y)/widths))
        ipeaks = signal.find_peaks_cwt(y, widths, gap_thresh=gapthrsh)

    return ipeaks
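## A minimal usage sketch (illustrative, not from the original source):
## locate peaks in a 1-D pattern, drop weak ones, and report positions.
## Here `twth` and `I` are assumed to be matching 1-D arrays.
#
#   ipks = peakfinder(I, method='scipy', widths=20, gapthrsh=5)
#   ipks = peakfilter(100, ipks, I)      # keep peaks above 100 counts
#   twth_pks = peaklocater(ipks, twth)   # 2theta positions of those peaks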
def peakfitter(ipeaks, twth, I, verbose=True, halfwidth=40, fittype='single'):
    peaktwth, peakFWHM, peakinty = [], [], []
    for j in ipeaks:
        if j > halfwidth and (len(twth)-j) > halfwidth:
            minval = int(j - halfwidth)
            maxval = int(j + halfwidth)

            if I[j] > I[minval] and I[j] > I[maxval]:
                xdata = twth[minval:maxval]
                ydata = I[minval:maxval]

                try:
                    pktwth, pkfwhm, pkint = data_gaussian_fit(xdata, ydata, fittype=fittype)
                    peaktwth += [pktwth]
                    peakFWHM += [pkfwhm]
                    peakinty += [pkint]
                except Exception:
                    ## Skip peaks that cannot be fit.
                    pass

    return np.array(peaktwth), np.array(peakFWHM), np.array(peakinty)
def data_gaussian_fit(x, y, fittype='single'):
    '''
    Fits a single or double Gaussian function to the data.
    '''
    meanx = sum(x)/float(len(x))
    meany = sum(y)/float(len(y))
    sigma = np.sqrt(sum(y*(x-meanx)**2)/float(len(x)))

    try:
        popt, pcov = optimize.curve_fit(gaussian, x, y, p0=[np.max(y), meanx, sigma])
    except Exception:
        popt = [1, 1, 1]

    if fittype == 'double':
        a, b, c = popt
        popt2, pcov2 = optimize.curve_fit(doublegaussian, x, y,
                                          p0=[a, b, c, np.min(y), meanx, sigma])

    ## R-squared terms (currently computed but not returned)
    rsqu_n1 = np.sum((y - gaussian(x, *popt))**2)
    rsqu_n2 = np.sum((y - doublegaussian(x, *popt2))**2) if fittype == 'double' else 0
    rsqu_d = np.sum((y - meany)**2)

    ## FWHM of a Gaussian is 2*sqrt(2*ln(2))*sigma; note math.log(2),
    ## not math.log1p(2) (which is ln(3)).
    if fittype == 'double':
        pkpos = popt2[1]
        pkfwhm = abs(2*np.sqrt(2*math.log(2))*popt2[2])
        pkint = np.max(doublegaussian(x, *popt2))
    else:
        pkpos = popt[1]
        pkfwhm = abs(2*np.sqrt(2*math.log(2))*popt[2])
        pkint = np.max(gaussian(x, *popt))

    return pkpos, pkfwhm, pkint
def gaussian(x, a, b, c):
    return a*np.exp(-(x-b)**2/(2*c**2))

def doublegaussian(x, a1, b1, c1, a2, b2, c2):
    return a1*np.exp(-(x-b1)**2/(2*c1**2)) + a2*np.exp(-(x-b2)**2/(2*c2**2))
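## Note on the FWHM factor used above: setting gaussian(x, a, b, c)
## equal to a/2 and solving for x gives x = b +/- sqrt(2*ln(2))*c, so
##     FWHM = 2*sqrt(2*ln(2))*c  (~ 2.3548*c)
## This is the 2*np.sqrt(2*math.log(2)) factor used throughout this module.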
def instrumental_fit_uvw(ipeaks, twthaxis, I, halfwidth=40, verbose=True):
    twth, FWHM, inten = peakfitter(ipeaks, twthaxis, I, halfwidth=halfwidth,
                                   fittype='double', verbose=verbose)

    tanth = np.tan(np.radians(twth/2))
    sqFWHM = FWHM**2

    (u, v, w) = data_poly_fit(tanth, sqFWHM, verbose=verbose)

    if verbose:
        print('\nFit results:')
        for i, (twthi, fwhmi, inteni) in enumerate(zip(twth, FWHM, inten)):
            print('Peak %i @ %0.2f deg. (fwhm %0.3f deg, %i counts)' % (i, twthi, fwhmi, inteni))
        print('\nInstrumental broadening parameters:')
        print('--- U : %0.8f' % u)
        print('--- V : %0.8f' % v)
        print('--- W : %0.8f\n' % w)

    return (u, v, w)
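## The fit above follows the Caglioti relation for instrumental peak
## width as a function of Bragg angle theta:
##     FWHM(theta)**2 = U*tan(theta)**2 + V*tan(theta) + W
## so fitting FWHM**2 against tan(theta) with a quadratic yields (U, V, W).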
def poly_func(x, a, b, c):
    return a*x**2 + b*x + c
def data_poly_fit(x, y, plot=False, verbose=False):
    '''
    Fits a set of data to a second-order polynomial function.
    '''
    try:
        popt, pcov = optimize.curve_fit(poly_func, x, y, p0=[1, 1, 1])
    except Exception:
        print('WARNING: scipy.optimize.curve_fit was unsuccessful.')
        return [1, 1, 1]

    n = len(x)
    meany = sum(y)/n

    ## R-squared terms (currently computed but not returned)
    rsqu_n = np.sum((y - poly_func(x, *popt))**2)
    rsqu_d = np.sum((y - meany)**2)

    if verbose:
        print('---Polynomial Fit:')
        print('--- U : %0.8f' % popt[0])
        print('--- V : %0.8f' % popt[1])
        print('--- W : %0.8f\n' % popt[2])

    return popt
def trim_range(data, xmin, xmax, axis=0):
    '''
    Trims data to the range [xmin, xmax] along the given axis.
    (Parameters renamed from min/max to avoid shadowing the builtins.)
    '''
    maxi = -1
    mini = 0
    for i, val in enumerate(data[axis]):
        mini = i if val < xmin else mini
        maxi = i if val < xmax else maxi

    return data[:, mini:maxi]
def interpolate_for_y(x, x0, y0):
    '''Evaluates a spline through (x0, y0) at the points x.'''
    t = interpolate.splrep(x0, y0)
    return interpolate.splev(x, t, der=0)
def calc_peak():
    x = np.arange(-2, 2, 0.001)

    ## Lorentzian
    b = 1/(1+(x**2))

    ## Gaussian (math.log(2) gives unit half-width at half maximum)
    c = np.exp(-math.log(2)*(x**2))

    ## VOIGT: Convolve, shift, and scale
    d = np.convolve(b, c, 'same')
    d = norm_pattern(d, c)
    shiftx = find_max(x, d)
    newx = x - shiftx

    ## Diffraction pattern data:
    ## x,b    - 'Lorentzian'
    ## x,c    - 'Gaussian'
    ## newx,d - 'Voigt'
    return 'Lorentzian', x, b, 'Gaussian', x, c, 'Voigt', newx, d
def norm_pattern(intensity, scale_int):
    max_int = np.max(intensity)
    scale = np.max(scale_int)
    intensity = intensity/max_int*scale

    return intensity

def scale_100(intensity):
    intensity = norm_pattern(intensity, 100)

    return intensity
def find_max(x, y):
    ## Returns the x value at the (first) maximum of y.
    return x[np.argmax(y)]
def calcRsqu(y, ycalc):
    '''
    Returns the coefficient of determination (R-squared) of ycalc
    against y: 1 - SS_res/SS_tot.
    '''
    ymean = np.mean(y)
    ss_res = np.sum((np.asarray(y) - np.asarray(ycalc))**2)
    ss_tot = np.sum((np.asarray(y) - ymean)**2)

    return 1 - (ss_res/ss_tot)
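## A minimal sketch of calcRsqu in use (illustrative): rebuild the fitted
## Gaussian from the summary values returned by data_gaussian_fit and
## compare it against the data.
#
#   pkpos, pkfwhm, pkint = data_gaussian_fit(xdata, ydata)
#   sig = pkfwhm/(2*np.sqrt(2*math.log(2)))
#   r2 = calcRsqu(ydata, gaussian(xdata, pkint, pkpos, sig))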
def outliers(y, scale=0.2):
    for i, yi in enumerate(y):
        if i > 0 and i < (len(y)-1):
#             if yi > y[i-1]*scale and yi > y[i+1]*scale:
#                 y[i] = (y[i-1]+y[i+1])/2
            if abs(yi-y[i-1]) > yi/scale and abs(yi-y[i+1]) > yi/scale:
                y[i] = (y[i-1]+y[i+1])/2

    return y
def calc_broadening(pklist, twth, wavelength, u=1.0, v=1.0, w=1.0, C=1.0,
                    D=None, verbose=False, smooth=False):
    '''
    == Broadening calculation performed in 2theta - not q ==

    pklist     - location of peaks in range [q, 2th, d, I]
    twth       - 2-theta axis
    wavelength - in units A

    u, v, w    - instrumental broadening parameters
    C          - shape factor (1 for sphere)
    D          - particle size in units A (if D is None, no size broadening)
    '''
    ## TERMS FOR INSTRUMENTAL BROADENING
    thrad = np.radians(pklist[1]/2)
    Bi = np.sqrt( u*(np.tan(thrad))**2 + v*np.tan(thrad) + w )

    ## TERMS FOR SIZE BROADENING (Scherrer)
    ## FWHM(2th) = (C*wavelength)/(D*math.cos(math.radians(twth/2)))
    ## FWHM(q)   = (C*wavelength)/D*(1/np.sqrt(1-termB))
    if D is not None:
        termB = ((wavelength*pklist[0])/(2*math.pi))**2
        Bs = (C*wavelength)/D*(1/np.sqrt(1-termB))

    ## Define intensity array for broadened peaks
    Itot = np.zeros(len(twth))
    step = twth[1]-twth[0]

    ## Loop through all peaks
    for i, peak in enumerate(zip(*pklist)):
        if peak[1] > min(twth) and peak[1] < max(twth):
            A = peak[3]
            B = peak[1]

            ## Convert FWHM values to Gaussian sigmas
            c_i = Bi[i]/abs(2*np.sqrt(2*math.log(2)))
            if D is not None:
                c_s = Bs[i]/abs(2*np.sqrt(2*math.log(2)))
#                 Bm = np.sqrt(Bi[i]**2+Bs[i]**2)
#             else:
#                 Bm = np.sqrt(Bi[i]**2)
#             c_m = Bm/abs(2*np.sqrt(2*math.log(2)))

            width1 = max(Bs[i], Bi[i]) if D is not None else Bi[i]
            width2 = min(Bs[i], Bi[i]) if D is not None else Bi[i]

            min2th = B - 2*width1
            max2th = B + 2*width1

            ## np.linspace requires an integer sample count
            num = int(abs((max2th-min2th)/step))
            twthG = np.linspace(max2th, min2th, num)

            intBi = A*np.exp(-(twthG-B)**2/(2*c_i**2))
            if D is not None:
                intBs = A*np.exp(-(twthG-B)**2/(2*c_s**2))
#             intBm = A*np.exp(-(twthG-B)**2/(2*c_m**2))

            if D is not None:
                new_intensity = np.convolve(intBs, intBi, 'same')
            else:
                new_intensity = intBi
            new_intensity = norm_pattern(new_intensity, A)

            X = twthG
            Y = new_intensity

            ## Rebin the broadened peak onto the common 2theta axis
            bins = len(twth)
            idx = np.digitize(X, twth)
            Ii = [np.sum(Y[idx == k]) for k in range(bins)]
            Itot = Itot + Ii

    if smooth:
        Itot = outliers(Itot)
    return Itot
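## A minimal usage sketch (illustrative values, not from the original
## source): broaden calculated peaks with both instrumental (u, v, w)
## and Scherrer size terms (D in Angstroms). `pklist` rows follow the
## [q, 2theta, d, I] layout described in the docstring.
#
#   twth = np.linspace(5.0, 60.0, 5500)
#   Itot = calc_broadening(pklist, twth, wavelength=1.0,
#                          u=0.01, v=-0.002, w=0.005, C=0.9, D=250.0)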
##########################################################################
# def fit_with_minimization(q,I,parameters=None,fit_method='leastsq'):
#     '''
#     fit_method options: 'leastsq','cobyla','slsqp','nelder'
#
#     parameters of type Parameters(): needs a,b,c,nsize,pkshift?
#
#     my_pars = Parameters()
#     my_pars.add('nsize', value= NPsize, min= 3.0, max=100.0,vary=False)
#     my_pars.add('a', value=bkgdA, min=minA, max=maxA)
#     my_pars.add('b', value=bkgdB, min=minB, max=maxB)
#     my_pars.add('c', value=bkgdC, min=minC, max=maxC)
#     '''
#
#     from lmfit import minimize,Parameters,report_fit
#
#     # ## First, fit background alone:
#     # result = minimize(BACKGROUND_FUNCTION, my_pars, args=(q,I),method=fit_method)
#     # ## Second, add fitted parameters into parameter set
#     # my_pars.add('?', value=result.params['?'].value)
#     # ## Third, fit peaks on top of background
#     # result = minimize(BACKGROUND_FUNCTION+PEAKS_FUNCTION, my_pars, args=(q,I),method=fit_method)
#     # ## Fourth, view error report
#     # report_fit(result)
#     # ## Fifth, return results
#     # NPsize = result.params['nsize'].value
#     # bkgdA = result.params['a'].value
#     # bkgdB = result.params['b'].value
#     # bkgdC = result.params['c'].value
#
#
#     return