import numpy as np
import matplotlib.pyplot as plt
- from scipy import optimize
+ import scipy
from mantid.simpleapi import mtd, CreateEmptyTableWorkspace, SumSpectra, \
    CloneWorkspace, DeleteWorkspace, VesuvioCalculateGammaBackground, \
    VesuvioCalculateMS, Scale, RenameWorkspace, Minus, CreateSampleShape, \
    VesuvioThickness, Integration, Divide, Multiply, DeleteWorkspaces, \
    CreateWorkspace

- from mvesuvio.util.analysis_helpers import histToPointData, loadConstants, \
-     gaussian, lorentzian, numericalThirdDerivative
+ from mvesuvio.util.analysis_helpers import loadConstants, numericalThirdDerivative

from dataclasses import dataclass
@@ -69,10 +68,6 @@ def __init__(self, workspace, ip_file, h_ratio_to_lowest_mass, number_of_iterati
        self._table_fit_results = None
        self._fit_profiles_workspaces = {}

-         # Only used for system tests, remove once tests are updated
-         self._run_hist_data = True
-         self._run_norm_voigt = False
-

    def add_profiles(self, *args: NeutronComptonProfile):
        for profile in args:
@@ -152,9 +147,6 @@ def _update_workspace_data(self):
        self._dataY = self._workspace_being_fit.extractY()
        self._dataE = self._workspace_being_fit.extractE()

-         if self._run_hist_data:  # Converts point data from workspaces to histogram data
-             self._dataY, self._dataX, self._dataE = histToPointData(self._dataY, self._dataX, self._dataE)
-
        self._set_up_kinematic_arrays()

        self._fit_parameters = np.zeros((len(self._dataY), 3 * len(self._profiles) + 3))
@@ -593,7 +585,7 @@ def _fit_neutron_compton_profiles_to_row(self):
        for attr in ['intensity_bounds', 'width_bounds', 'center_bounds']:
            bounds.append(getattr(p, attr))

-         result = optimize.minimize(
+         result = scipy.optimize.minimize(
            self.errorFunction,
            initial_parameters,
            method="SLSQP",
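The replacement call goes through scipy.optimize.minimize with method="SLSQP", which takes the bounds assembled just above. A minimal, self-contained sketch of that call pattern (error_function, x0 and the bound values below are illustrative placeholders, not names from this file):

import numpy as np
import scipy.optimize

def error_function(pars):
    # Toy least-squares objective standing in for the class's errorFunction
    return float(np.sum((pars - np.array([1.0, 5.0, 0.0])) ** 2))

x0 = np.array([0.5, 4.0, 0.5])            # initial parameter guesses
bounds = [(0, None), (3, 6), (-1, 1)]     # one (low, high) pair per parameter
result = scipy.optimize.minimize(error_function, x0, method="SLSQP", bounds=bounds)
print(result.x, result.fun)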
@@ -658,7 +650,7 @@ def _neutron_compton_profiles(self, pars):
        gaussRes, lorzRes = self.caculateResolutionForEachMass(centers)
        totalGaussWidth = np.sqrt(widths**2 + gaussRes**2)

-         JOfY = self.pseudoVoigt(self._y_space_arrays[self._row_being_fit] - centers, totalGaussWidth, lorzRes)
+         JOfY = scipy.special.voigt_profile(self._y_space_arrays[self._row_being_fit] - centers, totalGaussWidth, lorzRes)

        FSE = (
            -numericalThirdDerivative(self._y_space_arrays[self._row_being_fit], JOfY)
@@ -771,20 +763,6 @@ def calcLorentzianResolution(self, centers):
        return lorentzianResWidth


-     def pseudoVoigt(self, x, sigma, gamma):
-         """Convolution between Gaussian with std sigma and Lorentzian with HWHM gamma"""
-         fg, fl = 2.0 * sigma * np.sqrt(2.0 * np.log(2.0)), 2.0 * gamma
-         f = 0.5346 * fl + np.sqrt(0.2166 * fl**2 + fg**2)
-         eta = 1.36603 * fl / f - 0.47719 * (fl / f) ** 2 + 0.11116 * (fl / f) ** 3
-         sigma_v, gamma_v = f / (2.0 * np.sqrt(2.0 * np.log(2.0))), f / 2.0
-         pseudo_voigt = eta * lorentzian(x, gamma_v) + (1.0 - eta) * gaussian(x, sigma_v)
-
-         norm = (
-             np.abs(np.trapz(pseudo_voigt, x, axis=1))[:, np.newaxis] if self._run_norm_voigt else 1
-         )
-         return pseudo_voigt / norm
-
-
    def _get_parsed_constraints(self):

        parsed_constraints = []
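scipy.special.voigt_profile(x, sigma, gamma) evaluates the exact Voigt function (a Gaussian of standard deviation sigma convolved with a Lorentzian of half-width gamma), which is what the removed pseudoVoigt method approximated. A short sketch comparing the two, where the pseudo_voigt helper below is a local re-implementation for illustration only, written with normalized Gaussian and Lorentzian forms:

import numpy as np
from scipy.special import voigt_profile

def pseudo_voigt(x, sigma, gamma):
    # Mix a normalized Lorentzian and Gaussian using the same FWHM/eta
    # approximation as the removed method
    fg, fl = 2.0 * sigma * np.sqrt(2.0 * np.log(2.0)), 2.0 * gamma
    f = 0.5346 * fl + np.sqrt(0.2166 * fl**2 + fg**2)
    eta = 1.36603 * (fl / f) - 0.47719 * (fl / f) ** 2 + 0.11116 * (fl / f) ** 3
    sigma_v, gamma_v = f / (2.0 * np.sqrt(2.0 * np.log(2.0))), f / 2.0
    gauss = np.exp(-x**2 / (2.0 * sigma_v**2)) / (sigma_v * np.sqrt(2.0 * np.pi))
    lorentz = gamma_v / np.pi / (x**2 + gamma_v**2)
    return eta * lorentz + (1.0 - eta) * gauss

x = np.linspace(-10, 10, 201)
approx = pseudo_voigt(x, sigma=1.0, gamma=0.5)
exact = voigt_profile(x, 1.0, 0.5)
print(np.max(np.abs(approx - exact)))   # residual of the approximation against the exact profile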
@@ -836,8 +814,7 @@ def _replace_zero_columns_with_ncp_fit(self):
            OutputWorkspace=self._workspace_for_corrections.name() + "_CorrectionsInput"
        )
        for row in range(self._workspace_for_corrections.getNumberHistograms()):
-             # TODO: Once the option to change point to hist is removed, remove [:len(ncp)]
-             self._workspace_for_corrections.dataY(row)[self._zero_columns_boolean_mask] = ncp[row, self._zero_columns_boolean_mask[:len(ncp[row])]]
+             self._workspace_for_corrections.dataY(row)[self._zero_columns_boolean_mask] = ncp[row, self._zero_columns_boolean_mask]

        SumSpectra(
            InputWorkspace=self._workspace_for_corrections.name(),
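With the point-to-histogram option gone, each ncp row has the same length as the corresponding workspace row, so one boolean mask can index both sides without the old [:len(ncp[row])] trimming. A toy numpy sketch of that column replacement (all names below are made up for illustration):

import numpy as np

data = np.array([[1.0, 0.0, 3.0, 0.0],
                 [4.0, 0.0, 6.0, 0.0]])
ncp_fit = np.array([[1.1, 2.1, 3.1, 4.1],
                    [4.2, 5.2, 6.2, 7.2]])
zero_columns_mask = np.all(data == 0, axis=0)   # columns that are zero in every row
for row in range(data.shape[0]):
    data[row, zero_columns_mask] = ncp_fit[row, zero_columns_mask]
print(data)   # zeroed columns now hold the fitted values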
@@ -1082,7 +1059,6 @@ def _set_results(self):
        self.all_spec_best_par_chi_nit = np.array(allBestPar)
        self.all_tot_ncp = np.array(allTotNcp)
        self.all_ncp_for_each_mass = np.array(allIterNcp)
-
        self.all_mean_widths = np.array(allMeanWidhts)
        self.all_mean_intensities = np.array(allMeanIntensities)
        self.all_std_widths = np.array(allStdWidths)
@@ -1091,14 +1067,6 @@ def _set_results(self):
    def _save_results(self):
        """Saves all of the arrays stored in this object"""

-         maskedDetectorIdx = np.array(self._mask_spectra) - min(self._workspace_being_fit.getSpectrumNumbers())
-
-         # TODO: Take out nans next time when running original results
-         # Because original results were recently saved with nans, mask spectra with nans
-         self.all_spec_best_par_chi_nit[:, maskedDetectorIdx, :] = np.nan
-         self.all_ncp_for_each_mass[:, maskedDetectorIdx, :, :] = np.nan
-         self.all_tot_ncp[:, maskedDetectorIdx, :] = np.nan
-
        if not self._save_results_path:
            return