diff --git a/src/mitim_modules/freegsu/utils/FREEGSUparams.py b/src/mitim_modules/freegsu/utils/FREEGSUparams.py index bd84cee9..f8c13f8a 100644 --- a/src/mitim_modules/freegsu/utils/FREEGSUparams.py +++ b/src/mitim_modules/freegsu/utils/FREEGSUparams.py @@ -108,7 +108,7 @@ def createProblemParameters( dvs_min.extend(dvs_min2) dvs_max.extend(dvs_max2) - transformation = produceNewInputs + transformation = input_transform_freegs else: transformation = None @@ -376,7 +376,7 @@ def extractCont(x, cont): return v -def produceNewInputs(X, output, bounds, ParamProfile): +def input_transform_freegs(X, output, bounds, ParamProfile): """ X will be a tensor (with or without gradients) batch*dim, unnormalized """ diff --git a/src/mitim_modules/maestro/tmp_tests/maestro_test1.py b/src/mitim_modules/maestro/tmp_tests/maestro_test1.py index f155818a..e1018d28 100644 --- a/src/mitim_modules/maestro/tmp_tests/maestro_test1.py +++ b/src/mitim_modules/maestro/tmp_tests/maestro_test1.py @@ -46,7 +46,7 @@ "MODELparameters": { "RoaLocations": [0.35,0.55,0.75,0.875,0.9], "ProfilesPredicted": ["te", "ti", "ne"], "Physics_options": {"TypeTarget": 3}, - "transport_model": {"turbulence":'TGLF',"TGLFsettings": 6, "extraOptionsTGLF": {'USE_BPER':True}}}, + "transport_model": {"TGLFsettings": 6, "extraOptionsTGLF": {'USE_BPER':True}}}, "INITparameters": {"FastIsThermal": True, "removeIons": [5,6], "quasineutrality": True}, "optimization_options": { "convergence_options": { diff --git a/src/mitim_modules/maestro/utils/EPEDbeat.py b/src/mitim_modules/maestro/utils/EPEDbeat.py index 6167ec0a..0b9303e8 100644 --- a/src/mitim_modules/maestro/utils/EPEDbeat.py +++ b/src/mitim_modules/maestro/utils/EPEDbeat.py @@ -10,7 +10,7 @@ from mitim_tools.popcon_tools import FunctionalForms from mitim_tools.misc_tools.LOGtools import printMsg as print from mitim_modules.maestro.utils.MAESTRObeat import beat -from mitim_modules.powertorch.physics import CALCtools +from mitim_modules.powertorch.utils import CALCtools from IPython import embed # <> Function to interpolate a curve <> diff --git a/src/mitim_modules/maestro/utils/PORTALSbeat.py b/src/mitim_modules/maestro/utils/PORTALSbeat.py index 8216c5aa..2ce28e1e 100644 --- a/src/mitim_modules/maestro/utils/PORTALSbeat.py +++ b/src/mitim_modules/maestro/utils/PORTALSbeat.py @@ -44,7 +44,7 @@ def prepare(self, profiles = self.profiles_current for i in range(len(profiles.Species)): - data_df = pd.read_csv(__mitimroot__ / "src" / "mitim_modules" / "powertorch" / "physics" / "radiation_chebyshev.csv") + data_df = pd.read_csv(__mitimroot__ / "src" / "mitim_modules" / "powertorch" / "physics_models" / "radiation_chebyshev.csv") if not (data_df['Ion'].str.lower()==profiles.Species[i]["N"].lower()).any(): print(f"\t\t- {profiles.Species[i]['N']} not found in radiation table, looking for closest Z (+- 5) USING THE Z SPECIFIED IN THE INPUT.GACODE (fully stripped assumption)",typeMsg='w') # Find closest Z @@ -56,7 +56,7 @@ def prepare(self, new_name = data_df['Ion'][iZ] - print(f"\t\t\t- Changing name of ion from {profiles.Species[i]["N"]} ({profiles.Species[i]["Z"]}) to {new_name} ({Z[iZ]})") + print(f'\t\t\t- Changing name of ion from {profiles.Species[i]["N"]} ({profiles.Species[i]["Z"]}) to {new_name} ({Z[iZ]})') profiles.profiles['name'][i] = profiles.Species[i]["N"] = new_name @@ -344,7 +344,7 @@ def _inform(self, use_previous_residual = True, use_previous_surrogate_data = Tr # In the situation where the last radial location moves, I cannot reuse that surrogate data if 
last_radial_location_moved and reusing_surrogate_data: print('\t\t- Last radial location was moved, so surrogate data will not be reused for that specific location') - self.optimization_options['surrogate_options']["extrapointsModelsAvoidContent"] = ['Tar',f'_{len(self.MODELparameters[strKeys])}'] + self.optimization_options['surrogate_options']["extrapointsModelsAvoidContent"] = ['_tar',f'_{len(self.MODELparameters[strKeys])}'] self.try_flux_match_only_for_first_point = False def _inform_save(self): diff --git a/src/mitim_modules/portals/PORTALSmain.py b/src/mitim_modules/portals/PORTALSmain.py index 6fb6db59..a75d9351 100644 --- a/src/mitim_modules/portals/PORTALSmain.py +++ b/src/mitim_modules/portals/PORTALSmain.py @@ -7,14 +7,13 @@ from collections import OrderedDict from mitim_tools.misc_tools import IOtools from mitim_tools.gacode_tools import PROFILEStools -from mitim_tools.gacode_tools.utils import PORTALSinteraction from mitim_modules.portals import PORTALStools from mitim_modules.portals.utils import ( PORTALSinit, PORTALSoptimization, PORTALSanalysis, ) -from mitim_modules.powertorch.physics import TRANSPORTtools, TARGETStools +from mitim_modules.powertorch.physics_models import targets_analytic, transport_tgyro, transport_cgyro from mitim_tools.opt_tools import STRATEGYtools from mitim_tools.opt_tools.utils import BOgraphics from mitim_tools.misc_tools.LOGtools import printMsg as print @@ -64,7 +63,7 @@ def default_namelist(optimization_options, CGYROrun=False): optimization_options['acquisition_options']['relative_improvement_for_stopping'] = 1e-2 # Surrogate - optimization_options["surrogate_options"]["selectSurrogate"] = partial(PORTALStools.selectSurrogate, CGYROrun=CGYROrun) + optimization_options["surrogate_options"]["surrogate_selection"] = partial(PORTALStools.surrogate_selection_portals, CGYROrun=CGYROrun) if CGYROrun: # CGYRO runs should prioritize accuracy @@ -171,7 +170,7 @@ def __init__( "Tfast_ratio": False, # Keep the ratio of Tfast/Te constant throughout the Te evolution "ensureMachNumber": None, # Change w0 to match this Mach number when Ti varies }, - "transport_model": {"turbulence":'TGLF',"TGLFsettings": 6, "extraOptionsTGLF": {}} + "transport_model": {"TGLFsettings": 6, "extraOptionsTGLF": {}} } for key in self.MODELparameters.keys(): @@ -193,8 +192,12 @@ def __init__( """ # Selection of model - transport_evaluator = TRANSPORTtools.tgyro_model - targets_evaluator = TARGETStools.analytical_model + if CGYROrun: + transport_evaluator = transport_cgyro.cgyro_model + else: + transport_evaluator = transport_tgyro.tgyro_model + + targets_evaluator = targets_analytic.analytical_model self.PORTALSparameters = { "percentError": [5,10,1], # (%) Error (std, in percent) of model evaluation [TGLF (treated as minimum if scan trick), NEO, TARGET] @@ -341,7 +344,7 @@ def _define_reuse_models(self): ''' The user can define a list of strings to avoid reusing surrogates. e.g. 
- 'Tar' to avoid reusing targets + '_tar' to avoid reusing targets '_5' to avoid reusing position 5 ''' @@ -349,7 +352,7 @@ def _define_reuse_models(self): # Define avoiders if self.optimization_options['surrogate_options']['extrapointsModelsAvoidContent'] is None: - self.optimization_options['surrogate_options']['extrapointsModelsAvoidContent'] = ['Tar'] + self.optimization_options['surrogate_options']['extrapointsModelsAvoidContent'] = ['_tar'] # Define extrapointsModels for key in self.surrogate_parameters['surrogate_transformation_variables_lasttime'].keys(): @@ -416,17 +419,16 @@ def scalarized_objective(self, Y): ------------------------------------------------------------------------- Prepare transport dictionary ------------------------------------------------------------------------- - Note: var_dict['QeTurb'] must have shape (dim1...N, num_radii) + Note: var_dict['Qe_tr_turb'] must have shape (dim1...N, num_radii) """ var_dict = {} for of in ofs_ordered_names: - var, _ = of.split("_") + + var = '_'.join(of.split("_")[:-1]) if var not in var_dict: var_dict[var] = torch.Tensor().to(Y) - var_dict[var] = torch.cat( - (var_dict[var], Y[..., ofs_ordered_names == of]), dim=-1 - ) + var_dict[var] = torch.cat((var_dict[var], Y[..., ofs_ordered_names == of]), dim=-1) """ ------------------------------------------------------------------------- @@ -436,7 +438,7 @@ def scalarized_objective(self, Y): res must have shape (dim1...N) """ - of, cal, _, res = PORTALSinteraction.calculate_residuals(self.powerstate, self.PORTALSparameters,specific_vars=var_dict) + of, cal, _, res = PORTALStools.calculate_residuals(self.powerstate, self.PORTALSparameters,specific_vars=var_dict) return of, cal, res @@ -468,7 +470,7 @@ def check_flags(self): print("\t- Requested fineTargetsResolution, so running powerstate target calculations",typeMsg="w") self.PORTALSparameters["TargetCalc"] = "powerstate" - if not issubclass(self.PORTALSparameters["transport_evaluator"], TRANSPORTtools.tgyro_model) and (self.PORTALSparameters["TargetCalc"] == "tgyro"): + if not issubclass(self.PORTALSparameters["transport_evaluator"], transport_tgyro.tgyro_model) and (self.PORTALSparameters["TargetCalc"] == "tgyro"): print("\t- Requested TGYRO targets, but transport evaluator is not tgyro, so changing to powerstate",typeMsg="w") self.PORTALSparameters["TargetCalc"] = "powerstate" @@ -480,9 +482,6 @@ def check_flags(self): print("\t- In PORTALS TGYRO evaluations, we need to not recompute gradients (GradientsType=0)",typeMsg="i") self.MODELparameters["Physics_options"]["GradientsType"] = 0 - if 'TargetType' in self.MODELparameters["Physics_options"]: - raise Exception("\t- TargetType is not used in PORTALS anymore") - if self.PORTALSparameters["TargetCalc"] == "tgyro" and self.PORTALSparameters['profiles_postprocessing_fun'] is not None: print("\t- Requested custom modification of postprocessing function but targets from tgyro... 
are you sure?",typeMsg="q") @@ -555,7 +554,7 @@ def reuseTrainingTabular( self_copy.powerstate.TransportOptions["transport_evaluator"] = None self_copy.powerstate.TargetOptions["ModelOptions"]["TypeTarget"] = "powerstate" else: - self_copy.powerstate.TransportOptions["transport_evaluator"] = TRANSPORTtools.tgyro_model + self_copy.powerstate.TransportOptions["transport_evaluator"] = transport_tgyro.tgyro_model _, dictOFs = runModelEvaluator( self_copy, @@ -571,7 +570,7 @@ def reuseTrainingTabular( # ------------------------------------------------------------------------------------ for i in dictOFs: - if "Tar" in i: + if "_tar" in i: print(f"Changing {i} in file") optimization_data.data[i].iloc[numPORTALS] = dictOFs[i]["value"].cpu().numpy().item() @@ -646,41 +645,31 @@ def runModelEvaluator( return powerstate, dictOFs def map_powerstate_to_portals(powerstate, dictOFs): - """ - """ for var in powerstate.ProfilesPredicted: # Write in OFs for i in range(powerstate.plasma["rho"].shape[1] - 1): # Ignore position 0, which is rho=0 if var == "te": - var0, var1 = "Qe", "Pe" + var0, var1 = "Qe", "QeMWm2" elif var == "ti": - var0, var1 = "Qi", "Pi" + var0, var1 = "Qi", "QiMWm2" elif var == "ne": var0, var1 = "Ge", "Ce" elif var == "nZ": var0, var1 = "GZ", "CZ" elif var == "w0": - var0, var1 = "Mt", "Mt" + var0, var1 = "Mt", "MtJm2" """ TRANSPORT calculation --------------------- """ - dictOFs[f"{var0}Turb_{i+1}"]["value"] = powerstate.plasma[ - f"{var1}_tr_turb" - ][0, i+1] - dictOFs[f"{var0}Turb_{i+1}"]["error"] = powerstate.plasma[ - f"{var1}_tr_turb_stds" - ][0, i+1] + dictOFs[f"{var0}_tr_turb_{i+1}"]["value"] = powerstate.plasma[f"{var1}_tr_turb"][0, i+1] + dictOFs[f"{var0}_tr_turb_{i+1}"]["error"] = powerstate.plasma[f"{var1}_tr_turb_stds"][0, i+1] - dictOFs[f"{var0}Neo_{i+1}"]["value"] = powerstate.plasma[ - f"{var1}_tr_neo" - ][0, i+1] - dictOFs[f"{var0}Neo_{i+1}"]["error"] = powerstate.plasma[ - f"{var1}_tr_neo_stds" - ][0, i+1] + dictOFs[f"{var0}_tr_neo_{i+1}"]["value"] = powerstate.plasma[f"{var1}_tr_neo"][0, i+1] + dictOFs[f"{var0}_tr_neo_{i+1}"]["error"] = powerstate.plasma[f"{var1}_tr_neo_stds"][0, i+1] """ TARGET calculation @@ -688,12 +677,8 @@ def map_powerstate_to_portals(powerstate, dictOFs): If that radius & profile position has target, evaluate """ - dictOFs[f"{var0}Tar_{i+1}"]["value"] = powerstate.plasma[f"{var1}"][ - 0, i+1 - ] - dictOFs[f"{var0}Tar_{i+1}"]["error"] = powerstate.plasma[ - f"{var1}_stds" - ][0, i+1] + dictOFs[f"{var0}_tar_{i+1}"]["value"] = powerstate.plasma[f"{var1}"][0, i+1] + dictOFs[f"{var0}_tar_{i+1}"]["error"] = powerstate.plasma[f"{var1}_stds"][0, i+1] """ Turbulent Exchange @@ -701,12 +686,8 @@ def map_powerstate_to_portals(powerstate, dictOFs): """ if 'PexchTurb_1' in dictOFs: for i in range(powerstate.plasma["rho"].shape[1] - 1): - dictOFs[f"PexchTurb_{i+1}"]["value"] = powerstate.plasma["PexchTurb"][ - 0, i+1 - ] - dictOFs[f"PexchTurb_{i+1}"]["error"] = powerstate.plasma[ - "PexchTurb_stds" - ][0, i+1] + dictOFs[f"PexchTurb_{i+1}"]["value"] = powerstate.plasma["PexchTurb"][0, i+1] + dictOFs[f"PexchTurb_{i+1}"]["error"] = powerstate.plasma["PexchTurb_stds"][0, i+1] return dictOFs diff --git a/src/mitim_modules/portals/PORTALStools.py b/src/mitim_modules/portals/PORTALStools.py index 310cac4e..e97ed9f1 100644 --- a/src/mitim_modules/portals/PORTALStools.py +++ b/src/mitim_modules/portals/PORTALStools.py @@ -2,26 +2,25 @@ import gpytorch import copy import numpy as np +from collections import OrderedDict from mitim_tools.opt_tools import STRATEGYtools 
from mitim_tools.misc_tools import PLASMAtools -from collections import OrderedDict from mitim_tools.misc_tools.LOGtools import printMsg as print from IPython import embed -def selectSurrogate(output, surrogate_options, CGYROrun=False): +def surrogate_selection_portals(output, surrogate_options, CGYROrun=False): print(f'\t- Selecting surrogate options for "{output}" to be run') if output is not None: # If it's a target, just linear - if output[2:5] == "Tar": + if output[3:6] == "tar": surrogate_options["TypeMean"] = 1 surrogate_options["TypeKernel"] = 2 # Constant kernel - # If it's not, stndard + # If it's not, standard case for fluxes else: surrogate_options["TypeMean"] = 2 # Linear in gradients, constant in rest surrogate_options["TypeKernel"] = 1 # RBF - # surrogate_options['ExtraNoise'] = True surrogate_options["additional_constraints"] = { 'lenghtscale_constraint': gpytorch.constraints.constraints.GreaterThan(0.01) # inputs normalized to [0,1], this is 1% lengthscale @@ -94,7 +93,7 @@ def default_portals_transformation_variables(additional_params = []): return portals_transformation_variables, portals_transformation_variables_trace -def produceNewInputs(Xorig, output, surrogate_parameters, surrogate_transformation_variables): +def input_transform_portals(Xorig, output, surrogate_parameters, surrogate_transformation_variables): """ - Xorig will be a tensor (batch1...N,dim) unnormalized (with or without gradients). @@ -125,7 +124,7 @@ def produceNewInputs(Xorig, output, surrogate_parameters, surrogate_transformati initialize it with a larger batch """ - _, num = output.split("_") + num = output.split("_")[-1] index = powerstate.indexes_simulation[int(num)] # num=1 -> pos=1, so that it takes the second value in vectors xFit = torch.Tensor().to(X) @@ -146,13 +145,11 @@ def produceNewInputs(Xorig, output, surrogate_parameters, surrogate_transformati return xFit, parameters_combined - # ---------------------------------------------------------------------- # Transformation of Outputs # ---------------------------------------------------------------------- - -def transformPORTALS(X, surrogate_parameters, output): +def output_transform_portals(X, surrogate_parameters, output): """ 1. 
Make sure all batches are squeezed into a single dimension ------------------------------------------------------------------ @@ -226,8 +223,8 @@ def computeTurbExchangeIndividual(PexchTurb, powerstate): def GBfromXnorm(x, output, powerstate): # Decide, depending on the output here, which to use as normalization and at what location - varFull = output.split("_")[0] - pos = int(output.split("_")[1]) + varFull = '_'.join(output.split("_")[:-1]) + pos = int(output.split("_")[-1]) # Select GB unit if varFull[:2] == "Qe": @@ -253,7 +250,7 @@ def ImpurityGammaTrick(x, surrogate_parameters, output, powerstate): Trick to make GZ a function of a/Lnz only (flux as GammaZ_hat = GammaZ /nZ ) """ - pos = int(output.split("_")[1]) + pos = int(output.split("_")[-1]) if ("GZ" in output) and surrogate_parameters["applyImpurityGammaTrick"]: factor = powerstate.plasma["ni"][: x.shape[0],powerstate.indexes_simulation[pos],powerstate.impurityPosition].unsqueeze(-1) @@ -406,3 +403,248 @@ def stopping_criteria_portals(mitim_bo, parameters = {}): else: print("\t- No convergence yet, providing as iteration values the scalarized objective") return False, yvals + + + +def calculate_residuals(powerstate, PORTALSparameters, specific_vars=None): + """ + Notes + ----- + - Works with tensors + - It should be independent on how many dimensions it has, except that the last dimension is the multi-ofs + """ + + # Case where I have already constructed the dictionary (i.e. in scalarized objective) + if specific_vars is not None: + var_dict = specific_vars + # Prepare dictionary from powerstate (for use in Analysis) + else: + var_dict = {} + + mapper = { + "Qe_tr_turb": "QeMWm2_tr_turb", + "Qi_tr_turb": "QiMWm2_tr_turb", + "Ge_tr_turb": "Ce_tr_turb", + "GZ_tr_turb": "CZ_tr_turb", + "Mt_tr_turb": "MtJm2_tr_turb", + "Qe_tr_neo": "QeMWm2_tr_neo", + "Qi_tr_neo": "QiMWm2_tr_neo", + "Ge_tr_neo": "Ce_tr_neo", + "GZ_tr_neo": "CZ_tr_neo", + "Mt_tr_neo": "MtJm2_tr_neo", + "Qe_tar": "QeMWm2", + "Qi_tar": "QiMWm2", + "Ge_tar": "Ce", + "GZ_tar": "CZ", + "Mt_tar": "MtJm2", + "PexchTurb": "PexchTurb" + } + + for ikey in mapper: + var_dict[ikey] = powerstate.plasma[mapper[ikey]][..., 1:] + if mapper[ikey] + "_stds" in powerstate.plasma: + var_dict[ikey + "_stds"] = powerstate.plasma[mapper[ikey] + "_stds"][..., 1:] + else: + var_dict[ikey + "_stds"] = None + + dfT = list(var_dict.values())[0] # as a reference for sizes + + # ------------------------------------------------------------------------- + # Volume integrate energy exchange from MW/m^3 to a flux MW/m^2 to be added + # ------------------------------------------------------------------------- + + if PORTALSparameters["surrogateForTurbExch"]: + PexchTurb_integrated = computeTurbExchangeIndividual( + var_dict["PexchTurb"], powerstate + ) + else: + PexchTurb_integrated = torch.zeros(dfT.shape).to(dfT) + + # ------------------------------------------------------------------------ + # Go through each profile that needs to be predicted, calculate components + # ------------------------------------------------------------------------ + + of, cal, res = ( + torch.Tensor().to(dfT), + torch.Tensor().to(dfT), + torch.Tensor().to(dfT), + ) + for prof in powerstate.ProfilesPredicted: + if prof == "te": + var = "Qe" + elif prof == "ti": + var = "Qi" + elif prof == "ne": + var = "Ge" + elif prof == "nZ": + var = "GZ" + elif prof == "w0": + var = "Mt" + + """ + ----------------------------------------------------------------------------------- + Transport (_tr_turb+_tr_neo) + 
+        -----------------------------------------------------------------------------------
+        """
+        of0 = var_dict[f"{var}_tr_turb"] + var_dict[f"{var}_tr_neo"]
+
+        """
+        -----------------------------------------------------------------------------------
+        Target (Sum here the turbulent exchange power)
+        -----------------------------------------------------------------------------------
+        """
+        if var == "Qe":
+            cal0 = var_dict[f"{var}_tar"] + PexchTurb_integrated
+        elif var == "Qi":
+            cal0 = var_dict[f"{var}_tar"] - PexchTurb_integrated
+        else:
+            cal0 = var_dict[f"{var}_tar"]
+
+        """
+        -----------------------------------------------------------------------------------
+        Ad-hoc modifications for different weighting
+        -----------------------------------------------------------------------------------
+        """
+
+        if var == "Qe":
+            of0, cal0 = (
+                of0 * PORTALSparameters["Pseudo_multipliers"][0],
+                cal0 * PORTALSparameters["Pseudo_multipliers"][0],
+            )
+        elif var == "Qi":
+            of0, cal0 = (
+                of0 * PORTALSparameters["Pseudo_multipliers"][1],
+                cal0 * PORTALSparameters["Pseudo_multipliers"][1],
+            )
+        elif var == "Ge":
+            of0, cal0 = (
+                of0 * PORTALSparameters["Pseudo_multipliers"][2],
+                cal0 * PORTALSparameters["Pseudo_multipliers"][2],
+            )
+        elif var == "GZ":
+            of0, cal0 = (
+                of0 * PORTALSparameters["Pseudo_multipliers"][3],
+                cal0 * PORTALSparameters["Pseudo_multipliers"][3],
+            )
+        elif var == "Mt":
+            of0, cal0 = (
+                of0 * PORTALSparameters["Pseudo_multipliers"][4],
+                cal0 * PORTALSparameters["Pseudo_multipliers"][4],
+            )
+
+        of, cal = torch.cat((of, of0), dim=-1), torch.cat((cal, cal0), dim=-1)
+
+    # -----------
+    # Composition
+    # -----------
+
+    # Source term is (TARGET - TRANSPORT)
+    source = cal - of
+
+    # Residual is defined as the negative (bc it's maximization) normalized (1/N) norm of radial & channel residuals -> L2
+    res = -1 / source.shape[-1] * torch.norm(source, p=2, dim=-1)
+
+    return of, cal, source, res
+
+
+def calculate_residuals_distributions(powerstate, PORTALSparameters):
+    """
+    - Works with tensors
+    - It should be independent of how many dimensions it has, except that the last dimension is the multi-ofs
+    """
+
+    # Prepare dictionary from powerstate (for use in Analysis)
+
+    mapper = {
+        "Qe_tr_turb": "QeMWm2_tr_turb",
+        "Qi_tr_turb": "QiMWm2_tr_turb",
+        "Ge_tr_turb": "Ce_tr_turb",
+        "GZ_tr_turb": "CZ_tr_turb",
+        "Mt_tr_turb": "MtJm2_tr_turb",
+        "Qe_tr_neo": "QeMWm2_tr_neo",
+        "Qi_tr_neo": "QiMWm2_tr_neo",
+        "Ge_tr_neo": "Ce_tr_neo",
+        "GZ_tr_neo": "CZ_tr_neo",
+        "Mt_tr_neo": "MtJm2_tr_neo",
+        "Qe_tar": "QeMWm2",
+        "Qi_tar": "QiMWm2",
+        "Ge_tar": "Ce",
+        "GZ_tar": "CZ",
+        "Mt_tar": "MtJm2",
+        "PexchTurb": "PexchTurb"
+    }
+
+    var_dict = {}
+    for ikey in mapper:
+        var_dict[ikey] = powerstate.plasma[mapper[ikey]][:, 1:]
+        if mapper[ikey] + "_stds" in powerstate.plasma:
+            var_dict[ikey + "_stds"] = powerstate.plasma[mapper[ikey] + "_stds"][:, 1:]
+        else:
+            var_dict[ikey + "_stds"] = None
+
+    dfT = var_dict["Qe_tr_turb"]  # as a reference for sizes
+
+    # -------------------------------------------------------------------------
+    # Volume integrate energy exchange from MW/m^3 to a flux MW/m^2 to be added
+    # -------------------------------------------------------------------------
+
+    if PORTALSparameters["surrogateForTurbExch"]:
+        PexchTurb_integrated = computeTurbExchangeIndividual(
+            var_dict["PexchTurb"], powerstate
+        )
+        PexchTurb_integrated_stds = computeTurbExchangeIndividual(
+            var_dict["PexchTurb_stds"], powerstate
+        )
+    else:
+        PexchTurb_integrated = torch.zeros(dfT.shape).to(dfT)
+        PexchTurb_integrated_stds = torch.zeros(dfT.shape).to(dfT)
+
+    # ------------------------------------------------------------------------
+    # Go through each profile that needs to be predicted, calculate components
+    # ------------------------------------------------------------------------
+
+    of, cal = torch.Tensor().to(dfT), torch.Tensor().to(dfT)
+    ofE, calE = torch.Tensor().to(dfT), torch.Tensor().to(dfT)
+    for prof in powerstate.ProfilesPredicted:
+        if prof == "te":
+            var = "Qe"
+        elif prof == "ti":
+            var = "Qi"
+        elif prof == "ne":
+            var = "Ge"
+        elif prof == "nZ":
+            var = "GZ"
+        elif prof == "w0":
+            var = "Mt"
+
+        """
+        -----------------------------------------------------------------------------------
+        Transport (_tr_turb+_tr_neo)
+        -----------------------------------------------------------------------------------
+        """
+        of0 = var_dict[f"{var}_tr_turb"] + var_dict[f"{var}_tr_neo"]
+        of0E = (var_dict[f"{var}_tr_turb_stds"] ** 2 + var_dict[f"{var}_tr_neo_stds"] ** 2) ** 0.5
+
+        """
+        -----------------------------------------------------------------------------------
+        Target (Sum here the turbulent exchange power)
+        -----------------------------------------------------------------------------------
+        """
+        if var == "Qe":
+            cal0 = var_dict[f"{var}_tar"] + PexchTurb_integrated
+            cal0E = (
+                var_dict[f"{var}_tar_stds"] ** 2 + PexchTurb_integrated_stds**2
+            ) ** 0.5
+        elif var == "Qi":
+            cal0 = var_dict[f"{var}_tar"] - PexchTurb_integrated
+            cal0E = (
+                var_dict[f"{var}_tar_stds"] ** 2 + PexchTurb_integrated_stds**2
+            ) ** 0.5
+        else:
+            cal0 = var_dict[f"{var}_tar"]
+            cal0E = var_dict[f"{var}_tar_stds"]
+
+        of, cal = torch.cat((of, of0), dim=-1), torch.cat((cal, cal0), dim=-1)
+        ofE, calE = torch.cat((ofE, of0E), dim=-1), torch.cat((calE, cal0E), dim=-1)
+
+    return of, cal, ofE, calE
diff --git a/src/mitim_modules/portals/utils/PORTALSanalysis.py b/src/mitim_modules/portals/utils/PORTALSanalysis.py
index 3f5aee3d..bc737768 100644
--- a/src/mitim_modules/portals/utils/PORTALSanalysis.py
+++ b/src/mitim_modules/portals/utils/PORTALSanalysis.py
@@ -6,8 +6,8 @@
 from mitim_tools.opt_tools import STRATEGYtools
 from mitim_tools.misc_tools import IOtools, PLASMAtools, GRAPHICStools
 from mitim_tools.gacode_tools import TGLFtools, TGYROtools, PROFILEStools
-from mitim_tools.gacode_tools.utils import PORTALSinteraction
 from mitim_modules.portals.utils import PORTALSplot
+from mitim_modules.portals import PORTALStools
 from mitim_modules.powertorch import STATEtools
 from mitim_modules.powertorch.utils import POWERplot
 from mitim_tools.misc_tools.LOGtools import printMsg as print
@@ -240,7 +240,7 @@ def prep_metrics(self, ilast=None):
         # Residual definitions
         # ------------------------------------------------
 
-        _, _, source, res = PORTALSinteraction.calculate_residuals(
+        _, _, source, res = PORTALStools.calculate_residuals(
             power,
             self.PORTALSparameters,
         )
@@ -284,7 +284,7 @@ def prep_metrics(self, ilast=None):
             y2,
             y1_std,
             y2_std,
-        ) = PORTALSinteraction.calculate_residuals_distributions(
+        ) = PORTALStools.calculate_residuals_distributions(
             power,
             self.PORTALSparameters,
         )
@@ -474,7 +474,7 @@ def extractModels(self, step=-1):
         1. Look at the dictionary keys to see which models are available:
             models.keys()
         2. Select one model and print its information (e.g. variable labels and order):
-            m = models['QeTurb_1']
+            m = models['Qe_tr_turb_1']
             m.printInfo()
         3.
Trained points are stored as m.x, m.y, m.yvar, and you can make predictions with: x_test = m.x @@ -736,7 +736,7 @@ def __init__(self, gpdict): self._training_outputs = {} if isinstance(gpdict, dict): for key in gpdict: - if 'Tar' in key: + if '_tar' in key: self._targets[key] = gpdict[key] else: self._models[key] = gpdict[key] @@ -980,7 +980,7 @@ def __init__(self, folder): for i in range(100): try: p = STATEtools.read_saved_state( - self.folder / "Initialization" / "initialization_simple_relax" / f"portals_sr_{IOtools.reducePathLevel(self.folder)[1]}_ev_{i}" / "powerstate.pkl" + self.folder / "Initialization" / "initialization_simple_relax" / f"portals_sr_ev_{i}" / "powerstate.pkl" ) except FileNotFoundError: break @@ -1094,3 +1094,5 @@ def plotMetrics(self, extra_lab="", **kwargs): axs[0].legend(prop={"size": 8}) axsGrads[0].legend(prop={"size": 8}) + + diff --git a/src/mitim_modules/portals/utils/PORTALScgyro.py b/src/mitim_modules/portals/utils/PORTALScgyro.py deleted file mode 100644 index d2752041..00000000 --- a/src/mitim_modules/portals/utils/PORTALScgyro.py +++ /dev/null @@ -1,839 +0,0 @@ -import shutil -import copy -import numpy as np -from IPython import embed -from mitim_tools.misc_tools import IOtools, PLASMAtools -from mitim_tools.gacode_tools import PROFILEStools, TGYROtools -from mitim_tools.misc_tools.LOGtools import printMsg as print - -""" -__________________ -To run standalone: - run ~/MITIM/mitim_opt/mitim/utils/PORTALScgyro.py ./run5/ ~/PRF/mitim_cgyro/sparc_results.txt 0,1,2,3,4 -or - run ~/MITIM/mitim_opt/mitim/utils/PORTALScgyro.py ./run5/ ~/PRF/mitim_cgyro/sparc_results.txt 0[Evaluation.X] 0[position_in_txt] -__________________ -The CGYRO file must contain GB units, and the gb unit is MW/m^2, 1E19m^2/s -The CGYRO file must use particle flux. Convective transformation occurs later -""" - - -def evaluateCGYRO(PORTALSparameters, folder, numPORTALS, FolderEvaluation, unmodified_profiles, radii, ProfilesPredicted): - print("\n ** CGYRO evaluation of fluxes has been requested before passing information to the STRATEGY module **",typeMsg="i",) - - if isinstance(numPORTALS, int): - numPORTALS = str(numPORTALS) - - # ------------------------------------------------------------------------------------------------ - # Harcoded - # ------------------------------------------------------------------------------------------------ - if PORTALSparameters['hardCodedCGYRO'] is not None: - """ - train_sep is the number of initial runs in it#0 results file. Now, it's usually 1 - start_num is the number of the first iteration, usually 0 - trick_harcoded_f is the name of the file until the iteration number. E.g. 
'example_run/Outputs/cgyro_results/iter_rmp_75_' - - e.g.: - includeMtAndGz_hardcoded, train_sep,start_num,last_one,trick_hardcoded_f = True, 1, 0,100, 'example_run/Outputs/cgyro_results/d3d_5chan_it_' - - """ - - includeMtAndGz_hardcoded = PORTALSparameters["hardCodedCGYRO"]["includeMtAndGz_hardcoded"] - train_sep = PORTALSparameters["hardCodedCGYRO"]["train_sep"] - start_num = PORTALSparameters["hardCodedCGYRO"]["start_num"] - last_one = PORTALSparameters["hardCodedCGYRO"]["last_one"] - trick_hardcoded_f = PORTALSparameters["hardCodedCGYRO"]["trick_hardcoded_f"] - else: - includeMtAndGz_hardcoded = None - train_sep = None - start_num = None - last_one = None - trick_hardcoded_f = None - # ------------------------------------------------------------------------------------------------ - - minErrorPercent = PORTALSparameters["percentError_stable"] - Qi_criterion_stable = PORTALSparameters["Qi_criterion_stable"] - percentNeo = PORTALSparameters["percentError"][1] - useConvectiveFluxes = PORTALSparameters["useConvectiveFluxes"] - - try: - impurityPosition = PROFILEStools.impurity_location(PROFILEStools.PROFILES_GACODE(unmodified_profiles), PORTALSparameters["ImpurityOfInterest"]) - except ValueError: - if 'nZ' in ProfilesPredicted: - raise ValueError(f"Impurity {PORTALSparameters['ImpurityOfInterest']} not found in the profiles and needed for CGYRO evaluation") - else: - impurityPosition = 0 - print(f'\t- Impurity location not found. Using hardcoded value of {impurityPosition}') - - OriginalFimp = PORTALSparameters["fImp_orig"] - - cgyroing_file = ( - lambda file_cgyro, numPORTALS_this=0, includeMtAndGz=False: cgyroing( - FolderEvaluation, - unmodified_profiles, - numPORTALS, - minErrorPercent, - Qi_criterion_stable, - useConvectiveFluxes, - percentNeo, - radii, - OriginalFimp=OriginalFimp, - evaluationsInFile=f"{numPORTALS_this}", - impurityPosition=impurityPosition, - file=file_cgyro, - includeMtAndGz=includeMtAndGz, - ) - ) - print(f"\t- Suggested function call for mitim evaluation {numPORTALS} (lambda for cgyroing):",typeMsg="i") - cgyropath = IOtools.expandPath(folder, ensurePathValid=True) / 'Outputs' / 'cgyro_results' / f'cgyro_it_{numPORTALS}.txt' - print(f"\tcgyroing_file('{cgyropath}')") - - print('\t- Then insert "exit" and RETURN', typeMsg="i") - if (trick_hardcoded_f is None) or (int(numPORTALS) > last_one): - embed() - else: - # ------------------------------------------------------------------ - # Hard-coded stuff for quick modifications - # ------------------------------------------------------------------ - if int(numPORTALS) < train_sep: - cgyroing_file( - f"{trick_hardcoded_f}{start_num}.txt", - numPORTALS_this=numPORTALS, - includeMtAndGz=includeMtAndGz_hardcoded, - ) - else: - cgyroing_file( - f"{trick_hardcoded_f}{int(numPORTALS)-train_sep+1+start_num}.txt", - numPORTALS_this=0, - includeMtAndGz=includeMtAndGz_hardcoded, - ) - - -def cgyroing( - FolderEvaluation, - unmodified_profiles, - evaluations, - minErrorPercent, - Qi_criterion_stable, - useConvectiveFluxes, - percentNeo, - radii, - OriginalFimp=1.0, - file=None, - evaluationsInFile=0, - impurityPosition=3, - includeMtAndGz=False, -): - """ - Variables need to have dimensions of (evaluation,rho) - """ - - evaluations = np.array([int(i) for i in evaluations.split(",")]) - evaluationsInFile = np.array([int(i) for i in evaluationsInFile.split(",")]) - - ( - aLTe, - aLTi, - aLne, - Q_gb, - Qe, - Qi, - Ge, - GZ, - Mt, - Pexch, - QeE, - QiE, - GeE, - GZE, - MtE, - PexchE, - _, - _, - ) = readCGYROresults(file, radii, 
includeMtAndGz=includeMtAndGz) - - cont = 0 - for i in evaluations: - k = evaluationsInFile[cont] - cont += 1 - - print( - f"\t- Modifying {IOtools.clipstr(FolderEvaluation)} with position {k} in CGYRO results file {IOtools.clipstr(file)}" - ) - - # Get TGYRO - tgyro = TGYROtools.TGYROoutput( - FolderEvaluation, - profiles=PROFILEStools.PROFILES_GACODE(unmodified_profiles), - ) - - # Quick checker of correct file - wasThisTheCorrectRun(aLTe, aLTi, aLne, Q_gb, tgyro) - - modifyResults( - Qe[k, :], - Qi[k, :], - Ge[k, :], - GZ[k, :], - Mt[k, :], - Pexch[k, :], - QeE[k, :], - QiE[k, :], - GeE[k, :], - GZE[k, :], - MtE[k, :], - PexchE[k, :], - tgyro, - FolderEvaluation, - minErrorPercent=minErrorPercent, - useConvectiveFluxes=useConvectiveFluxes, - Qi_criterion_stable=Qi_criterion_stable, - percentNeo=percentNeo, - impurityPosition=impurityPosition, - OriginalFimp=OriginalFimp, - ) - - -def wasThisTheCorrectRun(aLTe, aLTi, aLne, Q_gb, tgyro, ErrorRaised=0.005): - print("\t- Checking that this was the correct run...") - - tgyro_new = copy.deepcopy(tgyro) - tgyro_new.aLti = tgyro_new.aLti[:, 0, :] - - variables = [ - [aLTe, tgyro_new.aLte, "aLTe"], - [aLTi, tgyro_new.aLti, "aLTi"], - [aLne, tgyro_new.aLne, "aLne"], - [Q_gb, tgyro_new.Q_GB, "Qgb"], - ] - - for var in variables: - [c, t, n] = var - - for pos in range(c.shape[0]): - for i in range(c.shape[1]): - error = np.max(abs((t[pos, i + 1] - c[pos, i]) / t[pos, i + 1])) - print( - f"\t\t* Error in {n}[{i}] was {error*100.0:.2f}% (TGYRO {t[pos,i+1]:.3f} vs. CGYRO {c[pos,i]:.3f})", - typeMsg="w" if error > ErrorRaised else "", - ) - - -def readlineNTH(line, full_file=False, unnormalize=True): - s = line.split() - - i = 2 - roa = float(s[i]) - i += 3 - aLne = float(s[i]) - i += 3 - aLTi = float(s[i]) - i += 3 - aLTe = float(s[i]) - i += 3 - - Qi = float(s[i]) - i += 3 - Qi_std = float(s[i]) - i += 3 - Qe = float(s[i]) - i += 3 - Qe_std = float(s[i]) - i += 3 - Ge = float(s[i]) - i += 3 - Ge_std = float(s[i]) - i += 3 - - if full_file: - GZ = float(s[i]) - i += 3 - GZ_std = float(s[i]) - i += 3 - - Mt = float(s[i]) - i += 3 - Mt_std = float(s[i]) - i += 3 - - Pexch = float(s[i]) - i += 3 - Pexch_std = float(s[i]) - i += 3 - - Q_gb = float(s[i]) - i += 3 - G_gb = float(s[i]) * 1e-1 - i += 3 # From 1E19 to 1E20 - - if full_file: - Mt_gb = float(s[i]) - i += 3 - Pexch_gb = float(s[i]) - i += 3 - - tstart = float(s[i]) - i += 3 - tend = float(s[i]) - i += 3 - - if unnormalize: - QiReal = Qi * Q_gb - QiReal_std = Qi_std * Q_gb - QeReal = Qe * Q_gb - QeReal_std = Qe_std * Q_gb - GeReal = Ge * G_gb - GeReal_std = Ge_std * G_gb - else: - QiReal = Qi - QiReal_std = Qi_std - QeReal = Qe - QeReal_std = Qe_std - GeReal = Ge - GeReal_std = Ge_std - - if full_file: - if unnormalize: - GZReal = GZ * G_gb - GZReal_std = GZ_std * G_gb - - MtReal = Mt * Mt_gb - MtReal_std = Mt_std * Mt_gb - - PexchReal = Pexch * Pexch_gb - PexchReal_std = Pexch_std * Pexch_gb - else: - GZReal = GZ - GZReal_std = GZ_std - - MtReal = Mt - MtReal_std = Mt_std - - PexchReal = Pexch - PexchReal_std = Pexch_std - - return ( - roa, - aLTe, - aLTi, - aLne, - Q_gb, - QeReal, - QiReal, - GeReal, - GZReal, - MtReal, - PexchReal, - QeReal_std, - QiReal_std, - GeReal_std, - GZReal_std, - MtReal_std, - PexchReal_std, - tstart, - tend, - ) - else: - return ( - roa, - aLTe, - aLTi, - aLne, - Q_gb, - QeReal, - QiReal, - GeReal, - 0.0, - 0.0, - 0.0, - QeReal_std, - QiReal_std, - GeReal_std, - 0.0, - 0.0, - 0.0, - tstart, - tend, - ) - - -def readCGYROresults(file, radii, includeMtAndGz=False, 
unnormalize=True): - """ - Arrays are in (batch,radii) - MW/m^2 and 1E20 - """ - - with open(file, "r") as f: - lines = f.readlines() - - rad = len(radii) - num = len(lines) // rad - - roa = np.zeros((num, rad)) - aLTe = np.zeros((num, rad)) - aLTi = np.zeros((num, rad)) - aLne = np.zeros((num, rad)) - Q_gb = np.zeros((num, rad)) - - Qe = np.zeros((num, rad)) - Qe_std = np.zeros((num, rad)) - Qi = np.zeros((num, rad)) - Qi_std = np.zeros((num, rad)) - Ge = np.zeros((num, rad)) - Ge_std = np.zeros((num, rad)) - - GZ = np.zeros((num, rad)) - GZ_std = np.zeros((num, rad)) - - Mt = np.zeros((num, rad)) - Mt_std = np.zeros((num, rad)) - - Pexch = np.zeros((num, rad)) - Pexch_std = np.zeros((num, rad)) - - tstart = np.zeros((num, rad)) - tend = np.zeros((num, rad)) - - p = {} - for r in range(len(radii)): - p[r] = 0 - for i in range(len(lines)): - - # -------------------------------------------------------- - # Line not empty - # -------------------------------------------------------- - if len(lines[i].split()) < 10: - continue - - # -------------------------------------------------------- - # Read line - # -------------------------------------------------------- - ( - roa_read, - aLTe_read, - aLTi_read, - aLne_read, - Q_gb_read, - Qe_read, - Qi_read, - Ge_read, - GZ_read, - Mt_read, - Pexch_read, - Qe_std_read, - Qi_std_read, - Ge_std_read, - GZ_std_read, - Mt_std_read, - Pexch_std_read, - tstart_read, - tend_read, - ) = readlineNTH(lines[i], full_file=includeMtAndGz, unnormalize=unnormalize) - - # -------------------------------------------------------- - # Radial location position - # -------------------------------------------------------- - threshold_radii = 1E-4 - r = np.where(np.abs(radii-roa_read)> Creating powerstate object...') self.TransportOptions = TransportOptions @@ -90,11 +104,11 @@ def _ensure_ne_before_nz(lst): tensors will be the same as in self.ProfilesPredicted ''' self.profile_map = { - "te": ("Pe", "Pe_tr"), - "ti": ("Pi", "Pi_tr"), + "te": ("QeMWm2", "QeMWm2_tr"), + "ti": ("QiMWm2", "QiMWm2_tr"), "ne": ("Ce", "Ce_tr"), "nZ": ("CZ", "CZ_tr"), - "w0": ("Mt", "Mt_tr") + "w0": ("MtJm2", "MtJm2_tr") } # ------------------------------------------------------------------------------------- @@ -117,13 +131,20 @@ def _ensure_ne_before_nz(lst): self.labelsFM.append([f'aL{profile}', list(self.profile_map[profile])[0], list(self.profile_map[profile])[1]]) # ------------------------------------------------------------------------------------- - # input.gacode + # Object type (e.g. 
input.gacode) # ------------------------------------------------------------------------------------- - # Use a copy because I'm deriving, it may be expensive and I don't want to carry that out outside of this class - self.profiles = copy.deepcopy(profiles) - if "derived" not in self.profiles.__dict__: - self.profiles.deriveQuantities() + if isinstance(profiles_object, PROFILEStools.PROFILES_GACODE): + self.to_powerstate = TRANSFORMtools.gacode_to_powerstate + self.from_powerstate = MethodType(TRANSFORMtools.to_gacode, self) + + # Use a copy because I'm deriving, it may be expensive and I don't want to carry that out outside of this class + self.profiles = copy.deepcopy(profiles_object) + if "derived" not in self.profiles.__dict__: + self.profiles.deriveQuantities() + + else: + raise ValueError("[MITIM] The input profile object is not recognized, please use PROFILES_GACODE") # ------------------------------------------------------------------------------------- # Fine targets (need to do it here so that it's only once per definition of powerstate) @@ -143,7 +164,7 @@ def _ensure_ne_before_nz(lst): TRANSFORMtools.improve_resolution_profiles(self.profiles, rho_vec) # Convert to powerstate - TRANSFORMtools.gacode_to_powerstate(self, self.profiles, self.plasma["rho"]) + self.to_powerstate(self) # Convert into a batch so that always the quantities are (batch,dimX) self.batch_size = 0 @@ -189,51 +210,12 @@ def _fine_grid(self): ) # Recalculate with higher resolution - TRANSFORMtools.gacode_to_powerstate(self, self.profiles, rho_new) + TRANSFORMtools.gacode_to_powerstate(self, rho_vec = rho_new) self.plasma_fine = copy.deepcopy(self.plasma) # Revert plasma back self.plasma = plasma_copy - def to_gacode( - self, - write_input_gacode=None, - position_in_powerstate_batch=0, - postprocess_input_gacode={}, - insert_highres_powers=False, - rederive_profiles=True, - ): - ''' - Notes: - - insert_highres_powers: whether to insert high resolution powers (will calculate them with powerstate targets object, not other custom ones) - ''' - print(">> Inserting powerstate into input.gacode") - - profiles = TRANSFORMtools.powerstate_to_gacode( - self, - position_in_powerstate_batch=position_in_powerstate_batch, - postprocess_input_gacode=postprocess_input_gacode, - insert_highres_powers=insert_highres_powers, - rederive=rederive_profiles, - ) - - # Write input.gacode - if write_input_gacode is not None: - write_input_gacode = Path(write_input_gacode) - print(f"\t- Writing input.gacode file: {IOtools.clipstr(write_input_gacode)}") - write_input_gacode.parent.mkdir(parents=True, exist_ok=True) - profiles.writeCurrentStatus(file=write_input_gacode) - - # If corrections modify the ions set... it's better to re-read, otherwise powerstate will be confused - if rederive_profiles: - TRANSFORMtools.defineIons(self, profiles, self.plasma["rho"][position_in_powerstate_batch, :], self.dfT) - # Repeat, that's how it's done earlier - self._repeat_tensors(batch_size=self.plasma["rho"].shape[0], - specific_keys=["ni","ions_set_mi","ions_set_Zi","ions_set_Dion","ions_set_Tion","ions_set_c_rad"], - positionToUnrepeat=None) - - return profiles - # ------------------------------------------------------------------ # Storing and combining # ------------------------------------------------------------------ @@ -311,8 +293,8 @@ def calculate( self.calculateProfileFunctions() # 3. Sources and sinks (populates components and Pe,Pi,...) 
- assumedPercentError = self.TransportOptions["ModelOptions"].get("percentError", [5, 1, 0.5])[-1] - self.calculateTargets(assumedPercentError=assumedPercentError) # Calculate targets based on powerstate functions (it may be overwritten in next step, if chosen) + relative_error_assumed = self.TransportOptions["ModelOptions"].get("percentError", [5, 1, 0.5])[-1] + self.calculateTargets(relative_error_assumed=relative_error_assumed) # Calculate targets based on powerstate functions (it may be overwritten in next step, if chosen) # 4. Turbulent and neoclassical transport (populates components and Pe_tr,Pi_tr,...) self.calculateTransport( @@ -414,9 +396,12 @@ def evaluator(X, y_history=None, x_history=None, metric_history=None): Xpass = X[best_candidate, :].detach() # Store values - if y_history is not None: y_history.append(yRes) - if x_history is not None: x_history.append(Xpass) - if metric_history is not None: metric_history.append(yMetric) + if y_history is not None: + y_history.append(yRes) + if x_history is not None: + x_history.append(Xpass) + if metric_history is not None: + metric_history.append(yMetric) return QTransport, QTarget, yMetric @@ -432,8 +417,7 @@ def evaluator(X, y_history=None, x_history=None, metric_history=None): _,Yopt, Xopt, metric_history = solver_fun(evaluator,x0, bounds=self.bounds_current,solver_options=solver_options) # For simplicity, return the trajectory of only the best candidate - self.FluxMatch_Yopt = Yopt - self.FluxMatch_Xopt = Xopt + self.FluxMatch_Yopt, self.FluxMatch_Xopt = Yopt, Xopt print("**********************************************************************************************") print(f"\t- Flux matching of powerstate finished, and took {IOtools.getTimeDifference(timeBeginning)}\n") @@ -563,7 +547,7 @@ def _cpu_tensors(self): if hasattr(self, 'profiles'): self.profiles.toNumpyArrays() - def update_var(self, name, var=None, specific_deparametrizer=None): + def update_var(self, name, var=None, specific_profile_constructor=None): """ This inserts gradients and updates coarse profiles @@ -577,17 +561,13 @@ def update_var(self, name, var=None, specific_deparametrizer=None): # General function to update a variable # ------------------------------------------------------------------------------------- - deparametrizers_choice = ( - self.deparametrizers_coarse - if specific_deparametrizer is None - else specific_deparametrizer - ) + profile_constructor_choice = self.profile_constructors_coarse if specific_profile_constructor is None else specific_profile_constructor def _update_plasma_var(var_key, clamp_min=None, clamp_max=None): if var is not None: self.plasma[f"aL{var_key}"][: var.shape[0], :] = var[:, :] aLT_withZero = self.plasma[f"aL{var_key}"] - _, varN = deparametrizers_choice[var_key]( + _, varN = profile_constructor_choice[var_key]( self.plasma["roa"], aLT_withZero) self.plasma[var_key] = varN.clamp(min=clamp_min, max=clamp_max) if ( (clamp_min is not None) or (clamp_max is not None) ) else varN self.plasma[f"aL{var_key}"] = torch.cat( @@ -694,7 +674,7 @@ def calculateProfileFunctions(self, calculateRotationQuantities=True, mref=2.013 self.plasma["w0_n"] = self.plasma["w0"] / self.plasma["c_s"] self.plasma["aLw0_n"] = (self.plasma["aLw0"] * self.plasma["w0"] / self.plasma["c_s"]) # aLw0 * w0 = -a*dw0/dr; then aLw0_n = -dw0/dr * a/c_s - def calculateTargets(self, assumedPercentError=1.0): + def calculateTargets(self, relative_error_assumed=1.0): """ Update the targets of the current state """ @@ -721,7 +701,7 @@ def calculateTargets(self, 
assumedPercentError=1.0): # Merge targets, calculate errors and normalize targets.postprocessing( - assumedPercentError=assumedPercentError, + relative_error_assumed=relative_error_assumed, useConvectiveFluxes=self.useConvectiveFluxes, forceZeroParticleFlux=self.TransportOptions["ModelOptions"].get("forceZeroParticleFlux", False)) @@ -758,7 +738,7 @@ def _concatenate_flux(plasma, profile_key, flux_key): plasma["P"] = torch.cat((plasma["P"], plasma[profile_key][:, 1:]), dim=1).to(plasma["P"].device) plasma["P_tr"] = torch.cat((plasma["P_tr"], plasma[flux_key][:, 1:]), dim=1).to(plasma["P"].device) - self.plasma["P"], self.plasma["P_tr"] = torch.Tensor().to(self.plasma["Pe"]), torch.Tensor().to(self.plasma["Pe"]) + self.plasma["P"], self.plasma["P_tr"] = torch.Tensor().to(self.plasma["QeMWm2"]), torch.Tensor().to(self.plasma["QeMWm2"]) for profile in self.ProfilesPredicted: _concatenate_flux(self.plasma, *self.profile_map[profile]) diff --git a/src/mitim_modules/powertorch/physics/TRANSPORTtools.py b/src/mitim_modules/powertorch/physics/TRANSPORTtools.py deleted file mode 100644 index 8955116c..00000000 --- a/src/mitim_modules/powertorch/physics/TRANSPORTtools.py +++ /dev/null @@ -1,881 +0,0 @@ -import copy -import shutil -import torch -import numpy as np -from mitim_tools.misc_tools import PLASMAtools, IOtools -from mitim_tools.gacode_tools import TGYROtools, PROFILEStools -from mitim_modules.portals.utils import PORTALScgyro -from mitim_tools.misc_tools.LOGtools import printMsg as print -from IPython import embed - -class power_transport: - ''' - Default class for power transport models, change "evaluate" method to implement a new model and produce_profiles if the model requires written input.gacode written - - Notes: - - After evaluation, the self.model_results attribute will contain the results of the model, which can be used for plotting and analysis - - model results can have .plot() method that can grab kwargs or be similar to TGYRO plot - - ''' - def __init__(self, powerstate, name = "test", folder = "~/scratch/", evaluation_number = 0): - - self.name = name - self.folder = IOtools.expandPath(folder) - self.evaluation_number = evaluation_number - self.powerstate = powerstate - - # Allowed fluxes in powerstate so far - self.quantities = ['Pe', 'Pi', 'Ce', 'CZ', 'Mt'] - - # Each flux has a turbulent and neoclassical component - self.variables = [f'{i}_tr_turb' for i in self.quantities] + [f'{i}_tr_neo' for i in self.quantities] - - # Each flux component has a standard deviation - self.variables += [f'{i}_stds' for i in self.variables] - - # There is also turbulent exchange - self.variables += ['PexchTurb', 'PexchTurb_stds'] - - # And total transport flux - self.variables += [f'{i}_tr' for i in self.quantities] - - # Model results is None by default, but can be assigned in evaluate - self.model_results = None - - # Assign zeros to transport ones if not evaluated - for i in self.variables: - self.powerstate.plasma[i] = self.powerstate.plasma["te"] * 0.0 - - # There is also target components - self.variables += [f'{i}' for i in self.quantities] + [f'{i}_stds' for i in self.quantities] - - # ---------------------------------------------------------------------------------------- - # labels for plotting - # ---------------------------------------------------------------------------------------- - - self.powerstate.labelsFluxes = { - "te": "$Q_e$ ($MW/m^2$)", - "ti": "$Q_i$ ($MW/m^2$)", - "ne": ( - "$Q_{conv}$ ($MW/m^2$)" - if 
self.powerstate.TransportOptions["ModelOptions"].get("useConvectiveFluxes", True) - else "$\\Gamma_e$ ($10^{20}/s/m^2$)" - ), - "nZ": ( - "$Q_{conv}$ $\\cdot f_{Z,0}$ ($MW/m^2$)" - if self.powerstate.TransportOptions["ModelOptions"].get("useConvectiveFluxes", True) - else "$\\Gamma_Z$ $\\cdot f_{Z,0}$ ($10^{20}/s/m^2$)" - ), - "w0": "$M_T$ ($J/m^2$)", - } - - def produce_profiles(self): - # Only add self._produce_profiles() if it's needed (e.g. full TGLF), otherwise this is somewhat expensive (e.g. for flux matching) - pass - - def _produce_profiles(self,deriveQuantities=True): - - self.applyCorrections = self.powerstate.TransportOptions["ModelOptions"].get("MODELparameters", {}).get("applyCorrections", {}) - - # Write this updated profiles class (with parameterized profiles and target powers) - self.file_profs = self.folder / "input.gacode" - - powerstate_detached = self.powerstate.copy_state() - - self.powerstate.profiles = powerstate_detached.to_gacode( - write_input_gacode=self.file_profs, - postprocess_input_gacode=self.applyCorrections, - rederive_profiles = deriveQuantities, # Derive quantities so that it's ready for analysis and plotting later - insert_highres_powers = deriveQuantities, # Insert powers so that Q, Pfus and all that it's consistent when read later - ) - - self.profiles_transport = copy.deepcopy(self.powerstate.profiles) - - self._modify_profiles() - - def _modify_profiles(self): - ''' - Modify the profiles (e.g. lumping) before running the transport model - ''' - - # After producing the profiles, copy for future modifications - self.file_profs_unmod = self.file_profs.parent / f"{self.file_profs.name}_unmodified" - shutil.copy2(self.file_profs, self.file_profs_unmod) - - profiles_postprocessing_fun = self.powerstate.TransportOptions["ModelOptions"].get("profiles_postprocessing_fun", None) - - if profiles_postprocessing_fun is not None: - print(f"\t- Modifying input.gacode to run transport calculations based on {profiles_postprocessing_fun}",typeMsg="i") - self.profiles_transport = profiles_postprocessing_fun(self.file_profs) - - # Position of impurity ion may have changed - p_old = PROFILEStools.PROFILES_GACODE(self.file_profs_unmod) - p_new = PROFILEStools.PROFILES_GACODE(self.file_profs) - - impurity_of_interest = p_old.Species[self.powerstate.impurityPosition] - - try: - impurityPosition_new = p_new.Species.index(impurity_of_interest) - - except ValueError: - print(f"\t- Impurity {impurity_of_interest} not found in new profiles, keeping position {self.powerstate.impurityPosition}",typeMsg="w") - impurityPosition_new = self.powerstate.impurityPosition - - if impurityPosition_new != self.powerstate.impurityPosition: - print(f"\t- Impurity position has changed from {self.powerstate.impurityPosition} to {impurityPosition_new}",typeMsg="w") - self.powerstate.impurityPosition_transport = p_new.Species.index(impurity_of_interest) - - # ---------------------------------------------------------------------------------------------------- - # EVALUATE (custom part) - # ---------------------------------------------------------------------------------------------------- - def evaluate(self): - ''' - This needs to populate the following in self.powerstate.plasma - - Pe, Pe_tr, Pe_tr_turb, Pe_tr_neo -> MW/m^2 - - Pi, Pi_tr, Pi_tr_turb, Pi_tr_neo -> MW/m^2 - - Ce, Ce_tr, Ce_tr_turb, Ce_tr_neo -> MW/m^2 - * Ce_raw, Ce_raw_tr, Ce_raw_tr_turb, Ce_raw_tr_neo -> 10^20/s/m^2 - - CZ, CZ_tr, CZ_tr_turb, CZ_tr_neo -> MW/m^2 (but modified as needed, for example dividing by fZ0) - * CZ_raw, 
CZ_raw_tr, CZ_raw_tr_turb, CZ_raw_tr_neo -> 10^20/s/m^2 (NOT modified) - - Mt, Mt_tr, Mt_tr_turb, Mt_tr_neo -> J/m^2 - - PexchTurb -> MW/m^3 - and their respective standard deviations - ''' - - print(">> No transport fluxes to evaluate", typeMsg="w") - pass - -# ---------------------------------------------------------------------------------------------------- -# FULL TGYRO -# ---------------------------------------------------------------------------------------------------- - -class tgyro_model(power_transport): - def __init__(self, powerstate, **kwargs): - super().__init__(powerstate, **kwargs) - - def produce_profiles(self): - self._produce_profiles() - - def evaluate(self): - - # ------------------------------------------------------------------------------------------------------------------------ - # Model Options - # ------------------------------------------------------------------------------------------------------------------------ - - ModelOptions = self.powerstate.TransportOptions["ModelOptions"] - - MODELparameters = ModelOptions.get("MODELparameters",None) - includeFast = ModelOptions.get("includeFastInQi",False) - useConvectiveFluxes = ModelOptions.get("useConvectiveFluxes", True) - UseFineGridTargets = ModelOptions.get("UseFineGridTargets", False) - launchMODELviaSlurm = ModelOptions.get("launchMODELviaSlurm", False) - cold_start = ModelOptions.get("cold_start", False) - provideTurbulentExchange = ModelOptions.get("TurbulentExchange", False) - OriginalFimp = ModelOptions.get("OriginalFimp", 1.0) - forceZeroParticleFlux = ModelOptions.get("forceZeroParticleFlux", False) - percentError = ModelOptions.get("percentError", [5, 1, 0.5]) - use_tglf_scan_trick = ModelOptions.get("use_tglf_scan_trick", None) - cores_per_tglf_instance = ModelOptions.get("extra_params", {}).get('PORTALSparameters', {}).get("cores_per_tglf_instance", 1) - - # Grab impurity from powerstate ( because it may have been modified in produce_profiles() ) - impurityPosition = self.powerstate.impurityPosition_transport #ModelOptions.get("impurityPosition", 1) - - # ------------------------------------------------------------------------------------------------------------------------ - # 1. tglf_neo_original: Run TGYRO workflow - TGLF + NEO in subfolder tglf_neo_original (original as in... 
without stds or merging) - # ------------------------------------------------------------------------------------------------------------------------ - - RadiisToRun = [self.powerstate.plasma["rho"][0, 1:][i].item() for i in range(len(self.powerstate.plasma["rho"][0, 1:]))] - - tgyro = TGYROtools.TGYRO(cdf=dummyCDF(self.folder, self.folder)) - tgyro.prep(self.folder, profilesclass_custom=self.profiles_transport) - - if launchMODELviaSlurm: - print("\t- Launching TGYRO evaluation as a batch job") - else: - print("\t- Launching TGYRO evaluation as a terminal job") - - tgyro.run( - subFolderTGYRO="tglf_neo_original", - cold_start=cold_start, - forceIfcold_start=True, - special_radii=RadiisToRun, - iterations=0, - PredictionSet=[ - int("te" in self.powerstate.ProfilesPredicted), - int("ti" in self.powerstate.ProfilesPredicted), - int("ne" in self.powerstate.ProfilesPredicted), - ], - TGLFsettings=MODELparameters["transport_model"]["TGLFsettings"], - extraOptionsTGLF=MODELparameters["transport_model"]["extraOptionsTGLF"], - TGYRO_physics_options=MODELparameters["Physics_options"], - launchSlurm=launchMODELviaSlurm, - minutesJob=5, - forcedName=self.name, - ) - - tgyro.read(label="tglf_neo_original") - - # Copy one with evaluated targets - self.file_profs_targets = tgyro.FolderTGYRO / "input.gacode.new" - - # ------------------------------------------------------------------------------------------------------------------------ - # 2. tglf_neo: Write TGLF, NEO and TARGET errors in tgyro files as well - # ------------------------------------------------------------------------------------------------------------------------ - - # Copy original TGYRO folder - if (self.folder / "tglf_neo").exists(): - IOtools.shutil_rmtree(self.folder / "tglf_neo") - shutil.copytree(self.folder / "tglf_neo_original", self.folder / "tglf_neo") - - # Add errors and merge fluxes as we would do if this was a CGYRO run - curateTGYROfiles( - tgyro, - "tglf_neo_original", - RadiisToRun, - self.powerstate.ProfilesPredicted, - self.folder / "tglf_neo", - percentError, - impurityPosition=impurityPosition, - includeFast=includeFast, - provideTurbulentExchange=provideTurbulentExchange, - use_tglf_scan_trick = use_tglf_scan_trick, - cold_start=cold_start, - extra_name = self.name, - cores_per_tglf_instance=cores_per_tglf_instance - ) - - # Read again to capture errors - tgyro.read(label="tglf_neo", folder=self.folder / "tglf_neo") - - # $$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$ - # Run TGLF standalone --> In preparation for the transition - # $$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$ - - # from mitim_tools.gacode_tools import TGLFtools - # tglf = TGLFtools.TGLF(rhos=RadiisToRun) - # _ = tglf.prep( - # self.folder / 'stds', - # inputgacode=self.file_profs, - # recalculatePTOT=False, # Use what's in the input.gacode, which is what PORTALS TGYRO does - # cold_start=cold_start) - - # tglf.run( - # subFolderTGLF="tglf_neo_original", - # TGLFsettings=MODELparameters["transport_model"]["TGLFsettings"], - # cold_start=cold_start, - # forceIfcold_start=True, - # extraOptions=MODELparameters["transport_model"]["extraOptionsTGLF"], - # launchSlurm=launchMODELviaSlurm, - # slurm_setup={"cores": 4, "minutes": 1}, - # ) - - # tglf.read(label="tglf_neo_original") - - # $$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$ - # $$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$ - 
- # ------------------------------------------------------------------------------------------------------------------------ - # 3. tglf_neo: Populate powerstate with the TGYRO results - # ------------------------------------------------------------------------------------------------------------------------ - - # Produce right quantities (TGYRO -> powerstate.plasma) - self.powerstate = tgyro.results["tglf_neo"].TGYROmodeledVariables( - self.powerstate, - useConvectiveFluxes=useConvectiveFluxes, - includeFast=includeFast, - impurityPosition=impurityPosition, - UseFineGridTargets=UseFineGridTargets, - OriginalFimp=OriginalFimp, - forceZeroParticleFlux=forceZeroParticleFlux, - provideTurbulentExchange=provideTurbulentExchange, - provideTargets=self.powerstate.TargetOptions['ModelOptions']['TargetCalc'] == "tgyro", - ) - - # ------------------------------------------------------------------------------------------------------------------------ - # 4. cgyro_neo: Trick to fake a tgyro output to reflect CGYRO - # ------------------------------------------------------------------------------------------------------------------------ - - if MODELparameters['transport_model']['turbulence'] == 'CGYRO': - - print("\t- Checking whether cgyro_neo folder exists and it was written correctly via cgyro_trick...") - - correctly_run = (self.folder / "cgyro_neo").exists() - if correctly_run: - print("\t\t- Folder exists, but was cgyro_trick run?") - with open(self.folder / "cgyro_neo" / "mitim_flag", "r") as f: - correctly_run = bool(float(f.readline())) - - if correctly_run: - print("\t\t\t* Yes, it was", typeMsg="w") - else: - print("\t\t\t* No, it was not, repating process", typeMsg="i") - - # Remove cgyro_neo folder - if (self.folder / "cgyro_neo").exists(): - IOtools.shutil_rmtree(self.folder / "cgyro_neo") - - # Copy tglf_neo results - shutil.copytree(self.folder / "tglf_neo", self.folder / "cgyro_neo") - - # CGYRO writter - cgyro_trick(self,self.folder / "cgyro_neo") - - # Read TGYRO files and construct portals variables - - tgyro.read(label="cgyro_neo", folder=self.folder / "cgyro_neo") - - powerstate_orig = copy.deepcopy(self.powerstate) - - self.powerstate = tgyro.results["cgyro_neo"].TGYROmodeledVariables( - self.powerstate, - useConvectiveFluxes=useConvectiveFluxes, - includeFast=includeFast, - impurityPosition=impurityPosition, - UseFineGridTargets=UseFineGridTargets, - OriginalFimp=OriginalFimp, - forceZeroParticleFlux=forceZeroParticleFlux, - provideTurbulentExchange=provideTurbulentExchange, - provideTargets=self.powerstate.TargetOptions['ModelOptions']['TargetCalc'] == "tgyro", - ) - - print("\t- Checking model modifications:") - for r in ["Pe_tr_turb", "Pi_tr_turb", "Ce_tr_turb", "CZ_tr_turb", "Mt_tr_turb"]: #, "PexchTurb"]: #TODO: FIX - print(f"\t\t{r}(tglf) = {' '.join([f'{k:.1e} (+-{ke:.1e})' for k,ke in zip(powerstate_orig.plasma[r][0][1:],powerstate_orig.plasma[r+'_stds'][0][1:]) ])}") - print(f"\t\t{r}(cgyro) = {' '.join([f'{k:.1e} (+-{ke:.1e})' for k,ke in zip(self.powerstate.plasma[r][0][1:],self.powerstate.plasma[r+'_stds'][0][1:]) ])}") - - # ** - tgyro.results["use"] = tgyro.results["cgyro_neo"] - - else: - # copy profiles too! 
- profilesToShare(self) - - # ** - tgyro.results["use"] = tgyro.results["tglf_neo"] - - # ------------------------------------------------------------------------------------------------------------------------ - # Results class that can be used for further plotting and analysis in PORTALS - # ------------------------------------------------------------------------------------------------------------------------ - - self.model_results = copy.deepcopy(tgyro.results["use"]) # Pass the TGYRO results class that should be use for plotting and analysis - - self.model_results.extra_analysis = {} - for ikey in tgyro.results: - if ikey != "use": - self.model_results.extra_analysis[ikey] = tgyro.results[ikey] - -def tglf_scan_trick( - fluxesTGYRO, - tgyro, - label, - RadiisToRun, - ProfilesPredicted, - impurityPosition=1, includeFast=False, - delta=0.02, - cold_start=False, - check_coincidence_thr=1E-2, - extra_name="", - remove_folders_out = False, - cores_per_tglf_instance = 4 # e.g. 4 core per radius, since this is going to launch ~ Nr=5 x (Nv=6 x Nd=2 + 1) = 65 TGLFs at once - ): - - print(f"\t- Running TGLF standalone scans ({delta = }) to determine relative errors") - - # Grab fluxes from TGYRO - Qe_tgyro, Qi_tgyro, Ge_tgyro, GZ_tgyro, Mt_tgyro, Pexch_tgyro = fluxesTGYRO - - # ------------------------------------------------------------------------------------------------------------------------ - # TGLF scans - # ------------------------------------------------------------------------------------------------------------------------ - - # Prepare scan - - tglf = tgyro.grab_tglf_objects(fromlabel=label, subfolder = 'tglf_explorations') - - variables_to_scan = [] - for i in ProfilesPredicted: - if i == 'te': variables_to_scan.append('RLTS_1') - if i == 'ti': variables_to_scan.append('RLTS_2') - if i == 'ne': variables_to_scan.append('RLNS_1') - if i == 'nZ': variables_to_scan.append(f'RLNS_{impurityPosition+2}') - if i == 'w0': variables_to_scan.append('VEXB_SHEAR') #TODO: is this correct? or VPAR_SHEAR? 
- - #TODO: Only if that parameter is changing at that location - if 'te' in ProfilesPredicted or 'ti' in ProfilesPredicted: - variables_to_scan.append('TAUS_2') - if 'te' in ProfilesPredicted or 'ne' in ProfilesPredicted: - variables_to_scan.append('XNUE') - if 'te' in ProfilesPredicted or 'ne' in ProfilesPredicted: - variables_to_scan.append('BETAE') - - relative_scan = [1-delta, 1+delta] - - name = 'turb_drives' - - tglf.rhos = RadiisToRun # To avoid the case in which TGYRO was run with an extra rho point - - # Estimate job minutes based on cases and cores (mostly IO I think at this moment, otherwise it should be independent on cases) - num_cases = len(RadiisToRun) * len(variables_to_scan) * len(relative_scan) - if cores_per_tglf_instance == 1: - minutes = 10 * (num_cases / 60) # Ad-hoc formula - else: - minutes = 1 * (num_cases / 60) # Ad-hoc formula - - # Enforce minimum minutes - minutes = max(2, minutes) - - tglf.runScanTurbulenceDrives( - subFolderTGLF = name, - variablesDrives = variables_to_scan, - varUpDown = relative_scan, - TGLFsettings = None, - ApplyCorrections = False, - add_baseline_to = 'first', - cold_start=cold_start, - forceIfcold_start=True, - slurm_setup={ - "cores": cores_per_tglf_instance, - "minutes": minutes, - }, - extra_name = f'{extra_name}_{name}', - positionIon=impurityPosition+2, - attempts_execution=2, - only_minimal_files=True, # Since I only care about fluxes here, do not retrieve all the files - ) - - # Remove folders because they are heavy to carry many throughout - if remove_folders_out: - IOtools.shutil_rmtree(tglf.FolderGACODE) - - Qe = np.zeros((len(RadiisToRun), len(variables_to_scan)*len(relative_scan)+1 )) - Qi = np.zeros((len(RadiisToRun), len(variables_to_scan)*len(relative_scan)+1 )) - Ge = np.zeros((len(RadiisToRun), len(variables_to_scan)*len(relative_scan)+1 )) - GZ = np.zeros((len(RadiisToRun), len(variables_to_scan)*len(relative_scan)+1 )) - - cont = 0 - for vari in variables_to_scan: - jump = tglf.scans[f'{name}_{vari}']['Qe'].shape[-1] - - Qe[:,cont:cont+jump] = tglf.scans[f'{name}_{vari}']['Qe'] - Qi[:,cont:cont+jump] = tglf.scans[f'{name}_{vari}']['Qi'] - Ge[:,cont:cont+jump] = tglf.scans[f'{name}_{vari}']['Ge'] - GZ[:,cont:cont+jump] = tglf.scans[f'{name}_{vari}']['Gi'] - cont += jump - - # ---------------------------------------------------- - # Do a check that TGLF scans are consistent with TGYRO - Qe_err = np.abs( (Qe[:,0] - Qe_tgyro) / Qe_tgyro ) if 'te' in ProfilesPredicted else np.zeros_like(Qe[:,0]) - Qi_err = np.abs( (Qi[:,0] - Qi_tgyro) / Qi_tgyro ) if 'ti' in ProfilesPredicted else np.zeros_like(Qi[:,0]) - Ge_err = np.abs( (Ge[:,0] - Ge_tgyro) / Ge_tgyro ) if 'ne' in ProfilesPredicted else np.zeros_like(Ge[:,0]) - GZ_err = np.abs( (GZ[:,0] - GZ_tgyro) / GZ_tgyro ) if 'nZ' in ProfilesPredicted else np.zeros_like(GZ[:,0]) - - F_err = np.concatenate((Qe_err, Qi_err, Ge_err, GZ_err)) - if F_err.max() > check_coincidence_thr: - print(f"\t- TGLF scans are not consistent with TGYRO, maximum error = {F_err.max()*100:.2f}%",typeMsg="w") - if 'te' in ProfilesPredicted: - print('\t\t* Qe:',Qe_err) - if 'ti' in ProfilesPredicted: - print('\t\t* Qi:',Qi_err) - if 'ne' in ProfilesPredicted: - print('\t\t* Ge:',Ge_err) - if 'nZ' in ProfilesPredicted: - print('\t\t* GZ:',GZ_err) - else: - print(f"\t- TGLF scans are consistent with TGYRO, maximum error = {F_err.max()*100:.2f}%") - # ---------------------------------------------------- - - # Calculate the standard deviation of the scans, that's going to be the reported stds - - def 
calculate_mean_std(Q): - # Assumes Q is [radii, points], with [radii, 0] being the baseline - - Qm = np.mean(Q, axis=1) - Qstd = np.std(Q, axis=1) - - # Qm = Q[:,0] - # Qstd = np.std(Q, axis=1) - - # Qstd = ( Q.max(axis=1)-Q.min(axis=1) )/2 /2 # Such that the range is 2*std - # Qm = Q.min(axis=1) + Qstd*2 # Mean is at the middle of the range - - return Qm, Qstd - - Qe_point, Qe_std = calculate_mean_std(Qe) - Qi_point, Qi_std = calculate_mean_std(Qi) - Ge_point, Ge_std = calculate_mean_std(Ge) - GZ_point, GZ_std = calculate_mean_std(GZ) - - #TODO: Implement Mt and Pexch - Mt_point, Pexch_point = Mt_tgyro, Pexch_tgyro - Mt_std, Pexch_std = abs(Mt_point) * 0.1, abs(Pexch_point) * 0.1 - - #TODO: Careful with fast particles - - return Qe_point, Qi_point, Ge_point, GZ_point, Mt_point, Pexch_point, Qe_std, Qi_std, Ge_std, GZ_std, Mt_std, Pexch_std - - -# ------------------------------------------------------------------ -# SIMPLE Diffusion (#TODO: implement with particle flux and the raw) -# ------------------------------------------------------------------ - -class diffusion_model(power_transport): - def __init__(self, powerstate, **kwargs): - super().__init__(powerstate, **kwargs) - - # Ensure that the provided diffusivities include the zero location - self.chi_e = self.powerstate.TransportOptions["ModelOptions"]["chi_e"] - self.chi_i = self.powerstate.TransportOptions["ModelOptions"]["chi_i"] - - if self.chi_e.shape[0] < self.powerstate.plasma['rho'].shape[-1]: - self.chi_e = torch.cat((torch.zeros(1), self.chi_e)) - - if self.chi_i.shape[0] < self.powerstate.plasma['rho'].shape[-1]: - self.chi_i = torch.cat((torch.zeros(1), self.chi_i)) - - def produce_profiles(self): - pass - - def evaluate(self): - - # Make sure the chis are applied to all the points in the batch - Pe_tr = PLASMAtools.conduction( - self.powerstate.plasma["ne"], - self.powerstate.plasma["te"], - self.chi_e.repeat(self.powerstate.plasma['rho'].shape[0],1), - self.powerstate.plasma["aLte"], - self.powerstate.plasma["a"].unsqueeze(-1), - ) - Pi_tr = PLASMAtools.conduction( - self.powerstate.plasma["ni"].sum(axis=-1), - self.powerstate.plasma["ti"], - self.chi_i.repeat(self.powerstate.plasma['rho'].shape[0],1), - self.powerstate.plasma["aLti"], - self.powerstate.plasma["a"].unsqueeze(-1), - ) - - self.powerstate.plasma["Pe_tr_turb"] = Pe_tr * 2 / 3 - self.powerstate.plasma["Pi_tr_turb"] = Pi_tr * 2 / 3 - - self.powerstate.plasma["Pe_tr_neo"] = Pe_tr * 1 / 3 - self.powerstate.plasma["Pi_tr_neo"] = Pi_tr * 1 / 3 - - self.powerstate.plasma["Pe_tr"] = self.powerstate.plasma["Pe_tr_turb"] + self.powerstate.plasma["Pe_tr_neo"] - self.powerstate.plasma["Pi_tr"] = self.powerstate.plasma["Pi_tr_turb"] + self.powerstate.plasma["Pi_tr_neo"] - -# ------------------------------------------------------------------ -# SURROGATE -# ------------------------------------------------------------------ - -class surrogate_model(power_transport): - def __init__(self, powerstate, **kwargs): - super().__init__(powerstate, **kwargs) - - def produce_profiles(self): - pass - - def evaluate(self): - - """ - flux_fun as given in ModelOptions must produce Q and Qtargets in order of te,ti,ne - """ - - X = torch.Tensor() - for prof in self.powerstate.ProfilesPredicted: - X = torch.cat((X,self.powerstate.plasma['aL'+prof][:,1:]),axis=1) - - _, Q, _, _ = self.powerstate.TransportOptions["ModelOptions"]["flux_fun"](X) - - numeach = self.powerstate.plasma["rho"].shape[1] - 1 - - quantities = { - "te": "Pe", - "ti": "Pi", - "ne": "Ce", - "nZ": "CZ", - "w0": "Mt", - 
} - - for c, i in enumerate(self.powerstate.ProfilesPredicted): - self.powerstate.plasma[f"{quantities[i]}_tr"] = torch.cat((torch.tensor([[0.0]]),Q[:, numeach * c : numeach * (c + 1)]),dim=1) - -# ************************************************************************************************** -# Functions -# ************************************************************************************************** - -def curateTGYROfiles( - tgyroObject, - label, - RadiisToRun, - ProfilesPredicted, - folder, - percentError, - provideTurbulentExchange=False, - impurityPosition=1, - includeFast=False, - use_tglf_scan_trick=None, - cold_start=False, - extra_name="", - cores_per_tglf_instance = 4 - ): - - tgyro = tgyroObject.results[label] - - # Determine NEO and Target errors - relativeErrorNEO = percentError[1] / 100.0 - relativeErrorTAR = percentError[2] / 100.0 - - # ************************************************************************************************************************** - # TGLF - # ************************************************************************************************************************** - - # Grab fluxes - Qe = tgyro.Qe_sim_turb[0, 1:] - Qi = tgyro.QiIons_sim_turb[0, 1:] if includeFast else tgyro.QiIons_sim_turb_thr[0, 1:] - Ge = tgyro.Ge_sim_turb[0, 1:] - GZ = tgyro.Gi_sim_turb[impurityPosition, 0, 1:] - Mt = tgyro.Mt_sim_turb[0, 1:] - Pexch = tgyro.EXe_sim_turb[0, 1:] - - # Determine TGLF standard deviations - if use_tglf_scan_trick is not None: - - if provideTurbulentExchange: - print("> Turbulent exchange not implemented yet in TGLF scans", typeMsg="w") #TODO - - # -------------------------------------------------------------- - # If using the scan trick - # -------------------------------------------------------------- - - Qe, Qi, Ge, GZ, Mt, Pexch, QeE, QiE, GeE, GZE, MtE, PexchE = tglf_scan_trick( - [Qe, Qi, Ge, GZ, Mt, Pexch], - tgyroObject, - label, - RadiisToRun, - ProfilesPredicted, - impurityPosition=impurityPosition, - includeFast=includeFast, - delta = use_tglf_scan_trick, - cold_start=cold_start, - extra_name=extra_name, - cores_per_tglf_instance=cores_per_tglf_instance - ) - - min_relative_error = 0.01 # To avoid problems with gpytorch, 1% error minimum - - QeE = QeE.clip(abs(Qe)*min_relative_error) - QiE = QiE.clip(abs(Qi)*min_relative_error) - GeE = GeE.clip(abs(Ge)*min_relative_error) - GZE = GZE.clip(abs(GZ)*min_relative_error) - MtE = MtE.clip(abs(Mt)*min_relative_error) - PexchE = PexchE.clip(abs(Pexch)*min_relative_error) - - else: - - # -------------------------------------------------------------- - # If simply a percentage error provided - # -------------------------------------------------------------- - - relativeErrorTGLF = [percentError[0] / 100.0]*len(RadiisToRun) - - QeE = abs(Qe) * relativeErrorTGLF - QiE = abs(Qi) * relativeErrorTGLF - GeE = abs(Ge) * relativeErrorTGLF - GZE = abs(GZ) * relativeErrorTGLF - MtE = abs(Mt) * relativeErrorTGLF - PexchE = abs(Pexch) * relativeErrorTGLF - - # ************************************************************************************************************************** - # Neo - # ************************************************************************************************************************** - - QeNeo = tgyro.Qe_sim_neo[0, 1:] - if includeFast: - QiNeo = tgyro.QiIons_sim_neo[0, 1:] - else: - QiNeo = tgyro.QiIons_sim_neo_thr[0, 1:] - GeNeo = tgyro.Ge_sim_neo[0, 1:] - GZNeo = tgyro.Gi_sim_neo[impurityPosition, 0, 1:] - MtNeo = tgyro.Mt_sim_neo[0, 1:] - - QeNeoE = 
abs(tgyro.Qe_sim_neo[0, 1:]) * relativeErrorNEO - if includeFast: - QiNeoE = abs(tgyro.QiIons_sim_neo[0, 1:]) * relativeErrorNEO - else: - QiNeoE = abs(tgyro.QiIons_sim_neo_thr[0, 1:]) * relativeErrorNEO - GeNeoE = abs(tgyro.Ge_sim_neo[0, 1:]) * relativeErrorNEO - GZNeoE = abs(tgyro.Gi_sim_neo[impurityPosition, 0, 1:]) * relativeErrorNEO - MtNeoE = abs(tgyro.Mt_sim_neo[0, 1:]) * relativeErrorNEO - - # Merge - - PORTALScgyro.modifyFLUX( - tgyro, - folder, - Qe, - Qi, - Ge, - GZ, - Mt, - Pexch, - QeNeo=QeNeo, - QiNeo=QiNeo, - GeNeo=GeNeo, - GZNeo=GZNeo, - MtNeo=MtNeo, - impurityPosition=impurityPosition, - ) - - PORTALScgyro.modifyFLUX( - tgyro, - folder, - QeE, - QiE, - GeE, - GZE, - MtE, - PexchE, - QeNeo=QeNeoE, - QiNeo=QiNeoE, - GeNeo=GeNeoE, - GZNeo=GZNeoE, - MtNeo=MtNeoE, - impurityPosition=impurityPosition, - special_label="_stds", - ) - - # ************************************************************************************************************************** - # Targets - # ************************************************************************************************************************** - - QeTargetE = abs(tgyro.Qe_tar[0, 1:]) * relativeErrorTAR - QiTargetE = abs(tgyro.Qi_tar[0, 1:]) * relativeErrorTAR - GeTargetE = abs(tgyro.Ge_tar[0, 1:]) * relativeErrorTAR - GZTargetE = GeTargetE * 0.0 - MtTargetE = abs(tgyro.Mt_tar[0, 1:]) * relativeErrorTAR - - PORTALScgyro.modifyEVO( - tgyro, - folder, - QeTargetE * 0.0, - QiTargetE * 0.0, - GeTargetE * 0.0, - GZTargetE * 0.0, - MtTargetE * 0.0, - impurityPosition=impurityPosition, - positionMod=1, - special_label="_stds", - ) - PORTALScgyro.modifyEVO( - tgyro, - folder, - QeTargetE, - QiTargetE, - GeTargetE, - GZTargetE, - MtTargetE, - impurityPosition=impurityPosition, - positionMod=2, - special_label="_stds", - ) - - -def profilesToShare(self): - if "extra_params" in self.powerstate.TransportOptions["ModelOptions"] and "folder" in self.powerstate.TransportOptions["ModelOptions"]["extra_params"]: - whereFolder = IOtools.expandPath(self.powerstate.TransportOptions["ModelOptions"]["extra_params"]["folder"] / "Outputs" / "portals_profiles") - if not whereFolder.exists(): - IOtools.askNewFolder(whereFolder) - - fil = whereFolder / f"input.gacode.{self.evaluation_number}" - shutil.copy2(self.file_profs, fil) - shutil.copy2(self.file_profs_unmod, fil.parent / f"{fil.name}_unmodified") - shutil.copy2(self.file_profs_targets, fil.parent / f"{fil.name}.new") - print(f"\t- Copied profiles to {IOtools.clipstr(fil)}") - else: - print("\t- Could not move files", typeMsg="w") - - -def cgyro_trick(self,FolderEvaluation_TGYRO): - - with open(FolderEvaluation_TGYRO / "mitim_flag", "w") as f: - f.write("0") - - # ************************************************************************************************************************** - # Print Information - # ************************************************************************************************************************** - - txt = "\nFluxes to be matched by CGYRO ( TARGETS - NEO ):" - - for var, varn in zip( - ["r/a ", "rho ", "a/LTe", "a/LTi", "a/Lne", "a/LnZ", "a/Lw0"], - ["roa", "rho", "aLte", "aLti", "aLne", "aLnZ", "aLw0"], - ): - txt += f"\n{var} = " - for j in range(self.powerstate.plasma["rho"].shape[1] - 1): - txt += f"{self.powerstate.plasma[varn][0,j+1]:.6f} " - - for var, varn in zip( - ["Qe (MW/m^2)", "Qi (MW/m^2)", "Ce (MW/m^2)", "CZ (MW/m^2)", "Mt (J/m^2) "], - ["Pe", "Pi", "Ce", "CZ", "Mt"], - ): - txt += f"\n{var} = " - for j in range(self.powerstate.plasma["rho"].shape[1] 
- 1): - txt += f"{self.powerstate.plasma[varn][0,j+1]-self.powerstate.plasma[f'{varn}_tr_neo'][0,j+1]:.4e} " - - print(txt) - - # Copy profiles so that later it is easy to grab all the input.gacodes that were evaluated - profilesToShare(self) - - # ************************************************************************************************************************** - # Evaluate CGYRO - # ************************************************************************************************************************** - - PORTALScgyro.evaluateCGYRO( - self.powerstate.TransportOptions["ModelOptions"]["extra_params"]["PORTALSparameters"], - self.powerstate.TransportOptions["ModelOptions"]["extra_params"]["folder"], - self.evaluation_number, - FolderEvaluation_TGYRO, - self.file_profs, - self.powerstate.plasma["roa"][0,1:], - self.powerstate.ProfilesPredicted, - ) - - # ************************************************************************************************************************** - # EXTRA - # ************************************************************************************************************************** - - # Make tensors - for i in ["Pe_tr_turb", "Pi_tr_turb", "Ce_tr_turb", "CZ_tr_turb", "Mt_tr_turb"]: - try: - self.powerstate.plasma[i] = torch.from_numpy(self.powerstate.plasma[i]).to(self.powerstate.dfT).unsqueeze(0) - except: - pass - - # Write a flag indicating this was performed, to avoid an issue that... the script crashes when it has copied tglf_neo, without cgyro_trick modification - with open(FolderEvaluation_TGYRO / "mitim_flag", "w") as f: - f.write("1") - -def dummyCDF(GeneralFolder, FolderEvaluation): - """ - This routine creates path to a dummy CDF file in FolderEvaluation, with the name "simulation_evaluation.CDF" - - GeneralFolder, e.g. ~/runs_portals/run10/ - FolderEvaluation, e.g. 
~/runs_portals/run10000/Execution/Evaluation.0/model_complete/ - """ - - # ------- Name construction for scratch folders in parallel ---------------- - - GeneralFolder = IOtools.expandPath(GeneralFolder, ensurePathValid=True) - - a, subname = IOtools.reducePathLevel(GeneralFolder, level=1, isItFile=False) - - FolderEvaluation = IOtools.expandPath(FolderEvaluation) - - name = FolderEvaluation.name.split(".")[-1] # 0 (evaluation #) - - if name == "": - name = "0" - - cdf = FolderEvaluation / f"{subname}_ev{name}.CDF" - - return cdf diff --git a/src/mitim_modules/powertorch/physics/__init__.py b/src/mitim_modules/powertorch/physics/__init__.py deleted file mode 100644 index e69de29b..00000000 diff --git a/src/mitim_modules/powertorch/physics_models/parameterizers.py b/src/mitim_modules/powertorch/physics_models/parameterizers.py new file mode 100644 index 00000000..36bfe27b --- /dev/null +++ b/src/mitim_modules/powertorch/physics_models/parameterizers.py @@ -0,0 +1,175 @@ +import copy +import torch +import numpy as np +from mitim_modules.powertorch.utils import CALCtools +from mitim_tools.misc_tools.LOGtools import printMsg as print +from IPython import embed + +# <> Function to interpolate a curve <> +from mitim_tools.misc_tools.MATHtools import extrapolateCubicSpline as interpolation_function + +def piecewise_linear( + x_coord, + y_coord_raw, + x_coarse_tensor, + parameterize_in_aLx=True, + multiplier_quantity=1.0, + ): + """ + Notes: + - x_coarse_tensor must be torch + """ + + # ********************************************************************************************************** + # Define the integrator and derivator functions (based on whether I want to parameterize in aLx or in gradX) + # ********************************************************************************************************** + + if parameterize_in_aLx: + # 1/Lx = -1/X*dX/dr + integrator_function, derivator_function = ( + CALCtools.integrateGradient, + CALCtools.produceGradient, + ) + else: + # -dX/dr + integrator_function, derivator_function = ( + CALCtools.integrateGradient_lin, + CALCtools.produceGradient_lin, + ) + + y_coord = torch.from_numpy(y_coord_raw).to(x_coarse_tensor) * multiplier_quantity + + ygrad_coord = derivator_function( torch.from_numpy(x_coord).to(x_coarse_tensor), y_coord ) + + # ********************************************************************************************************** + # Get control points + # ********************************************************************************************************** + + x_coarse = x_coarse_tensor[1:].cpu().numpy() + + """ + Define region to get control points from + ------------------------------------------------------------ + Trick: Addition of extra point + This is important because if I don't, when I combine the trailing edge and the new + modified profile, there's going to be a discontinuity in the gradient. + """ + + ir_end = np.argmin(np.abs(x_coord - x_coarse[-1])) + + if ir_end < len(x_coord) - 1: + ir = ir_end + 2 # To prevent that TGYRO does a 2nd order derivative + x_coarse = np.append(x_coarse, [x_coord[ir]]) + else: + ir = ir_end + + # Definition of trailing edge. 
Any point after, and including, the extra point + x_trail = torch.from_numpy(x_coord[ir:]).to(x_coarse_tensor) + y_trail = y_coord[ir:] + x_notrail = torch.from_numpy(x_coord[: ir + 1]).to(x_coarse_tensor) + + # Produce control points, including a zero at the beginning + aLy_coarse = [[0.0, 0.0]] + for cont, i in enumerate(x_coarse): + yValue = ygrad_coord[np.argmin(np.abs(x_coord - i))] + aLy_coarse.append([i, yValue.cpu().item()]) + + aLy_coarse = torch.from_numpy(np.array(aLy_coarse)).to(ygrad_coord) + + # Since the last one is an extra point very close, I'm making it the same + aLy_coarse[-1, 1] = aLy_coarse[-2, 1] + + # Boundary condition at point moved by gridPointsAllowed + y_bc = torch.from_numpy(interpolation_function([x_coarse[-1]], x_coord, y_coord.cpu().numpy())).to(ygrad_coord) + + # Boundary condition at point (ACTUAL THAT I WANT to keep fixed, i.e. rho=0.8) + y_bc_real = torch.from_numpy(interpolation_function([x_coarse[-2]], x_coord, y_coord.cpu().numpy())).to(ygrad_coord) + + # ********************************************************************************************************** + # Define profile_constructor functions + # ********************************************************************************************************** + + def profile_constructor_coarse(x, y, multiplier=multiplier_quantity): + """ + Construct curve in a coarse grid + ---------------------------------------------------------------------------------------------------- + This constructs a curve in any grid, with any batch given in y=y. + Useful for surrogate evaluations. Fast in a coarse grid. For HF evaluations, + I need to do in a finer grid so that it is consistent with TGYRO. + x, y must be (batch, radii), y_bc must be (1) + """ + return x, integrator_function(x, y, y_bc_real) / multiplier + + def profile_constructor_middle(x, y, multiplier=multiplier_quantity): + """ + Deparamterizes a finer profile based on the values in the coarse. + Reason why something like this is not used for the full profile is because derivative of this will not be as original, + which is needed to match TGYRO + """ + yCPs = CALCtools.Interp1d()(aLy_coarse[:, 0][:-1].repeat((y.shape[0], 1)), y, x) + return x, integrator_function(x, yCPs, y_bc_real) / multiplier + + def profile_constructor_fine(x, y, multiplier=multiplier_quantity): + """ + Notes: + - x is a 1D array, but y can be a 2D array for a batch of individuals: (batch,x) + - I am assuming it is 1/LT for parameterization, but gives T + """ + + y = torch.atleast_2d(y) + x = x[0, :] if x.dim() == 2 else x + + # Add the extra trick point + x = torch.cat((x, aLy_coarse[-1][0].repeat((1)))) + y = torch.cat((y, aLy_coarse[-1][-1].repeat((y.shape[0], 1))), dim=1) + + # Model curve (basically, what happens in between points) + yBS = CALCtools.Interp1d()(x.repeat(y.shape[0], 1), y, x_notrail.repeat(y.shape[0], 1)) + + """ + --------------------------------------------------------------------------------------------------------- + Trick 1: smoothAroundCoarsing + TGYRO will use a 2nd order scheme to obtain gradients out of the profile, so a piecewise linear + will simply not give the right derivatives. + Here, this rough trick is to modify the points in gradient space around the coarse grid with the + same value of gradient, so in principle it doesn't matter the order of the derivative. 
+ """ + num_around = 1 + for i in range(x.shape[0] - 2): + ir = torch.argmin(torch.abs(x[i + 1] - x_notrail)) + for k in range(-num_around, num_around + 1, 1): + yBS[:, ir + k] = yBS[:, ir] + # -------------------------------------------------------------------------------------------------------- + + yBS = integrator_function(x_notrail.repeat(yBS.shape[0], 1), yBS.clone(), y_bc) + + """ + Trick 2: Correct y_bc + The y_bc for the profile integration started at gridPointsAllowed, but that's not the real + y_bc. I want the temperature fixed at my first point that I actually care for. + Here, I multiply the profile to get that. + Multiplication works because: + 1/LT = 1/T * dT/dr + 1/LT' = 1/(T*m) * d(T*m)/dr = 1/T * dT/dr = 1/LT + Same logarithmic gradient, but with the right boundary condition + + """ + ir = torch.argmin(torch.abs(x_notrail - x[-2])) + yBS = yBS * torch.transpose((y_bc_real / yBS[:, ir]).repeat(yBS.shape[1], 1), 0, 1) + + # Add trailing edge + y_trailnew = copy.deepcopy(y_trail).repeat(yBS.shape[0], 1) + + x_notrail_t = torch.cat((x_notrail[:-1], x_trail), dim=0) + yBS = torch.cat((yBS[:, :-1], y_trailnew), dim=1) + + return x_notrail_t, yBS / multiplier + + # ********************************************************************************************************** + + return ( + aLy_coarse, + profile_constructor_fine, + profile_constructor_coarse, + profile_constructor_middle, + ) \ No newline at end of file diff --git a/src/mitim_modules/powertorch/physics/radiation_chebyshev.csv b/src/mitim_modules/powertorch/physics_models/radiation_chebyshev.csv similarity index 100% rename from src/mitim_modules/powertorch/physics/radiation_chebyshev.csv rename to src/mitim_modules/powertorch/physics_models/radiation_chebyshev.csv diff --git a/src/mitim_modules/powertorch/physics/TARGETStools.py b/src/mitim_modules/powertorch/physics_models/targets_analytic.py similarity index 52% rename from src/mitim_modules/powertorch/physics/TARGETStools.py rename to src/mitim_modules/powertorch/physics_models/targets_analytic.py index a13bd90c..b36dd63d 100644 --- a/src/mitim_modules/powertorch/physics/TARGETStools.py +++ b/src/mitim_modules/powertorch/physics_models/targets_analytic.py @@ -1,201 +1,9 @@ import torch from mitim_tools.misc_tools import PLASMAtools +from mitim_modules.powertorch.utils import TARGETStools from mitim_tools.misc_tools.LOGtools import printMsg as print from IPython import embed -# ------------------------------------------------------------------ -# Main classes -# ------------------------------------------------------------------ - -class power_targets: - ''' - Default class for power target models, change "evaluate" method to implement a new model - ''' - - def evaluate(self): - print("No model implemented for power targets", typeMsg="w") - - def __init__(self,powerstate): - self.powerstate = powerstate - - # Make sub-targets equal to zero - variables_to_zero = ["qfuse", "qfusi", "qie", "qrad", "qrad_bremms", "qrad_line", "qrad_sync"] - for i in variables_to_zero: - self.powerstate.plasma[i] = self.powerstate.plasma["te"] * 0.0 - - # ---------------------------------------------------- - # Fixed Targets (targets without a model) - # ---------------------------------------------------- - - if self.powerstate.TargetOptions['ModelOptions']['TypeTarget'] == 1: - self.Pe_orig, self.Pi_orig = ( - self.powerstate.plasma["Pe_orig_fusradexch"], - self.powerstate.plasma["Pi_orig_fusradexch"], - ) # Original integrated from input.gacode - elif 
self.powerstate.TargetOptions['ModelOptions']['TypeTarget'] == 2: - self.Pe_orig, self.Pi_orig = ( - self.powerstate.plasma["Pe_orig_fusrad"], - self.powerstate.plasma["Pi_orig_fusrad"], - ) - elif self.powerstate.TargetOptions['ModelOptions']['TypeTarget'] == 3: - self.Pe_orig, self.Pi_orig = self.powerstate.plasma["te"] * 0.0, self.powerstate.plasma["te"] * 0.0 - - # For the moment, I don't have a model for these, so I just grab the original from input.gacode - self.CextraE = self.powerstate.plasma["Gaux_e"] # 1E20/s/m^2 - self.CextraZ = self.powerstate.plasma["Gaux_Z"] # 1E20/s/m^2 - self.Mextra = self.powerstate.plasma["Maux"] # J/m^2 - - def fine_grid(self): - - """ - Make all quantities needed on the fine resolution - ------------------------------------------------- - In the powerstate creation, the plasma variables are stored in two different resolutions, one for the coarse grid and one for the fine grid, - if the option is activated. - - Here, at calculation stage I use some precalculated quantities in the fine grid and then integrate the gradients into that resolution - - Note that the set ['te','ti','ne','nZ','w0','ni'] will automatically be substituted during the update_var() that comes next, so - it's ok that I lose the torch leaf here. However, I must do this copy here because if any of those variables are not updated in - update_var() then it would fail. But first store them for later use. - """ - - self.plasma_original = {} - - # Bring to fine grid - variables_to_fine = ["B_unit", "B_ref", "volp", "rmin", "roa", "rho", "ni"] - for variable in variables_to_fine: - self.plasma_original[variable] = self.powerstate.plasma[variable].clone() - self.powerstate.plasma[variable] = self.powerstate.plasma_fine[variable] - - # Bring also the gradients and kinetic variables - for variable in self.powerstate.profile_map.keys(): - - # Kinetic variables (te,ti,ne,nZ,w0,ni) - self.plasma_original[variable] = self.powerstate.plasma[variable].clone() - self.powerstate.plasma[variable] = self.powerstate.plasma_fine[variable] - - # Bring also the gradients that are part of the torch trees, so that the derivative is not lost - self.plasma_original[f'aL{variable}'] = self.powerstate.plasma[f'aL{variable}'].clone() - - # ---------------------------------------------------- - # Integrate through fine de-parameterization - # ---------------------------------------------------- - for i in self.powerstate.ProfilesPredicted: - _ = self.powerstate.update_var(i,specific_deparametrizer=self.powerstate.deparametrizers_coarse_middle) - - def flux_integrate(self): - """ - ************************************************************************************************** - Calculate integral of all targets, and then sum aux. - Reason why I do it this convoluted way is to make it faster in mitim, not to run integrateQuadPoly all the time. 
- Run once for all the batch and also for electrons and ions - (in MW/m^2) - ************************************************************************************************** - """ - - qe = self.powerstate.plasma["te"]*0.0 - qi = self.powerstate.plasma["te"]*0.0 - - if self.powerstate.TargetOptions['ModelOptions']['TypeTarget'] >= 2: - qe += -self.powerstate.plasma["qie"] - qi += self.powerstate.plasma["qie"] - - if self.powerstate.TargetOptions['ModelOptions']['TypeTarget'] == 3: - qe += self.powerstate.plasma["qfuse"] - self.powerstate.plasma["qrad"] - qi += self.powerstate.plasma["qfusi"] - - q = torch.cat((qe, qi)).to(qe) - self.P = self.powerstate.volume_integrate(q, force_dim=q.shape[0]) - - def coarse_grid(self): - - # ************************************************************************************************** - # Come back to original grid for targets - # ************************************************************************************************** - - # Interpolate results from fine to coarse (i.e. whole point is that it is better than integrate interpolated values) - if self.powerstate.TargetOptions['ModelOptions']['TypeTarget'] >= 2: - for i in ["qie"]: - self.powerstate.plasma[i] = self.powerstate.plasma[i][:, self.powerstate.positions_targets] - - if self.powerstate.TargetOptions['ModelOptions']['TypeTarget'] == 3: - for i in [ - "qfuse", - "qfusi", - "qrad", - "qrad_bremms", - "qrad_line", - "qrad_sync", - ]: - self.powerstate.plasma[i] = self.powerstate.plasma[i][:, self.powerstate.positions_targets] - - self.P = self.P[:, self.powerstate.positions_targets] - - # Recover variables calculated prior to the fine-targets method - for i in self.plasma_original: - self.powerstate.plasma[i] = self.plasma_original[i] - - def postprocessing(self, useConvectiveFluxes=False, forceZeroParticleFlux=False, assumedPercentError=1.0): - - # ************************************************************************************************** - # Plug-in Targets - # ************************************************************************************************** - - self.powerstate.plasma["Pe"] = ( - self.powerstate.plasma["Paux_e"] + self.P[: self.P.shape[0]//2, :] + self.Pe_orig - ) # MW/m^2 - self.powerstate.plasma["Pi"] = ( - self.powerstate.plasma["Paux_i"] + self.P[self.P.shape[0]//2 :, :] + self.Pi_orig - ) # MW/m^2 - self.powerstate.plasma["Ce_raw"] = self.CextraE - self.powerstate.plasma["CZ_raw"] = self.CextraZ - self.powerstate.plasma["Mt"] = self.Mextra - - # Merge convective fluxes - - if useConvectiveFluxes: - self.powerstate.plasma["Ce"] = PLASMAtools.convective_flux( - self.powerstate.plasma["te"], self.powerstate.plasma["Ce_raw"] - ) # MW/m^2 - self.powerstate.plasma["CZ"] = PLASMAtools.convective_flux( - self.powerstate.plasma["te"], self.powerstate.plasma["CZ_raw"] - ) # MW/m^2 - else: - self.powerstate.plasma["Ce"] = self.powerstate.plasma["Ce_raw"] - self.powerstate.plasma["CZ"] = self.powerstate.plasma["CZ_raw"] - - if forceZeroParticleFlux: - self.powerstate.plasma["Ce"] = self.powerstate.plasma["Ce"] * 0 - self.powerstate.plasma["Ce_raw"] = self.powerstate.plasma["Ce_raw"] * 0 - - # ************************************************************************************************** - # Error - # ************************************************************************************************** - - variables_to_error = ["Pe", "Pi", "Ce", "CZ", "Mt", "Ce_raw", "CZ_raw"] - - for i in variables_to_error: - self.powerstate.plasma[i + "_stds"] = 
abs(self.powerstate.plasma[i]) * assumedPercentError / 100 - - """ - ************************************************************************************************** - GB Normalized - ************************************************************************************************** - Note: This is useful for mitim surrogate variables of targets - """ - - gb_mapping = { - "Pe": "Qgb", - "Pi": "Qgb", - "Ce": "Qgb" if useConvectiveFluxes else "Ggb", - "CZ": "Qgb" if useConvectiveFluxes else "Ggb", - "Mt": "Pgb", - } - - for i in gb_mapping.keys(): - self.powerstate.plasma[f"{i}GB"] = self.powerstate.plasma[i] / self.powerstate.plasma[gb_mapping[i]] - # ---------------------------------------------------------------------------------------------------- # Full analytical models taken from TGYRO # ---------------------------------------------------------------------------------------------------- @@ -214,7 +22,7 @@ def postprocessing(self, useConvectiveFluxes=False, forceZeroParticleFlux=False, c4, c5, c6, c7 = 4.60643e-3, 1.3500e-2, -1.06750e-4, 1.36600e-5 bg, er = 34.3827, 1.124656e6 -class analytical_model(power_targets): +class analytical_model(TARGETStools.power_targets): def __init__(self,powerstate, **kwargs): super().__init__(powerstate, **kwargs) diff --git a/src/mitim_modules/powertorch/physics_models/transport_analytic.py b/src/mitim_modules/powertorch/physics_models/transport_analytic.py new file mode 100644 index 00000000..d9002af3 --- /dev/null +++ b/src/mitim_modules/powertorch/physics_models/transport_analytic.py @@ -0,0 +1,90 @@ +import torch +from mitim_tools.misc_tools import PLASMAtools +from mitim_modules.powertorch.utils import TRANSPORTtools +from mitim_tools.misc_tools.LOGtools import printMsg as print +from IPython import embed + +# ------------------------------------------------------------------ +# SIMPLE Diffusion (#TODO: implement with particle flux and the raw) +# ------------------------------------------------------------------ + +class diffusion_model(TRANSPORTtools.power_transport): + def __init__(self, powerstate, **kwargs): + super().__init__(powerstate, **kwargs) + + # Ensure that the provided diffusivities include the zero location + self.chi_e = self.powerstate.TransportOptions["ModelOptions"]["chi_e"] + self.chi_i = self.powerstate.TransportOptions["ModelOptions"]["chi_i"] + + if self.chi_e.shape[0] < self.powerstate.plasma['rho'].shape[-1]: + self.chi_e = torch.cat((torch.zeros(1), self.chi_e)) + + if self.chi_i.shape[0] < self.powerstate.plasma['rho'].shape[-1]: + self.chi_i = torch.cat((torch.zeros(1), self.chi_i)) + + def produce_profiles(self): + pass + + def evaluate(self): + + # Make sure the chis are applied to all the points in the batch + Pe_tr = PLASMAtools.conduction( + self.powerstate.plasma["ne"], + self.powerstate.plasma["te"], + self.chi_e.repeat(self.powerstate.plasma['rho'].shape[0],1), + self.powerstate.plasma["aLte"], + self.powerstate.plasma["a"].unsqueeze(-1), + ) + Pi_tr = PLASMAtools.conduction( + self.powerstate.plasma["ni"].sum(axis=-1), + self.powerstate.plasma["ti"], + self.chi_i.repeat(self.powerstate.plasma['rho'].shape[0],1), + self.powerstate.plasma["aLti"], + self.powerstate.plasma["a"].unsqueeze(-1), + ) + + self.powerstate.plasma["QeMWm2_tr_turb"] = Pe_tr * 2 / 3 + self.powerstate.plasma["QiMWm2_tr_turb"] = Pi_tr * 2 / 3 + + self.powerstate.plasma["QeMWm2_tr_neo"] = Pe_tr * 1 / 3 + self.powerstate.plasma["QiMWm2_tr_neo"] = Pi_tr * 1 / 3 + + self.powerstate.plasma["QeMWm2_tr"] = self.powerstate.plasma["QeMWm2_tr_turb"] 
+ self.powerstate.plasma["QeMWm2_tr_neo"] + self.powerstate.plasma["QiMWm2_tr"] = self.powerstate.plasma["QiMWm2_tr_turb"] + self.powerstate.plasma["QiMWm2_tr_neo"] + +# ------------------------------------------------------------------ +# SURROGATE +# ------------------------------------------------------------------ + +class surrogate(TRANSPORTtools.power_transport): + def __init__(self, powerstate, **kwargs): + super().__init__(powerstate, **kwargs) + + def produce_profiles(self): + pass + + def evaluate(self): + + """ + flux_fun as given in ModelOptions must produce Q and Qtargets in order of te,ti,ne + """ + + X = torch.Tensor() + for prof in self.powerstate.ProfilesPredicted: + X = torch.cat((X,self.powerstate.plasma['aL'+prof][:,1:]),axis=1) + + _, Q, _, _ = self.powerstate.TransportOptions["ModelOptions"]["flux_fun"](X) + + numeach = self.powerstate.plasma["rho"].shape[1] - 1 + + quantities = { + "te": "QeMWm2", + "ti": "QiMWm2", + "ne": "Ce", + "nZ": "CZ", + "w0": "MtJm2", + } + + for c, i in enumerate(self.powerstate.ProfilesPredicted): + self.powerstate.plasma[f"{quantities[i]}_tr"] = torch.cat((torch.tensor([[0.0]]),Q[:, numeach * c : numeach * (c + 1)]),dim=1) + diff --git a/src/mitim_modules/powertorch/physics_models/transport_cgyro.py b/src/mitim_modules/powertorch/physics_models/transport_cgyro.py new file mode 100644 index 00000000..031ed0a0 --- /dev/null +++ b/src/mitim_modules/powertorch/physics_models/transport_cgyro.py @@ -0,0 +1,488 @@ +import copy +import shutil +import torch +import numpy as np +from mitim_tools.misc_tools import IOtools +from mitim_tools.gacode_tools import PROFILEStools, TGYROtools +from mitim_modules.powertorch.physics_models import transport_tgyro +from mitim_tools.misc_tools.LOGtools import printMsg as print +from IPython import embed + +class cgyro_model(transport_tgyro.tgyro_model): + def __init__(self, powerstate, **kwargs): + super().__init__(powerstate, **kwargs) + + def evaluate(self): + + # Run original evaluator + tgyro = self._evaluate_tglf_neo() + + # Run CGYRO trick + powerstate_orig = self._trick_cgyro(tgyro) + + # Process results + self._postprocess_results(tgyro, "cgyro_neo") + + # Some checks + print("\t- Checking model modifications:") + for r in ["QeMWm2_tr_turb", "QiMWm2_tr_turb", "Ce_tr_turb", "CZ_tr_turb", "MtJm2_tr_turb"]: #, "PexchTurb"]: #TODO: FIX + print(f"\t\t{r}(tglf) = {' '.join([f'{k:.1e} (+-{ke:.1e})' for k,ke in zip(powerstate_orig.plasma[r][0][1:],powerstate_orig.plasma[r+'_stds'][0][1:]) ])}") + print(f"\t\t{r}(cgyro) = {' '.join([f'{k:.1e} (+-{ke:.1e})' for k,ke in zip(self.powerstate.plasma[r][0][1:],self.powerstate.plasma[r+'_stds'][0][1:]) ])}") + + # ************************************************************************************ + # Private functions for CGYRO evaluation + # ************************************************************************************ + + def _trick_cgyro(self, tgyro): + + FolderEvaluation_TGYRO = self.folder / "cgyro_neo" + + print("\t- Checking whether cgyro_neo folder exists and it was written correctly via cgyro_trick...") + + correctly_run = FolderEvaluation_TGYRO.exists() + if correctly_run: + print("\t\t- Folder exists, but was cgyro_trick run?") + with open(FolderEvaluation_TGYRO / "mitim_flag", "r") as f: + correctly_run = bool(float(f.readline())) + + if correctly_run: + print("\t\t\t* Yes, it was", typeMsg="w") + else: + print("\t\t\t* No, it was run, repating process", typeMsg="i") + + # Remove cgyro_neo folder + if FolderEvaluation_TGYRO.exists(): + 
IOtools.shutil_rmtree(FolderEvaluation_TGYRO) + + # Copy tglf_neo results + shutil.copytree(self.folder / "tglf_neo", FolderEvaluation_TGYRO) + + # ********************************************************** + # CGYRO writer + # ********************************************************** + + # Write a flag indicating this was not performed + with open(FolderEvaluation_TGYRO / "mitim_flag", "w") as f: + f.write("0") + + self._cgyro_trick(FolderEvaluation_TGYRO) + + # Write a flag indicating this was performed, to avoid the case where the script crashes after having copied tglf_neo but before applying the cgyro_trick modification + with open(FolderEvaluation_TGYRO / "mitim_flag", "w") as f: + f.write("1") + + # Read TGYRO files and construct portals variables + + tgyro.read(label="cgyro_neo", folder=FolderEvaluation_TGYRO) + + powerstate_orig = copy.deepcopy(self.powerstate) + + return powerstate_orig + + def _cgyro_trick(self,FolderEvaluation_TGYRO): + + # Print Information + print(self._print_info()) + + # Copy profiles so that later it is easy to grab all the input.gacodes that were evaluated + self._profiles_to_store() + + # ************************************************************************************************************************** + # Evaluate CGYRO + # ************************************************************************************************************************** + + evaluateCGYRO( + self.powerstate.TransportOptions["ModelOptions"]["extra_params"]["PORTALSparameters"], + self.powerstate.TransportOptions["ModelOptions"]["extra_params"]["folder"], + self.evaluation_number, + FolderEvaluation_TGYRO, + self.file_profs, + self.powerstate.plasma["roa"][0,1:], + self.powerstate.ProfilesPredicted, + ) + + # Make tensors + for i in ["QeMWm2_tr_turb", "QiMWm2_tr_turb", "Ce_tr_turb", "CZ_tr_turb", "MtJm2_tr_turb"]: + try: + self.powerstate.plasma[i] = torch.from_numpy(self.powerstate.plasma[i]).to(self.powerstate.dfT).unsqueeze(0) + except: + pass + + def _print_info(self): + + txt = "\nFluxes to be matched by CGYRO ( TARGETS - NEO ):" + + for var, varn in zip( + ["r/a ", "rho ", "a/LTe", "a/LTi", "a/Lne", "a/LnZ", "a/Lw0"], + ["roa", "rho", "aLte", "aLti", "aLne", "aLnZ", "aLw0"], + ): + txt += f"\n{var} = " + for j in range(self.powerstate.plasma["rho"].shape[1] - 1): + txt += f"{self.powerstate.plasma[varn][0,j+1]:.6f} " + + for var, varn in zip( + ["Qe (MW/m^2)", "Qi (MW/m^2)", "Ce (MW/m^2)", "CZ (MW/m^2)", "Mt (J/m^2) "], + ["QeMWm2", "QiMWm2", "Ce", "CZ", "MtJm2"], + ): + txt += f"\n{var} = " + for j in range(self.powerstate.plasma["rho"].shape[1] - 1): + txt += f"{self.powerstate.plasma[varn][0,j+1]-self.powerstate.plasma[f'{varn}_tr_neo'][0,j+1]:.4e} " + + return txt + +""" +The CGYRO file must contain GB units, and the GB unit is MW/m^2, 1E19m^2/s +The CGYRO file must use particle flux. Convective transformation occurs later +""" + +def evaluateCGYRO(PORTALSparameters, folder, numPORTALS, FolderEvaluation, unmodified_profiles, radii, ProfilesPredicted): + print("\n ** CGYRO evaluation of fluxes has been requested before passing information to the STRATEGY module **",typeMsg="i",) + + if isinstance(numPORTALS, int): + numPORTALS = str(numPORTALS) + + # ------------------------------------------------------------------------------------------------ + # Hardcoded + # ------------------------------------------------------------------------------------------------ + if PORTALSparameters['hardCodedCGYRO'] is not None: + """ + train_sep is the number of initial runs in it#0 results file. 
Now, it's usually 1 + start_num is the number of the first iteration, usually 0 + trick_harcoded_f is the name of the file until the iteration number. E.g. 'example_run/Outputs/cgyro_results/iter_rmp_75_' + + e.g.: + train_sep,start_num,last_one,trick_hardcoded_f = 1, 0,100, 'example_run/Outputs/cgyro_results/d3d_5chan_it_' + + """ + + train_sep = PORTALSparameters["hardCodedCGYRO"]["train_sep"] + start_num = PORTALSparameters["hardCodedCGYRO"]["start_num"] + last_one = PORTALSparameters["hardCodedCGYRO"]["last_one"] + trick_hardcoded_f = PORTALSparameters["hardCodedCGYRO"]["trick_hardcoded_f"] + else: + train_sep = None + start_num = None + last_one = None + trick_hardcoded_f = None + # ------------------------------------------------------------------------------------------------ + + minErrorPercent = PORTALSparameters["percentError_stable"] + Qi_criterion_stable = PORTALSparameters["Qi_criterion_stable"] + percentNeo = PORTALSparameters["percentError"][1] + useConvectiveFluxes = PORTALSparameters["useConvectiveFluxes"] + + try: + impurityPosition = PROFILEStools.impurity_location(PROFILEStools.PROFILES_GACODE(unmodified_profiles), PORTALSparameters["ImpurityOfInterest"]) + except ValueError: + if 'nZ' in ProfilesPredicted: + raise ValueError(f"Impurity {PORTALSparameters['ImpurityOfInterest']} not found in the profiles and needed for CGYRO evaluation") + else: + impurityPosition = 0 + print(f'\t- Impurity location not found. Using hardcoded value of {impurityPosition}') + + OriginalFimp = PORTALSparameters["fImp_orig"] + + cgyroing_file = ( + lambda file_cgyro, numPORTALS_this=0: cgyroing( + FolderEvaluation, + unmodified_profiles, + numPORTALS, + minErrorPercent, + Qi_criterion_stable, + useConvectiveFluxes, + percentNeo, + radii, + OriginalFimp=OriginalFimp, + evaluationsInFile=f"{numPORTALS_this}", + impurityPosition=impurityPosition, + file=file_cgyro, + ) + ) + print(f"\t- Suggested function call for mitim evaluation {numPORTALS} (lambda for cgyroing):",typeMsg="i") + cgyropath = IOtools.expandPath(folder, ensurePathValid=True) / 'Outputs' / 'cgyro_results' / f'cgyro_it_{numPORTALS}.txt' + print(f"\tcgyroing_file('{cgyropath}')") + + print('\t- Then insert "exit" and RETURN', typeMsg="i") + if (trick_hardcoded_f is None) or (int(numPORTALS) > last_one): + embed() + else: + # ------------------------------------------------------------------ + # Hard-coded stuff for quick modifications + # ------------------------------------------------------------------ + if int(numPORTALS) < train_sep: + cgyroing_file(f"{trick_hardcoded_f}{start_num}.txt",numPORTALS_this=numPORTALS) + else: + cgyroing_file(f"{trick_hardcoded_f}{int(numPORTALS)-train_sep+1+start_num}.txt",numPORTALS_this=0) + + +def cgyroing( + FolderEvaluation, + unmodified_profiles, + evaluations, + minErrorPercent, + Qi_criterion_stable, + useConvectiveFluxes, + percentNeo, + radii, + OriginalFimp=1.0, + file=None, + evaluationsInFile=0, + impurityPosition=3, +): + """ + Variables need to have dimensions of (evaluation,rho) + """ + + evaluations = np.array([int(i) for i in evaluations.split(",")]) + evaluationsInFile = np.array([int(i) for i in evaluationsInFile.split(",")]) + + aLTe,aLTi,aLne,Q_gb,Qe,Qi,Ge,GZ,Mt,Pexch,QeE,QiE,GeE,GZE,MtE,PexchE,_,_ = readCGYROresults(file, radii) + + cont = 0 + for _ in evaluations: + k = evaluationsInFile[cont] + cont += 1 + + print(f"\t- Modifying {IOtools.clipstr(FolderEvaluation)} with position {k} in CGYRO results file {IOtools.clipstr(file)}") + + # Get TGYRO + tgyro = 
TGYROtools.TGYROoutput(FolderEvaluation,profiles=PROFILEStools.PROFILES_GACODE(unmodified_profiles)) + + # Quick checker of correct file + wasThisTheCorrectRun(aLTe, aLTi, aLne, Q_gb, tgyro) + + transport_tgyro.modifyResults( + Qe[k, :], + Qi[k, :], + Ge[k, :], + GZ[k, :], + Mt[k, :], + Pexch[k, :], + QeE[k, :], + QiE[k, :], + GeE[k, :], + GZE[k, :], + MtE[k, :], + PexchE[k, :], + tgyro, + FolderEvaluation, + minErrorPercent=minErrorPercent, + useConvectiveFluxes=useConvectiveFluxes, + Qi_criterion_stable=Qi_criterion_stable, + percentNeo=percentNeo, + impurityPosition=impurityPosition, + OriginalFimp=OriginalFimp, + ) + + +def wasThisTheCorrectRun(aLTe, aLTi, aLne, Q_gb, tgyro, ErrorRaised=0.005): + print("\t- Checking that this was the correct run...") + + tgyro_new = copy.deepcopy(tgyro) + tgyro_new.aLti = tgyro_new.aLti[:, 0, :] + + variables = [ + [aLTe, tgyro_new.aLte, "aLTe"], + [aLTi, tgyro_new.aLti, "aLTi"], + [aLne, tgyro_new.aLne, "aLne"], + [Q_gb, tgyro_new.Q_GB, "Qgb"], + ] + + for var in variables: + [c, t, n] = var + + for pos in range(c.shape[0]): + for i in range(c.shape[1]): + error = np.max(abs((t[pos, i + 1] - c[pos, i]) / t[pos, i + 1])) + print( + f"\t\t* Error in {n}[{i}] was {error*100.0:.2f}% (TGYRO {t[pos,i+1]:.3f} vs. CGYRO {c[pos,i]:.3f})", + typeMsg="w" if error > ErrorRaised else "", + ) + + +def readlineNTH(line, full_file=True, unnormalize=True): + s = line.split() + + i = 2 + roa = float(s[i]) + i += 3 + aLne = float(s[i]) + i += 3 + aLTi = float(s[i]) + i += 3 + aLTe = float(s[i]) + i += 3 + + Qi = float(s[i]) + i += 3 + Qi_std = float(s[i]) + i += 3 + Qe = float(s[i]) + i += 3 + Qe_std = float(s[i]) + i += 3 + Ge = float(s[i]) + i += 3 + Ge_std = float(s[i]) + i += 3 + + if full_file: + GZ = float(s[i]) + i += 3 + GZ_std = float(s[i]) + i += 3 + + Mt = float(s[i]) + i += 3 + Mt_std = float(s[i]) + i += 3 + + Pexch = float(s[i]) + i += 3 + Pexch_std = float(s[i]) + i += 3 + + Q_gb = float(s[i]) + i += 3 + G_gb = float(s[i]) * 1e-1 + i += 3 # From 1E19 to 1E20 + + if full_file: + Mt_gb = float(s[i]) + i += 3 + Pexch_gb = float(s[i]) + i += 3 + + tstart = float(s[i]) + i += 3 + tend = float(s[i]) + i += 3 + + if unnormalize: + QiReal = Qi * Q_gb + QiReal_std = Qi_std * Q_gb + QeReal = Qe * Q_gb + QeReal_std = Qe_std * Q_gb + GeReal = Ge * G_gb + GeReal_std = Ge_std * G_gb + else: + QiReal = Qi + QiReal_std = Qi_std + QeReal = Qe + QeReal_std = Qe_std + GeReal = Ge + GeReal_std = Ge_std + + if full_file: + if unnormalize: + GZReal = GZ * G_gb + GZReal_std = GZ_std * G_gb + + MtReal = Mt * Mt_gb + MtReal_std = Mt_std * Mt_gb + + PexchReal = Pexch * Pexch_gb + PexchReal_std = Pexch_std * Pexch_gb + else: + GZReal = GZ + GZReal_std = GZ_std + + MtReal = Mt + MtReal_std = Mt_std + + PexchReal = Pexch + PexchReal_std = Pexch_std + + return roa,aLTe,aLTi,aLne,Q_gb,QeReal,QiReal,GeReal,GZReal,MtReal,PexchReal,QeReal_std,QiReal_std,GeReal_std,GZReal_std,MtReal_std,PexchReal_std,tstart,tend + else: + return roa,aLTe,aLTi,aLne,Q_gb,QeReal,QiReal,GeReal,0.0,0.0,0.0,QeReal_std,QiReal_std,GeReal_std,0.0,0.0,0.0,tstart,tend + + +def readCGYROresults(file, radii, unnormalize=True): + """ + Arrays are in (batch,radii) + MW/m^2 and 1E20 + """ + + with open(file, "r") as f: + lines = f.readlines() + + rad = len(radii) + num = len(lines) // rad + + roa = np.zeros((num, rad)) + aLTe = np.zeros((num, rad)) + aLTi = np.zeros((num, rad)) + aLne = np.zeros((num, rad)) + Q_gb = np.zeros((num, rad)) + + Qe = np.zeros((num, rad)) + Qe_std = np.zeros((num, rad)) + Qi = np.zeros((num, 
rad)) + Qi_std = np.zeros((num, rad)) + Ge = np.zeros((num, rad)) + Ge_std = np.zeros((num, rad)) + + GZ = np.zeros((num, rad)) + GZ_std = np.zeros((num, rad)) + + Mt = np.zeros((num, rad)) + Mt_std = np.zeros((num, rad)) + + Pexch = np.zeros((num, rad)) + Pexch_std = np.zeros((num, rad)) + + tstart = np.zeros((num, rad)) + tend = np.zeros((num, rad)) + + p = {} + for r in range(len(radii)): + p[r] = 0 + for i in range(len(lines)): + + # -------------------------------------------------------- + # Line not empty + # -------------------------------------------------------- + if len(lines[i].split()) < 10: + continue + + # -------------------------------------------------------- + # Read line + # -------------------------------------------------------- + ( + roa_read, + aLTe_read, + aLTi_read, + aLne_read, + Q_gb_read, + Qe_read, + Qi_read, + Ge_read, + GZ_read, + Mt_read, + Pexch_read, + Qe_std_read, + Qi_std_read, + Ge_std_read, + GZ_std_read, + Mt_std_read, + Pexch_std_read, + tstart_read, + tend_read, + ) = readlineNTH(lines[i], unnormalize=unnormalize) + + # -------------------------------------------------------- + # Radial location position + # -------------------------------------------------------- + threshold_radii = 1E-4 + r = np.where(np.abs(radii-roa_read) In preparation for the transition + # $$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$ + + # from mitim_tools.gacode_tools import TGLFtools + # tglf = TGLFtools.TGLF(rhos=RadiisToRun) + # _ = tglf.prep( + # self.folder / 'stds', + # inputgacode=self.file_profs, + # recalculatePTOT=False, # Use what's in the input.gacode, which is what PORTALS TGYRO does + # cold_start=cold_start) + + # tglf.run( + # subFolderTGLF="tglf_neo_original", + # TGLFsettings=MODELparameters["transport_model"]["TGLFsettings"], + # cold_start=cold_start, + # forceIfcold_start=True, + # extraOptions=MODELparameters["transport_model"]["extraOptionsTGLF"], + # launchSlurm=launchMODELviaSlurm, + # slurm_setup={"cores": 4, "minutes": 1}, + # ) + + # tglf.read(label="tglf_neo_original") + + # $$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$ + # $$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$ + + return tgyro + + def _postprocess_results(self, tgyro, label): + + ModelOptions = self.powerstate.TransportOptions["ModelOptions"] + + includeFast = ModelOptions.get("includeFastInQi",False) + useConvectiveFluxes = ModelOptions.get("useConvectiveFluxes", True) + UseFineGridTargets = ModelOptions.get("UseFineGridTargets", False) + provideTurbulentExchange = ModelOptions.get("TurbulentExchange", False) + OriginalFimp = ModelOptions.get("OriginalFimp", 1.0) + forceZeroParticleFlux = ModelOptions.get("forceZeroParticleFlux", False) + + # Grab impurity from powerstate ( because it may have been modified in produce_profiles() ) + impurityPosition = self.powerstate.impurityPosition_transport + + # Produce right quantities (TGYRO -> powerstate.plasma) + self.powerstate = tgyro_to_powerstate( + tgyro.results[label], + self.powerstate, + useConvectiveFluxes=useConvectiveFluxes, + includeFast=includeFast, + impurityPosition=impurityPosition, + UseFineGridTargets=UseFineGridTargets, + OriginalFimp=OriginalFimp, + forceZeroParticleFlux=forceZeroParticleFlux, + provideTurbulentExchange=provideTurbulentExchange, + provideTargets=self.powerstate.TargetOptions['ModelOptions']['TargetCalc'] == "tgyro", + ) + + tgyro.results["use"] = tgyro.results[label] + + # Copy 
profiles to share + self._profiles_to_store() + + # ------------------------------------------------------------------------------------------------------------------------ + # Results class that can be used for further plotting and analysis in PORTALS + # ------------------------------------------------------------------------------------------------------------------------ + + self.model_results = copy.deepcopy(tgyro.results["use"]) # Pass the TGYRO results class that should be use for plotting and analysis + + self.model_results.extra_analysis = {} + for ikey in tgyro.results: + if ikey != "use": + self.model_results.extra_analysis[ikey] = tgyro.results[ikey] + + def _profiles_to_store(self): + + if "extra_params" in self.powerstate.TransportOptions["ModelOptions"] and "folder" in self.powerstate.TransportOptions["ModelOptions"]["extra_params"]: + whereFolder = IOtools.expandPath(self.powerstate.TransportOptions["ModelOptions"]["extra_params"]["folder"] / "Outputs" / "portals_profiles") + if not whereFolder.exists(): + IOtools.askNewFolder(whereFolder) + + fil = whereFolder / f"input.gacode.{self.evaluation_number}" + shutil.copy2(self.file_profs, fil) + shutil.copy2(self.file_profs_unmod, fil.parent / f"{fil.name}_unmodified") + shutil.copy2(self.file_profs_targets, fil.parent / f"{fil.name}.new") + print(f"\t- Copied profiles to {IOtools.clipstr(fil)}") + else: + print("\t- Could not move files", typeMsg="w") + +def tglf_scan_trick( + tglf, + RadiisToRun, + ProfilesPredicted, + impurityPosition=1, + includeFast=False, + delta=0.02, + minimum_abs_gradient=0.005, # This is 0.5% of aLx=1.0, to avoid extremely small scans when, for example, having aLn ~ 0.0 + cold_start=False, + extra_name="", + remove_folders_out = False, + cores_per_tglf_instance = 4 # e.g. 4 core per radius, since this is going to launch ~ Nr=5 x (Nv=6 x Nd=2 + 1) = 65 TGLFs at once + ): + + print(f"\t- Running TGLF standalone scans ({delta = }) to determine relative errors") + + # Prepare scan + variables_to_scan = [] + for i in ProfilesPredicted: + if i == 'te': variables_to_scan.append('RLTS_1') + if i == 'ti': variables_to_scan.append('RLTS_2') + if i == 'ne': variables_to_scan.append('RLNS_1') + if i == 'nZ': variables_to_scan.append(f'RLNS_{impurityPosition+2}') + if i == 'w0': variables_to_scan.append('VEXB_SHEAR') #TODO: is this correct? or VPAR_SHEAR? 
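# Illustrative sketch (not part of the patch): the profile -> TGLF scan-variable mapping
# built in the loop above, collected into a standalone helper for clarity. The helper name
# `_scan_variables_for` is hypothetical; the `impurity_position + 2` offset mirrors the
# convention used above for where the impurity species sits in the TGLF input, and the
# 'w0' entry carries the same VEXB_SHEAR-vs-VPAR_SHEAR caveat as the TODO above.
def _scan_variables_for(profiles_predicted, impurity_position=1):
    mapping = {
        'te': 'RLTS_1',                         # electron temperature gradient
        'ti': 'RLTS_2',                         # main-ion temperature gradient
        'ne': 'RLNS_1',                         # electron density gradient
        'nZ': f'RLNS_{impurity_position + 2}',  # impurity density gradient
        'w0': 'VEXB_SHEAR',                     # rotation / ExB shear (see TODO above)
    }
    return [mapping[p] for p in profiles_predicted if p in mapping]

# Example: _scan_variables_for(['te', 'ti', 'ne']) -> ['RLTS_1', 'RLTS_2', 'RLNS_1']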
+ + #TODO: Only if that parameter is changing at that location + if 'te' in ProfilesPredicted or 'ti' in ProfilesPredicted: + variables_to_scan.append('TAUS_2') + if 'te' in ProfilesPredicted or 'ne' in ProfilesPredicted: + variables_to_scan.append('XNUE') + if 'te' in ProfilesPredicted or 'ne' in ProfilesPredicted: + variables_to_scan.append('BETAE') + + relative_scan = [1-delta, 1+delta] + + # Enforce at least "minimum_abs_gradient" in gradient, to avoid zero gradient situations + minimum_delta_abs = {} + for ikey in variables_to_scan: + if 'RL' in ikey: + minimum_delta_abs[ikey] = minimum_abs_gradient + + name = 'turb_drives' + + tglf.rhos = RadiisToRun # To avoid the case in which TGYRO was run with an extra rho point + + # Estimate job minutes based on cases and cores (mostly IO I think at this moment, otherwise it should be independent on cases) + num_cases = len(RadiisToRun) * len(variables_to_scan) * len(relative_scan) + if cores_per_tglf_instance == 1: + minutes = 10 * (num_cases / 60) # Ad-hoc formula + else: + minutes = 1 * (num_cases / 60) # Ad-hoc formula + + # Enforce minimum minutes + minutes = max(2, minutes) + + tglf.runScanTurbulenceDrives( + subFolderTGLF = name, + variablesDrives = variables_to_scan, + varUpDown = relative_scan, + minimum_delta_abs = minimum_delta_abs, + TGLFsettings = None, + ApplyCorrections = False, + add_baseline_to = 'first', + cold_start=cold_start, + forceIfcold_start=True, + slurm_setup={ + "cores": cores_per_tglf_instance, + "minutes": minutes, + }, + extra_name = f'{extra_name}_{name}', + positionIon=impurityPosition+2, + attempts_execution=2, + only_minimal_files=True, # Since I only care about fluxes here, do not retrieve all the files + ) + + # Remove folders because they are heavy to carry many throughout + if remove_folders_out: + IOtools.shutil_rmtree(tglf.FolderGACODE) + + Qe = np.zeros((len(RadiisToRun), len(variables_to_scan)*len(relative_scan)+1 )) + Qi = np.zeros((len(RadiisToRun), len(variables_to_scan)*len(relative_scan)+1 )) + Ge = np.zeros((len(RadiisToRun), len(variables_to_scan)*len(relative_scan)+1 )) + GZ = np.zeros((len(RadiisToRun), len(variables_to_scan)*len(relative_scan)+1 )) + Mt = np.zeros((len(RadiisToRun), len(variables_to_scan)*len(relative_scan)+1 )) + S = np.zeros((len(RadiisToRun), len(variables_to_scan)*len(relative_scan)+1 )) + + cont = 0 + for vari in variables_to_scan: + jump = tglf.scans[f'{name}_{vari}']['Qe'].shape[-1] + + Qe[:,cont:cont+jump] = tglf.scans[f'{name}_{vari}']['Qe'] + Qi[:,cont:cont+jump] = tglf.scans[f'{name}_{vari}']['Qi'] + Ge[:,cont:cont+jump] = tglf.scans[f'{name}_{vari}']['Ge'] + GZ[:,cont:cont+jump] = tglf.scans[f'{name}_{vari}']['Gi'] + Mt[:,cont:cont+jump] = tglf.scans[f'{name}_{vari}']['Mt'] + S[:,cont:cont+jump] = tglf.scans[f'{name}_{vari}']['S'] + cont += jump + + # Calculate the standard deviation of the scans, that's going to be the reported stds + + def calculate_mean_std(Q): + # Assumes Q is [radii, points], with [radii, 0] being the baseline + + Qm = np.mean(Q, axis=1) + Qstd = np.std(Q, axis=1) + + # Qm = Q[:,0] + # Qstd = np.std(Q, axis=1) + + # Qstd = ( Q.max(axis=1)-Q.min(axis=1) )/2 /2 # Such that the range is 2*std + # Qm = Q.min(axis=1) + Qstd*2 # Mean is at the middle of the range + + return Qm, Qstd + + Qe_point, Qe_std = calculate_mean_std(Qe) + Qi_point, Qi_std = calculate_mean_std(Qi) + Ge_point, Ge_std = calculate_mean_std(Ge) + GZ_point, GZ_std = calculate_mean_std(GZ) + Mt_point, Mt_std = calculate_mean_std(Mt) + S_point, S_std = calculate_mean_std(S) + + 
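# Illustrative sketch (not part of the patch): how the estimator implemented in
# `calculate_mean_std` above behaves on a toy flux matrix of shape (radii, scan points),
# with column 0 being the baseline run. The numbers are invented purely for demonstration.
import numpy as np

Q_demo = np.array([[1.00, 0.95, 1.06, 0.98, 1.03],   # flux at radius 1 across the scan
                   [0.50, 0.47, 0.55, 0.49, 0.52]])  # flux at radius 2 across the scan
Qm_demo, Qstd_demo = np.mean(Q_demo, axis=1), np.std(Q_demo, axis=1)
# Qm_demo   -> approximately [1.004, 0.506]  (reported flux per radius)
# Qstd_demo -> approximately [0.038, 0.027]  (reported standard deviation per radius)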
#TODO: Careful with fast particles + + Flux_base = [Qe[:,0], Qi[:,0], Ge[:,0], GZ[:,0], Mt[:,0], S[:,0]] + Flux_mean = [Qe_point, Qi_point, Ge_point, GZ_point, Mt_point, S_point] + Flux_std = [Qe_std, Qi_std, Ge_std, GZ_std, Mt_std, S_std] + + return Flux_base, Flux_mean, Flux_std +# ************************************************************************************************** +# Functions +# ************************************************************************************************** + +def curateTGYROfiles( + tgyroObject, + label, + RadiisToRun, + ProfilesPredicted, + folder, + percentError, + provideTurbulentExchange=False, + impurityPosition=1, + includeFast=False, + use_tglf_scan_trick=None, + cold_start=False, + extra_name="", + cores_per_tglf_instance = 4, + check_coincidence_thr=1E-2, + ): + + tgyro = tgyroObject.results[label] + + # Determine NEO and Target errors + relativeErrorNEO = percentError[1] / 100.0 + relativeErrorTAR = percentError[2] / 100.0 + + # Grab fluxes from TGYRO + Qe = tgyro.Qe_sim_turb[0, 1:] + Qi = tgyro.QiIons_sim_turb[0, 1:] if includeFast else tgyro.QiIons_sim_turb_thr[0, 1:] + Ge = tgyro.Ge_sim_turb[0, 1:] + GZ = tgyro.Gi_sim_turb[impurityPosition, 0, 1:] + Mt = tgyro.Mt_sim_turb[0, 1:] + Pexch = tgyro.EXe_sim_turb[0, 1:] + + # Determine TGLF standard deviations + if use_tglf_scan_trick is not None: + + # Grab TGLF object + tglfObject = tgyroObject.grab_tglf_objects(fromlabel=label, subfolder = 'tglf_explorations') + + # Run TGLF scan trick + Flux_base, Flux_mean, Flux_std = tglf_scan_trick( + tglfObject, + RadiisToRun, + ProfilesPredicted, + impurityPosition=impurityPosition, + includeFast=includeFast, + delta = use_tglf_scan_trick, + cold_start=cold_start, + extra_name=extra_name, + cores_per_tglf_instance=cores_per_tglf_instance + ) + + Qe, Qi, Ge, GZ, Mt, Pexch = Flux_mean + QeE, QiE, GeE, GZE, MtE, PexchE = Flux_std + + # ---------------------------------------------------- + # Do a check that TGLF scans are consistent with TGYRO + + Qe_base, Qi_base, Ge_base, GZ_base, Mt_base, S_base = Flux_base + + # Grab fluxes from TGYRO + Qe_tgyro = tgyro.Qe_sim_turb[0, 1:] + Qi_tgyro = tgyro.QiIons_sim_turb[0, 1:] if includeFast else tgyro.QiIons_sim_turb_thr[0, 1:] + Ge_tgyro = tgyro.Ge_sim_turb[0, 1:] + GZ_tgyro = tgyro.Gi_sim_turb[impurityPosition, 0, 1:] + Mt_tgyro = tgyro.Mt_sim_turb[0, 1:] + Pexch_tgyro = tgyro.EXe_sim_turb[0, 1:] + + Qe_err = np.abs( (Qe_base - Qe_tgyro) / Qe_tgyro ) if 'te' in ProfilesPredicted else np.zeros_like(Qe_base) + Qi_err = np.abs( (Qi_base - Qi_tgyro) / Qi_tgyro ) if 'ti' in ProfilesPredicted else np.zeros_like(Qi_base) + Ge_err = np.abs( (Ge_base - Ge_tgyro) / Ge_tgyro ) if 'ne' in ProfilesPredicted else np.zeros_like(Ge_base) + GZ_err = np.abs( (GZ_base - GZ_tgyro) / GZ_tgyro ) if 'nZ' in ProfilesPredicted else np.zeros_like(GZ_base) + Mt_err = np.abs( (Mt_base - Mt_tgyro) / Mt_tgyro ) if 'w0' in ProfilesPredicted else np.zeros_like(Mt_base) + Pexch_err = np.abs( (Pexch - Pexch_tgyro) / Pexch_tgyro ) if provideTurbulentExchange else np.zeros_like(Pexch) + + F_err = np.concatenate((Qe_err, Qi_err, Ge_err, GZ_err, Mt_err, Pexch_err)) + if F_err.max() > check_coincidence_thr: + print(f"\t- TGLF scans are not consistent with TGYRO, maximum error = {F_err.max()*100:.2f}%, in quantity:",typeMsg="w") + if ('te' in ProfilesPredicted) and Qe_err.max() > check_coincidence_thr: + print('\t\t* Qe:',Qe_err) + if ('ti' in ProfilesPredicted) and Qi_err.max() > check_coincidence_thr: + print('\t\t* Qi:',Qi_err) + if ('ne' in 
ProfilesPredicted) and Ge_err.max() > check_coincidence_thr: + print('\t\t* Ge:',Ge_err) + if ('nZ' in ProfilesPredicted) and GZ_err.max() > check_coincidence_thr: + print('\t\t* GZ:',GZ_err) + if ('w0' in ProfilesPredicted) and Mt_err.max() > check_coincidence_thr: + print('\t\t* Mt:',Mt_err) + if provideTurbulentExchange and Pexch_err.max() > check_coincidence_thr: + print('\t\t* Pexch:',Pexch_err) + else: + print(f"\t- TGLF scans are consistent with TGYRO, maximum error = {F_err.max()*100:.2f}%") + # ---------------------------------------------------- + + min_relative_error = 0.01 # To avoid problems with gpytorch, 1% error minimum + + QeE = QeE.clip(abs(Qe)*min_relative_error) + QiE = QiE.clip(abs(Qi)*min_relative_error) + GeE = GeE.clip(abs(Ge)*min_relative_error) + GZE = GZE.clip(abs(GZ)*min_relative_error) + MtE = MtE.clip(abs(Mt)*min_relative_error) + PexchE = PexchE.clip(abs(Pexch)*min_relative_error) + + else: + + # -------------------------------------------------------------- + # If simply a percentage error provided + # -------------------------------------------------------------- + + relativeErrorTGLF = [percentError[0] / 100.0]*len(RadiisToRun) + + QeE = abs(Qe) * relativeErrorTGLF + QiE = abs(Qi) * relativeErrorTGLF + GeE = abs(Ge) * relativeErrorTGLF + GZE = abs(GZ) * relativeErrorTGLF + MtE = abs(Mt) * relativeErrorTGLF + PexchE = abs(Pexch) * relativeErrorTGLF + + # ************************************************************************************************************************** + # Neo + # ************************************************************************************************************************** + + Qe_tr_neo = tgyro.Qe_sim_neo[0, 1:] + if includeFast: + Qi_tr_neo = tgyro.QiIons_sim_neo[0, 1:] + else: + Qi_tr_neo = tgyro.QiIons_sim_neo_thr[0, 1:] + Ge_tr_neo = tgyro.Ge_sim_neo[0, 1:] + GZ_tr_neo = tgyro.Gi_sim_neo[impurityPosition, 0, 1:] + Mt_tr_neo = tgyro.Mt_sim_neo[0, 1:] + + Qe_tr_neoE = abs(tgyro.Qe_sim_neo[0, 1:]) * relativeErrorNEO + if includeFast: + Qi_tr_neoE = abs(tgyro.QiIons_sim_neo[0, 1:]) * relativeErrorNEO + else: + Qi_tr_neoE = abs(tgyro.QiIons_sim_neo_thr[0, 1:]) * relativeErrorNEO + Ge_tr_neoE = abs(tgyro.Ge_sim_neo[0, 1:]) * relativeErrorNEO + GZ_tr_neoE = abs(tgyro.Gi_sim_neo[impurityPosition, 0, 1:]) * relativeErrorNEO + Mt_tr_neoE = abs(tgyro.Mt_sim_neo[0, 1:]) * relativeErrorNEO + + # Merge + + modifyFLUX( + tgyro, + folder, + Qe, + Qi, + Ge, + GZ, + Mt, + Pexch, + Qe_tr_neo=Qe_tr_neo, + Qi_tr_neo=Qi_tr_neo, + Ge_tr_neo=Ge_tr_neo, + GZ_tr_neo=GZ_tr_neo, + Mt_tr_neo=Mt_tr_neo, + impurityPosition=impurityPosition, + ) + + modifyFLUX( + tgyro, + folder, + QeE, + QiE, + GeE, + GZE, + MtE, + PexchE, + Qe_tr_neo=Qe_tr_neoE, + Qi_tr_neo=Qi_tr_neoE, + Ge_tr_neo=Ge_tr_neoE, + GZ_tr_neo=GZ_tr_neoE, + Mt_tr_neo=Mt_tr_neoE, + impurityPosition=impurityPosition, + special_label="_stds", + ) + + # ************************************************************************************************************************** + # Targets + # ************************************************************************************************************************** + + QeTargetE = abs(tgyro.Qe_tar[0, 1:]) * relativeErrorTAR + QiTargetE = abs(tgyro.Qi_tar[0, 1:]) * relativeErrorTAR + GeTargetE = abs(tgyro.Ge_tar[0, 1:]) * relativeErrorTAR + GZTargetE = GeTargetE * 0.0 + MtTargetE = abs(tgyro.Mt_tar[0, 1:]) * relativeErrorTAR + + modifyEVO( + tgyro, + folder, + QeTargetE * 0.0, + QiTargetE * 0.0, + GeTargetE * 0.0, + GZTargetE * 0.0, + MtTargetE * 
0.0, + impurityPosition=impurityPosition, + positionMod=1, + special_label="_stds", + ) + modifyEVO( + tgyro, + folder, + QeTargetE, + QiTargetE, + GeTargetE, + GZTargetE, + MtTargetE, + impurityPosition=impurityPosition, + positionMod=2, + special_label="_stds", + ) + +def dummyCDF(GeneralFolder, FolderEvaluation): + """ + This routine creates path to a dummy CDF file in FolderEvaluation, with the name "simulation_evaluation.CDF" + + GeneralFolder, e.g. ~/runs_portals/run10/ + FolderEvaluation, e.g. ~/runs_portals/run10000/Execution/Evaluation.0/model_complete/ + """ + + # ------- Name construction for scratch folders in parallel ---------------- + + GeneralFolder = IOtools.expandPath(GeneralFolder, ensurePathValid=True) + + a, subname = IOtools.reducePathLevel(GeneralFolder, level=1, isItFile=False) + + FolderEvaluation = IOtools.expandPath(FolderEvaluation) + + name = FolderEvaluation.name.split(".")[-1] # 0 (evaluation #) + + if name == "": + name = "0" + + cdf = FolderEvaluation / f"{subname}_ev{name}.CDF" + + return cdf + +def modifyResults( + Qe, + Qi, + Ge, + GZ, + Mt, + Pexch, + QeE, + QiE, + GeE, + GZE, + MtE, + PexchE, + tgyro, + folder_tgyro, + minErrorPercent=5.0, + percent_tr_neo=2.0, + useConvectiveFluxes=False, + Qi_criterion_stable=0.0025, + impurityPosition=3, + OriginalFimp=1.0, +): + """ + All in real units, with dimensions of (rho) from axis to edge + """ + + # If a plasma is very close to stable... do something about error + if minErrorPercent is not None: + ( + Qe_target, + Qi_target, + Ge_target_special, + GZ_target_special, + Mt_target, + ) = defineReferenceFluxes( + tgyro, + useConvectiveFluxes=useConvectiveFluxes, + impurityPosition=impurityPosition, + ) + + Qe_min = Qe_target * (minErrorPercent / 100.0) + Qi_min = Qi_target * (minErrorPercent / 100.0) + Ge_min = Ge_target_special * (minErrorPercent / 100.0) + GZ_min = GZ_target_special * (minErrorPercent / 100.0) + Mt_min = Mt_target * (minErrorPercent / 100.0) + + for i in range(Qe.shape[0]): + if Qi[i] < Qi_criterion_stable: + print( + f"\t- Based on 'Qi_criterion_stable', plasma considered stable (Qi = {Qi[i]:.2e} < {Qi_criterion_stable:.2e} MW/m2) at position #{i}, using minimum errors of {minErrorPercent}% of targets", + typeMsg="w", + ) + QeE[i] = Qe_min[i] + print(f"\t\t* QeE = {QeE[i]}") + QiE[i] = Qi_min[i] + print(f"\t\t* QiE = {QiE[i]}") + GeE[i] = Ge_min[i] + print(f"\t\t* GeE = {GeE[i]}") + GZE[i] = GZ_min[i] + print(f"\t\t* GZE = {GZE[i]}") + MtE[i] = Mt_min[i] + print(f"\t\t* MtE = {MtE[i]}") + + # Heat fluxes + QeTot = Qe + tgyro.Qe_sim_neo[0, 1:] + QiTot = Qi + tgyro.QiIons_sim_neo_thr[0, 1:] + + # Particle fluxes + PeTot = Ge + tgyro.Ge_sim_neo[0, 1:] + PZTot = GZ + tgyro.Gi_sim_neo[impurityPosition, 0, 1:] + + # Momentum fluxes + MtTot = Mt + tgyro.Mt_sim_neo[0, 1:] + + # ************************************************************************************ + # **** Modify complete folder (Division of ion fluxes will be wrong, since I put everything in first ion) + # ************************************************************************************ + + # 1. Modify out.tgyro.evo files (which contain turb+neo summed together) + + print(f"\t- Modifying TGYRO out.tgyro.evo files in {IOtools.clipstr(folder_tgyro)}") + modifyEVO( + tgyro, + folder_tgyro, + QeTot, + QiTot, + PeTot, + PZTot, + MtTot, + impurityPosition=impurityPosition, + ) + + # 2. 
Modify out.tgyro.flux files (which contain turb and neo separated) + + print(f"\t- Modifying TGYRO out.tgyro.flux files in {folder_tgyro}") + modifyFLUX( + tgyro, + folder_tgyro, + Qe, + Qi, + Ge, + GZ, + Mt, + Pexch, + impurityPosition=impurityPosition, + ) + + # 3. Modify files for errors + + print(f"\t- Modifying TGYRO out.tgyro.flux_stds in {folder_tgyro}") + modifyFLUX( + tgyro, + folder_tgyro, + QeE, + QiE, + GeE, + GZE, + MtE, + PexchE, + impurityPosition=impurityPosition, + special_label="_stds", + ) + + +def modifyEVO( + tgyro, + folder, + QeT, + QiT, + GeT, + GZT, + MtT, + impurityPosition=3, + positionMod=1, + special_label=None, +): + QeTGB = QeT / tgyro.Q_GB[-1, 1:] + QiTGB = QiT / tgyro.Q_GB[-1, 1:] + GeTGB = GeT / tgyro.Gamma_GB[-1, 1:] + GZTGB = GZT / tgyro.Gamma_GB[-1, 1:] + MtTGB = MtT / tgyro.Pi_GB[-1, 1:] + + modTGYROfile(folder / "out.tgyro.evo_te", QeTGB, pos=positionMod, fileN_suffix=special_label) + modTGYROfile(folder / "out.tgyro.evo_ti", QiTGB, pos=positionMod, fileN_suffix=special_label) + modTGYROfile(folder / "out.tgyro.evo_ne", GeTGB, pos=positionMod, fileN_suffix=special_label) + modTGYROfile(folder / "out.tgyro.evo_er", MtTGB, pos=positionMod, fileN_suffix=special_label) + + for i in range(tgyro.Qi_sim_turb.shape[0]): + if i == impurityPosition: + var = GZTGB + else: + var = GZTGB * 0.0 + modTGYROfile( + folder / f"out.tgyro.evo_n{i+1}", + var, + pos=positionMod, + fileN_suffix=special_label, + ) + + +def modifyFLUX( + tgyro, + folder, + Qe, + Qi, + Ge, + GZ, + Mt, + S, + Qe_tr_neo=None, + Qi_tr_neo=None, + Ge_tr_neo=None, + GZ_tr_neo=None, + Mt_tr_neo=None, + impurityPosition=3, + special_label=None, +): + folder = IOtools.expandPath(folder) + + QeGB = Qe / tgyro.Q_GB[-1, 1:] + QiGB = Qi / tgyro.Q_GB[-1, 1:] + GeGB = Ge / tgyro.Gamma_GB[-1, 1:] + GZGB = GZ / tgyro.Gamma_GB[-1, 1:] + MtGB = Mt / tgyro.Pi_GB[-1, 1:] + SGB = S / tgyro.S_GB[-1, 1:] + + # ****************************************************************************************** + # Electrons + # ****************************************************************************************** + + # Particle flux: Update + + modTGYROfile(folder / "out.tgyro.flux_e", GeGB, pos=2, fileN_suffix=special_label) + if Ge_tr_neo is not None: + GeGB_neo = Ge_tr_neo / tgyro.Gamma_GB[-1, 1:] + modTGYROfile(folder / "out.tgyro.flux_e", GeGB_neo, pos=1, fileN_suffix=special_label) + + # Energy flux: Update + + modTGYROfile(folder / "out.tgyro.flux_e", QeGB, pos=4, fileN_suffix=special_label) + if Qe_tr_neo is not None: + QeGB_neo = Qe_tr_neo / tgyro.Q_GB[-1, 1:] + modTGYROfile(folder / "out.tgyro.flux_e", QeGB_neo, pos=3, fileN_suffix=special_label) + + # Rotation: Remove (it will be sum to the first ion) + + modTGYROfile(folder / "out.tgyro.flux_e", GeGB * 0.0, pos=6, fileN_suffix=special_label) + modTGYROfile(folder / "out.tgyro.flux_e", GeGB * 0.0, pos=5, fileN_suffix=special_label) + + # Energy exchange + + modTGYROfile(folder / "out.tgyro.flux_e", SGB, pos=7, fileN_suffix=special_label) + + # SMW = S # S is MW/m^3 + # modTGYROfile(f'{folder}/out.tgyro.power_e',SMW,pos=8,fileN_suffix=special_label) + # print('\t\t- Modified turbulent energy exchange in out.tgyro.power_e') + + # ****************************************************************************************** + # Ions + # ****************************************************************************************** + + # Energy flux: Update + + modTGYROfile(folder / "out.tgyro.flux_i1", QiGB, pos=4, fileN_suffix=special_label) + + if Qi_tr_neo is not None: + 
QiGB_neo = Qi_tr_neo / tgyro.Q_GB[-1, 1:] + modTGYROfile(folder / "out.tgyro.flux_i1", QiGB_neo, pos=3, fileN_suffix=special_label) + + # Particle flux: Make ion particle fluxes zero, because I don't want to mistake TGLF with CGYRO when looking at tgyro results + + for i in range(tgyro.Qi_sim_turb.shape[0]): + if tgyro.profiles.Species[i]["S"] == "therm": + var = QiGB * 0.0 + modTGYROfile(folder / f"out.tgyro.flux_i{i+1}",var,pos=2,fileN_suffix=special_label,) # Gi_turb + modTGYROfile(folder / f"out.tgyro.evo_n{i+1}", var, pos=1, fileN_suffix=special_label) # Gi (Gi_sim) + + if i != impurityPosition: + modTGYROfile(folder / f"out.tgyro.flux_i{i+1}",var,pos=1,fileN_suffix=special_label) # Gi_neo + + # Rotation: Update + + modTGYROfile(folder / "out.tgyro.flux_i1", MtGB, pos=6, fileN_suffix=special_label) + + if Mt_tr_neo is not None: + MtGB_neo = Mt_tr_neo / tgyro.Pi_GB[-1, 1:] + modTGYROfile(folder / "out.tgyro.flux_i1", MtGB_neo, pos=5, fileN_suffix=special_label) + + # Energy exchange: Remove (it will be the electrons one) + + modTGYROfile(folder / "out.tgyro.flux_i1", SGB * 0.0, pos=7, fileN_suffix=special_label) + + # ****************************************************************************************** + # Impurities + # ****************************************************************************************** + + # Remove everything from all the rest of non-first ions (except the particles for the impurity chosen) + + for i in range(tgyro.Qi_sim_turb.shape[0] - 1): + if tgyro.profiles.Species[i + 1]["S"] == "therm": + var = QiGB * 0.0 + for pos in [3, 4, 5, 6, 7]: + modTGYROfile(folder / f"out.tgyro.flux_i{i+2}",var,pos=pos,fileN_suffix=special_label) + for pos in [1, 2]: + if i + 2 != impurityPosition: + modTGYROfile(folder / f"out.tgyro.flux_i{i+2}",var,pos=pos,fileN_suffix=special_label) + + modTGYROfile(folder / f"out.tgyro.flux_i{impurityPosition+1}",GZGB,pos=2,fileN_suffix=special_label) + if GZ_tr_neo is not None: + GZGB_neo = GZ_tr_neo / tgyro.Gamma_GB[-1, 1:] + modTGYROfile(folder / f"out.tgyro.flux_i{impurityPosition+1}",GZGB_neo,pos=1,fileN_suffix=special_label) + + +def modTGYROfile(file, var, pos=0, fileN_suffix=None): + fileN = file if fileN_suffix is None else file.parent / f"{file.name}{fileN_suffix}" + + if not fileN.exists(): + shutil.copy2(file, fileN) + + with open(fileN, "r") as f: + lines = f.readlines() + + with open(fileN, "w") as f: + f.write(lines[0]) + f.write(lines[1]) + f.write(lines[2]) + for i in range(var.shape[0]): + new_s = [float(k) for k in lines[3 + i].split()] + new_s[pos] = var[i] + + line_new = " " + for k in range(len(new_s)): + line_new += f'{"" if k==0 else " "}{new_s[k]:.6e}' + f.write(line_new + "\n") + +def defineReferenceFluxes( + tgyro, factor_tauptauE=5, useConvectiveFluxes=False, impurityPosition=3 +): + Qe_target = abs(tgyro.Qe_tar[0, 1:]) + Qi_target = abs(tgyro.Qi_tar[0, 1:]) + Mt_target = abs(tgyro.Mt_tar[0, 1:]) + + # For particle fluxes, since the targets are often zero... 
it's more complicated + QeMW_target = abs(tgyro.Qe_tarMW[0, 1:]) + QiMW_target = abs(tgyro.Qi_tarMW[0, 1:]) + We, Wi, Ne, NZ = tgyro.profiles.deriveContentByVolumes( + rhos=tgyro.rho[0, 1:], impurityPosition=impurityPosition + ) + + tau_special = ( + (We + Wi) / (QeMW_target + QiMW_target) * factor_tauptauE + ) # tau_p in seconds + Ge_target_special = (Ne / tau_special) / tgyro.dvoldr[0, 1:] # (1E20/seconds/m^2) + + if useConvectiveFluxes: + Ge_target_special = PLASMAtools.convective_flux( + tgyro.Te[0, 1:], Ge_target_special + ) # (1E20/seconds/m^2) + + GZ_target_special = Ge_target_special * NZ / Ne + + return Qe_target, Qi_target, Ge_target_special, GZ_target_special, Mt_target + + + +# ------------------------------------------------------------------------------------------------------------------------------------------------------ +# This is where the definitions for the summation variables happen for mitim and PORTALSplot +# ------------------------------------------------------------------------------------------------------------------------------------------------------ + +def tgyro_to_powerstate(TGYROresults, + powerstate, + useConvectiveFluxes=False, + forceZeroParticleFlux=False, + includeFast=False, + impurityPosition=1, + UseFineGridTargets=False, + OriginalFimp=1.0, + provideTurbulentExchange=False, + provideTargets=False + ): + """ + This function is used to extract the TGYRO results and store them in the powerstate object, from numpy arrays to torch tensors. + """ + + if "tgyro_stds" not in TGYROresults.__dict__: + TGYROresults.tgyro_stds = False + + if UseFineGridTargets: + TGYROresults.useFineGridTargets(impurityPosition=impurityPosition) + + nr = powerstate.plasma['rho'].shape[-1] + if powerstate.plasma['rho'].shape[-1] != TGYROresults.rho.shape[-1]: + print('\t- TGYRO was run with an extra point in the grid, treating it carefully now') + + # ********************************** + # *********** Electron Energy Fluxes + # ********************************** + + powerstate.plasma["QeMWm2_tr_turb"] = torch.Tensor(TGYROresults.Qe_sim_turb[:, :nr]).to(powerstate.dfT) + powerstate.plasma["QeMWm2_tr_neo"] = torch.Tensor(TGYROresults.Qe_sim_neo[:, :nr]).to(powerstate.dfT) + + powerstate.plasma["QeMWm2_tr_turb_stds"] = torch.Tensor(TGYROresults.Qe_sim_turb_stds[:, :nr]).to(powerstate.dfT) if TGYROresults.tgyro_stds else None + powerstate.plasma["QeMWm2_tr_neo_stds"] = torch.Tensor(TGYROresults.Qe_sim_neo_stds[:, :nr]).to(powerstate.dfT) if TGYROresults.tgyro_stds else None + + if provideTargets: + powerstate.plasma["QeMWm2"] = torch.Tensor(TGYROresults.Qe_tar[:, :nr]).to(powerstate.dfT) + powerstate.plasma["QeMWm2_stds"] = torch.Tensor(TGYROresults.Qe_tar_stds[:, :nr]).to(powerstate.dfT) if TGYROresults.tgyro_stds else None + + # ********************************** + # *********** Ion Energy Fluxes + # ********************************** + + if includeFast: + + powerstate.plasma["QiMWm2_tr_turb"] = torch.Tensor(TGYROresults.QiIons_sim_turb[:, :nr]).to(powerstate.dfT) + powerstate.plasma["QiMWm2_tr_neo"] = torch.Tensor(TGYROresults.QiIons_sim_neo[:, :nr]).to(powerstate.dfT) + + powerstate.plasma["QiMWm2_tr_turb_stds"] = torch.Tensor(TGYROresults.QiIons_sim_turb_stds[:, :nr]).to(powerstate.dfT) if TGYROresults.tgyro_stds else None + powerstate.plasma["QiMWm2_tr_neo_stds"] = torch.Tensor(TGYROresults.QiIons_sim_neo_stds[:, :nr]).to(powerstate.dfT) if TGYROresults.tgyro_stds else None + + else: + + powerstate.plasma["QiMWm2_tr_turb"] = torch.Tensor(TGYROresults.QiIons_sim_turb_thr[:, 
:nr]).to(powerstate.dfT) + powerstate.plasma["QiMWm2_tr_neo"] = torch.Tensor(TGYROresults.QiIons_sim_neo_thr[:, :nr]).to(powerstate.dfT) + + powerstate.plasma["QiMWm2_tr_turb_stds"] = torch.Tensor(TGYROresults.QiIons_sim_turb_thr_stds[:, :nr]).to(powerstate.dfT) if TGYROresults.tgyro_stds else None + powerstate.plasma["QiMWm2_tr_neo_stds"] = torch.Tensor(TGYROresults.QiIons_sim_neo_thr_stds[:, :nr]).to(powerstate.dfT) if TGYROresults.tgyro_stds else None + + if provideTargets: + powerstate.plasma["QiMWm2"] = torch.Tensor(TGYROresults.Qi_tar[:, :nr]).to(powerstate.dfT) + powerstate.plasma["QiMWm2_stds"] = torch.Tensor(TGYROresults.Qi_tar_stds[:, :nr]).to(powerstate.dfT) if TGYROresults.tgyro_stds else None + + # ********************************** + # *********** Momentum Fluxes + # ********************************** + + powerstate.plasma["MtJm2_tr_turb"] = torch.Tensor(TGYROresults.Mt_sim_turb[:, :nr]).to(powerstate.dfT) # So far, let's include fast in momentum + powerstate.plasma["MtJm2_tr_neo"] = torch.Tensor(TGYROresults.Mt_sim_neo[:, :nr]).to(powerstate.dfT) + + powerstate.plasma["MtJm2_tr_turb_stds"] = torch.Tensor(TGYROresults.Mt_sim_turb_stds[:, :nr]).to(powerstate.dfT) if TGYROresults.tgyro_stds else None + powerstate.plasma["MtJm2_tr_neo_stds"] = torch.Tensor(TGYROresults.Mt_sim_neo_stds[:, :nr]).to(powerstate.dfT) if TGYROresults.tgyro_stds else None + + if provideTargets: + powerstate.plasma["MtJm2"] = torch.Tensor(TGYROresults.Mt_tar[:, :nr]).to(powerstate.dfT) + powerstate.plasma["MtJm2_stds"] = torch.Tensor(TGYROresults.Mt_tar_stds[:, :nr]).to(powerstate.dfT) if TGYROresults.tgyro_stds else None + + # ********************************** + # *********** Particle Fluxes + # ********************************** + + # Store raw fluxes for better plotting later + powerstate.plasma["Ge1E20sm2_tr_turb"] = torch.Tensor(TGYROresults.Ge_sim_turb[:, :nr]).to(powerstate.dfT) + powerstate.plasma["Ge1E20sm2_tr_neo"] = torch.Tensor(TGYROresults.Ge_sim_neo[:, :nr]).to(powerstate.dfT) + + powerstate.plasma["Ge1E20sm2_tr_turb_stds"] = torch.Tensor(TGYROresults.Ge_sim_turb_stds[:, :nr]).to(powerstate.dfT) if TGYROresults.tgyro_stds else None + powerstate.plasma["Ge1E20sm2_tr_neo_stds"] = torch.Tensor(TGYROresults.Ge_sim_neo_stds[:, :nr]).to(powerstate.dfT) if TGYROresults.tgyro_stds else None + + if provideTargets: + powerstate.plasma["Ge1E20sm2"] = torch.Tensor(TGYROresults.Ge_tar[:, :nr]).to(powerstate.dfT) + powerstate.plasma["Ge1E20sm2_stds"] = torch.Tensor(TGYROresults.Ge_tar_stds[:, :nr]).to(powerstate.dfT) if TGYROresults.tgyro_stds else None + + if not useConvectiveFluxes: + + powerstate.plasma["Ce_tr_turb"] = powerstate.plasma["Ge1E20sm2_tr_turb"] + powerstate.plasma["Ce_tr_neo"] = powerstate.plasma["Ge1E20sm2_tr_neo"] + + powerstate.plasma["Ce_tr_turb_stds"] = powerstate.plasma["Ge1E20sm2_tr_turb_stds"] + powerstate.plasma["Ce_tr_neo_stds"] = powerstate.plasma["Ge1E20sm2_tr_neo_stds"] + + if provideTargets: + powerstate.plasma["Ce"] = powerstate.plasma["Ge1E20sm2"] + powerstate.plasma["Ce_stds"] = powerstate.plasma["Ge1E20sm2_stds"] + + else: + + powerstate.plasma["Ce_tr_turb"] = torch.Tensor(TGYROresults.Ce_sim_turb[:, :nr]).to(powerstate.dfT) + powerstate.plasma["Ce_tr_neo"] = torch.Tensor(TGYROresults.Ce_sim_neo[:, :nr]).to(powerstate.dfT) + + powerstate.plasma["Ce_tr_turb_stds"] = torch.Tensor(TGYROresults.Ce_sim_turb_stds[:, :nr]).to(powerstate.dfT) if TGYROresults.tgyro_stds else None + powerstate.plasma["Ce_tr_neo_stds"] = torch.Tensor(TGYROresults.Ce_sim_neo_stds[:, 
:nr]).to(powerstate.dfT) if TGYROresults.tgyro_stds else None + + if provideTargets: + powerstate.plasma["Ce"] = torch.Tensor(TGYROresults.Ce_tar[:, :nr]).to(powerstate.dfT) + powerstate.plasma["Ce_stds"] = torch.Tensor(TGYROresults.Ce_tar_stds[:, :nr]).to(powerstate.dfT) if TGYROresults.tgyro_stds else None + + # ********************************** + # *********** Impurity Fluxes + # ********************************** + + # Store raw fluxes for better plotting later + powerstate.plasma["CZ_raw_tr_turb"] = torch.Tensor(TGYROresults.Gi_sim_turb[impurityPosition, :, :nr]).to(powerstate.dfT) + powerstate.plasma["CZ_raw_tr_neo"] = torch.Tensor(TGYROresults.Gi_sim_neo[impurityPosition, :, :nr]).to(powerstate.dfT) + + powerstate.plasma["CZ_raw_tr_turb_stds"] = torch.Tensor(TGYROresults.Gi_sim_turb_stds[impurityPosition, :, :nr]).to(powerstate.dfT) if TGYROresults.tgyro_stds else None + powerstate.plasma["CZ_raw_tr_neo_stds"] = torch.Tensor(TGYROresults.Gi_sim_neo_stds[impurityPosition, :, :nr]).to(powerstate.dfT) if TGYROresults.tgyro_stds else None + + if provideTargets: + powerstate.plasma["CZ_raw"] = torch.Tensor(TGYROresults.Gi_tar[impurityPosition, :, :nr]).to(powerstate.dfT) + powerstate.plasma["CZ_raw_stds"] = torch.Tensor(TGYROresults.Gi_tar_stds[impurityPosition, :, :nr]).to(powerstate.dfT) if TGYROresults.tgyro_stds else None + + if not useConvectiveFluxes: + + powerstate.plasma["CZ_tr_turb"] = powerstate.plasma["CZ_raw_tr_turb"] / OriginalFimp + powerstate.plasma["CZ_tr_neo"] = powerstate.plasma["CZ_raw_tr_neo"] / OriginalFimp + + powerstate.plasma["CZ_tr_turb_stds"] = powerstate.plasma["CZ_raw_tr_turb_stds"] / OriginalFimp if TGYROresults.tgyro_stds else None + powerstate.plasma["CZ_tr_neo_stds"] = powerstate.plasma["CZ_raw_tr_neo_stds"] / OriginalFimp if TGYROresults.tgyro_stds else None + + if provideTargets: + powerstate.plasma["CZ"] = powerstate.plasma["CZ_raw"] / OriginalFimp + powerstate.plasma["CZ_stds"] = powerstate.plasma["CZ_raw_stds"] / OriginalFimp if TGYROresults.tgyro_stds else None + + else: + + powerstate.plasma["CZ_tr_turb"] = torch.Tensor(TGYROresults.Ci_sim_turb[impurityPosition, :, :nr]).to(powerstate.dfT) / OriginalFimp + powerstate.plasma["CZ_tr_neo"] = torch.Tensor(TGYROresults.Ci_sim_neo[impurityPosition, :, :nr]).to(powerstate.dfT) / OriginalFimp + + powerstate.plasma["CZ_tr_turb_stds"] = torch.Tensor(TGYROresults.Ci_sim_turb_stds[impurityPosition, :, :nr]).to(powerstate.dfT) / OriginalFimp if TGYROresults.tgyro_stds else None + powerstate.plasma["CZ_tr_neo_stds"] = torch.Tensor(TGYROresults.Ci_sim_neo_stds[impurityPosition, :, :nr]).to(powerstate.dfT) / OriginalFimp if TGYROresults.tgyro_stds else None + + if provideTargets: + powerstate.plasma["CZ"] = torch.Tensor(TGYROresults.Ci_tar[impurityPosition, :, :nr]).to(powerstate.dfT) / OriginalFimp + powerstate.plasma["CZ_stds"] = torch.Tensor(TGYROresults.Ci_tar_stds[impurityPosition, :, :nr]).to(powerstate.dfT) / OriginalFimp if TGYROresults.tgyro_stds else None + + # ********************************** + # *********** Energy Exchange + # ********************************** + + if provideTurbulentExchange: + powerstate.plasma["PexchTurb"] = torch.Tensor(TGYROresults.EXe_sim_turb[:, :nr]).to(powerstate.dfT) + powerstate.plasma["PexchTurb_stds"] = torch.Tensor(TGYROresults.EXe_sim_turb_stds[:, :nr]).to(powerstate.dfT) if TGYROresults.tgyro_stds else None + else: + powerstate.plasma["PexchTurb"] = powerstate.plasma["QeMWm2_tr_turb"] * 0.0 + powerstate.plasma["PexchTurb_stds"] = powerstate.plasma["QeMWm2_tr_turb"] * 
0.0 + + # ********************************** + # *********** Traget extra + # ********************************** + + if forceZeroParticleFlux and provideTargets: + powerstate.plasma["Ce"] = powerstate.plasma["Ce"] * 0.0 + powerstate.plasma["Ce_stds"] = powerstate.plasma["Ce_stds"] * 0.0 + + # ------------------------------------------------------------------------------------------------------------------------ + # Sum here turbulence and neoclassical, after modifications + # ------------------------------------------------------------------------------------------------------------------------ + + quantities = ['QeMWm2', 'QiMWm2', 'Ce', 'CZ', 'MtJm2', 'Ge1E20sm2', 'CZ_raw'] + for ikey in quantities: + powerstate.plasma[ikey+"_tr"] = powerstate.plasma[ikey+"_tr_turb"] + powerstate.plasma[ikey+"_tr_neo"] + + return powerstate + + diff --git a/src/mitim_modules/powertorch/scripts/calculateTargets.py b/src/mitim_modules/powertorch/scripts/calculateTargets.py index 9b478dc0..26dcf732 100644 --- a/src/mitim_modules/powertorch/scripts/calculateTargets.py +++ b/src/mitim_modules/powertorch/scripts/calculateTargets.py @@ -7,7 +7,7 @@ from mitim_tools.misc_tools import IOtools from mitim_tools.gacode_tools import PROFILEStools from mitim_modules.powertorch import STATEtools -from mitim_modules.powertorch.physics import TRANSPORTtools,TARGETStools +from mitim_modules.powertorch.physics_models import targets_analytic, transport_tgyro from IPython import embed def calculator( @@ -33,13 +33,13 @@ def calculator( 'fineTargetsResolution': fineTargetsResolution, }, TargetOptions={ - "targets_evaluator": TARGETStools.analytical_model, + "targets_evaluator": targets_analytic.analytical_model, "ModelOptions": { "TypeTarget": TypeTarget, "TargetCalc": "tgyro"}, }, TransportOptions={ - "transport_evaluator": TRANSPORTtools.tgyro_model, + "transport_evaluator": transport_tgyro.tgyro_model, "ModelOptions": { "cold_start": cold_start, "launchSlurm": True, @@ -59,7 +59,7 @@ def calculator( "ni_thermals": True, "recompute_ptot": False, }, - "transport_model": {"turbulence": 'TGLF',"TGLFsettings": 5, "extraOptionsTGLF": {}}, + "transport_model": {"TGLFsettings": 5, "extraOptionsTGLF": {}}, }, "includeFastInQi": False, }, @@ -75,7 +75,7 @@ def calculator( 'fineTargetsResolution': fineTargetsResolution, }, TargetOptions={ - "targets_evaluator": TARGETStools.analytical_model, + "targets_evaluator": targets_analytic.analytical_model, "ModelOptions": { "TypeTarget": TypeTarget, "TargetCalc": "powerstate"}, @@ -112,7 +112,7 @@ def calculator( p.profiles.deriveQuantities() - p.to_gacode( + p.from_powerstate( write_input_gacode=folder / "input.gacode.new.powerstate", position_in_powerstate_batch=0, postprocess_input_gacode={ @@ -126,7 +126,7 @@ def calculator( rederive_profiles=False, ) - p.plasma["Pin"] = ( + p.plasma["QiMWm2n"] = ( (p.plasma["Paux_e"] + p.plasma["Paux_i"]) * p.plasma["volp"] )[..., -1] p.plasma["Q"] = p.plasma["Pfus"] / p.plasma["Pin"] diff --git a/src/mitim_modules/powertorch/scripts/compareRadialResolution.py b/src/mitim_modules/powertorch/scripts/compareRadialResolution.py index db600e89..bbdeea21 100644 --- a/src/mitim_modules/powertorch/scripts/compareRadialResolution.py +++ b/src/mitim_modules/powertorch/scripts/compareRadialResolution.py @@ -151,7 +151,7 @@ ax.legend() ax = axs[0, 1] -varsS = ["Pe", "Pi"] +varsS = ["QeMWm2", "QiMWm2"] s, lab = sF, "Fine " for var in varsS: diff --git a/src/mitim_modules/powertorch/scripts/compareWithTGYRO.py 
b/src/mitim_modules/powertorch/scripts/compareWithTGYRO.py index 41155bab..fe103393 100644 --- a/src/mitim_modules/powertorch/scripts/compareWithTGYRO.py +++ b/src/mitim_modules/powertorch/scripts/compareWithTGYRO.py @@ -154,13 +154,13 @@ ax = axs[0, 2] ax.plot(t.rho[0], t.Qe_tar[0], "s-", lw=0.5, label="TGYRO Pe", markersize=markersize) -P = s.plasma["Pe"] +P = s.plasma["QeMWm2"] ax.plot(s.plasma["rho"][0], P[0], ls, lw=0.5, label="STATE Pe", markersize=markersize) MaxError = np.nanmax(np.abs(t.Qe_tarMW[0] - P[0].cpu().numpy()) / t.Qe_tarMW[0] * 100.0) print(f"{MaxError = :.3f} %") ax.plot(t.rho[0], t.Qi_tar[0], "s-", lw=0.5, label="TGYRO Pi", markersize=markersize) -P = s.plasma["Pi"] +P = s.plasma["QiMWm2"] ax.plot(s.plasma["rho"][0], P[0], ls, lw=0.5, label="STATE Pi", markersize=markersize) MaxError = np.nanmax(np.abs(t.Qi_tarMW[0] - P[0].cpu().numpy()) / t.Qi_tarMW[0] * 100.0) print(f"{MaxError = :.3f} %") @@ -178,13 +178,13 @@ ax = axs[1, 2] ax.plot(t.rho[0], t.Qe_tarMW[0], "s-", lw=0.5, label="TGYRO Pe", markersize=markersize) -P = s.plasma["Pe"] * s.plasma["volp"] +P = s.plasma["QeMWm2"] * s.plasma["volp"] ax.plot(s.plasma["rho"][0], P[0], ls, lw=0.5, label="STATE Pe", markersize=markersize) MaxError = np.nanmax(np.abs(t.Qe_tarMW[0] - P[0].cpu().numpy()) / t.Qe_tarMW[0] * 100.0) print(f"{MaxError = :.3f} %") ax.plot(t.rho[0], t.Qi_tarMW[0], "s-", lw=0.5, label="TGYRO Pi", markersize=markersize) -P = s.plasma["Pi"] * s.plasma["volp"] +P = s.plasma["QiMWm2"] * s.plasma["volp"] ax.plot(s.plasma["rho"][0], P[0], ls, lw=0.5, label="STATE Pi", markersize=markersize) MaxError = np.nanmax(np.abs(t.Qi_tarMW[0] - P[0].cpu().numpy()) / t.Qi_tarMW[0] * 100.0) print(f"{MaxError = :.3f} %") diff --git a/src/mitim_modules/powertorch/physics/CALCtools.py b/src/mitim_modules/powertorch/utils/CALCtools.py similarity index 100% rename from src/mitim_modules/powertorch/physics/CALCtools.py rename to src/mitim_modules/powertorch/utils/CALCtools.py diff --git a/src/mitim_modules/powertorch/utils/POWERplot.py b/src/mitim_modules/powertorch/utils/POWERplot.py index 51bbe449..436c3c6c 100644 --- a/src/mitim_modules/powertorch/utils/POWERplot.py +++ b/src/mitim_modules/powertorch/utils/POWERplot.py @@ -12,7 +12,7 @@ def plot(self, axs, axsRes, figs=None, c="r", label="powerstate",batch_num=0, co if figs is not None: # Insert profiles with the latest powerstate - profiles_new = self.to_gacode(insert_highres_powers=True) + profiles_new = self.from_powerstate(insert_highres_powers=True) # Plot the inserted profiles together with the original ones _ = PROFILEStools.plotAll([self.profiles, profiles_new], figs=figs) @@ -25,20 +25,20 @@ def plot(self, axs, axsRes, figs=None, c="r", label="powerstate",batch_num=0, co if "te" in self.ProfilesPredicted: set_plots.append( - [ 'te', 'aLte', 'Pe_tr', 'Pe', + [ 'te', 'aLte', 'QeMWm2_tr', 'QeMWm2', 'Electron Temperature','$T_e$ (keV)','$a/LT_e$','$Q_e$ (GB)','$Q_e$ ($MW/m^2$)', 1.0,"Qgb"]) if "ti" in self.ProfilesPredicted: set_plots.append( - [ 'ti', 'aLti', 'Pi_tr', 'Pi', + [ 'ti', 'aLti', 'QiMWm2_tr', 'QiMWm2', 'Ion Temperature','$T_i$ (keV)','$a/LT_i$','$Q_i$ (GB)','$Q_i$ ($MW/m^2$)', 1.0,"Qgb"]) if "ne" in self.ProfilesPredicted: # If this model provides the raw particle flux, go for it - if 'Ce_raw_tr' in self.plasma: + if 'Ge1E20sm2_tr' in self.plasma: set_plots.append( - [ 'ne', 'aLne', 'Ce_raw_tr', 'Ce_raw', + [ 'ne', 'aLne', 'Ge1E20sm2_tr', 'Ge1E20sm2', 'Electron Density','$n_e$ ($10^{20}m^{-3}$)','$a/Ln_e$','$\\Gamma_e$ (GB)','$\\Gamma_e$ ($10^{20}m^{-3}/s$)', 
1E-1,"Ggb"]) else: @@ -75,7 +75,7 @@ def plot(self, axs, axsRes, figs=None, c="r", label="powerstate",batch_num=0, co if "w0" in self.ProfilesPredicted: set_plots.append( - [ 'w0', 'aLw0', 'Mt_tr', 'Mt', + [ 'w0', 'aLw0', 'MtJm2_tr', 'MtJm2', 'Rotation','$\\omega_0$ ($krad/s$)','$-d\\omega_0/dr$ ($krad/s/cm$)','$\\Pi$ (GB)','$\\Pi$ ($J/m^2$)', 1E-3,"Pgb"]) @@ -123,7 +123,7 @@ def plot(self, axs, axsRes, figs=None, c="r", label="powerstate",batch_num=0, co position_in_batch = i * ( self.plasma['rho'].shape[-1] -1 ) + j - ax.plot(self.FluxMatch_Xopt[:,position_in_batch], "-o", color=colors[j], lw=1.0, label = f"r/a = {self.plasma['roa'][batch_num,j]:.2f}",markersize=0.5) + ax.plot(self.FluxMatch_Xopt[:,position_in_batch], "-o", color=colors[j], lw=1.0, label = f"r/a = {self.plasma['roa'][batch_num,j+1]:.2f}",markersize=0.5) if self.bounds_current is not None: for u in [0,1]: ax.axhline(y=self.bounds_current[u,position_in_batch], color=colors[j], linestyle='-.', lw=0.2) diff --git a/src/mitim_modules/powertorch/utils/TARGETStools.py b/src/mitim_modules/powertorch/utils/TARGETStools.py new file mode 100644 index 00000000..1ae63c80 --- /dev/null +++ b/src/mitim_modules/powertorch/utils/TARGETStools.py @@ -0,0 +1,153 @@ +import torch +from mitim_tools.misc_tools import PLASMAtools +from mitim_tools.misc_tools.LOGtools import printMsg as print +from IPython import embed + +class power_targets: + ''' + Default class for power target models, change "evaluate" method to implement a new model + ''' + + def evaluate(self): + print("No model implemented for power targets", typeMsg="w") + + def __init__(self,powerstate): + self.powerstate = powerstate + + # Make sub-targets equal to zero + variables_to_zero = ["qfuse", "qfusi", "qie", "qrad", "qrad_bremms", "qrad_line", "qrad_sync"] + for i in variables_to_zero: + self.powerstate.plasma[i] = self.powerstate.plasma["te"] * 0.0 + + def fine_grid(self): + + """ + Make all quantities needed on the fine resolution + ------------------------------------------------- + In the powerstate creation, the plasma variables are stored in two different resolutions, one for the coarse grid and one for the fine grid, + if the option is activated. + + Here, at calculation stage I use some precalculated quantities in the fine grid and then integrate the gradients into that resolution + + Note that the set ['te','ti','ne','nZ','w0','ni'] will automatically be substituted during the update_var() that comes next, so + it's ok that I lose the torch leaf here. However, I must do this copy here because if any of those variables are not updated in + update_var() then it would fail. But first store them for later use. 
+ """ + + self.plasma_original = {} + + # Bring to fine grid + variables_to_fine = ["B_unit", "B_ref", "volp", "rmin", "roa", "rho", "ni"] + for variable in variables_to_fine: + self.plasma_original[variable] = self.powerstate.plasma[variable].clone() + self.powerstate.plasma[variable] = self.powerstate.plasma_fine[variable] + + # Bring also the gradients and kinetic variables + for variable in self.powerstate.profile_map.keys(): + + # Kinetic variables (te,ti,ne,nZ,w0,ni) + self.plasma_original[variable] = self.powerstate.plasma[variable].clone() + self.powerstate.plasma[variable] = self.powerstate.plasma_fine[variable] + + # Bring also the gradients that are part of the torch trees, so that the derivative is not lost + self.plasma_original[f'aL{variable}'] = self.powerstate.plasma[f'aL{variable}'].clone() + + # ---------------------------------------------------- + # Integrate through fine profile constructors + # ---------------------------------------------------- + for i in self.powerstate.ProfilesPredicted: + _ = self.powerstate.update_var(i,specific_profile_constructor=self.powerstate.profile_constructors_coarse_middle) + + def flux_integrate(self): + """ + ************************************************************************************************** + Calculate integral of all targets, and then sum aux. + Reason why I do it this convoluted way is to make it faster in mitim, not to run integrateQuadPoly all the time. + Run once for all the batch and also for electrons and ions + (in MW/m^2) + ************************************************************************************************** + """ + + qe = self.powerstate.plasma["te"]*0.0 + qi = self.powerstate.plasma["te"]*0.0 + + if self.powerstate.TargetOptions['ModelOptions']['TypeTarget'] >= 2: + qe += -self.powerstate.plasma["qie"] + qi += self.powerstate.plasma["qie"] + + if self.powerstate.TargetOptions['ModelOptions']['TypeTarget'] == 3: + qe += self.powerstate.plasma["qfuse"] - self.powerstate.plasma["qrad"] + qi += self.powerstate.plasma["qfusi"] + + q = torch.cat((qe, qi)).to(qe) + self.P = self.powerstate.volume_integrate(q, force_dim=q.shape[0]) + + def coarse_grid(self): + + # ************************************************************************************************** + # Come back to original grid for targets + # ************************************************************************************************** + + # Interpolate results from fine to coarse (i.e. 
whole point is that it is better than integrate interpolated values) + if self.powerstate.TargetOptions['ModelOptions']['TypeTarget'] >= 2: + for i in ["qie"]: + self.powerstate.plasma[i] = self.powerstate.plasma[i][:, self.powerstate.positions_targets] + + if self.powerstate.TargetOptions['ModelOptions']['TypeTarget'] == 3: + for i in [ + "qfuse", + "qfusi", + "qrad", + "qrad_bremms", + "qrad_line", + "qrad_sync", + ]: + self.powerstate.plasma[i] = self.powerstate.plasma[i][:, self.powerstate.positions_targets] + + self.P = self.P[:, self.powerstate.positions_targets] + + # Recover variables calculated prior to the fine-targets method + for i in self.plasma_original: + self.powerstate.plasma[i] = self.plasma_original[i] + + def postprocessing(self, useConvectiveFluxes=False, forceZeroParticleFlux=False, relative_error_assumed=1.0): + + # ************************************************************************************************** + # Plug-in targets that were fixed + # ************************************************************************************************** + + self.powerstate.plasma["QeMWm2"] = self.powerstate.plasma["QeMWm2_fixedtargets"] + self.P[: self.P.shape[0]//2, :] # MW/m^2 + self.powerstate.plasma["QiMWm2"] = self.powerstate.plasma["QiMWm2_fixedtargets"] + self.P[self.P.shape[0]//2 :, :] # MW/m^2 + self.powerstate.plasma["Ge1E20sm2"] = self.powerstate.plasma["Ge_fixedtargets"] # 1E20/s/m^2 + self.powerstate.plasma["CZ_raw"] = self.powerstate.plasma["GZ_fixedtargets"] # 1E20/s/m^2 + self.powerstate.plasma["MtJm2"] = self.powerstate.plasma["MtJm2_fixedtargets"] # J/m^2 + + if forceZeroParticleFlux: + self.powerstate.plasma["Ge1E20sm2"] = self.powerstate.plasma["Ge1E20sm2"] * 0 + + # Convective fluxes? + if useConvectiveFluxes: + self.powerstate.plasma["Ce"] = PLASMAtools.convective_flux(self.powerstate.plasma["te"], self.powerstate.plasma["Ge1E20sm2"]) # MW/m^2 + self.powerstate.plasma["CZ"] = PLASMAtools.convective_flux(self.powerstate.plasma["te"], self.powerstate.plasma["CZ_raw"]) # MW/m^2 + else: + self.powerstate.plasma["Ce"] = self.powerstate.plasma["Ge1E20sm2"] + self.powerstate.plasma["CZ"] = self.powerstate.plasma["CZ_raw"] + + # ************************************************************************************************** + # Error + # ************************************************************************************************** + + variables_to_error = ["QeMWm2", "QiMWm2", "Ce", "CZ", "MtJm2", "Ge1E20sm2", "CZ_raw"] + + for i in variables_to_error: + self.powerstate.plasma[i + "_stds"] = abs(self.powerstate.plasma[i]) * relative_error_assumed / 100 + + # ************************************************************************************************** + # GB Normalized (Note: This is useful for mitim surrogate variables of targets) + # ************************************************************************************************** + + self.powerstate.plasma["QeGB"] = self.powerstate.plasma["QeMWm2"] / self.powerstate.plasma["Qgb"] + self.powerstate.plasma["QiGB"] = self.powerstate.plasma["QiMWm2"] / self.powerstate.plasma["Qgb"] + self.powerstate.plasma["CeGB"] = self.powerstate.plasma["Ce"] / self.powerstate.plasma["Qgb" if useConvectiveFluxes else "Ggb"] + self.powerstate.plasma["CZGB"] = self.powerstate.plasma["CZ"] / self.powerstate.plasma["Qgb" if useConvectiveFluxes else "Ggb"] + self.powerstate.plasma["MtGB"] = self.powerstate.plasma["MtJm2"] / self.powerstate.plasma["Pgb"] diff --git a/src/mitim_modules/powertorch/utils/TRANSFORMtools.py 
b/src/mitim_modules/powertorch/utils/TRANSFORMtools.py index 405add08..68c283a8 100644 --- a/src/mitim_modules/powertorch/utils/TRANSFORMtools.py +++ b/src/mitim_modules/powertorch/utils/TRANSFORMtools.py @@ -1,11 +1,11 @@ import copy import torch +from pathlib import Path import numpy as np import pandas as pd -from mitim_modules.powertorch.physics import CALCtools -from mitim_tools.misc_tools import LOGtools +from mitim_tools.misc_tools import LOGtools, IOtools from mitim_tools.gacode_tools import PROFILEStools -from mitim_modules.powertorch.physics import TARGETStools +from mitim_modules.powertorch.physics_models import targets_analytic, parameterizers from mitim_tools.misc_tools.LOGtools import printMsg as print from mitim_tools import __mitimroot__ from IPython import embed @@ -13,7 +13,7 @@ # <> Function to interpolate a curve <> from mitim_tools.misc_tools.MATHtools import extrapolateCubicSpline as interpolation_function -def gacode_to_powerstate(self, input_gacode, rho_vec): +def gacode_to_powerstate(self, rho_vec=None): """ This function converts from the fine input.gacode grid to a powertorch object and grid. Notes: @@ -30,6 +30,10 @@ def gacode_to_powerstate(self, input_gacode, rho_vec): print("\t- Producing powerstate object from input.gacode") + input_gacode = self.profiles + if rho_vec is None: + rho_vec = self.plasma["rho"] + # ********************************************************************************************* # Radial grid # ********************************************************************************************* @@ -105,39 +109,33 @@ def gacode_to_powerstate(self, input_gacode, rho_vec): ).to(rho_vec) # ********************************************************************************************* - quantities_to_interpolate_and_volp = [ - ["Paux_e", "qe_aux_MWmiller"], - ["Paux_i", "qi_aux_MWmiller"], - ["Gaux_e", "ge_10E20miller"], - ["Maux", "mt_Jmiller"], - ] - - for key in quantities_to_interpolate_and_volp: - - # ********************************************************************************************* - # Extract the quantity via interpolation and tensorization - # ********************************************************************************************* - self.plasma[key[0]] = torch.from_numpy( - interpolation_function(rho_vec.cpu(), rho_use, input_gacode.derived[key[1]]) - ).to(rho_vec) / self.plasma["volp"] - # ********************************************************************************************* - - self.plasma["Gaux_Z"] = self.plasma["Gaux_e"] * 0.0 + # ********************************************************************************************* + # Fixed targets + # ********************************************************************************************* quantitites = {} - quantitites["Pe_orig_fusrad"] = input_gacode.derived["qe_fus_MWmiller"] - input_gacode.derived["qrad_MWmiller"] - quantitites["Pi_orig_fusrad"] = input_gacode.derived["qi_fus_MWmiller"] - quantitites["Pe_orig_fusradexch"] = quantitites["Pe_orig_fusrad"] - input_gacode.derived["qe_exc_MWmiller"] - quantitites["Pi_orig_fusradexch"] = quantitites["Pi_orig_fusrad"] + input_gacode.derived["qe_exc_MWmiller"] + quantitites["QeMWm2_fixedtargets"] = input_gacode.derived["qe_aux_MWmiller"] + quantitites["QiMWm2_fixedtargets"] = input_gacode.derived["qi_aux_MWmiller"] + quantitites["Ge_fixedtargets"] = input_gacode.derived["ge_10E20miller"] + quantitites["GZ_fixedtargets"] = input_gacode.derived["ge_10E20miller"] * 0.0 + quantitites["MtJm2_fixedtargets"] = 
input_gacode.derived["mt_Jmiller"] + + if self.TargetOptions["ModelOptions"]["TypeTarget"] < 3: + # Fusion and radiation fixed if 1,2 + quantitites["QeMWm2_fixedtargets"] += input_gacode.derived["qe_fus_MWmiller"] - input_gacode.derived["qrad_MWmiller"] + quantitites["QiMWm2_fixedtargets"] += input_gacode.derived["qi_fus_MWmiller"] + + if self.TargetOptions["ModelOptions"]["TypeTarget"] < 2: + # Exchange fixed if 1 + quantitites["QeMWm2_fixedtargets"] -= input_gacode.derived["qe_exc_MWmiller"] + quantitites["QiMWm2_fixedtargets"] += input_gacode.derived["qe_exc_MWmiller"] for key in quantitites: # ********************************************************************************************* # Extract the quantity via interpolation and tensorization # ********************************************************************************************* - self.plasma[key] = torch.from_numpy( - interpolation_function(rho_vec.cpu(), rho_use, quantitites[key]) - ).to(rho_vec) / self.plasma["volp"] + self.plasma[key] = torch.from_numpy(interpolation_function(rho_vec.cpu(), rho_use, quantitites[key])).to(rho_vec) / self.plasma["volp"] # ********************************************************************************************* # ********************************************************************************************* @@ -159,9 +157,10 @@ def gacode_to_powerstate(self, input_gacode, rho_vec): self.plasma["kradcm"] = 1e-5 / self.plasma["a"] # ********************************************************************************************* - # Define deparametrizer functions for the varying profiles and gradients from here + # Define profile_constructor functions for the varying profiles and gradients from here # ********************************************************************************************* + # [quantiy in powerstate, quantity in input.gacode, index of the ion, multiplier, parameterize_in_aLx] cases_to_parameterize = [ ["te", "te(keV)", None, 1.0, True], ["ti", "ti(keV)", 0, 1.0, True], @@ -174,16 +173,16 @@ def gacode_to_powerstate(self, input_gacode, rho_vec): for i in range(input_gacode.profiles['ni(10^19/m^3)'].shape[1]): cases_to_parameterize.append([f"ni{i}", "ni(10^19/m^3)", i, 1.0, True]) - self.deparametrizers_fine, self.deparametrizers_coarse, self.deparametrizers_coarse_middle = {}, {}, {} + self.profile_constructors_fine, self.profile_constructors_coarse, self.profile_constructors_coarse_middle = {}, {}, {} for key in cases_to_parameterize: quant = input_gacode.profiles[key[1]] if key[2] is None else input_gacode.profiles[key[1]][:, key[2]] ( aLy_coarse, - self.deparametrizers_fine[key[0]], - self.deparametrizers_coarse[key[0]], - self.deparametrizers_coarse_middle[key[0]], - ) = parameterize_curve( + self.profile_constructors_fine[key[0]], + self.profile_constructors_coarse[key[0]], + self.profile_constructors_coarse_middle[key[0]], + ) = parameterizers.piecewise_linear( input_gacode.derived["roa"], quant, self.plasma["roa"], @@ -200,6 +199,45 @@ def gacode_to_powerstate(self, input_gacode, rho_vec): print(f"\t- All values of {key[0]} detected to be zero, to avoid NaNs, inserting {addT} at the edge",typeMsg="w") self.plasma[f"aL{key[0]}"][..., -1] += addT +def to_gacode( + self, + write_input_gacode=None, + position_in_powerstate_batch=0, + postprocess_input_gacode={}, + insert_highres_powers=False, + rederive_profiles=True, +): + ''' + Notes: + - insert_highres_powers: whether to insert high resolution powers (will calculate them with powerstate targets object, not other custom 
ones) + ''' + print(">> Inserting powerstate into input.gacode") + + profiles = powerstate_to_gacode( + self, + position_in_powerstate_batch=position_in_powerstate_batch, + postprocess_input_gacode=postprocess_input_gacode, + insert_highres_powers=insert_highres_powers, + rederive=rederive_profiles, + ) + + # Write input.gacode + if write_input_gacode is not None: + write_input_gacode = Path(write_input_gacode) + print(f"\t- Writing input.gacode file: {IOtools.clipstr(write_input_gacode)}") + write_input_gacode.parent.mkdir(parents=True, exist_ok=True) + profiles.writeCurrentStatus(file=write_input_gacode) + + # If corrections modify the ions set... it's better to re-read, otherwise powerstate will be confused + if rederive_profiles: + defineIons(self, profiles, self.plasma["rho"][position_in_powerstate_batch, :], self.dfT) + # Repeat, that's how it's done earlier + self._repeat_tensors(batch_size=self.plasma["rho"].shape[0], + specific_keys=["ni","ions_set_mi","ions_set_Zi","ions_set_Dion","ions_set_Tion","ions_set_c_rad"], + positionToUnrepeat=None) + + return profiles + def powerstate_to_gacode( self, postprocess_input_gacode={}, @@ -244,9 +282,9 @@ def powerstate_to_gacode( print(f"\t- Inserting {key[0]} into input.gacode profiles") # ********************************************************************************************* - # From a/Lx to x via fine deparametrizer + # From a/Lx to x via fine profile_constructor # ********************************************************************************************* - x, y = self.deparametrizers_fine[key[0]]( + x, y = self.profile_constructors_fine[key[0]]( self.plasma["roa"][position_in_powerstate_batch, :], self.plasma[f"aL{key[0]}"][position_in_powerstate_batch, :], ) @@ -327,7 +365,7 @@ def powerstate_to_gacode_powers(self, profiles, position_in_powerstate_batch=0): profiles, EvolutionOptions={"rhoPredicted": rhoy}, TargetOptions={ - "targets_evaluator": TARGETStools.analytical_model, + "targets_evaluator": targets_analytic.analytical_model, "ModelOptions": { "TypeTarget": self.TargetOptions["ModelOptions"]["TypeTarget"], # Important to keep the same as in the original "TargetCalc": "powerstate", @@ -385,7 +423,7 @@ def defineIons(self, input_gacode, rho_vec, dfT): Zi.append(input_gacode.profiles["z"][i]) # Grab chebyshev coefficients from file - data_df = pd.read_csv(__mitimroot__ / "src" / "mitim_modules" / "powertorch" / "physics" / "radiation_chebyshev.csv") + data_df = pd.read_csv(__mitimroot__ / "src" / "mitim_modules" / "powertorch" / "physics_models" / "radiation_chebyshev.csv") try: c = data_df[data_df['Ion'].str.lower()==input_gacode.profiles["name"][i].lower()].to_numpy()[0,2:].astype(float) except IndexError: @@ -410,179 +448,6 @@ def defineIons(self, input_gacode, rho_vec, dfT): self.plasma["ions_set_Tion"] = Tion self.plasma["ions_set_c_rad"] = c_rad -def parameterize_curve( - x_coord, - y_coord_raw, - x_coarse_tensor, - parameterize_in_aLx=True, - multiplier_quantity=1.0, - PreventNegative=False, - ): - """ - Notes: - - x_coarse_tensor must be torch - """ - - # ********************************************************************************************************** - # Define the integrator and derivator functions (based on whether I want to parameterize in aLx or in gradX) - # ********************************************************************************************************** - - if parameterize_in_aLx: - # 1/Lx = -1/X*dX/dr - integrator_function, derivator_function = ( - CALCtools.integrateGradient, - 
CALCtools.produceGradient, - ) - else: - # -dX/dr - integrator_function, derivator_function = ( - CALCtools.integrateGradient_lin, - CALCtools.produceGradient_lin, - ) - - y_coord = torch.from_numpy(y_coord_raw).to(x_coarse_tensor) * multiplier_quantity - - ygrad_coord = derivator_function( torch.from_numpy(x_coord).to(x_coarse_tensor), y_coord ) - - # ********************************************************************************************************** - # Get control points - # ********************************************************************************************************** - - x_coarse = x_coarse_tensor[1:].cpu().numpy() - - # Clip to zero if I want to prevent negative values - ygrad_coord = ygrad_coord.clip(0) if PreventNegative else ygrad_coord - - """ - Define region to get control points from - ------------------------------------------------------------ - Trick: Addition of extra point - This is important because if I don't, when I combine the trailing edge and the new - modified profile, there's going to be a discontinuity in the gradient. - """ - - ir_end = np.argmin(np.abs(x_coord - x_coarse[-1])) - - if ir_end < len(x_coord) - 1: - ir = ir_end + 2 # To prevent that TGYRO does a 2nd order derivative - x_coarse = np.append(x_coarse, [x_coord[ir]]) - else: - ir = ir_end - - # Definition of trailing edge. Any point after, and including, the extra point - x_trail = torch.from_numpy(x_coord[ir:]).to(x_coarse_tensor) - y_trail = y_coord[ir:] - x_notrail = torch.from_numpy(x_coord[: ir + 1]).to(x_coarse_tensor) - - # Produce control points, including a zero at the beginning - aLy_coarse = [[0.0, 0.0]] - for cont, i in enumerate(x_coarse): - yValue = ygrad_coord[np.argmin(np.abs(x_coord - i))] - aLy_coarse.append([i, yValue.cpu().item()]) - - aLy_coarse = torch.from_numpy(np.array(aLy_coarse)).to(ygrad_coord) - - # Since the last one is an extra point very close, I'm making it the same - aLy_coarse[-1, 1] = aLy_coarse[-2, 1] - - # Boundary condition at point moved by gridPointsAllowed - y_bc = torch.from_numpy(interpolation_function([x_coarse[-1]], x_coord, y_coord.cpu().numpy())).to(ygrad_coord) - - # Boundary condition at point (ACTUAL THAT I WANT to keep fixed, i.e. rho=0.8) - y_bc_real = torch.from_numpy(interpolation_function([x_coarse[-2]], x_coord, y_coord.cpu().numpy())).to(ygrad_coord) - - # ********************************************************************************************************** - # Define deparametrizer functions - # ********************************************************************************************************** - - def deparametrizer_coarse(x, y, multiplier=multiplier_quantity): - """ - Construct curve in a coarse grid - ---------------------------------------------------------------------------------------------------- - This constructs a curve in any grid, with any batch given in y=y. - Useful for surrogate evaluations. Fast in a coarse grid. For HF evaluations, - I need to do in a finer grid so that it is consistent with TGYRO. - x, y must be (batch, radii), y_bc must be (1) - """ - return ( - x, - integrator_function(x, y, y_bc_real) / multiplier, - ) - - def deparametrizer_coarse_middle(x, y, multiplier=multiplier_quantity): - """ - Deparamterizes a finer profile based on the values in the coarse. 
- Reason why something like this is not used for the full profile is because derivative of this will not be as original, - which is needed to match TGYRO - """ - yCPs = CALCtools.Interp1d()(aLy_coarse[:, 0][:-1].repeat((y.shape[0], 1)), y, x) - return x, integrator_function(x, yCPs, y_bc_real) / multiplier - - def deparametrizer_fine(x, y, multiplier=multiplier_quantity): - """ - Notes: - - x is a 1D array, but y can be a 2D array for a batch of individuals: (batch,x) - - I am assuming it is 1/LT for parameterization, but gives T - """ - - y = torch.atleast_2d(y) - x = x[0, :] if x.dim() == 2 else x - - # Add the extra trick point - x = torch.cat((x, aLy_coarse[-1][0].repeat((1)))) - y = torch.cat((y, aLy_coarse[-1][-1].repeat((y.shape[0], 1))), dim=1) - - # Model curve (basically, what happens in between points) - yBS = CALCtools.Interp1d()(x.repeat(y.shape[0], 1), y, x_notrail.repeat(y.shape[0], 1)) - - """ - --------------------------------------------------------------------------------------------------------- - Trick 1: smoothAroundCoarsing - TGYRO will use a 2nd order scheme to obtain gradients out of the profile, so a piecewise linear - will simply not give the right derivatives. - Here, this rough trick is to modify the points in gradient space around the coarse grid with the - same value of gradient, so in principle it doesn't matter the order of the derivative. - """ - num_around = 1 - for i in range(x.shape[0] - 2): - ir = torch.argmin(torch.abs(x[i + 1] - x_notrail)) - for k in range(-num_around, num_around + 1, 1): - yBS[:, ir + k] = yBS[:, ir] - # -------------------------------------------------------------------------------------------------------- - - yBS = integrator_function(x_notrail.repeat(yBS.shape[0], 1), yBS.clone(), y_bc) - - """ - Trick 2: Correct y_bc - The y_bc for the profile integration started at gridPointsAllowed, but that's not the real - y_bc. I want the temperature fixed at my first point that I actually care for. - Here, I multiply the profile to get that. 
- Multiplication works because: - 1/LT = 1/T * dT/dr - 1/LT' = 1/(T*m) * d(T*m)/dr = 1/T * dT/dr = 1/LT - Same logarithmic gradient, but with the right boundary condition - - """ - ir = torch.argmin(torch.abs(x_notrail - x[-2])) - yBS = yBS * torch.transpose((y_bc_real / yBS[:, ir]).repeat(yBS.shape[1], 1), 0, 1) - - # Add trailing edge - y_trailnew = copy.deepcopy(y_trail).repeat(yBS.shape[0], 1) - - x_notrail_t = torch.cat((x_notrail[:-1], x_trail), dim=0) - yBS = torch.cat((yBS[:, :-1], y_trailnew), dim=1) - - return x_notrail_t, yBS / multiplier - - # ********************************************************************************************************** - - return ( - aLy_coarse, - deparametrizer_fine, - deparametrizer_coarse, - deparametrizer_coarse_middle, - ) - def improve_resolution_profiles(profiles, rhoMODEL): """ Resolution of input.gacode @@ -633,7 +498,6 @@ def improve_resolution_profiles(profiles, rhoMODEL): # ---------------------------------------------------------------------------------- profiles.changeResolution(rho_new=rho_new) - def debug_transformation(p, p_new, s): rho = s.plasma['rho'][0][1:] diff --git a/src/mitim_modules/powertorch/utils/TRANSPORTtools.py b/src/mitim_modules/powertorch/utils/TRANSPORTtools.py new file mode 100644 index 00000000..02f5c395 --- /dev/null +++ b/src/mitim_modules/powertorch/utils/TRANSPORTtools.py @@ -0,0 +1,139 @@ +import copy +import shutil +from mitim_tools.misc_tools import IOtools +from mitim_tools.gacode_tools import PROFILEStools +from mitim_tools.misc_tools.LOGtools import printMsg as print +from IPython import embed + +class power_transport: + ''' + Default class for power transport models, change "evaluate" method to implement a new model and produce_profiles if the model requires written input.gacode written + + Notes: + - After evaluation, the self.model_results attribute will contain the results of the model, which can be used for plotting and analysis + - model results can have .plot() method that can grab kwargs or be similar to TGYRO plot + + ''' + def __init__(self, powerstate, name = "test", folder = "~/scratch/", evaluation_number = 0): + + self.name = name + self.folder = IOtools.expandPath(folder) + self.evaluation_number = evaluation_number + self.powerstate = powerstate + + # Allowed fluxes in powerstate so far + self.quantities = ['QeMWm2', 'QiMWm2', 'Ce', 'CZ', 'MtJm2'] + + # Each flux has a turbulent and neoclassical component + self.variables = [f'{i}_tr_turb' for i in self.quantities] + [f'{i}_tr_neo' for i in self.quantities] + + # Each flux component has a standard deviation + self.variables += [f'{i}_stds' for i in self.variables] + + # There is also turbulent exchange + self.variables += ['PexchTurb', 'PexchTurb_stds'] + + # And total transport flux + self.variables += [f'{i}_tr' for i in self.quantities] + + # Model results is None by default, but can be assigned in evaluate + self.model_results = None + + # Assign zeros to transport ones if not evaluated + for i in self.variables: + self.powerstate.plasma[i] = self.powerstate.plasma["te"] * 0.0 + + # There is also target components + self.variables += [f'{i}' for i in self.quantities] + [f'{i}_stds' for i in self.quantities] + + # ---------------------------------------------------------------------------------------- + # labels for plotting + # ---------------------------------------------------------------------------------------- + + self.powerstate.labelsFluxes = { + "te": "$Q_e$ ($MW/m^2$)", + "ti": "$Q_i$ ($MW/m^2$)", + "ne": ( + "$Q_{conv}$ 
($MW/m^2$)" + if self.powerstate.TransportOptions["ModelOptions"].get("useConvectiveFluxes", True) + else "$\\Gamma_e$ ($10^{20}/s/m^2$)" + ), + "nZ": ( + "$Q_{conv}$ $\\cdot f_{Z,0}$ ($MW/m^2$)" + if self.powerstate.TransportOptions["ModelOptions"].get("useConvectiveFluxes", True) + else "$\\Gamma_Z$ $\\cdot f_{Z,0}$ ($10^{20}/s/m^2$)" + ), + "w0": "$M_T$ ($J/m^2$)", + } + + def produce_profiles(self): + # Only add self._produce_profiles() if it's needed (e.g. full TGLF), otherwise this is somewhat expensive + # (e.g. for flux matching of analytical models) + pass + + def _produce_profiles(self,deriveQuantities=True): + + self.applyCorrections = self.powerstate.TransportOptions["ModelOptions"].get("MODELparameters", {}).get("applyCorrections", {}) + + # Write this updated profiles class (with parameterized profiles and target powers) + self.file_profs = self.folder / "input.gacode" + + powerstate_detached = self.powerstate.copy_state() + + self.powerstate.profiles = powerstate_detached.from_powerstate( + write_input_gacode=self.file_profs, + postprocess_input_gacode=self.applyCorrections, + rederive_profiles = deriveQuantities, # Derive quantities so that it's ready for analysis and plotting later + insert_highres_powers = deriveQuantities, # Insert powers so that Q, Pfus and all that it's consistent when read later + ) + + self.powerstate.profiles_transport = copy.deepcopy(self.powerstate.profiles) + + self._modify_profiles() + + def _modify_profiles(self): + ''' + Modify the profiles (e.g. lumping) before running the transport model + ''' + + # After producing the profiles, copy for future modifications + self.file_profs_unmod = self.file_profs.parent / f"{self.file_profs.name}_unmodified" + shutil.copy2(self.file_profs, self.file_profs_unmod) + + profiles_postprocessing_fun = self.powerstate.TransportOptions["ModelOptions"].get("profiles_postprocessing_fun", None) + + if profiles_postprocessing_fun is not None: + print(f"\t- Modifying input.gacode to run transport calculations based on {profiles_postprocessing_fun}",typeMsg="i") + self.powerstate.profiles_transport = profiles_postprocessing_fun(self.file_profs) + + # Position of impurity ion may have changed + p_old = PROFILEStools.PROFILES_GACODE(self.file_profs_unmod) + p_new = PROFILEStools.PROFILES_GACODE(self.file_profs) + + impurity_of_interest = p_old.Species[self.powerstate.impurityPosition] + + try: + impurityPosition_new = p_new.Species.index(impurity_of_interest) + + except ValueError: + print(f"\t- Impurity {impurity_of_interest} not found in new profiles, keeping position {self.powerstate.impurityPosition}",typeMsg="w") + impurityPosition_new = self.powerstate.impurityPosition + + if impurityPosition_new != self.powerstate.impurityPosition: + print(f"\t- Impurity position has changed from {self.powerstate.impurityPosition} to {impurityPosition_new}",typeMsg="i") + self.powerstate.impurityPosition_transport = p_new.Species.index(impurity_of_interest) + + # ---------------------------------------------------------------------------------------------------- + # EVALUATE (custom part) + # ---------------------------------------------------------------------------------------------------- + def evaluate(self): + ''' + This needs to populate the following in self.powerstate.plasma + - QeMWm2, QeMWm2_tr, QeMWm2_tr_turb, QeMWm2_tr_neo + Same for QiMWm2, Ce, CZ, MtJm2 + and their respective standard deviations + ''' + + print(">> No transport fluxes to evaluate", typeMsg="w") + pass + diff --git 
a/src/mitim_tools/astra_tools/ASTRA_CDFtools.py b/src/mitim_tools/astra_tools/ASTRA_CDFtools.py index c8b36f93..67862efc 100644 --- a/src/mitim_tools/astra_tools/ASTRA_CDFtools.py +++ b/src/mitim_tools/astra_tools/ASTRA_CDFtools.py @@ -28,51 +28,190 @@ def close_file(self): del self.nc_file def getProfiles(self): + + ### Constants + + self.GP = self.f["GP"][:] # pi + self.GP2 = self.f["GP2"][:] # 2*pi + + ### Few control parameters + + try: + self.t = self.f[ + "TIME" + ].data # New ASTRA update needs this patch, for old version still need [:] + except: + self.t = self.f["TIME"][:] + self.t = np.array([self.t]) if np.isscalar(self.t) else np.array(self.t) + self.tau = self.f["TAU"][:] # simulation time step + self.na1 = self.f["NA1"][:] # transport grid size + + ### Geometry + try: - self.R = self.f["r2d"][:] - self.Z = self.f["z2d"][:] + self.R = self.f["r2d"][:] # R coordinates + self.Z = self.f["z2d"][:] # Z coordinates except: - self.R = self.f["r"][:] - self.Z = self.f["z"][:] - self.rho = self.f["RHO"][:] - self.xrho = self.f["XRHO"][:] - self.BTOR = self.f["BTOR"][:] - self.IPL = self.f["IPL"][:] - self.Te = self.f["TE"][:] - self.TEX = self.f["TEX"][:] - self.TIX = self.f["TIX"][:] - self.NEX = self.f["NEX"][:] - self.Ti = self.f["TI"][:] - self.ne = self.f["NE"][:] - self.ni = self.f["NI"][:] - self.FP = self.f["FP"][:] - self.TF = self.rho[:,-1] * self.rho[:,-1] * self.BTOR[-1] / 2 # Wb/rad - self.VPOL = self.f["VPOL"][:] - self.VTOR = self.f["VTOR"][:] - self.F1 = self.f["F1"][:] - self.F2 = self.f["F2"][:] - self.F3 = self.f["F3"][:] - self.VR = self.f["VR"][:] - self.Cu = self.f["CU"][:] - self.Cubs = self.f["CUBS"][:] - #self.CuOhm = self.f["CUOHM"][:] - self.CuTor = self.f["CUTOR"][:] - self.CD = self.f["CD"][:] - self.Mu = self.f["MU"][:] - self.q_onaxis = 1/self.Mu[:,0] - self.MV = self.f["MV"][:] - self.FV = self.f["FV"][:] - self.VP = self.f["VP"][:] - self.Qi = self.f["QI"][:] - self.Qe = self.f["QE"][:] - self.Qn = self.f["QN"][:] - # self.QNTOT = self.f['CAR8'][:] - # self.QETOT = self.f['CAR9'][:] - # self.QITOT = self.f['CAR10'][:] - self.PEECR = self.f["PEECR"][:] - self.G11 = self.f["G11"][:] - - # dummy variables + self.R = self.f["R"][:] + self.Z = self.f["Z"][:] + self.ROC = self.f["ROC"][:] # effective minor radius + self.rho = self.f["RHO"][:] # main magnetic surface label + self.xrho = self.f["XRHO"][:] # sqrt of normalized toroidal magnetic flux + self.XRHO = self.xrho + self.HRO = self.f["HRO"][:] # radial grid step in rho + self.rmin = self.f["AMETR"][:] # minor radius + self.elong = self.f["ELONG"][:] # separatrix elongation + self.elon = self.f["ELON"][:] # elongation profile + self.trian = self.f["TRIAN"][:] # separatrix triangularity + self.tria = self.f["TRIA"][:] # triangularity + self.UPDWN = self.f["UPDWN"][:] # vertical shift separatrix + self.shift = self.f["SHIFT"][:] # Shafranov shift separatrix + self.shif = self.f["SHIF"][:] # Shafranov shift profile + self.RTOR = self.f["RTOR"][:] # Major radius + self.ABC = self.f["ABC"][:] # minor radius at separatrix on OMP + self.AB = self.f["AB"][:] # maximum value allowed for ABC + self.vol = self.f["VOLUM"][:] # plasma volume profile + self.VOLUME = self.f["VOLUME"][:] # plasma volume inside separatrix + self.AREAT = self.f['AREAT'][:] # cross section area + self.SLAT = self.f['SLAT'][:] # lateral plasma surface + self.G11 = self.f["G11"][:] # geometrical factor 1 + self.G22 = self.f["G22"][:] # geometrical factor 2 + self.G33 = self.f["G33"][:] # geometrical factor 3 + + self.timesteps = len(self.t) 
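# --- Editor's note: illustrative sketch only, not part of the patch. The try/except used
# above for "TIME" (newer ASTRA netCDF output exposes .data, older output needs [:], and a
# scalar time must be promoted to an array) could be captured in one small helper; `read_var`
# is a hypothetical name for illustration, not an API provided by ASTRA_CDFtools.
import numpy as np

def read_var(f, name):
    """Read an ASTRA CDF variable robustly across old/new output formats."""
    try:
        v = f[name].data      # newer ASTRA/netCDF interface
    except Exception:
        v = f[name][:]        # older interface
    return np.atleast_1d(np.asarray(v))   # scalars (e.g. a single time point) become 1-D arrays

# Hypothetical usage with an open netCDF variable mapping `f`:
#   t = read_var(f, "TIME"); tau = read_var(f, "TAU")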
+ self.radialsize = int(self.na1[-1]) + self.area = np.zeros([self.timesteps,self.radialsize]) + for ii in range(0,int(self.na1[-1])): + if ii>0: + self.area[:,ii] = self.AREAT[:,ii]-self.AREAT[:,ii-1] + else: + self.area[:,ii] = self.AREAT[:,ii] # cross section differential area + + ### Essential parameters + + self.BTOR = self.f["BTOR"][:] # toroidal magnetic field + self.IPL = self.f["IPL"][:] # plasma current + self.TEX = self.f["TEX"][:] # experimental electron temperature + self.TIX = self.f["TIX"][:] # experimental ion temperature + self.NEX = self.f["NEX"][:] # experimental electron density + self.NIX = self.f["NIX"][:] # experimental ion density + self.Te = self.f["TE"][:] # electron temperature + self.Ti = self.f["TI"][:] # ion temperature + self.ne = self.f["NE"][:] # electron density + self.NE = self.ne + self.ni = self.f["NI"][:] # ion density + self.NI = self.ni + self.NMAIN = self.f["NMAIN"][:] # main ion density + self.NDEUT = self.f["NDEUT"][:] # D density + self.NTRIT = self.f["NTRIT"][:] # T density + self.NIZ1 = self.f["NIZ1"][:] # 1st impurity density + self.NIZ2 = self.f["NIZ2"][:] # 2nd impurity density + self.NIZ3 = self.f["NIZ3"][:] # 3rd impurity density + self.NALF = self.f["NALF"][:] # alpha density + self.ZMJ = self.f["ZMJ"][:] # main ion charge + self.ZMAIN = self.f["ZMAIN"][:] # main ion charge + self.ZIM1 = self.f["ZIM1"][:] # 1st impurity charge + self.ZIM2 = self.f["ZIM2"][:] # 2nd impurity charge + self.ZIM3 = self.f["ZIM3"][:] # 3rd impurity charge + self.AMJ = self.f["AMJ"][:] # main ion mass + self.AMAIN = self.f["AMAIN"][:] # main ion mass + self.AIM1 = self.f["AIM1"][:] # 1st impurity mass + self.AIM2 = self.f["AIM2"][:] # 2nd impurity mass + self.AIM3 = self.f["AIM3"][:] # 3rd impurity mass + self.FP = self.f["FP"][:] # poloidal magnetic flux + self.TF = self.rho[:,-1] * self.rho[:,-1] * self.BTOR[-1] / 2 # ~average toroidal flux?? 
(Wb/rad) + self.ER = self.f["ER"][:] # radial electric field + self.VPOL = self.f["VPOL"][:] # poloidal plasma velocity + self.VTOR = self.f["VTOR"][:] # toroidal plasma velocity + self.F1 = self.f["F1"][:] # density of 1st additional transported species + self.F2 = self.f["F2"][:] # density of 2nd additional transported species + self.F3 = self.f["F3"][:] # density of 3rd additional transported species + self.VR = self.f["VR"][:] # volume derivative + self.Cu = self.f["CU"][:] # current density + self.Cubs = self.f["CUBS"][:] # bootstrap current density + self.CuTor = self.f["CUTOR"][:] # toroidal current density + self.CD = self.f["CD"][:] # driven current density + self.Mu = self.f["MU"][:] # rotational transform + self.q = 1/self.Mu # safety factor + self.q_onaxis = 1/self.Mu[:,0] # q on axis + self.sheas = self.f["SHEAR"][:] # magnetic + self.MV = self.f["MV"][:] # vacuum rotational transform + self.FV = self.f["FV"][:] # poloidal flux for vacuum magnetic field + self.VP = self.f["VP"][:] # pinch velocity + self.Qi = self.f["QI"][:] # total transported ion heat flux + self.Qe = self.f["QE"][:] # total transported electron heat flux + self.Qn = self.f["QN"][:] # total transported particles flux + self.ZEF = self.f["ZEF"][:] # Zeff + self.FTO = self.f["FTO"][:] # toroidal magnetic flux at the edge + self.DN = self.f["DN"][:] # main ion particle diffusivity + # self.HN = self.f['HN'][:] + # self.XN = self.f['XN'][:] + self.CN = self.f["CN"][:] # particle convection + # self.DE = self.f['DE'][:] + self.HE = self.f["HE"][:] # electron heat diffusivity due to Te gradient + # self.XE = self.f['XE'][:] + self.CE = self.f["CE"][:] # electron heat convection + # self.DI = self.f['DI'][:] + # self.HI = self.f['HI'][:] + self.XI = self.f["XI"][:] # main ion heat diffusivity due to Ti gradient + self.CI = self.f["CI"][:] # ion heat convection + self.DC = self.f["DC"][:] # current diffusivity due to n gradient + self.HC = self.f["HC"][:] # current diffusivity due to Te gradient + self.XC = self.f["XC"][:] # current diffusivity due to Ti gradient + self.CC = self.f["CC"][:] # conductivity + self.UPAR = self.f['UPAR'][:] # toroidal velocity + self.XUPAR = self.f['XUPAR'][:] # Momentum diffusivity + self.CNPAR = self.f['CNPAR'][:] # Momentum convective velocity + self.RUPFR = self.f['RUPFR'][:] # Toroidal turbulence-driven instrinsique torque + self.TTRQ = self.f['TTRQ'][:] # Applied external torque (e.g. 
NBI) + self.SN = self.f["SN"][:] # particle source + self.SNTOT = self.f["SNTOT"][:] + self.SNEBM = self.f['SNEBM'][:] # particle source due to NBI + # self.SNN = self.f['SNN'][:] + # self.SNNEU = self.f['SNNEU'][:] + self.PBPER = self.f['PBPER'][:] # pressure of fast ions in the perpendicular direction (wrt Bt) + self.PBLON = self.f['PBLON'][:] # pressure of fast ions in the longitudinal direction (wrt Bt) + self.ULON = self.f["ULON"][:] # longitudinal loop voltage + self.UPL = self.f["UPL"][:] # toroidal loop voltage + self.IPOL = self.f["IPOL"][:] # normalized poloidal current + + ### Power sources and sinks + + self.PE = self.f["PE"][:] # local electron power density + self.PI = self.f["PI"][:] # local ion power density + self.PEBM = self.f["PEBM"][:] # NBI power to electrons + self.PIBM = self.f["PIBM"][:] # NBI power to ions + self.PEECR = self.f["PEECR"][:] # ECH heating to electrons + self.PRAD = self.f["PRAD"][:] # Radiated Power + self.PEICR = self.f["PEICR"][:] # ICH heating to electrons + self.PIICR = self.f["PIICR"][:] # ICH heating to ions + self.POH = self.CC*(self.ULON/(self.GP2[-1]*self.RTOR[-1]*self.IPOL))**2/self.G33 + #### --------------- Calculation of fusion partition between main ions and electrons ------------------------- ### + YVALP = 1.2960e+07 + ne = np.maximum(self.ne, 1e-30) + te = np.maximum(self.Te, 1e-30) + YLLAME = 23.9 + np.log(1e3 * te / np.sqrt(1e19 * ne)) + yy6 = np.sqrt(1e3 * te / 1e19 / ne) * (4.0 * self.AMAIN * YVALP) / (4.0 + self.AMAIN) + YLLAMI = 14.2 + np.log(np.maximum(yy6, 0.1)) + yy6 = np.sqrt(1e3 * te / 1e19 / ne) * 2.0 * YVALP + YLLAMA = 14.2 + np.log(np.maximum(yy6, 0.01)) + yy6 = (YLLAMI * self.NI / (self.AMAIN * ne) + YLLAMA * self.NALF / ne) * 7.3e-4 / YLLAME + yy6 = np.maximum(yy6, 1e-4) + yvc = yy6**0.33 * np.sqrt(2.0 * te * 1.7564e14) + yeps = YVALP / (yvc + 1e-4) + yy6 = np.arctan(0.577 * (2.0 * yeps - 1.0)) + yy7 = np.log((1.0 + yeps)**2 / (1.0 - yeps + yeps**2)) + self.PAION1 = 2.0 / yeps**2 * (0.577 * yy6 - 0.167 * yy7 + 0.3) # alpha power fraction to main ions + self.SVDT = self.Ti**(-1/3) + self.SVDT = 8.972*np.exp(-19.9826*self.SVDT)*self.SVDT*self.SVDT*((self.Ti+1.0134)/(1.+6.386E-3*(self.Ti+1.0134)**2)+1.877*np.exp(-0.16176*self.Ti*np.sqrt(self.Ti))) # nuclear fusion cross section + self.PDT = 5.632*self.NDEUT*self.NTRIT*self.SVDT # total alpha power + self.PEDT = (1-self.PAION1)*self.PDT # alpha power to electrons + self.PIDT = self.PAION1*self.PDT # alpha power to main ions + self.COULG = 15.9-0.5*np.log(self.ne)+np.log(self.Te) # Coloumb logarithm + self.PEICL = 0.00246*self.COULG*self.ne*self.ni*self.ZMAIN**2 + self.PEICL = self.PEICL*(self.Te-self.Ti)/(self.AMAIN*self.Te*np.sqrt(self.Te)) # Collisional exchange power + + ### Dummy arrays (used for user-specified quantities) self.CAR1 = self.f["CAR1"][:] self.CAR2 = self.f["CAR2"][:] @@ -88,9 +227,7 @@ def getProfiles(self): self.CAR12 = self.f["CAR12"][:] self.CAR13 = self.f["CAR13"][:] self.CAR14 = self.f["CAR14"][:] - self.CAR14X = self.f["CAR14X"][:] self.CAR15 = self.f["CAR15"][:] - self.CAR15X = self.f["CAR15X"][:] self.CAR16 = self.f["CAR16"][:] self.CAR17 = self.f["CAR17"][:] self.CAR18 = self.f["CAR18"][:] @@ -130,6 +267,83 @@ def getProfiles(self): self.CAR52 = self.f["CAR52"][:] self.CAR53 = self.f["CAR53"][:] self.CAR54 = self.f["CAR54"][:] + self.CAR55 = self.f["CAR55"][:] + self.CAR56 = self.f["CAR56"][:] + self.CAR57 = self.f["CAR57"][:] + self.CAR58 = self.f["CAR58"][:] + self.CAR59 = self.f["CAR59"][:] + self.CAR60 = self.f["CAR60"][:] + self.CAR61 = 
self.f["CAR61"][:] + self.CAR62 = self.f["CAR62"][:] + self.CAR63 = self.f["CAR63"][:] + self.CAR64 = self.f["CAR64"][:] + self.CAR1X = self.f["CAR1X"][:] + self.CAR2X = self.f["CAR2X"][:] + self.CAR3X = self.f["CAR3X"][:] + self.CAR4X = self.f["CAR4X"][:] + self.CAR5X = self.f["CAR5X"][:] + self.CAR6X = self.f["CAR6X"][:] + self.CAR7X = self.f["CAR7X"][:] + self.CAR8X = self.f["CAR8X"][:] + self.CAR9X = self.f["CAR9X"][:] + self.CAR10X = self.f["CAR10X"][:] + self.CAR11X = self.f["CAR11X"][:] + self.CAR12X = self.f["CAR12X"][:] + self.CAR13X = self.f["CAR13X"][:] + self.CAR14X = self.f["CAR14X"][:] + self.CAR15X = self.f["CAR15X"][:] + self.CAR16X = self.f["CAR16X"][:] + self.CAR17X = self.f["CAR17X"][:] + self.CAR18X = self.f["CAR18X"][:] + self.CAR19X = self.f["CAR19X"][:] + self.CAR20X = self.f["CAR20X"][:] + self.CAR21X = self.f["CAR21X"][:] + self.CAR22X = self.f["CAR22X"][:] + self.CAR23X = self.f["CAR23X"][:] + self.CAR24X = self.f["CAR24X"][:] + self.CAR25X = self.f["CAR25X"][:] + self.CAR26X = self.f["CAR26X"][:] + self.CAR27X = self.f["CAR27X"][:] + self.CAR28X = self.f["CAR28X"][:] + self.CAR29X = self.f["CAR29X"][:] + self.CAR30X = self.f["CAR30X"][:] + self.CAR31X = self.f["CAR31X"][:] + self.CAR32X = self.f["CAR32X"][:] + self.CAR33X = self.f["CAR33X"][:] + self.CAR34X = self.f["CAR34X"][:] + self.CAR35X = self.f["CAR35X"][:] + self.CAR36X = self.f["CAR36X"][:] + self.CAR37X = self.f["CAR37X"][:] + self.CAR38X = self.f["CAR38X"][:] + self.CAR39X = self.f["CAR39X"][:] + self.CAR40X = self.f["CAR40X"][:] + self.CAR41X = self.f["CAR41X"][:] + self.CAR42X = self.f["CAR42X"][:] + self.CAR43X = self.f["CAR43X"][:] + self.CAR44X = self.f["CAR44X"][:] + self.CAR45X = self.f["CAR45X"][:] + self.CAR46X = self.f["CAR46X"][:] + self.CAR47X = self.f["CAR47X"][:] + self.CAR48X = self.f["CAR48X"][:] + self.CAR49X = self.f["CAR49X"][:] + self.CAR50X = self.f["CAR50X"][:] + self.CAR51X = self.f["CAR51X"][:] + self.CAR52X = self.f["CAR52X"][:] + self.CAR53X = self.f["CAR53X"][:] + self.CAR54X = self.f["CAR54X"][:] + self.CAR55X = self.f["CAR55X"][:] + self.CAR56X = self.f["CAR56X"][:] + self.CAR57X = self.f["CAR57X"][:] + self.CAR58X = self.f["CAR58X"][:] + self.CAR59X = self.f["CAR59X"][:] + self.CAR60X = self.f["CAR60X"][:] + self.CAR61X = self.f["CAR61X"][:] + self.CAR62X = self.f["CAR62X"][:] + self.CAR63X = self.f["CAR63X"][:] + self.CAR64X = self.f["CAR64X"][:] + + ### Dummy scalars (used for user-specified quantities) + self.CRAD1 = self.f['CRAD1'][:] self.CRAD2 = self.f['CRAD2'][:] self.CRAD3 = self.f['CRAD3'][:] @@ -137,12 +351,10 @@ def getProfiles(self): self.CIMP1 = self.f['CIMP1'][:] self.CIMP2 = self.f['CIMP2'][:] self.CIMP3 = self.f['CIMP3'][:] + self.CIMP4 = self.f['CIMP4'][:] self.ZRD1 = self.f["ZRD1"][:] - self.ZRD1X = self.f["ZRD1X"][:] self.ZRD2 = self.f["ZRD2"][:] - self.ZRD2X = self.f["ZRD2X"][:] self.ZRD3 = self.f["ZRD3"][:] - self.ZRD3X = self.f["ZRD3X"][:] self.ZRD4 = self.f["ZRD4"][:] self.ZRD5 = self.f["ZRD5"][:] self.ZRD6 = self.f["ZRD6"][:] @@ -194,10 +406,72 @@ def getProfiles(self): self.ZRD52 = self.f["ZRD52"][:] self.ZRD53 = self.f["ZRD53"][:] self.ZRD54 = self.f["ZRD54"][:] + self.ZRD55 = self.f["ZRD55"][:] + self.ZRD56 = self.f["ZRD56"][:] + self.ZRD57 = self.f["ZRD57"][:] + self.ZRD58 = self.f["ZRD58"][:] + self.ZRD59 = self.f["ZRD59"][:] + self.ZRD60 = self.f["ZRD60"][:] + self.ZRD1X = self.f["ZRD1X"][:] + self.ZRD2X = self.f["ZRD2X"][:] + self.ZRD3X = self.f["ZRD3X"][:] + self.ZRD4X = self.f["ZRD4X"][:] + self.ZRD5X = self.f["ZRD5X"][:] + self.ZRD6X = 
self.f["ZRD6X"][:] + self.ZRD7X = self.f["ZRD7X"][:] + self.ZRD8X = self.f["ZRD8X"][:] + self.ZRD9X = self.f["ZRD9X"][:] + self.ZRD10X = self.f["ZRD10X"][:] + self.ZRD11X = self.f["ZRD11X"][:] + self.ZRD12X = self.f["ZRD12X"][:] + self.ZRD13X = self.f["ZRD13X"][:] + self.ZRD14X = self.f["ZRD14X"][:] + self.ZRD15X = self.f["ZRD15X"][:] + self.ZRD16X = self.f["ZRD16X"][:] + self.ZRD17X = self.f["ZRD17X"][:] + self.ZRD18X = self.f["ZRD18X"][:] + self.ZRD19X = self.f["ZRD19X"][:] + self.ZRD20X = self.f["ZRD20X"][:] + self.ZRD21X = self.f["ZRD21X"][:] + self.ZRD22X = self.f["ZRD22X"][:] + self.ZRD23X = self.f["ZRD23X"][:] + self.ZRD24X = self.f["ZRD24X"][:] + self.ZRD25X = self.f["ZRD25X"][:] + self.ZRD26X = self.f["ZRD26X"][:] + self.ZRD27X = self.f["ZRD27X"][:] + self.ZRD28X = self.f["ZRD28X"][:] + self.ZRD29X = self.f["ZRD29X"][:] + self.ZRD30X = self.f["ZRD30X"][:] + self.ZRD31X = self.f["ZRD31X"][:] + self.ZRD32X = self.f["ZRD32X"][:] + self.ZRD33X = self.f["ZRD33X"][:] + self.ZRD34X = self.f["ZRD34X"][:] + self.ZRD35X = self.f["ZRD35X"][:] + self.ZRD36X = self.f["ZRD36X"][:] + self.ZRD37X = self.f["ZRD37X"][:] + self.ZRD38X = self.f["ZRD38X"][:] + self.ZRD39X = self.f["ZRD39X"][:] + self.ZRD40X = self.f["ZRD40X"][:] + self.ZRD41X = self.f["ZRD41X"][:] + self.ZRD42X = self.f["ZRD42X"][:] + self.ZRD43X = self.f["ZRD43X"][:] + self.ZRD44X = self.f["ZRD44X"][:] + self.ZRD45X = self.f["ZRD45X"][:] + self.ZRD46X = self.f["ZRD46X"][:] + self.ZRD47X = self.f["ZRD47X"][:] + self.ZRD48X = self.f["ZRD48X"][:] + self.ZRD49X = self.f["ZRD49X"][:] + self.ZRD50X = self.f["ZRD50X"][:] self.ZRD51X = self.f["ZRD51X"][:] self.ZRD52X = self.f["ZRD52X"][:] self.ZRD53X = self.f["ZRD53X"][:] - + self.ZRD54X = self.f["ZRD54X"][:] + self.ZRD55X = self.f["ZRD55X"][:] + self.ZRD56X = self.f["ZRD56X"][:] + self.ZRD57X = self.f["ZRD57X"][:] + self.ZRD58X = self.f["ZRD58X"][:] + self.ZRD59X = self.f["ZRD59X"][:] + self.ZRD60X = self.f["ZRD60X"][:] self.CF1 = self.f["CF1"][:] self.CF2 = self.f["CF2"][:] self.CF3 = self.f["CF3"][:] @@ -231,170 +505,172 @@ def getProfiles(self): self.CV15 = self.f["CV15"][:] self.CV16 = self.f["CV16"][:] - self.AMJ = self.f["AMJ"][:] - self.AMAIN = self.f['AMAIN'][:] - self.AIM1 = self.f['AIM1'][:] - self.AIM2 = self.f['AIM2'][:] - self.AIM3 = self.f['AIM3'][:] - self.ZIM1 = self.f['ZIM1'][:] - self.ZIM2 = self.f['ZIM2'][:] - self.ZIM3 = self.f['ZIM3'][:] - self.ZMJ = self.f["ZMJ"][:] - self.ZEF = self.f["ZEF"][:] - self.ROC = self.f["ROC"][:] - self.tau = self.f["TAU"][:] - self.vol = self.f["VOLUM"][:] - self.VOLUME = self.f["VOLUME"][:] - try: - self.t = self.f[ - "TIME" - ].data # New ASTRA update needs this patch, for old version still need [:] - except: - self.t = self.f["TIME"][:] - self.rmin = self.f["AMETR"][:] - self.elong = self.f["ELONG"][:] - self.elon = self.f["ELON"][:] - self.trian = self.f["TRIAN"][:] - self.tria = self.f["TRIA"][:] - self.UPDWN = self.f["UPDWN"][:] - self.shif = self.f["SHIF"][:] - self.shift = self.f["SHIFT"][:] - self.RTOR = self.f["RTOR"][:] - self.AB = self.f["AB"][:] - self.ABC = self.f["ABC"][:] - self.PE = self.f["PE"][:] - self.PI = self.f["PI"][:] - self.PEBM = self.f["PEBM"][:] # NBI power to electrons - self.PIBM = self.f["PIBM"][:] # NBI power to ions - # self.POH = self.f['POH'][:] # Ohmic Power - self.PEECR = self.f["PEECR"][:] # ECH heating to electrons - self.PRAD = self.f["PRAD"][:] # Radiated Power - self.PEICR = self.f["PEICR"][:] - self.PIICR = self.f["PIICR"][:] - # self.PEICL = self.f['PEICL'][:] # Exchange power, given to ions --> 
not saved rn - self.chi_e_TGLF = self.f["CAR18"][:] # TGLF effective electron diffusivity - self.chi_i_TGLF = self.f["CAR17"][:] # TGLF effective ion plot_diffusivity - self.chi_e_TGLF_smoothed = self.f["CAR22"][:] - self.chi_i_TGLF_smoothed = self.f["CAR21"][:] - self.pinch_TGLF_smoothed = self.f["CAR24"][:] - self.FTO = self.f["FTO"][:] - self.DN = self.f["DN"][:] - # self.HN = self.f['HN'][:] - # self.XN = self.f['XN'][:] - # self.DE = self.f['DE'][:] - self.HE = self.f["HE"][:] - # self.XE = self.f['XE'][:] - # self.DI = self.f['DI'][:] - # self.HI = self.f['HI'][:] - self.XI = self.f["XI"][:] - self.CN = self.f["CN"][:] - self.CE = self.f["CE"][:] - self.CI = self.f["CI"][:] - self.DC = self.f["DC"][:] - self.HC = self.f["HC"][:] - self.XC = self.f["XC"][:] - self.SN = self.f["SN"][:] - # self.SNN = self.f['SNN'][:] - # self.SNNEU = self.f['SNNEU'][:] - self.XRHO = self.f["XRHO"][:] - self.HRO = self.f["HRO"][:] - self.PBPER = self.f['PBPER'][:] - self.PBLON = self.f['PBLON'][:] - - self.CC = self.f["CC"][:] - self.ULON = self.f["ULON"][:] - self.UPL = self.f["UPL"][:] - self.GP2 = self.f["GP2"][:] - self.IPOL = self.f["IPOL"][:] - self.G22 = self.f["G22"][:] - self.G33 = self.f["G33"][:] - # self.POH = self.CC/(self.ULON/(self.GP2[-1]*self.RTOR[-1]*self.IPOL))**2/self.G33*1e-6 - self.PEDT = self.f["CAR3"][:] - self.PIDT = self.f["CAR4"][:] - self.PEICL = self.f["CAR5"][:] - self.POH = self.f["CAR6"][:] - self.QDT = np.zeros([len(self.PEDT[:,-1]),len(self.PEDT[-1,:])]) - self.QICRH = np.zeros([len(self.PEICR[:,-1]),len(self.PEICR[-1,:])]) - self.QE = np.zeros([len(self.PEICR[:,-1]),len(self.PEICR[-1,:])]) - self.QI = np.zeros([len(self.PEICR[:,-1]),len(self.PEICR[-1,:])]) - self.QRAD = np.zeros([len(self.PEICR[:,-1]),len(self.PEICR[-1,:])]) - self.QOH = np.zeros([len(self.PEICR[:,-1]),len(self.PEICR[-1,:])]) - self.Wtot = np.zeros([len(self.PEICR[:,-1]),len(self.PEICR[-1,:])]) - self.ne_avg = np.zeros([len(self.PEICR[:,-1])]) - self.Te_avg = np.zeros([len(self.PEICR[:,-1])]) - self.Ti_avg = np.zeros([len(self.PEICR[:,-1])]) - self.tau98 = np.zeros([len(self.PEICR[:,-1])]) - self.AREAT = self.f['AREAT'][:] - self.SLAT = self.f['SLAT'][:] - self.FP_norm = np.zeros([len(self.PEICR[:,-1]),len(self.PEICR[-1,:])]) - for kk in range(0,len(self.PEDT[:,-1])): - self.FP_norm[kk,:] = (self.FP[kk,:]-self.FP[kk,0])/(self.FP[kk,-1]-self.FP[kk,0]) + ### Initialize derived and integral quantities + + self.QIDT = np.zeros([self.timesteps,self.radialsize]) + self.QEDT = np.zeros([self.timesteps,self.radialsize]) + self.QDT = np.zeros([self.timesteps,self.radialsize]) + self.QEICRH = np.zeros([self.timesteps,self.radialsize]) + self.QIICRH = np.zeros([self.timesteps,self.radialsize]) + self.QICRH = np.zeros([self.timesteps,self.radialsize]) + self.QNBI = np.zeros([self.timesteps,self.radialsize]) + self.QECRH = np.zeros([self.timesteps,self.radialsize]) + self.QEICL = np.zeros([self.timesteps,self.radialsize]) + self.Cu_tot = np.zeros([self.timesteps,self.radialsize]) + self.CuTor_tot = np.zeros([self.timesteps,self.radialsize]) + self.Cubs_tot = np.zeros([self.timesteps,self.radialsize]) + self.QE = np.zeros([self.timesteps,self.radialsize]) + self.QI = np.zeros([self.timesteps,self.radialsize]) + self.QRAD = np.zeros([self.timesteps,self.radialsize]) + self.QOH = np.zeros([self.timesteps,self.radialsize]) + self.Wtot = np.zeros([self.timesteps,self.radialsize]) + self.ne_avg = np.zeros([self.timesteps]) + self.NIZ1_avg = np.zeros([self.timesteps]) + self.NIZ2_avg = np.zeros([self.timesteps]) + 
self.NIZ3_avg = np.zeros([self.timesteps]) + self.NI_avg = np.zeros([self.timesteps]) + self.ne_lineavg = np.zeros([self.timesteps]) + self.Te_avg = np.zeros([self.timesteps]) + self.Ti_avg = np.zeros([self.timesteps]) + self.tau98 = np.zeros([self.timesteps]) + self.tau89 = np.zeros([self.timesteps]) + self.tau98_lineavg = np.zeros([self.timesteps]) + self.beta = np.zeros(self.timesteps) + self.betaN = np.zeros(self.timesteps) + self.FP_norm = np.zeros([self.timesteps,self.radialsize]) + self.q95position = [0]*self.timesteps + self.q95 = np.zeros(self.timesteps) + self.delta95 = np.zeros(self.timesteps) + self.kappa95 = np.zeros(self.timesteps) + self.n_Angioni = np.zeros(self.timesteps) + self.SNEBM_tot = np.zeros(self.timesteps) + self.shear = np.zeros([self.timesteps,self.radialsize]) + self.PBRAD = np.zeros([self.timesteps,self.radialsize]) + self.PSYNC = np.zeros([self.timesteps,self.radialsize]) + self.QBRAD = np.zeros([self.timesteps,self.radialsize]) + self.QSYNC = np.zeros([self.timesteps,self.radialsize]) + self.PRWOL_PUET_dens = np.zeros([self.timesteps,self.radialsize]) + self.rlte = np.zeros([self.timesteps,self.radialsize]) + self.rlti = np.zeros([self.timesteps,self.radialsize]) + self.rlne = np.zeros([self.timesteps,self.radialsize]) + + ### Integrated quantities + + for kk in range(0,self.timesteps): + # volumetric density variables + self.QIDT[kk,:] = np.cumsum(self.PIDT[kk,:]*self.HRO[kk]*self.VR[kk,:]) + self.QEICL[kk,:] = np.cumsum(self.PEICL[kk,:]*self.HRO[kk]*self.VR[kk,:]) + self.QEDT[kk,:] = np.cumsum(self.PEDT[kk,:]*self.HRO[kk]*self.VR[kk,:]) self.QDT[kk,:] = np.cumsum((self.PEDT[kk,:]+self.PIDT[kk,:])*self.HRO[kk]*self.VR[kk,:]) + self.QNBI[kk,:] = np.cumsum((self.PEBM[kk,:]+self.PIBM[kk,:])*self.HRO[kk]*self.VR[kk,:]) + self.QECRH[kk,:] = np.cumsum(self.PEECR[kk,:]*self.HRO[kk]*self.VR[kk,:]) + self.QIICRH[kk,:] = np.cumsum((self.PIICR[kk,:])*self.HRO[kk]*self.VR[kk,:]) + self.QEICRH[kk,:] = np.cumsum((self.PEICR[kk,:])*self.HRO[kk]*self.VR[kk,:]) self.QICRH[kk,:] = np.cumsum((self.PIICR[kk,:]+self.PEICR[kk,:])*self.HRO[kk]*self.VR[kk,:]) self.QE[kk,:] = np.cumsum(self.PE[kk,:]*self.HRO[kk]*self.VR[kk,:]) self.QI[kk,:] = np.cumsum(self.PI[kk,:]*self.HRO[kk]*self.VR[kk,:]) self.QRAD[kk,:] = np.cumsum(self.PRAD[kk,:]*self.HRO[kk]*self.VR[kk,:]) self.QOH[kk,:] = np.cumsum(self.POH[kk,:]*self.HRO[kk]*self.VR[kk,:]) - self.Wtot[kk,:] = np.cumsum((self.ne[kk,:]*self.Te[kk,:]+self.ni[kk,:]*self.Ti[kk,:])*self.HRO[kk]*self.VR[kk,:]) - self.ne_avg[kk] = np.cumsum(self.ne[kk,:]*self.HRO[kk]*self.VR[kk,:])[-1]/self.vol[kk,-1] - self.Te_avg[kk] = np.cumsum(self.Te[kk,:]*self.HRO[kk]*self.VR[kk,:])[-1]/self.vol[kk,-1] - self.Ti_avg[kk] = np.cumsum(self.Ti[kk,:]*self.HRO[kk]*self.VR[kk,:])[-1]/self.vol[kk,-1] - self.tau98[kk] = 0.0562*(self.IPL[kk])**0.93*(self.BTOR[kk])**0.15*(self.ne_avg[kk])**0.41*(self.QE[kk,-1]+self.QI[kk,-1]+self.QRAD[kk,-1])**(-0.69)*(self.RTOR[kk])**1.97*(self.AREAT[kk,-1]/(3.1415*self.rmin[kk,-1]**2))**0.78*(self.rmin[kk,-1]/self.RTOR[kk])**0.58*(self.AMAIN[kk,1])**0.19 - - self.Wtot = 0.0024*self.Wtot #check formula in ASTRA - self.tauE = self.Wtot/(self.QRAD+self.QE+self.QI) - self.H98 = self.tauE[:,-1]/self.tau98 - self.NDEUT = self.f["NDEUT"][:] - self.NTRIT = self.f["NTRIT"][:] - self.NIZ1 = self.f["NIZ1"][:] - self.NIZ2 = self.f["NIZ2"][:] - self.NIZ3 = self.f["NIZ3"][:] - self.CAR1 = self.f["CAR1"][:] - self.NMAIN = self.f["NMAIN"][:] - self.ZIM1 = self.f["ZIM1"][:] - self.ZIM2 = self.f["ZIM2"][:] - self.ZIM3 = self.f["ZIM3"][:] - self.CAR7 
= self.f["CAR7"][:] - self.ZMAIN = self.f["ZMAIN"][:] - self.ptot = self.ne*self.Te+self.ni*self.Ti+0.5*(self.PBPER+self.PBLON) - self.rlte = np.zeros([len(self.PEDT[:,-1]),len(self.PEDT[-1,:])]) - self.rlti = np.zeros([len(self.PEDT[:,-1]),len(self.PEDT[-1,:])]) - self.rlne = np.zeros([len(self.PEDT[:,-1]),len(self.PEDT[-1,:])]) - for kk in range(0,len(self.Te[:,-1])): - for jj in range(0,len(self.Te[-1,:])-1): - self.rlte[kk,jj]=-self.RTOR[-1]/(0.5*(self.Te[kk,jj]+self.Te[kk,jj+1])*(self.rmin[kk,jj+1]-self.rmin[kk,jj])/(self.Te[kk,jj+1]-self.Te[kk,jj])) - self.rlti[kk,jj]=-self.RTOR[-1]/(0.5*(self.Ti[kk,jj]+self.Ti[kk,jj+1])*(self.rmin[kk,jj+1]-self.rmin[kk,jj])/(self.Ti[kk,jj+1]-self.Ti[kk,jj])) - self.rlne[kk,jj]=-self.RTOR[-1]/(0.5*(self.ne[kk,jj]+self.ne[kk,jj+1])*(self.rmin[kk,jj+1]-self.rmin[kk,jj])/(self.ne[kk,jj+1]-self.ne[kk,jj])) - self.rlte[kk,jj+1]=self.rlte[kk,jj] - self.rlti[kk,jj+1]=self.rlti[kk,jj] - self.rlne[kk,jj+1]=self.rlne[kk,jj] - - ## check on quasi-neutrality - self.quasi = (self.f['NE'][:]-self.f['NMAIN'][:]*self.f['ZMAIN'][:]-self.f['NIZ1'][:]*self.f['ZIM1'][:]-self.f['NIZ2'][:]*self.f['ZIM2'][:]-self.f['NIZ3'][:]*self.f['ZIM3'][:])/self.f['NE'][:] - - ## some global and performance parameters - self.Q = (self.QDT[:,-1]/(self.QICRH[:,-1]+self.QOH[:,-1]))/0.2 ## in teh D+T fusion reactions 20% goes to He and 80% to neutrons - self.Pfus = self.QDT/0.2 - self.betaN = np.zeros(len(self.PEDT[:,-1])) - for kk in range(0,len(self.PEDT[:,-1])): - self.betaN[kk] = 0.402*np.cumsum((self.ne[kk,:]*self.Te[kk,:]+self.ni[kk,:]*self.Ti[kk,:]+0.5*(self.PBPER[kk,:]+self.PBLON[kk,:]))*self.VR[kk,:])[-1]/np.cumsum(self.VR[kk,:])[-1]*self.ABC[kk]/(self.BTOR[kk]*self.IPL[kk]) - self.PLH = 0.0488*(self.ne_avg/10.)**0.717*(self.BTOR)**0.803*(self.SLAT[:,-1])**0.941*(2/self.AMAIN[:,-1]) - self.PLH_lower = 0.0488*math.exp(-0.057)*(self.ne_avg/10.)**0.682*(self.BTOR)**0.771*(self.SLAT[:,-1])**0.922*(2/self.AMAIN[:,-1]) - self.PLH_upper = 0.0488*math.exp(0.057)*(self.ne_avg/10.)**0.752*(self.BTOR)**0.835*(self.SLAT[:,-1])**0.96*(2/self.AMAIN[:,-1]) - self.PLH_perc = (self.QE[:,-1]+self.QI[:,-1])/self.PLH - self.PLH_lower_perc = (self.QE[:,-1]+self.QI[:,-1])/self.PLH_lower - self.PLH_upper_perc = (self.QE[:,-1]+self.QI[:,-1])/self.PLH_upper - self.PLH_schmidtmayr = 0.0325*(self.ne_avg/10.)**1.05*(self.BTOR)**0.68*(self.SLAT[:,-1])**0.93*(2/self.AMAIN[:,-1]) - self.PLH_schmidt_perc = (self.QI[:,-1])/self.PLH_schmidtmayr - + self.SNEBM_tot[kk] = np.cumsum(self.SNEBM[kk,:]*self.HRO[kk]*self.VR[kk,:])[-1]/self.vol[kk,-1] + # areal density variables + self.Cu_tot[kk,:] = np.cumsum(self.Cu[kk,:]*self.area[kk,:]) + self.CuTor_tot[kk,:] = np.cumsum(self.CuTor[kk,:]*self.area[kk,:]) + self.Cubs_tot[kk,:] = np.cumsum(self.Cubs[kk,:]*self.area[kk,:]) + self.QETOT = self.QE + self.QITOT = self.QI + + ### Derived quantities + + for kk in range(0,self.timesteps): + # average values + self.ne_avg[kk] = np.cumsum(self.ne[kk,:]*self.HRO[kk]*self.VR[kk,:])[-1]/self.vol[kk,-1] # volume average electron density + self.NIZ1_avg[kk] = np.cumsum(self.NIZ1[kk,:]*self.HRO[kk]*self.VR[kk,:])[-1]/self.vol[kk,-1] # volume average 1st impurity density + self.NIZ2_avg[kk] = np.cumsum(self.NIZ2[kk,:]*self.HRO[kk]*self.VR[kk,:])[-1]/self.vol[kk,-1] # volume average 2nd impurity density + self.NI_avg[kk] = np.cumsum(self.NI[kk,:]*self.HRO[kk]*self.VR[kk,:])[-1]/self.vol[kk,-1] # volume average ion density + self.ne_lineavg[kk] = np.cumsum(self.ne[kk,:])[-1]/len(self.ne[kk,:]) # line average electron density + 
self.Te_avg[kk] = np.cumsum(self.Te[kk,:]*self.HRO[kk]*self.VR[kk,:])[-1]/self.vol[kk,-1] # volume average Te + self.Ti_avg[kk] = np.cumsum(self.Ti[kk,:]*self.HRO[kk]*self.VR[kk,:])[-1]/self.vol[kk,-1] # volume average Ti + # derived quantities + self.FP_norm[kk,:] = (self.FP[kk,:]-self.FP[kk,0])/(self.FP[kk,-1]-self.FP[kk,0]) # normalized poloidal flux + self.q95position[kk] = np.abs(self.FP_norm[kk] - 0.95).argmin() # coordinate at 95% of poloidal normalized flux + self.q95[kk] = 1/self.Mu[kk,self.q95position[kk]] # q at 95% of poloidal normalized flux + self.delta95[kk] = self.tria[kk,self.q95position[kk]] # triangularity at 95% of poloidal normalized flux + self.kappa95[kk] = self.elon[kk,self.q95position[kk]] # elongation at 95% of poloidal normalized flux + self.beta[kk] = 0.00402*np.cumsum((self.ne[kk,:]*self.Te[kk,:]+self.ni[kk,:]*self.Ti[kk,:]+0.5*(self.PBPER[kk,:]+self.PBLON[kk,:]))*self.VR[kk,:])[-1]/np.cumsum(self.VR[kk,:])[-1]/(self.BTOR[kk]**2) # plasma beta + self.betaN[kk] = 0.402*np.cumsum((self.ne[kk,:]*self.Te[kk,:]+self.ni[kk,:]*self.Ti[kk,:]+0.5*(self.PBPER[kk,:]+self.PBLON[kk,:]))*self.VR[kk,:])[-1]/np.cumsum(self.VR[kk,:])[-1]*self.ABC[kk]/(self.BTOR[kk]*self.IPL[kk]) # normalized plasma beta + self.Wtot[kk,:] = 0.024*np.cumsum((self.ne[kk,:]*self.Te[kk,:]+self.ni[kk,:]*self.Ti[kk,:])*self.HRO[kk]*self.VR[kk,:]) # total plasma energy + self.tau89[kk] = 0.048*(self.AMAIN[kk,1])**0.5*(self.IPL[kk])**0.85*(self.RTOR[kk])**1.2*(self.ABC[kk])**0.3*(self.AREAT[kk,-1]/(3.1415*self.rmin[kk,-1]**2))**0.5*max(1.e-12,self.ne_lineavg[kk])**0.1*(self.BTOR[kk])**0.2*max(1.e-12,self.QDT[kk,-1]+self.QICRH[kk,-1]+self.QECRH[kk,-1]+self.QNBI[kk,-1]+self.QOH[kk,-1])**(-0.5) # tau89 + self.tau98[kk] = 0.0562*(self.IPL[kk])**0.93*(self.BTOR[kk])**0.15*max(1.e-12,self.ne_avg[kk])**0.41*max(1.e-12,self.QE[kk,-1]+self.QI[kk,-1]+self.QRAD[kk,-1])**(-0.69)*(self.RTOR[kk])**1.97*(self.AREAT[kk,-1]/(3.1415*self.rmin[kk,-1]**2))**0.78*(self.rmin[kk,-1]/self.RTOR[kk])**0.58*(self.AMAIN[kk,1])**0.19 # tau98 + self.tau98_lineavg[kk] = 0.0562*(self.IPL[kk])**0.93*(self.BTOR[kk])**0.15*max(1.e-12,self.ne_lineavg[kk])**0.41*max(1.e-12,self.QE[kk,-1]+self.QI[kk,-1]+self.QRAD[kk,-1])**(-0.69)*(self.RTOR[kk])**1.97*(self.AREAT[kk,-1]/(3.1415*self.rmin[kk,-1]**2))**0.78*(self.rmin[kk,-1]/self.RTOR[kk])**0.58*(self.AMAIN[kk,1])**0.19 # tau98 computed with line avg density + self.n_Angioni[kk] = 1.347-0.117*math.log(max(1.e-12,0.2*self.ne_avg[kk]*self.RTOR[kk]*self.Te_avg[kk]**(-2)))+1.331*self.SNEBM_tot[kk]-4.03*self.beta[kk] # Angioni density peaking scaling + self.shear[kk,:] = -self.rmin[kk,:]/self.Mu[kk,:]*np.gradient(self.Mu[kk,:]/self.rmin[kk,:]) # magnetic shear + self.PBRAD[kk,:] = 5.06E-5*self.ZEF[kk,:]*self.ne[kk,:]**2*self.Te[kk,:]**0.5 # Bremmstrahlung radiation + self.PSYNC[kk,:] = 1.32E-7*(self.Te_avg[kk]*self.BTOR[kk])**2.5*np.sqrt(self.ne_avg[kk]/self.AB[kk]*(1.+18.*self.AB[kk]/(self.RTOR[kk]*np.sqrt(self.Te_avg[kk])))) # Synchrotron radiation + # normalized gradients + for jj in range(0,self.radialsize-1): + self.rlte[kk,jj]=-self.RTOR[-1]/(0.5*(self.Te[kk,jj]+self.Te[kk,jj+1])*(self.rmin[kk,jj+1]-self.rmin[kk,jj])/(self.Te[kk,jj+1]-self.Te[kk,jj])) + self.rlti[kk,jj]=-self.RTOR[-1]/(0.5*(self.Ti[kk,jj]+self.Ti[kk,jj+1])*(self.rmin[kk,jj+1]-self.rmin[kk,jj])/(self.Ti[kk,jj+1]-self.Ti[kk,jj])) + self.rlne[kk,jj]=-self.RTOR[-1]/(0.5*(self.ne[kk,jj]+self.ne[kk,jj+1])*(self.rmin[kk,jj+1]-self.rmin[kk,jj])/(self.ne[kk,jj+1]-self.ne[kk,jj])) + self.rlte[kk,jj+1]=self.rlte[kk,jj] # normalized 
logaritmic Te gradient + self.rlti[kk,jj+1]=self.rlti[kk,jj] # normalized logaritmic Ti gradient + self.rlne[kk,jj+1]=self.rlne[kk,jj] # normalized logaritmic ne gradient + #### --------------- Calculation of W radiation by Puetterich formula ------------------------- ### + for jj in range(0,self.radialsize): + T = self.Te[kk,jj]*1000. + Z = np.log10(self.Te[kk,jj]) + if T <= 25.25: + self.PRWOL_PUET_dens[kk,jj] = 20.*self.ne[kk,jj] + elif T > 25.25 and T <= 300.: + self.PRWOL_PUET_dens[kk,jj] = (-(150.984*Z**4 + 566.56*Z**3 + 729.562*Z**2 + 377.649*Z + 47.922))*self.ne[kk,jj] + elif T > 300. and T <= 3350.: + self.PRWOL_PUET_dens[kk,jj] = (-119.946*Z**3 - 82.821*Z**2 + 32.707*Z + 42.603)*self.ne[kk,jj] + elif T > 3350.: + self.PRWOL_PUET_dens[kk,jj] = (4.7 + 14.484*np.exp(-3.4196*(Z - 0.602)**2))*self.ne[kk,jj] + self.QBRAD[kk,:] = np.cumsum(self.PBRAD[kk,:]*self.HRO[kk]*self.VR[kk,:]) + self.QSYNC[kk,:] = np.cumsum(self.PSYNC[kk,:]*self.HRO[kk]*self.VR[kk,:]) + + self.CuOhm = self.CC*self.ULON/(self.RTOR[-1]*2*np.pi) # Ohmic current density + self.n_Gr = self.IPL/(np.pi*self.ABC**2) # Greenwald density + self.f_Gr = self.ne_avg/10/self.n_Gr # Greenwald fraction + self.tauE = self.Wtot/(self.QRAD+self.QE+self.QI) # energy confinement time + self.H98 = self.tauE[:,-1]/self.tau98 # H98 + self.H89 = self.tauE[:,-1]/self.tau89 # H89 + self.H98_lineavg = self.tauE[:,-1]/self.tau98_lineavg # H98 with line average density + self.fMAIN = self.NMAIN/self.ne # main ion concentration + self.f1 = self.NIZ1/self.ne # 1st impurity concentration + self.f2 = self.NIZ2/self.ne # 2nd impurity concentration + self.f3 = self.NIZ3/self.ne # 3rd impurity concentration + self.ptot = (self.ne*self.Te+self.ni*self.Ti+0.5*(self.PBPER+self.PBLON))*1.6e-3 # total pressure, in MPa + self.quasi = (self.f['NE'][:]-self.f['NMAIN'][:]*self.f['ZMAIN'][:]-self.f['NIZ1'][:]*self.f['ZIM1'][:]-self.f['NIZ2'][:]*self.f['ZIM2'][:]-self.f['NIZ3'][:]*self.f['ZIM3'][:])/self.f['NE'][:] # check if QN is fulfilled (low value expected) + # some global and performance parameters + self.Pfus = self.QDT/0.2 # fusion power (in the D+T fusion reactions 20% goes to He and 80% to neutrons) + self.Q = self.Pfus[:,-1]/(self.QICRH[:,-1]+self.QOH[:,-1]) # fusion gain + self.q_Uckan = 5*self.ABC**2*self.BTOR/(self.RTOR*self.IPL)*(1+self.kappa95**2*(1+2*self.delta95**2-1.2*self.delta95**3))/2 + self.ne_PLHmin = 0.07*(self.IPL)**0.34*(self.BTOR)**0.62*(self.RTOR)**(-0.95)*(self.RTOR/self.ABC)**0.4 # LH transition minimum density + self.ne_PLHmin_perc = self.ne_avg/10/self.ne_PLHmin # percentage of LH transition minimum density (>1 to use Martin scaling) + # Martin scaling + self.PLH = 0.0488*(self.ne_avg/10.)**0.717*(self.BTOR)**0.803*(self.SLAT[:,-1])**0.941*(2/self.AMAIN[:,-1]) # LH power threshold + self.PLH_lower = 0.0488*math.exp(-0.057)*(self.ne_avg/10.)**0.682*(self.BTOR)**0.771*(self.SLAT[:,-1])**0.922*(2/self.AMAIN[:,-1]) # lower error bar of LH power threshold + self.PLH_upper = 0.0488*math.exp(0.057)*(self.ne_avg/10.)**0.752*(self.BTOR)**0.835*(self.SLAT[:,-1])**0.96*(2/self.AMAIN[:,-1]) # upper error bar of LH power threshold + self.PLH_perc = (self.QE[:,-1]+self.QI[:,-1])/self.PLH # Psep/PLH + self.PLH_lower_perc = (self.QE[:,-1]+self.QI[:,-1])/self.PLH_lower # lower error bar of LH power threshold + self.PLH_upper_perc = (self.QE[:,-1]+self.QI[:,-1])/self.PLH_upper # upper error bar of LH power threshold + self.fLH_Martin = self.PLH_perc + # "Metal wall" scaling + self.PLH_metal = 
0.044*(self.ne_avg/10.)**1.06*(self.BTOR)**0.54*(self.SLAT[:,-1])*(2/self.AMAIN[:,-1])**(0.965) # LH power threshold + self.PLH_metal_perc = (self.QE[:,-1]+self.QI[:,-1])/self.PLH_metal # Psep/PLH + self.fLH_metal = self.PLH_metal_perc + # Schmidtmayr scaling + self.PLH_schmidtmayr = 0.0325*(self.ne_avg/10.)**1.05*(self.BTOR)**0.68*(self.SLAT[:,-1])**0.93*(2/self.AMAIN[:,-1]) # LH power threshold + self.PLH_schmidt_perc = (self.QI[:,-1])/self.PLH_schmidtmayr # Psep/PLH + self.fLH_Schmidt = self.PLH_schmidt_perc + self.a = self.rmin[:, -1] # LCFS minor radius at OMP rtor_matrix = np.zeros(self.rho.shape) for i in range(rtor_matrix.shape[1]): rtor_matrix[:, i] = self.RTOR[:] - - self.a = self.rmin[:, -1] self.rmaj_LFx = ( rtor_matrix + self.shif + self.rmin - ) # major radius on the low field side + ) # major radius on the low field side def calcProfiles(self): self.getProfiles() @@ -430,7 +706,7 @@ def build_notebook(self, time_aims): self.fn.show() # sys.exit(self.fn.app.exec_()) - self.fn.deleteGui() + # self.fn.deleteGui() def get_rho_tor_indices(self, rho_tor_aims): """ @@ -441,7 +717,7 @@ def get_rho_tor_indices(self, rho_tor_aims): self.rho_tor_aims = rho_tor_aims self.i_rho_tor_aims = [] - for t in range(len(self.t)): + for t in range(self.timesteps): rho_t = np.array(self.rho[t, :]) ROC_t = self.ROC[t] rho_tor_t = rho_t / ROC_t @@ -478,7 +754,7 @@ def make_temporal_plots(self, axis, param, rho_tor_aims, linestyle="solid"): for i in range(self.i_rho_tor_aims.shape[1]): param_list = [] - for t in range(len(self.t)): + for t in range(self.timesteps): i_rho_tor = self.i_rho_tor_aims[t, i] param_list.append(param[t, i_rho_tor]) @@ -521,28 +797,30 @@ def plot_temp(self, time_aims, rho_tor_aims=[0.2, 0.3, 0.4, 0.5, 0.6, 0.7, 0.8]) ## Make temporal figures ## self.axTet = fig.add_subplot(2, 2, 1) self.make_temporal_plots(self.axTet, self.Te, rho_tor_aims) - - self.axTet.set_ylabel("Te (keV)") + self.axTet.set_xlabel("Time (s)") + self.axTet.set_ylabel("$T_e$ (keV)") self.axTit = fig.add_subplot(2, 2, 3) self.make_temporal_plots(self.axTit, self.Ti, rho_tor_aims) - self.axTit.set_xlabel("Time") - self.axTit.set_ylabel("Ti (keV)") + self.axTit.set_xlabel("Time (s)") + self.axTit.set_ylabel("$T_i$ (keV)") - plt.legend(title=r"$\rho_{tor}$") + plt.legend(title=r"$\rho_{tor}$",loc='upper left') #, bbox_to_anchor=(1, 1)) ## Make radial figures ## self.axTer = fig.add_subplot(2, 2, 2) self.make_radial_plots(self.axTer, self.Te, time_aims) - self.axTer.set_ylabel("Te (keV)") + self.axTer.set_ylabel("$T_e$ (keV)") self.axTir = fig.add_subplot(2, 2, 4) self.make_radial_plots(self.axTir, self.Ti, time_aims) - self.axTir.set_ylabel("Ti (keV)") + self.axTir.set_ylabel("$T_i$ (keV)") - plt.legend(title="Times") + plt.legend(title="Times") #, bbox_to_anchor=(1, 1)) + fig.tight_layout() + def plot_gradients( self, time_aims, rho_tor_aims=[0.2, 0.3, 0.4, 0.5, 0.6, 0.7, 0.8] ): @@ -552,34 +830,36 @@ def plot_gradients( ## Make temporal figures ## self.axaLTet = fig.add_subplot(2, 3, 1) self.make_temporal_plots(self.axaLTet, self.aLTe, rho_tor_aims) - self.axaLTet.set_ylabel("aLTe") - plt.legend(title=r"$\rho_{tor}$") + self.axaLTet.set_ylabel("$a\\nabla T_e/T_e$") + plt.legend(title=r"$\rho_{tor}$",loc='upper left') #, bbox_to_anchor=(1, 1)) self.axaLTit = fig.add_subplot(2, 3, 2) self.make_temporal_plots(self.axaLTit, self.aLTi, rho_tor_aims) - self.axaLTit.set_ylabel("aLTi") - plt.legend(title=r"$\rho_{tor}$") + self.axaLTit.set_ylabel("$a\\nabla T_i/T_i$") + #plt.legend(title=r"$\rho_{tor}$",loc='upper 
left') #, bbox_to_anchor=(1, 1)) self.axaLnet = fig.add_subplot(2, 3, 3) self.make_temporal_plots(self.axaLnet, self.aLne, rho_tor_aims) - self.axaLnet.set_ylabel("aLne") - plt.legend(title=r"$\rho_{tor}$") + self.axaLnet.set_ylabel("$a\\nabla n_e/n_e$") + #plt.legend(title=r"$\rho_{tor}$",loc='upper left') #, bbox_to_anchor=(1, 1)) ##Make radial figures ## self.axaLTer = fig.add_subplot(2, 3, 4) self.make_radial_plots(self.axaLTer, self.aLTe, time_aims) - self.axaLTer.set_ylabel("aLTe") - plt.legend(title="Times") + self.axaLTer.set_ylabel("$a\\nabla T_e/T_e$") + plt.legend(title="Times") #, bbox_to_anchor=(1, 1)) self.axaLTir = fig.add_subplot(2, 3, 5) self.make_radial_plots(self.axaLTir, self.aLTi, time_aims) - self.axaLTir.set_ylabel("aLTi") - plt.legend(title="Times") + self.axaLTir.set_ylabel("$a\\nabla T_i/T_i$") + #plt.legend(title="Times") #, bbox_to_anchor=(1, 1)) self.axaLner = fig.add_subplot(2, 3, 6) self.make_radial_plots(self.axaLner, self.aLne, time_aims) - self.axaLner.set_ylabel("aLne") - plt.legend(title="Times") + self.axaLner.set_ylabel("$a\\nabla n_e/n_e$") + #plt.legend(title="Times") #, bbox_to_anchor=(1, 1)) + + #fig.tight_layout() def plot_density(self, time_aims, rho_tor_aims=[0.2, 0.3, 0.4, 0.5, 0.6, 0.7, 0.8]): fig = self.fn.add_figure(label="Density Profiles") @@ -587,12 +867,12 @@ def plot_density(self, time_aims, rho_tor_aims=[0.2, 0.3, 0.4, 0.5, 0.6, 0.7, 0. # Make temporal figures self.axnet = fig.add_subplot(2, 3, 1) self.make_temporal_plots(self.axnet, self.ne, rho_tor_aims) - self.axnet.set_ylabel("Density [1/m^3]") - #plt.legend(title=r"$\rho_{tor}$") + self.axnet.set_ylabel("Density [$10^{19}/m^3$]") + plt.legend(title=r"$\rho_{tor}$",loc='upper left') #, bbox_to_anchor=(1, 1)) self.axCut = fig.add_subplot(2, 3, 2) self.make_temporal_plots(self.axCut, self.Cu, rho_tor_aims) - self.axCut.set_ylabel("J [MA/m^3]") + self.axCut.set_ylabel("J [$MA/m^2$]") self.axqt = fig.add_subplot(2, 3, 3) self.make_temporal_plots(self.axqt, self.q, rho_tor_aims) @@ -601,18 +881,20 @@ def plot_density(self, time_aims, rho_tor_aims=[0.2, 0.3, 0.4, 0.5, 0.6, 0.7, 0. 
# Make radial figures self.axner = fig.add_subplot(2, 3, 4) - self.axner.set_ylabel("Density (1/m^3)") + self.axner.set_ylabel("Density ($10^{19}/m^3$)") self.make_radial_plots(self.axner, self.ne, time_aims) self.axCur = fig.add_subplot(2, 3, 5) - self.axCur.set_ylabel("J[MA/m^3]") + self.axCur.set_ylabel("J [$MA/m^2$]") self.make_radial_plots(self.axCur, self.Cu, time_aims) - plt.legend(title="Times") + plt.legend(title="Times") #, bbox_to_anchor=(1, 1)) self.axqr = fig.add_subplot(2, 3, 6) self.make_radial_plots(self.axqr, self.q, time_aims) self.axqr.set_ylabel("q") + #fig.tight_layout() + def plot_powers_t( self, time_aims, rho_tor_aims=[0.2, 0.3, 0.4, 0.5, 0.6, 0.7, 0.8] ): @@ -623,24 +905,30 @@ def plot_powers_t( # Make temporal figures self.axPEt = fig.add_subplot(2, 3, 1) self.make_temporal_plots(self.axPEt, self.PE, rho_tor_aims) - self.axPEt.set_ylabel("PE (MW/m^3)") - plt.legend(title=r"$\rho_{tor}$") + self.axPEt.set_ylabel("$P_E$ ($MW/m^3$)") + plt.legend(title=r"$\rho_{tor}$",loc='upper left') #, bbox_to_anchor=(1, 1)) self.axPIt = fig.add_subplot(2, 3, 2) self.make_temporal_plots(self.axPIt, self.PI, rho_tor_aims) - self.axPIt.set_ylabel("PI (MW/m^3)") + self.axPIt.set_ylabel("$P_I$ ($MW/m^3$)") self.axPBMt = fig.add_subplot(2, 3, 3) self.make_temporal_plots(self.axPBMt, self.PEBM + self.PIBM, rho_tor_aims) - self.axPBMt.set_ylabel("Total NBI (MW/m^3)") + self.axPBMt.set_ylabel("Total NBI ($MW/m^3$)") self.axPECRt = fig.add_subplot(2, 3, 4) self.make_temporal_plots(self.axPECRt, self.PEECR, rho_tor_aims) - self.axPECRt.set_ylabel("Total ECH (MW/m^3)") + self.axPECRt.set_ylabel("Total ECH ($MW/m^3$)") self.axPRADt = fig.add_subplot(2, 3, 5) self.make_temporal_plots(self.axPRADt, self.PRAD, rho_tor_aims) - self.axPRADt.set_ylabel("PRAD (MW/m^3)") + self.axPRADt.set_ylabel("$P_{RAD}$ ($MW/m^3$)") + + self.axPFUSt = fig.add_subplot(2, 3, 6) + self.make_temporal_plots(self.axPFUSt, (self.PEDT+self.PIDT)*5, rho_tor_aims) + self.axPFUSt.set_ylabel("$P_{FUS}$ ($MW/m^3$)") + + #fig.tight_layout() def plot_powers_r( self, time_aims, rho_tor_aims=[0.2, 0.3, 0.4, 0.5, 0.6, 0.7, 0.8] @@ -651,24 +939,30 @@ def plot_powers_r( # Make temporal figures self.axPEr = fig.add_subplot(2, 3, 1) self.make_radial_plots(self.axPEr, self.PE, time_aims) - self.axPEr.set_ylabel("PE (MW/m^3)") - plt.legend(title="Times [s]") + self.axPEr.set_ylabel("$P_E$ ($MW/m^3$)") + plt.legend(title="Times [s]") #, bbox_to_anchor=(1, 1)) self.axPIr = fig.add_subplot(2, 3, 2) self.make_radial_plots(self.axPIr, self.PI, time_aims) - self.axPIr.set_ylabel("PI (MW/m^3)") + self.axPIr.set_ylabel("$P_I$ ($MW/m^3$)") self.axPBMr = fig.add_subplot(2, 3, 3) self.make_radial_plots(self.axPBMr, self.PEBM + self.PIBM, time_aims) - self.axPBMr.set_ylabel("Total NBI (MW/m^3)") + self.axPBMr.set_ylabel("Total NBI ($MW/m^3$)") self.axPECRr = fig.add_subplot(2, 3, 4) self.make_radial_plots(self.axPECRr, self.PEECR, time_aims) - self.axPECRr.set_ylabel("Total ECH (MW/m^3)") + self.axPECRr.set_ylabel("Total ECH ($MW/m^3$)") self.axPRADr = fig.add_subplot(2, 3, 5) self.make_radial_plots(self.axPRADr, self.PRAD, time_aims) - self.axPRADr.set_ylabel("PRAD (MW/m^3)") + self.axPRADr.set_ylabel("$P_{rad}$ ($MW/m^3$)") + + self.axPFUSr = fig.add_subplot(2, 3, 6) + self.make_radial_plots(self.axPFUSr, (self.PEDT+self.PIDT)*5, time_aims) + self.axPFUSr.set_ylabel("$P_{fus}$ ($MW/m^3$)") + + #fig.tight_layout() def plot_chi_e(self, time_aims, rho_tor_aims=[0.2, 0.3, 0.4, 0.5, 0.6, 0.7, 0.8]): fig = self.fn.add_figure(label="Chi_e") @@ -676,34 
+970,36 @@ def plot_chi_e(self, time_aims, rho_tor_aims=[0.2, 0.3, 0.4, 0.5, 0.6, 0.7, 0.8] # Make temporal figures self.axchi_et = fig.add_subplot(2, 3, 1) self.make_temporal_plots(self.axchi_et, self.chi_e_TGLF, rho_tor_aims) - self.axchi_et.set_ylabel("TGLF (m^2/s)") + self.axchi_et.set_ylabel("TGLF ($m^2/s$)") self.axchi_e_smoothedt = fig.add_subplot(2, 3, 2) self.make_temporal_plots( self.axchi_e_smoothedt, self.chi_e_TGLF_smoothed, rho_tor_aims ) - self.axchi_e_smoothedt.set_ylabel("Smoothed (m^2/s)") + self.axchi_e_smoothedt.set_ylabel("Smoothed ($m^2/s$)") self.axHEt = fig.add_subplot(2, 3, 3) self.make_temporal_plots(self.axHEt, self.HE, rho_tor_aims) - self.axHEt.set_ylabel("ASTRA (m^2/s)") - plt.legend(title="Times [s]") + self.axHEt.set_ylabel("ASTRA ($m^2/s$)") + plt.legend(title="Times [s]") #, bbox_to_anchor=(1, 1)) # Make radial figures self.axchi_er = fig.add_subplot(2, 3, 4) self.make_radial_plots(self.axchi_er, self.chi_e_TGLF, time_aims) - self.axchi_er.set_ylabel("TGLF (m^2/s)") - plt.legend(title=r"$\rho_{tor}$") + self.axchi_er.set_ylabel("TGLF ($m^2/s$)") + plt.legend(title=r"$\rho_{tor}$",loc='upper left') #, bbox_to_anchor=(1, 1)) self.axchi_e_smoothedr = fig.add_subplot(2, 3, 5) self.make_radial_plots( self.axchi_e_smoothedr, self.chi_e_TGLF_smoothed, time_aims ) - self.axchi_e_smoothedr.set_ylabel("Smoothed (m^2/s)") + self.axchi_e_smoothedr.set_ylabel("Smoothed ($m^2/s$)") self.axHEr = fig.add_subplot(2, 3, 6) self.make_radial_plots(self.axHEr, self.HE, time_aims) - self.axHEr.set_ylabel("ASTRA (m^2/s)") + self.axHEr.set_ylabel("ASTRA ($m^2/s$)") + + #fig.tight_layout() def plot_chi_i(self, time_aims, rho_tor_aims=[0.2, 0.3, 0.4, 0.5, 0.6, 0.7, 0.8]): fig = self.fn.add_figure(label="Chi_i") @@ -711,34 +1007,36 @@ def plot_chi_i(self, time_aims, rho_tor_aims=[0.2, 0.3, 0.4, 0.5, 0.6, 0.7, 0.8] # Make temporal figures self.axchi_it = fig.add_subplot(2, 3, 1) self.make_temporal_plots(self.axchi_it, self.chi_i_TGLF, rho_tor_aims) - self.axchi_it.set_ylabel("TGLF (m^2/s)") + self.axchi_it.set_ylabel("TGLF ($m^2/s$)") self.axchi_i_smoothedt = fig.add_subplot(2, 3, 2) self.make_temporal_plots( self.axchi_i_smoothedt, self.chi_i_TGLF_smoothed, rho_tor_aims ) - self.axchi_i_smoothedt.set_ylabel("Smoothed (m^2/s)") + self.axchi_i_smoothedt.set_ylabel("Smoothed ($m^2/s$)") self.axXIt = fig.add_subplot(2, 3, 3) self.make_temporal_plots(self.axXIt, self.XI, rho_tor_aims) - self.axXIt.set_ylabel("ASTRA (m^2/s)") - plt.legend(title="Times [s]") + self.axXIt.set_ylabel("ASTRA ($m^2/s$)") + plt.legend(title="Times [s]") #, bbox_to_anchor=(1, 1)) # Make radial figures self.axchi_ir = fig.add_subplot(2, 3, 4) self.make_radial_plots(self.axchi_ir, self.chi_i_TGLF, time_aims) - self.axchi_ir.set_ylabel("TGLF (m^2/s)") - plt.legend(title=r"$\rho_{tor}$") + self.axchi_ir.set_ylabel("TGLF ($m^2/s$)") + plt.legend(title=r"$\rho_{tor}$",loc='upper left') #, bbox_to_anchor=(1, 1)) self.axchi_i_smoothedr = fig.add_subplot(2, 3, 5) self.make_radial_plots( self.axchi_i_smoothedr, self.chi_i_TGLF_smoothed, time_aims ) - self.axchi_i_smoothedr.set_ylabel("Smoothed (m^2/s)") + self.axchi_i_smoothedr.set_ylabel("Smoothed ($m^2/s$)") self.axXIr = fig.add_subplot(2, 3, 6) self.make_radial_plots(self.axXIr, self.XI, time_aims) - self.axXIr.set_ylabel("ASTRA (m^2/s)") + self.axXIr.set_ylabel("ASTRA ($m^2/s$)") + + #fig.tight_layout() def plot_heat_fluxes( self, time_aims, rho_tor_aims=[0.2, 0.3, 0.4, 0.5, 0.6, 0.7, 0.8] @@ -754,22 +1052,24 @@ def plot_heat_fluxes( 
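Note on the new $P_{FUS}$ panels in plot_powers_t/plot_powers_r: they multiply (PEDT + PIDT) by 5. Assuming PEDT/PIDT are the D-T alpha heating densities to electrons and ions (which the $P_{FUS}$ label implies), the factor of 5 converts alpha power to total fusion power, since the alpha particle carries only about 1/5 of the 17.6 MeV released per reaction. A quick sanity check:

    # Why (PEDT + PIDT) * 5 ~ P_fus: the alpha carries 3.52 MeV of the 17.59 MeV
    # released per D-T reaction (the neutron carries the rest).
    E_fus_MeV = 17.59
    E_alpha_MeV = 3.52
    print(f"P_fus / P_alpha = {E_fus_MeV / E_alpha_MeV:.2f}")   # ~5.0
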
self.make_temporal_plots(self.axQit, self.Qi, rho_tor_aims) self.axQit.set_ylabel("Qi (MW)") - plt.legend(title=r"$\rho_{tor}$") + plt.legend(title=r"$\rho_{tor}$",loc='upper left') #, bbox_to_anchor=(1, 1)) # Make radial figures self.axQer = fig.add_subplot(2, 2, 3) - self.axQer.set_ylabel("Qe(MW)") + self.axQer.set_ylabel("Qe (MW)") self.make_radial_plots(self.axQer, self.Qe, time_aims) self.axQir = fig.add_subplot(2, 2, 4) self.axQir.set_ylabel("Qi (MW)") self.make_radial_plots(self.axQir, self.Qi, time_aims) - plt.legend(title="Times") + plt.legend(title="Times") #, bbox_to_anchor=(1, 1)) + + #fig.tight_layout() def plot_flux_matching( - self, time_aims, rho_tor_aims=[0.5], qe_lim=[0, 5], qi_lim=[0, 5], qn_lim=[0, 5] + self, time_aims, rho_tor_aims=[0.3, 0.5, 0.7], qe_lim=[0, 5], qi_lim=[0, 5], qn_lim=[0, 5] ): last_time = [time_aims[-1]] @@ -785,26 +1085,30 @@ def plot_flux_matching( # self.make_temporal_plots(self.axQet, self.QETOT, rho_tor_aims) self.axQet.set_ylabel("Qe") # self.axQet.set_ylim(qe_lim) - plt.legend(["Qe", "Qetot"], title=r"$\rho_{tor}$ = " + str(rho_tor_aims[0])) + #plt.legend(["Qe", "Qetot"], title=r"$\rho_{tor}$ = " + str(rho_tor_aims[0])) #, bbox_to_anchor=(1, 1)) + plt.legend(title=r"$\rho_{tor}$",loc='upper left') self.axQit = fig.add_subplot(2, 3, 2) self.make_temporal_plots(self.axQit, self.Qi, rho_tor_aims, linestyle="dashed") # self.make_temporal_plots(self.axQit, self.QITOT, rho_tor_aims) self.axQit.set_ylabel("Qi") # self.axQit.set_ylim(qi_lim) - plt.legend(["Qi", "Qitot"], title=r"$\rho_{tor}$ = " + str(rho_tor_aims[0])) + #plt.legend(["Qi", "Qitot"], title=r"$\rho_{tor}$ = " + str(rho_tor_aims[0])) #, bbox_to_anchor=(1, 1)) + plt.legend(title=r"$\rho_{tor}$",loc='upper left') self.axQnt = fig.add_subplot(2, 3, 3) self.make_temporal_plots( self.axQnt, self.Qn / self.G11, rho_tor_aims, linestyle="dashed" ) # self.make_temporal_plots(self.axQnt, self.QNTOT / self.G11, rho_tor_aims) - self.axQnt.set_ylabel("Qn") + self.axQnt.set_ylabel("Qn/volume") # self.axQnt.set_ylim(qn_lim) - plt.legend( - ["Qn/volume", "Qntot/volume"], - title=r"$\rho_{tor}$ = " + str(rho_tor_aims[0]), - ) + #plt.legend( + # ["Qn/volume", "Qntot/volume"], + # title=r"$\rho_{tor}$ = " + str(rho_tor_aims[0]), bbox_to_anchor=(1, 1), + #) + plt.legend(title=r"$\rho_{tor}$",loc='upper left') + self.axQnt.axhline(y=0.,linestyle='-.',c='k') ## Make radial figures ## @@ -814,7 +1118,7 @@ def plot_flux_matching( self.axQer.set_xlim([0, self.boundary]) self.axQer.set_ylim(qe_lim) self.axQer.set_ylabel("Qe") - plt.legend(["Qe", "Qetot"], title="Time = " + str(last_time[0])) + plt.legend(["Qe", "Qetot"], title="Time = " + str(last_time[0])) #, bbox_to_anchor=(1, 1)) self.axQir = fig.add_subplot(2, 3, 5) self.make_radial_plots(self.axQir, self.Qi, last_time, linestyle="dashed") @@ -822,7 +1126,7 @@ def plot_flux_matching( self.axQir.set_xlim([0, self.boundary]) self.axQir.set_ylim(qi_lim) self.axQir.set_ylabel("Qi") - plt.legend(["Qi", "Qitot"], title="Time = " + str(last_time[0])) + plt.legend(["Qi", "Qitot"], title="Time = " + str(last_time[0])) #, bbox_to_anchor=(1, 1)) self.axQnr = fig.add_subplot(2, 3, 6) self.make_radial_plots(self.axQnr, self.Qn, last_time, linestyle="dashed") @@ -830,7 +1134,9 @@ def plot_flux_matching( self.axQnr.set_xlim([0, self.boundary]) self.axQnr.set_ylim(qn_lim) self.axQnr.set_ylabel("Qn") - plt.legend(["Qn/volume", "Qntot/volume"], title="Time = " + str(last_time[0])) + plt.legend(["Qn/volume", "Qntot/volume"], title="Time = " + str(last_time[0])) #, 
bbox_to_anchor=(1, 1)) + + #fig.tight_layout() def plot_pulse( self, @@ -839,19 +1145,388 @@ def plot_pulse( ): fig = self.fn.add_figure(label="Pulse") - ## Make temporal figures ## - - self.axCAR53t = fig.add_subplot(2, 2, 1) - self.make_temporal_plots(self.axCAR53t, self.CAR53, rho_tor_aims) - self.axCAR53t.set_ylabel("Pulse [MW/m^3]") - plt.legend(title=r"$\rho_{tor}$") - - ## Make radial figures - - self.axCAR53r = fig.add_subplot(2, 2, 3) - self.make_radial_plots(self.axCAR53r, self.CAR53, time_aims) - self.axCAR53r.set_ylabel("Pulse [MW/m^3]") - plt.legend(title="Times") + ## plot performance + + ''' + self.axQ = fig.add_subplot(2, 3, 1) + self.axQ.plot(self.t, self.Pfus[:,-1],label="$P_{fus}$ (MW)") + self.axQ.plot(self.t, self.Q,label="Q") + self.axQ.set_yscale('log') + self.axQ.set_ylabel("performance parameters") + self.axQ.set_xlabel("time (s)") + plt.legend() + ''' + + ## plot EPED stuff + + self.axEPED = fig.add_subplot(2, 3, 1) + self.axEPED.plot(self.t, self.ZRD50/10,label="$n_{e,top}$ ($10^{20}m^{-3}$)") + self.axEPED.plot(self.t, self.ZRD49/1.e3,label="$p_{top}$ (MPa)") + self.axEPED.set_ylabel("EPED values") + self.axEPED.set_xlabel("time (s)") + plt.legend() + + ## plot confinement + + self.axtau = fig.add_subplot(2, 3, 3) + self.axtau.plot(self.t, self.tauE[:,-1],label="$\\tau_{e}$ (s)") + self.axtau.plot(self.t, self.H98,label="H98") + self.axtau.set_ylabel("performance parameters") + self.axtau.set_xlabel("time (s)") + plt.legend() + + ## plot shaping and q values + + self.axq = fig.add_subplot(2, 3, 2) + self.axq.plot(self.t, self.q95/self.q95[0], label='$q_{95}$ normalized') + self.axq.plot(self.t, self.q_onaxis/self.q_onaxis[0], label='q0 normalized') + self.axq.plot(self.t, self.kappa95, label='$k_{95}$') + self.axq.plot(self.t, self.delta95, label='$\\delta_{95}$') + self.axq.plot(self.t, self.trian, label='$\\delta_{sep}$') + self.axq.plot(self.t, self.elong, label='$k_{sep}$') + self.axq.set_ylabel("shaping and safety factor") + self.axq.set_xlabel("time (s)") + plt.legend() + + ## plot beta and averaged kinetic profiles + + self.axglob = fig.add_subplot(2, 3, 4) + self.axglob.plot(self.t, self.betaN/self.betaN[0],label="$\\beta_N$ normalized") + self.axglob.plot(self.t, self.ne_avg/self.ne_avg[0],label="$n_{e,avg}$ normalized") + self.axglob.plot(self.t, self.Te_avg/self.Te_avg[0],label="$T_{e,avg}$ normalized") + self.axglob.plot(self.t, self.Ti_avg/self.Ti_avg[0],label="$T_{i,avg}$ normalized") + self.axglob.set_ylabel("global parameters") + self.axglob.set_xlabel("time (s)") + #self.axglob.set_yscale("log") + plt.legend() + + ## plot Hmode parameters + + self.axPLH = fig.add_subplot(2, 3, 5) + self.axPLH.plot(self.t, self.PLH_perc,label="Martin") + self.axPLH.plot(self.t, self.PLH_schmidt_perc,label="Schmidtmayr") + self.axPLH.set_ylabel("$P_{sep}/P_{LH}$") + self.axPLH.set_xlabel("time (s)") + plt.legend() + + ## plot total powers + + self.axP = fig.add_subplot(2, 3, 6) + self.axP.plot(self.t, self.QDT[:,-1]*5,label="fusion") + self.axP.plot(self.t, self.QICRH[:,-1],label="ICRH") + self.axP.plot(self.t, self.QECRH[:,-1],label="ECRH") + self.axP.plot(self.t, self.QNBI[:,-1],label="NBI") + self.axP.plot(self.t, self.QRAD[:,-1],label="radiation") + self.axP.plot(self.t, self.QOH[:,-1],label="ohmic") + self.axP.plot(self.t, self.QETOT[:,-1],label="electron total") + self.axP.plot(self.t, self.QITOT[:,-1],label="ion total") + self.axP.set_ylabel("P (MW)") + self.axP.set_xlabel("time (s)") + plt.legend() + + #fig.tight_layout() + + def plot_2_pulses( + 
self,second_pulse, + time_aims=[10.20, 10.201, 10.2015, 10.202, 10.210, 10.212], + rho_tor_aims=[0.2, 0.25, 0.3, 0.35, 0.4, 0.5, 0.6], + ): + self.getProfiles() + second_pulse.getProfiles() + # time_index = time_index(time) + name = "ASTRA CDF Viewer" + self.fn = FigureNotebook(name,vertical=False) + fig = self.fn.add_figure(label="Solid = first pulse, dashed = second pulse") + + ## plot performance + + ''' + self.axQ = fig.add_subplot(2, 3, 1) + self.axQ.plot(self.t, self.Pfus[:,-1],label="$P_{fus}$ (MW)") + self.axQ.plot(self.t, self.Q,label="Q") + self.axQ.set_yscale('log') + self.axQ.set_ylabel("performance parameters") + self.axQ.set_xlabel("time (s)") + plt.legend() + ''' + + ## plot EPED stuff + + self.axEPED = fig.add_subplot(2, 3, 1) + self.axEPED.plot(self.t, self.ZRD50/10,label="$n_{e,top}$ ($10^{20}m^{-3}$)",c='b') + self.axEPED.plot(self.t, self.ZRD49/1.e3,label="$p_{top}$ (MPa)",c='r') + self.axEPED.plot(second_pulse.t, second_pulse.ZRD50/10,c='b',linestyle='--') + self.axEPED.plot(second_pulse.t, second_pulse.ZRD49/1.e3,c='r',linestyle='--') + self.axEPED.set_ylabel("EPED values") + self.axEPED.set_xlabel("time (s)") + plt.legend() + + ## plot confinement + + self.axtau = fig.add_subplot(2, 3, 3) + self.axtau.plot(self.t, self.tauE[:,-1],label="$\\tau_{e}$ (s)",c='b') + self.axtau.plot(self.t, self.H98,label="H98",c='r') + self.axtau.plot(second_pulse.t, second_pulse.tauE[:,-1],linestyle='--') + self.axtau.plot(second_pulse.t, second_pulse.H98,c='r',linestyle='--') + self.axtau.set_ylabel("performance parameters") + self.axtau.set_xlabel("time (s)") + self.axtau.axhline(y=1.0, ls='-.',c='k') + self.axtau.set_ylim(bottom=0) + plt.legend() + + ## plot shaping and q values + + self.axq = fig.add_subplot(2, 3, 2) + self.axq.plot(self.t, self.kappa95, label='$k_{95}$',c='b') + self.axq.plot(self.t, self.delta95, label='$\\delta_{95}$',c='r') + self.axq.plot(self.t, self.trian, label='$\\delta_{sep}$',c='g') + self.axq.plot(self.t, self.elong, label='$k_{sep}$',c='k') + self.axq.plot(self.t, self.q95, label='$q_{95}$',c='y') + self.axq.plot(self.t, self.q_onaxis, label='q0',c='orange') + self.axq.plot(second_pulse.t, second_pulse.kappa95,c='b',linestyle='--') + self.axq.plot(second_pulse.t, second_pulse.delta95,c='r',linestyle='--') + self.axq.plot(second_pulse.t, second_pulse.trian,c='g',linestyle='--') + self.axq.plot(second_pulse.t, second_pulse.elong,c='k',linestyle='--') + self.axq.plot(second_pulse.t, second_pulse.q95,c='y',linestyle='--') + self.axq.plot(second_pulse.t, second_pulse.q_onaxis,c='orange',linestyle='--') + self.axq.set_ylabel("shaping and safety factor") + self.axq.set_xlabel("time (s)") + plt.legend() + + ## plot beta and averaged kinetic profiles + + self.axglob = fig.add_subplot(2, 3, 4) + self.axglob.plot(self.t, self.betaN,label="$\\beta_N$",c='b') + self.axglob.plot(self.t, self.ne_avg,label="$n_{e,avg}$",c='k') + self.axglob.plot(self.t, self.Te_avg,label="$T_{e,avg}$",c='y') + self.axglob.plot(self.t, self.Ti_avg,label="$T_{i,avg}$",c='orange') + self.axglob.plot(self.t, self.ne[:,int(0.2*self.na1[-1])]/self.ne_avg,label="$\\nu_{n_e}$",c='purple') + self.axglob.plot(second_pulse.t, second_pulse.betaN,c='b',linestyle='--') + self.axglob.plot(second_pulse.t, second_pulse.ne_avg,c='k',linestyle='--') + self.axglob.plot(second_pulse.t, second_pulse.Te_avg,c='y',linestyle='--') + self.axglob.plot(second_pulse.t, second_pulse.Ti_avg,c='orange',linestyle='--') + self.axglob.plot(second_pulse.t, 
second_pulse.ne[:,int(0.2*second_pulse.na1[-1])]/second_pulse.ne_avg,c='purple',linestyle='--') + self.axglob.set_ylabel("global parameters") + self.axglob.set_xlabel("time (s)") + #self.axglob.set_yscale('log') + plt.legend() + + ## plot Hmode parameters + + self.axPLH = fig.add_subplot(2, 3, 5) + self.axPLH.plot(self.t, self.PLH_perc,label="Martin",c='b') + self.axPLH.plot(self.t, self.PLH_schmidt_perc,label="Schmidtmayr",c='r') + self.axPLH.plot(second_pulse.t, second_pulse.PLH_perc,c='b',linestyle='--') + self.axPLH.plot(second_pulse.t, second_pulse.PLH_schmidt_perc,c='r',linestyle='--') + self.axPLH.set_ylabel("$P_{sep}/P_{LH}$") + self.axPLH.set_xlabel("time (s)") + self.axPLH.axhline(y=1.0, ls='-.',c='k') + self.axPLH.set_ylim(bottom=0) + plt.legend() + + ## plot total powers + + self.axP = fig.add_subplot(2, 3, 6) + self.axP.plot(self.t, self.QDT[:,-1]*5,label="fusion",c='b') + self.axP.plot(self.t, self.QICRH[:,-1],label="ICRH",c='r') + self.axP.plot(self.t, self.QECRH[:,-1],label="ECRH",c='g') + self.axP.plot(self.t, self.QNBI[:,-1],label="NBI",c='k') + self.axP.plot(self.t, self.QRAD[:,-1],label="radiation",c='y') + self.axP.plot(self.t, self.QOH[:,-1],label="ohmic",c='orange') + self.axP.plot(self.t, self.QETOT[:,-1],label="electron total",c='purple') + self.axP.plot(self.t, self.QITOT[:,-1],label="ion total",c='cyan') + self.axP.plot(second_pulse.t, second_pulse.QDT[:,-1]*5,c='b',linestyle='--') + self.axP.plot(second_pulse.t, second_pulse.QICRH[:,-1],c='r',linestyle='--') + self.axP.plot(second_pulse.t, second_pulse.QECRH[:,-1],c='g',linestyle='--') + self.axP.plot(second_pulse.t, second_pulse.QNBI[:,-1],c='k',linestyle='--') + self.axP.plot(second_pulse.t, second_pulse.QRAD[:,-1],c='y',linestyle='--') + self.axP.plot(second_pulse.t, second_pulse.QOH[:,-1],c='orange',linestyle='--') + self.axP.plot(second_pulse.t, second_pulse.QETOT[:,-1],c='purple',linestyle='--') + self.axP.plot(second_pulse.t, second_pulse.QITOT[:,-1],c='cyan',linestyle='--') + self.axP.set_ylabel("P (MW)") + self.axP.set_xlabel("time (s)") + self.axP.axhline(y=500, ls='-.',c='k') + self.axP.axhline(y=1000, ls='-.',c='k') + #self.axP.set_yscale('log') + plt.legend() + + GRAPHICStools.addDenseAxis(self.axP) + GRAPHICStools.addDenseAxis(self.axtau) + GRAPHICStools.addDenseAxis(self.axglob) + GRAPHICStools.addDenseAxis(self.axPLH) + GRAPHICStools.addDenseAxis(self.axq) + GRAPHICStools.addDenseAxis(self.axEPED) + + GRAPHICStools.addLegendApart(self.axP) + GRAPHICStools.addLegendApart(self.axtau) + GRAPHICStools.addLegendApart(self.axglob) + GRAPHICStools.addLegendApart(self.axPLH) + GRAPHICStools.addLegendApart(self.axq) + GRAPHICStools.addLegendApart(self.axEPED) + + def plot_3_pulses( + self,second_pulse,third_pulse, + time_aims=[10.20, 10.201, 10.2015, 10.202, 10.210, 10.212], + rho_tor_aims=[0.2, 0.25, 0.3, 0.35, 0.4, 0.5, 0.6], + ): + self.getProfiles() + second_pulse.getProfiles() + third_pulse.getProfiles() + # time_index = time_index(time) + name = "ASTRA CDF Viewer" + self.fn = FigureNotebook(name,vertical=False) + fig = self.fn.add_figure(label="Solid = first pulse, dashed = second pulse, dots= third pulse") + + ## plot performance + + ''' + self.axQ = fig.add_subplot(2, 3, 1) + self.axQ.plot(self.t, self.Pfus[:,-1],label="$P_{fus}$ (MW)") + self.axQ.plot(self.t, self.Q,label="Q") + self.axQ.set_yscale('log') + self.axQ.set_ylabel("performance parameters") + self.axQ.set_xlabel("time (s)") + plt.legend() + ''' + + ## plot EPED stuff + + self.axEPED = fig.add_subplot(2, 3, 1) + 
self.axEPED.plot(self.t, self.ZRD50/10,label="$n_{e,top}$ ($10^{20}m^{-3}$)",c='b') + self.axEPED.plot(self.t, self.ZRD49/1.e3,label="$p_{top}$ (MPa)",c='r') + self.axEPED.plot(second_pulse.t, second_pulse.ZRD50/10,c='b',linestyle='--') + self.axEPED.plot(second_pulse.t, second_pulse.ZRD49/1.e3,c='r',linestyle='--') + self.axEPED.plot(third_pulse.t, third_pulse.ZRD50/10,c='b',linestyle='-.') + self.axEPED.plot(third_pulse.t, third_pulse.ZRD49/1.e3,c='r',linestyle='-.') + self.axEPED.set_ylabel("EPED values") + self.axEPED.set_xlabel("time (s)") + plt.legend() + + ## plot confinement + + self.axtau = fig.add_subplot(2, 3, 3) + self.axtau.plot(self.t, self.tauE[:,-1],label="$\\tau_{e}$ (s)",c='b') + self.axtau.plot(self.t, self.H98,label="H98",c='r') + self.axtau.plot(second_pulse.t, second_pulse.tauE[:,-1],linestyle='--') + self.axtau.plot(second_pulse.t, second_pulse.H98,c='r',linestyle='--') + self.axtau.plot(third_pulse.t, third_pulse.tauE[:,-1],linestyle='-.') + self.axtau.plot(third_pulse.t, third_pulse.H98,c='r',linestyle='-.') + self.axtau.set_ylabel("performance parameters") + self.axtau.set_xlabel("time (s)") + self.axtau.axhline(y=1.0, ls='-.',c='k') + self.axtau.set_ylim(bottom=0) + plt.legend() + + ## plot shaping and q values + + self.axq = fig.add_subplot(2, 3, 2) + self.axq.plot(self.t, self.kappa95, label='$k_{95}$',c='b') + self.axq.plot(self.t, self.delta95, label='$\\delta_{95}$',c='r') + self.axq.plot(self.t, self.trian, label='$\\delta_{sep}$',c='g') + self.axq.plot(self.t, self.elong, label='$k_{sep}$',c='k') + self.axq.plot(self.t, self.q95, label='$q_{95}$',c='y') + self.axq.plot(self.t, self.q_onaxis, label='q0',c='orange') + self.axq.plot(second_pulse.t, second_pulse.kappa95,c='b',linestyle='--') + self.axq.plot(second_pulse.t, second_pulse.delta95,c='r',linestyle='--') + self.axq.plot(second_pulse.t, second_pulse.trian,c='g',linestyle='--') + self.axq.plot(second_pulse.t, second_pulse.elong,c='k',linestyle='--') + self.axq.plot(second_pulse.t, second_pulse.q95,c='y',linestyle='--') + self.axq.plot(second_pulse.t, second_pulse.q_onaxis,c='orange',linestyle='--') + self.axq.plot(third_pulse.t, third_pulse.kappa95,c='b',linestyle='-.') + self.axq.plot(third_pulse.t, third_pulse.delta95,c='r',linestyle='-.') + self.axq.plot(third_pulse.t, third_pulse.trian,c='g',linestyle='-.') + self.axq.plot(third_pulse.t, third_pulse.elong,c='k',linestyle='-.') + self.axq.plot(third_pulse.t, third_pulse.q95,c='y',linestyle='-.') + self.axq.plot(third_pulse.t, third_pulse.q_onaxis,c='orange',linestyle='-.') + self.axq.set_ylabel("shaping and safety factor") + self.axq.set_xlabel("time (s)") + plt.legend() + + ## plot beta and averaged kinetic profiles + + self.axglob = fig.add_subplot(2, 3, 4) + self.axglob.plot(self.t, self.betaN,label="$\\beta_N$",c='b') + self.axglob.plot(self.t, self.ne_avg,label="$n_{e,avg}$",c='k') + self.axglob.plot(self.t, self.Te_avg,label="$T_{e,avg}$",c='y') + self.axglob.plot(self.t, self.Ti_avg,label="$T_{i,avg}$",c='orange') + self.axglob.plot(self.t, self.ne[:,int(0.2*self.na1[-1])]/self.ne_avg,label="$\\nu_{n_e}$",c='purple') + self.axglob.plot(second_pulse.t, second_pulse.betaN,c='b',linestyle='--') + self.axglob.plot(second_pulse.t, second_pulse.ne_avg,c='k',linestyle='--') + self.axglob.plot(second_pulse.t, second_pulse.Te_avg,c='y',linestyle='--') + self.axglob.plot(second_pulse.t, second_pulse.Ti_avg,c='orange',linestyle='--') + self.axglob.plot(second_pulse.t, 
second_pulse.ne[:,int(0.2*second_pulse.na1[-1])]/second_pulse.ne_avg,c='purple',linestyle='--') + self.axglob.plot(third_pulse.t, third_pulse.betaN,c='b',linestyle='-.') + self.axglob.plot(third_pulse.t, third_pulse.ne_avg,c='k',linestyle='-.') + self.axglob.plot(third_pulse.t, third_pulse.Te_avg,c='y',linestyle='-.') + self.axglob.plot(third_pulse.t, third_pulse.Ti_avg,c='orange',linestyle='-.') + self.axglob.plot(third_pulse.t, third_pulse.ne[:,int(0.2*third_pulse.na1[-1])]/third_pulse.ne_avg,c='purple',linestyle='-.') + self.axglob.set_ylabel("global parameters") + self.axglob.set_xlabel("time (s)") + #self.axglob.set_yscale('log') + plt.legend() + + ## plot Hmode parameters + + self.axPLH = fig.add_subplot(2, 3, 5) + self.axPLH.plot(self.t, self.PLH_perc,label="Martin",c='b') + self.axPLH.plot(self.t, self.PLH_schmidt_perc,label="Schmidtmayr",c='r') + self.axPLH.plot(second_pulse.t, second_pulse.PLH_perc,c='b',linestyle='--') + self.axPLH.plot(second_pulse.t, second_pulse.PLH_schmidt_perc,c='r',linestyle='--') + self.axPLH.plot(third_pulse.t, third_pulse.PLH_perc,c='b',linestyle='-.') + self.axPLH.plot(third_pulse.t, third_pulse.PLH_schmidt_perc,c='r',linestyle='-.') + self.axPLH.set_ylabel("$P_{sep}/P_{LH}$") + self.axPLH.set_xlabel("time (s)") + self.axPLH.axhline(y=1.0, ls='-.',c='k') + self.axPLH.set_ylim(bottom=0) + plt.legend() + + ## plot total powers + + self.axP = fig.add_subplot(2, 3, 6) + self.axP.plot(self.t, self.QDT[:,-1]*5,label="fusion",c='b') + self.axP.plot(self.t, self.QICRH[:,-1],label="ICRH",c='r') + self.axP.plot(self.t, self.QECRH[:,-1],label="ECRH",c='g') + self.axP.plot(self.t, self.QNBI[:,-1],label="NBI",c='k') + self.axP.plot(self.t, self.QRAD[:,-1],label="radiation",c='y') + self.axP.plot(self.t, self.QOH[:,-1],label="ohmic",c='orange') + self.axP.plot(self.t, self.QETOT[:,-1],label="electron total",c='purple') + self.axP.plot(self.t, self.QITOT[:,-1],label="ion total",c='cyan') + self.axP.plot(second_pulse.t, second_pulse.QDT[:,-1]*5,c='b',linestyle='--') + self.axP.plot(second_pulse.t, second_pulse.QICRH[:,-1],c='r',linestyle='--') + self.axP.plot(second_pulse.t, second_pulse.QECRH[:,-1],c='g',linestyle='--') + self.axP.plot(second_pulse.t, second_pulse.QNBI[:,-1],c='k',linestyle='--') + self.axP.plot(second_pulse.t, second_pulse.QRAD[:,-1],c='y',linestyle='--') + self.axP.plot(second_pulse.t, second_pulse.QOH[:,-1],c='orange',linestyle='--') + self.axP.plot(second_pulse.t, second_pulse.QETOT[:,-1],c='purple',linestyle='--') + self.axP.plot(second_pulse.t, second_pulse.QITOT[:,-1],c='cyan',linestyle='--') + self.axP.plot(third_pulse.t, third_pulse.QDT[:,-1]*5,c='b',linestyle='-.') + self.axP.plot(third_pulse.t, third_pulse.QICRH[:,-1],c='r',linestyle='-.') + self.axP.plot(third_pulse.t, third_pulse.QECRH[:,-1],c='g',linestyle='-.') + self.axP.plot(third_pulse.t, third_pulse.QNBI[:,-1],c='k',linestyle='-.') + self.axP.plot(third_pulse.t, third_pulse.QRAD[:,-1],c='y',linestyle='-.') + self.axP.plot(third_pulse.t, third_pulse.QOH[:,-1],c='orange',linestyle='-.') + self.axP.plot(third_pulse.t, third_pulse.QETOT[:,-1],c='purple',linestyle='-.') + self.axP.plot(third_pulse.t, third_pulse.QITOT[:,-1],c='cyan',linestyle='-.') + self.axP.set_ylabel("P (MW)") + self.axP.set_xlabel("time (s)") + self.axP.axhline(y=500, ls='-.',c='k') + self.axP.axhline(y=1000, ls='-.',c='k') + #self.axP.set_yscale('log') + plt.legend() + + GRAPHICStools.addDenseAxis(self.axP) + GRAPHICStools.addDenseAxis(self.axtau) + GRAPHICStools.addDenseAxis(self.axglob) + 
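The new plot_2_pulses and plot_3_pulses methods repeat every trace once per pulse, changing only the linestyle. A generic overlay helper would remove that duplication and extend to any number of pulses; the sketch below is hypothetical (the helper name and the ndim handling are assumptions, not part of this patch):

    # Hypothetical refactor sketch: overlay one time trace from several CDF objects
    # on a single axis, cycling linestyles ('-', '--', '-.') exactly as above.
    def overlay_pulses(ax, pulses, attribute, color='b', label=None,
                       linestyles=('-', '--', '-.', ':')):
        for pulse, ls in zip(pulses, linestyles):
            y = getattr(pulse, attribute)
            if y.ndim > 1:
                y = y[:, -1]                      # edge value, e.g. tauE[:, -1], QDT[:, -1]
            ax.plot(pulse.t, y, c=color, linestyle=ls,
                    label=label if ls == '-' else None)   # label only the first pulse

    # e.g. overlay_pulses(self.axtau, [self, second_pulse, third_pulse], 'H98', color='r', label='H98')
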
GRAPHICStools.addDenseAxis(self.axPLH) + GRAPHICStools.addDenseAxis(self.axq) + GRAPHICStools.addDenseAxis(self.axEPED) + + GRAPHICStools.addLegendApart(self.axP) + GRAPHICStools.addLegendApart(self.axtau) + GRAPHICStools.addLegendApart(self.axglob) + GRAPHICStools.addLegendApart(self.axPLH) + GRAPHICStools.addLegendApart(self.axq) + GRAPHICStools.addLegendApart(self.axEPED) ### Operations: Not part of the CDF class ### def gradNorm(CDFc, varData, specialDerivative=None): @@ -883,4 +1558,4 @@ def derivativeVar(CDFc, varData, specialDerivative=None, onlyOneTime=False): else: grad = MATHtools.deriv(specialDerivative, varData) - return grad \ No newline at end of file + return grad diff --git a/src/mitim_tools/astra_tools/ASTRAtools.py b/src/mitim_tools/astra_tools/ASTRAtools.py index b5aacac0..62a0a88c 100644 --- a/src/mitim_tools/astra_tools/ASTRAtools.py +++ b/src/mitim_tools/astra_tools/ASTRAtools.py @@ -17,7 +17,7 @@ def __init__(self): pass - def prep(self,folder,file_repo = __mitimroot__ / 'templates' / 'ASTRA8_REPO.tar.gz'): + def prep(self,folder,file_repo = __mitimroot__ / 'templates' / 'ASTRA8_REPO_MIT.tar'): # Folder is the local folder where ASTRA things are, e.g. ~/scratch/testAstra/ @@ -28,19 +28,21 @@ def prep(self,folder,file_repo = __mitimroot__ / 'templates' / 'ASTRA8_REPO.tar. IOtools.askNewFolder(self.folder) # Move files - shutil.copy2(self.file_repo, self.folder / 'ASTRA8_REPO.tar.gz') + shutil.copy2(self.file_repo, self.folder / 'ASTRA8_REPO_MIT.tar') # untar with tarfile.open( - self.folder / "ASTRA8_REPO.tar.gz", "r" + self.folder / "ASTRA8_REPO_MIT.tar", "r" ) as tar: tar.extractall(path=self.folder) - (self.folder / "ASTRA8_REPO.tar.gz").unlink(missing_ok=True) + (self.folder / "ASTRA8_REPO_MIT.tar").unlink(missing_ok=True) # Define basic controls - self.equfile = 'fluxes' - self.expfile = 'aug34954' + #self.equfile = 'fluxes' + #self.expfile = 'aug34954' + self.equfile = 'V2B' + self.expfile = 'V2B' def run(self, t_ini, @@ -71,7 +73,7 @@ def run(self, # What to run self.command_to_run_astra = f''' cd {self.astra_job.folderExecution}/{name} -scripts/as_exe -m {self.equfile} -v {self.expfile} -s {self.t_ini} -e {self.t_end} -dev aug -batch +exe/as_exe -m {self.equfile} -v {self.expfile} -s {self.t_ini} -e {self.t_end} -dev aug -batch ''' self.shellPreCommand = f'cd {self.astra_job.folderExecution}/{name} && ./install.sh' @@ -80,7 +82,8 @@ def run(self, # Execute # --------------------------------------------- - self.output_folder = name / '.res' / 'ncdf' + #self.output_folder = name / '.res' / 'ncdf' + self.output_folder = name / 'ncdf_out' self.astra_job.prep( self.command_to_run_astra, diff --git a/src/mitim_tools/gacode_tools/PROFILEStools.py b/src/mitim_tools/gacode_tools/PROFILEStools.py index 88b53a15..48de2a6f 100644 --- a/src/mitim_tools/gacode_tools/PROFILEStools.py +++ b/src/mitim_tools/gacode_tools/PROFILEStools.py @@ -5,13 +5,12 @@ import matplotlib.pyplot as plt from collections import OrderedDict from mitim_tools.misc_tools import GRAPHICStools, MATHtools, PLASMAtools, IOtools -from mitim_modules.powertorch.physics import GEOMETRYtools, CALCtools +from mitim_modules.powertorch.utils import CALCtools from mitim_tools.gs_tools import GEQtools from mitim_tools.gacode_tools import NEOtools -from mitim_tools.gacode_tools.utils import GACODEdefaults +from mitim_tools.gacode_tools.utils import GACODEdefaults, GEOMETRYtools from mitim_tools.transp_tools import CDFtools from mitim_tools.transp_tools.utils import TRANSPhelpers -from 
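The ASTRA template archive changes from ASTRA8_REPO.tar.gz to the plain ASTRA8_REPO_MIT.tar, but the extraction call can stay the same: tarfile mode "r" means "r:*", i.e. transparent compression detection, so it opens both plain and gzipped archives. A minimal standalone check (file names here are just examples):

    import tarfile

    # Mode "r" auto-detects compression, so the same call handles .tar and .tar.gz alike.
    for archive in ("ASTRA8_REPO_MIT.tar", "ASTRA8_REPO.tar.gz"):   # example paths
        try:
            with tarfile.open(archive, "r") as tar:
                print(archive, "->", len(tar.getnames()), "members")
        except FileNotFoundError:
            print(archive, "not found (expected outside the templates folder)")
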
mitim_tools.gacode_tools.utils import PORTALSinteraction from mitim_tools.misc_tools.LOGtools import printMsg as print from mitim_tools import __version__ from IPython import embed @@ -88,6 +87,9 @@ def process(self, mi_ref=None, calculateDerived=True): "ptot(Pa)", # e.g. if I haven't written that info from ASTRA "zeta(-)", # e.g. if TGYRO is run with zeta=0, it won't write this column in .new "zmag(m)", + "qsync(MW/m^3)", + "qbrem(MW/m^3)", + "qline(MW/m^3)", self.varqpar, self.varqpar2, "shape_cos0(-)", @@ -505,7 +507,7 @@ def deriveQuantities_full(self, mi_ref=None, n_theta_geo=1001, rederiveGeometry= self.profiles["te(keV)"], self.derived["mi_ref"], self.derived["B_unit"] ) - self.derived["q_gb"], self.derived["g_gb"], _, _, _ = PLASMAtools.gyrobohmUnits( + self.derived["q_gb"], self.derived["g_gb"], self.derived["pi_gb"], self.derived["s_gb"], _ = PLASMAtools.gyrobohmUnits( self.profiles["te(keV)"], self.profiles["ne(10^19/m^3)"] * 1e-1, self.derived["mi_ref"], @@ -1272,8 +1274,6 @@ def deriveContentByVolumes(self, rhos=[0.5], impurityPosition=3): def printInfo(self, label="", reDeriveIfNotFound=True): - if 'pfast_fraction' not in self.derived: self.derived['pfast_fraction'] = np.nan #TODO: remove this line - try: ImpurityText = "" for i in range(len(self.Species)): @@ -1287,90 +1287,30 @@ def printInfo(self, label="", reDeriveIfNotFound=True): print(f"\tkappa_sep = {self.profiles['kappa(-)'][-1]:.2f}, kappa_995 = {self.derived['kappa995']:.2f}, kappa_95 = {self.derived['kappa95']:.2f}, kappa_a = {self.derived['kappa_a']:.2f}") print(f"\tdelta_sep = {self.profiles['delta(-)'][-1]:.2f}, delta_995 = {self.derived['delta995']:.2f}, delta_95 = {self.derived['delta95']:.2f}") print("Performance:") - print( - "\tQ = {0:.2f} (Pfus = {1:.1f}MW, Pin = {2:.1f}MW)".format( - self.derived["Q"], self.derived["Pfus"], self.derived["qIn"] - ) - ) - print( - "\tH98y2 = {0:.2f} (tauE = {1:.3f} s)".format( - self.derived["H98"], self.derived["tauE"] - ) - ) - print( - "\tH89p = {0:.2f} (H97L = {1:.2f})".format( - self.derived["H89"], self.derived["H97L"] - ) - ) - print( - "\tnu_ne = {0:.2f} (nu_eff = {1:.2f})".format( - self.derived["ne_peaking"], self.derived["nu_eff"] - ) - ) - print( - "\tnu_ne0.2 = {0:.2f} (nu_eff w/Zeff2 = {1:.2f})".format( - self.derived["ne_peaking0.2"], self.derived["nu_eff2"] - ) - ) + print("\tQ = {0:.2f} (Pfus = {1:.1f}MW, Pin = {2:.1f}MW)".format(self.derived["Q"], self.derived["Pfus"], self.derived["qIn"])) + print("\tH98y2 = {0:.2f} (tauE = {1:.3f} s)".format(self.derived["H98"], self.derived["tauE"])) + print("\tH89p = {0:.2f} (H97L = {1:.2f})".format(self.derived["H89"], self.derived["H97L"])) + print("\tnu_ne = {0:.2f} (nu_eff = {1:.2f})".format(self.derived["ne_peaking"], self.derived["nu_eff"])) + print("\tnu_ne0.2 = {0:.2f} (nu_eff w/Zeff2 = {1:.2f})".format(self.derived["ne_peaking0.2"], self.derived["nu_eff2"])) print(f"\tnu_Ti = {self.derived['Ti_peaking']:.2f}") print(f"\tp_vol = {self.derived['ptot_manual_vol']:.2f} MPa ({self.derived['pfast_fraction']*100.0:.1f}% fast)") - print( - f"\tBetaN = {self.derived['BetaN']:.3f} (BetaN w/B0 = {self.derived['BetaN_engineering']:.3f})" - ) - print( - "\tPrad = {0:.1f}MW ({1:.1f}% of total)".format( - self.derived["Prad"], - self.derived["Prad"] / self.derived["qHeat"] * 100.0, - ) - ) - print( - "\tPsol = {0:.1f}MW (fLH = {1:.2f})".format( - self.derived["Psol"], self.derived["LHratio"] - ) - ) - print( - "Operational point ( [,] = [{0:.2f},{1:.2f}] ) and species:".format( - self.derived["ne_vol20"], 
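deriveQuantities_full now also keeps the momentum-flux and exchange normalizations (pi_gb, s_gb) returned by PLASMAtools.gyrobohmUnits, which later feed NORMtools and the TGLF unnormalize step. For reference, a self-contained sketch of the standard GACODE-style gyro-Bohm definitions these names correspond to; the exact unit conventions inside PLASMAtools.gyrobohmUnits (keV, 10^19 m^-3, output scalings) are not shown in this patch, so the SI version below is illustrative only:

    import numpy as np

    def gyrobohm_units_si(ne, Te_J, mi, B_unit, a, e=1.602176634e-19):
        # Standard gyro-Bohm normalizations in SI units (illustrative sketch).
        c_s = np.sqrt(Te_J / mi)                  # sound speed [m/s]
        rho_s = c_s * mi / (e * B_unit)           # ion sound gyroradius [m]
        rho_star = rho_s / a
        q_gb = ne * Te_J * c_s * rho_star**2      # energy flux      [W/m^2]
        g_gb = ne * c_s * rho_star**2             # particle flux    [1/(m^2 s)]
        pi_gb = ne * Te_J * a * rho_star**2       # momentum flux    [J/m^2]
        s_gb = ne * Te_J * c_s / a * rho_star**2  # exchange density [W/m^3]
        return q_gb, g_gb, pi_gb, s_gb
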
self.derived["Te_vol"] - ) - ) - print( - "\t = {0:.2f} keV (/ = {1:.2f}, Ti0/Te0 = {2:.2f})".format( - self.derived["Ti_vol"], - self.derived["tite_vol"], - self.derived["tite"][0], - ) - ) - print( - "\tfG = {0:.2f} ( = {1:.2f} * 10^20 m^-3)".format( - self.derived["fG"], self.derived["ne_vol20"] - ) - ) - print( - f"\tZeff = {self.derived['Zeff_vol']:.2f} (M_main = {self.derived['mbg_main']:.2f}, f_main = {self.derived['fmain']:.2f}) [QN err = {self.derived['QN_Error']:.1e}]" - ) + print(f"\tBetaN = {self.derived['BetaN']:.3f} (BetaN w/B0 = {self.derived['BetaN_engineering']:.3f})") + print(f"\tPrad = {self.derived['Prad']:.1f}MW ({self.derived['Prad'] / self.derived['qHeat'] * 100.0:.1f}% of total) ({self.derived['Prad_brem']/self.derived['Prad'] * 100.0:.1f}% brem, {self.derived['Prad_line']/self.derived['Prad'] * 100.0:.1f}% line, {self.derived['Prad_sync']/self.derived['Prad'] * 100.0:.1f}% sync)") + print("\tPsol = {0:.1f}MW (fLH = {1:.2f})".format(self.derived["Psol"], self.derived["LHratio"])) + print("Operational point ( [,] = [{0:.2f},{1:.2f}] ) and species:".format(self.derived["ne_vol20"], self.derived["Te_vol"])) + print("\t = {0:.2f} keV (/ = {1:.2f}, Ti0/Te0 = {2:.2f})".format(self.derived["Ti_vol"],self.derived["tite_vol"],self.derived["tite"][0],)) + print("\tfG = {0:.2f} ( = {1:.2f} * 10^20 m^-3)".format(self.derived["fG"], self.derived["ne_vol20"])) + print(f"\tZeff = {self.derived['Zeff_vol']:.2f} (M_main = {self.derived['mbg_main']:.2f}, f_main = {self.derived['fmain']:.2f}) [QN err = {self.derived['QN_Error']:.1e}]") print(f"\tMach = {self.derived['MachNum_vol']:.2f} (vol avg)") print("Content:") - print( - "\tWe = {0:.2f} MJ, Wi_thr = {1:.2f} MJ (W_thr = {2:.2f} MJ)".format( - self.derived["We"], self.derived["Wi_thr"], self.derived["Wthr"] - ) - ) - print( - "\tNe = {0:.1f}*10^20, Ni_thr = {1:.1f}*10^20 (N_thr = {2:.1f}*10^20)".format( - self.derived["Ne"], self.derived["Ni_thr"], self.derived["Nthr"] - ) - ) - print( - f"\ttauE = { self.derived['tauE']:.3f} s, tauP = {self.derived['tauP']:.3f} s (tauP/tauE = {self.derived['tauPotauE']:.2f})" - ) + print("\tWe = {0:.2f} MJ, Wi_thr = {1:.2f} MJ (W_thr = {2:.2f} MJ)".format(self.derived["We"], self.derived["Wi_thr"], self.derived["Wthr"])) + print("\tNe = {0:.1f}*10^20, Ni_thr = {1:.1f}*10^20 (N_thr = {2:.1f}*10^20)".format(self.derived["Ne"], self.derived["Ni_thr"], self.derived["Nthr"])) + print(f"\ttauE = { self.derived['tauE']:.3f} s, tauP = {self.derived['tauP']:.3f} s (tauP/tauE = {self.derived['tauPotauE']:.2f})") print("Species concentration:") print(f"\t{ImpurityText}") print("******************************************************") except KeyError: - print( - "\t- When printing info, not all keys found, probably because this input.gacode class came from an old MITIM version", - typeMsg="w", - ) + print("\t- When printing info, not all keys found, probably because this input.gacode class came from an old MITIM version",typeMsg="w",) if reDeriveIfNotFound: self.deriveQuantities() self.printInfo(label=label, reDeriveIfNotFound=False) @@ -1530,9 +1470,7 @@ def writeMiminalKinetic(self, file): valt = f"{val:.7e}".rjust(15) f.write(f"{pos}{valt}\n") - def changeResolution( - self, n=100, rho_new=None, interpolation_function=MATHtools.extrapolateCubicSpline - ): + def changeResolution(self, n=100, rho_new=None, interpolation_function=MATHtools.extrapolateCubicSpline): rho = copy.deepcopy(self.profiles["rho(-)"]) if rho_new is None: @@ -3802,21 +3740,104 @@ def csv(self, file="input.gacode.xlsx"): 
IOtools.writeExcel_fromDict(dictExcel, file, fromRow=1) def parabolizePlasma(self): - PORTALSinteraction.parabolizePlasma(self) + _, T = PLASMAtools.parabolicProfile( + Tbar=self.derived["Te_vol"], + nu=self.derived["Te_peaking"], + rho=self.profiles["rho(-)"], + Tedge=self.profiles["te(keV)"][-1], + ) + _, Ti = PLASMAtools.parabolicProfile( + Tbar=self.derived["Ti_vol"], + nu=self.derived["Ti_peaking"], + rho=self.profiles["rho(-)"], + Tedge=self.profiles["ti(keV)"][-1, 0], + ) + _, n = PLASMAtools.parabolicProfile( + Tbar=self.derived["ne_vol20"] * 1e1, + nu=self.derived["ne_peaking"], + rho=self.profiles["rho(-)"], + Tedge=self.profiles["ne(10^19/m^3)"][-1], + ) + + self.profiles["te(keV)"] = T + + self.profiles["ti(keV)"][:, 0] = Ti + self.makeAllThermalIonsHaveSameTemp(refIon=0) + + factor_n = n / self.profiles["ne(10^19/m^3)"] + self.profiles["ne(10^19/m^3)"] = n + self.scaleAllThermalDensities(scaleFactor=factor_n) + + self.deriveQuantities() + def changeRFpower(self, PrfMW=25.0): - PORTALSinteraction.changeRFpower(self, PrfMW=PrfMW) + """ + keeps same partition + """ + print(f"- Changing the RF power from {self.derived['qRF_MWmiller'][-1]:.1f} MW to {PrfMW:.1f} MW",typeMsg="i",) + + if self.derived["qRF_MWmiller"][-1] == 0.0: + raise Exception("No RF power in the input.gacode, cannot modify the RF power") + + for i in ["qrfe(MW/m^3)", "qrfi(MW/m^3)"]: + self.profiles[i] = self.profiles[i] * PrfMW / self.derived["qRF_MWmiller"][-1] def imposeBCtemps(self, TkeV=0.5, rho=0.9, typeEdge="linear", Tesep=0.1, Tisep=0.2): - PORTALSinteraction.imposeBCtemps( - self, TkeV=TkeV, rho=rho, typeEdge=typeEdge, Tesep=Tesep, Tisep=Tisep - ) + + ix = np.argmin(np.abs(rho - self.profiles["rho(-)"])) + + self.profiles["te(keV)"] = self.profiles["te(keV)"] * TkeV / self.profiles["te(keV)"][ix] + + print(f"- Producing {typeEdge} boundary condition @ rho = {rho}, T = {TkeV} keV",typeMsg="i",) + + for sp in range(len(self.Species)): + if self.Species[sp]["S"] == "therm": + self.profiles["ti(keV)"][:, sp] = self.profiles["ti(keV)"][:, sp] * TkeV / self.profiles["ti(keV)"][ix, sp] + + if typeEdge == "linear": + self.profiles["te(keV)"][ix:] = np.linspace(TkeV, Tesep, len(self.profiles["rho(-)"][ix:])) + + for sp in range(len(self.Species)): + if self.Species[sp]["S"] == "therm": + self.profiles["ti(keV)"][ix:, sp] = np.linspace(TkeV, Tisep, len(self.profiles["rho(-)"][ix:])) + + elif typeEdge == "same": + pass + else: + raise Exception("no edge") + def imposeBCdens(self, n20=2.0, rho=0.9, typeEdge="linear", nedge20=0.5): - PORTALSinteraction.imposeBCdens( - self, n20=n20, rho=rho, typeEdge=typeEdge, nedge20=nedge20 - ) + ix = np.argmin(np.abs(rho - self.profiles["rho(-)"])) + + print(f"- Changing the initial average density from {self.derived['ne_vol20']:.1f} 1E20/m3 to {n20:.1f} 1E20/m3",typeMsg="i") + factor = n20 / self.derived["ne_vol20"] + + for i in ["ne(10^19/m^3)", "ni(10^19/m^3)"]: + self.profiles[i] = self.profiles[i] * factor + + if typeEdge == "linear": + factor_x = ( + np.linspace( + self.profiles["ne(10^19/m^3)"][ix], + nedge20 * 1e1, + len(self.profiles["rho(-)"][ix:]), + ) + / self.profiles["ne(10^19/m^3)"][ix:] + ) + + self.profiles["ne(10^19/m^3)"][ix:] = self.profiles["ne(10^19/m^3)"][ix:] * factor_x + + for i in range(self.profiles["ni(10^19/m^3)"].shape[1]): + self.profiles["ni(10^19/m^3)"][ix:, i] = self.profiles["ni(10^19/m^3)"][ix:, i] * factor_x + + elif typeEdge == "same": + pass + else: + raise Exception("no edge") + def addSawtoothEffectOnOhmic(self, PohTot, mixRadius=None, 
plotYN=False): """ This will implement a flat profile inside the mixRadius to reduce the ohmic power by certain amount diff --git a/src/mitim_tools/gacode_tools/TGLFtools.py b/src/mitim_tools/gacode_tools/TGLFtools.py index ee6c47aa..79712a3d 100644 --- a/src/mitim_tools/gacode_tools/TGLFtools.py +++ b/src/mitim_tools/gacode_tools/TGLFtools.py @@ -511,6 +511,7 @@ def run( TGLFsettings=None, extraOptions={}, multipliers={}, + minimum_delta_abs={}, runWaveForms=None, # e.g. runWaveForms = [0.3,1.0] forceClosestUnstableWF=True, # Look at the growth rate spectrum and run exactly the ky of the closest unstable ApplyCorrections=True, # Removing ions with too low density and that are fast species @@ -541,6 +542,7 @@ def run( TGLFsettings=TGLFsettings, extraOptions=extraOptions, multipliers=multipliers, + minimum_delta_abs=minimum_delta_abs, runWaveForms=runWaveForms, forceClosestUnstableWF=forceClosestUnstableWF, ApplyCorrections=ApplyCorrections, @@ -629,6 +631,7 @@ def _prepare_run_radii( TGLFsettings=None, extraOptions={}, multipliers={}, + minimum_delta_abs={}, ApplyCorrections=True, # Removing ions with too low density and that are fast species Quasineutral=False, # Ensures quasineutrality. By default is False because I may want to run the file directly launchSlurm=True, @@ -689,6 +692,7 @@ def _prepare_run_radii( TGLFsettings=TGLFsettings, extraOptions=extraOptions, multipliers=multipliers, + minimum_delta_abs=minimum_delta_abs, ApplyCorrections=ApplyCorrections, Quasineutral=Quasineutral, ) @@ -2326,6 +2330,7 @@ def runScan( self, subFolderTGLF, # 'scan1', multipliers={}, + minimum_delta_abs={}, variable="RLTS_1", varUpDown=[0.5, 1.0, 1.5], variables_scanTogether=[], @@ -2352,6 +2357,7 @@ def runScan( tglf_executor, tglf_executor_full, folders, varUpDown_new = self._prepare_scan( subFolderTGLF, multipliers=multipliers, + minimum_delta_abs=minimum_delta_abs, variable=variable, varUpDown=varUpDown_new, variables_scanTogether=variables_scanTogether, @@ -2380,6 +2386,7 @@ def _prepare_scan( self, subFolderTGLF, # 'scan1', multipliers={}, + minimum_delta_abs={}, variable="RLTS_1", varUpDown=[0.5, 1.0, 1.5], variables_scanTogether=[], @@ -2443,6 +2450,7 @@ def _prepare_scan( tglf_executor=tglf_executor, tglf_executor_full=tglf_executor_full, multipliers=multipliers_mod, + minimum_delta_abs=minimum_delta_abs, **kwargs_TGLFrun, ) @@ -2466,8 +2474,10 @@ def readScan( self.positionIon_scan = positionIon # ---- - x, Qe, Qi, Ge, Gi, ky, g, f, eta1, eta2, itg, tem, etg = [],[],[],[],[],[],[],[],[],[],[],[],[] - Qe_gb, Qi_gb, Ge_gb, Gi_gb = [], [], [], [] + x = [] + Qe, Qi, Ge, Gi, Mt, S = [],[],[],[],[],[] + Qe_gb, Qi_gb, Ge_gb, Gi_gb, Mt_gb, S_gb = [],[],[],[],[],[] + ky, g, f, eta1, eta2, itg, tem, etg = [],[],[],[],[],[],[],[] etalow_g, etalow_f, etalow_k = [], [], [] cont = 0 for ikey in self.results: @@ -2476,8 +2486,10 @@ def readScan( if isThisTheRightReadResults: self.scans[label]["results_tags"].append(ikey) - x0, Qe0, Qi0, Ge0, Gi0, ky0, g0, f0, eta10, eta20, itg0, tem0, etg0 = [],[],[],[],[],[],[],[],[],[],[],[],[] - Qe_gb0, Qi_gb0, Ge_gb0, Gi_gb0 = [], [], [], [] + x0 = [] + Qe0, Qi0, Ge0, Gi0, Mt0, S0 = [],[],[],[],[],[] + Qe_gb0, Qi_gb0, Ge_gb0, Gi_gb0, Mt_gb0, S_gb0 = [],[],[],[],[],[] + ky0, g0, f0, eta10, eta20, itg0, tem0, etg0 = [],[],[],[],[],[],[],[] etalow_g0, etalow_f0, etalow_k0 = [], [], [] for irho_cont in range(len(self.rhos)): irho = np.where(self.results[ikey]["x"] == self.rhos[irho_cont])[0][0] @@ -2488,6 +2500,8 @@ def readScan( 
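The new minimum_delta_abs dictionary threads from run/runScan down to modifyInputs and is keyed by the same TGLF parameter names as multipliers. A usage sketch, assuming a TGLF object prepared as usual elsewhere in TGLFtools and that runScan forwards the usual run() keywords (folder name and values are illustrative):

    # Enforce a minimum absolute step on RLTS_2 even when the relative scan step is small:
    tglf.runScan(
        "scan1",
        variable="RLTS_2",
        varUpDown=[0.98, 1.0, 1.02],           # +/- 2% relative steps
        minimum_delta_abs={"RLTS_2": 0.1},     # but never change RLTS_2 by less than 0.1
        TGLFsettings=6,                        # forwarded to run() with the other kwargs
    )
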
Qi_gb0.append(self.results[ikey]["TGLFout"][irho].Qi) Ge_gb0.append(self.results[ikey]["TGLFout"][irho].Ge) Gi_gb0.append(self.results[ikey]["TGLFout"][irho].GiAll[self.positionIon_scan - 2]) + Mt_gb0.append(self.results[ikey]["TGLFout"][irho].Mt) + S_gb0.append(self.results[ikey]["TGLFout"][irho].Se) ky0.append(self.results[ikey]["TGLFout"][irho].ky) g0.append(self.results[ikey]["TGLFout"][irho].g) f0.append(self.results[ikey]["TGLFout"][irho].f) @@ -2505,6 +2519,8 @@ def readScan( Qi0.append(self.results[ikey]["TGLFout"][irho].Qi_unn) Ge0.append(self.results[ikey]["TGLFout"][irho].Ge_unn) Gi0.append(self.results[ikey]["TGLFout"][irho].GiAll_unn[self.positionIon_scan - 2]) + Mt0.append(self.results[ikey]["TGLFout"][irho].Mt_unn) + S0.append(self.results[ikey]["TGLFout"][irho].Se_unn) else: self.scans[label]["unnormalization_successful"] = False @@ -2517,6 +2533,8 @@ def readScan( Ge_gb.append(Ge_gb0) Gi_gb.append(Gi_gb0) Gi.append(Gi0) + Mt.append(Mt0) + S.append(S0) ky.append(ky0) g.append(g0) f.append(f0) @@ -2539,10 +2557,14 @@ def readScan( self.scans[label]["Qi_gb"] = np.atleast_2d(np.transpose(Qi_gb)) self.scans[label]["Ge_gb"] = np.atleast_2d(np.transpose(Ge_gb)) self.scans[label]["Gi_gb"] = np.atleast_2d(np.transpose(Gi_gb)) + self.scans[label]["Mt_gb"] = np.atleast_2d(np.transpose(Mt_gb)) + self.scans[label]["S_gb"] = np.atleast_2d(np.transpose(S_gb)) self.scans[label]["Qe"] = np.atleast_2d(np.transpose(Qe)) self.scans[label]["Qi"] = np.atleast_2d(np.transpose(Qi)) self.scans[label]["Ge"] = np.atleast_2d(np.transpose(Ge)) self.scans[label]["Gi"] = np.atleast_2d(np.transpose(Gi)) + self.scans[label]["Mt"] = np.atleast_2d(np.transpose(Mt)) + self.scans[label]["S"] = np.atleast_2d(np.transpose(S)) self.scans[label]["eta1"] = np.atleast_2d(np.transpose(eta1)) self.scans[label]["eta2"] = np.atleast_2d(np.transpose(eta2)) self.scans[label]["itg"] = np.atleast_2d(np.transpose(itg)) @@ -3126,8 +3148,8 @@ def runScanTurbulenceDrives( resolutionPoints=5, variation=0.5, add_baseline_to = 'none', # 'all' or 'first' or 'none' - add_also_baseline_to_first = True, variablesDrives=["RLTS_1", "RLTS_2", "RLNS_1", "XNUE", "TAUS_2"], + minimum_delta_abs={}, positionIon=2, **kwargs_TGLFrun, ): @@ -3151,9 +3173,7 @@ def runScanTurbulenceDrives( tglf_executor, tglf_executor_full, folders = {}, {}, [] for cont, variable in enumerate(self.variablesDrives): # Only ask the cold_start in the first round - kwargs_TGLFrun["forceIfcold_start"] = cont > 0 or ( - "forceIfcold_start" in kwargs_TGLFrun and kwargs_TGLFrun["forceIfcold_start"] - ) + kwargs_TGLFrun["forceIfcold_start"] = cont > 0 or ("forceIfcold_start" in kwargs_TGLFrun and kwargs_TGLFrun["forceIfcold_start"]) scan_name = f"{subFolderTGLF}_{variable}" # e.g. 
turbDrives_RLTS_1 @@ -3161,6 +3181,7 @@ def runScanTurbulenceDrives( scan_name, variable=variable, varUpDown=varUpDown_dict[variable], + minimum_delta_abs=minimum_delta_abs, **kwargs_TGLFrun, ) @@ -3654,7 +3675,7 @@ def plotAnalysis(self, labels=["analysis1"], analysisType="chi_e", figs=None): rho_mod = np.append([0], rho) aLn = np.append([0], y) import torch - from mitim_modules.powertorch.physics import CALCtools + from mitim_modules.powertorch.utils import CALCtools BC = 1.0 T = CALCtools.integrateGradient( @@ -3787,6 +3808,7 @@ def changeANDwrite_TGLF( TGLFsettings=None, extraOptions={}, multipliers={}, + minimum_delta_abs={}, ApplyCorrections=True, Quasineutral=False, ): @@ -3807,6 +3829,7 @@ def changeANDwrite_TGLF( Settings=TGLFsettings, extraOptions=extraOptions, multipliers=multipliers, + minimum_delta_abs=minimum_delta_abs, position_change=i, addControlFunction=GACODEdefaults.addTGLFcontrol, NS=NS, @@ -4494,13 +4517,9 @@ def __init__(self, FolderGACODE, suffix="",require_all_files=True): self.FolderGACODE, self.suffix = FolderGACODE, suffix if suffix == "": - print( - f"\t- Reading results from folder {IOtools.clipstr(FolderGACODE)} without suffix" - ) + print(f"\t- Reading results from folder {IOtools.clipstr(FolderGACODE)} without suffix") else: - print( - f"\t- Reading results from folder {IOtools.clipstr(FolderGACODE)} with suffix {suffix}" - ) + print(f"\t- Reading results from folder {IOtools.clipstr(FolderGACODE)} with suffix {suffix}") self.inputclass = TGLFinput(file=self.FolderGACODE / f"input.tglf{self.suffix}") self.roa = self.inputclass.geom["RMIN_LOC"] @@ -4555,17 +4574,29 @@ def read(self,require_all_files=True): self.Ge = data[0, 0] self.Qe = data[1, 0] + self.Me = data[2, 0] + self.Se = data[3, 0] self.GiAll = data[0, 1:] self.QiAll = data[1, 1:] + self.MiAll = data[2, 1:] + self.SiAll = data[3, 1:] - print( - f"\t\t- For Qi, summing contributions from ions {self.ions_included} (#0 is e-)", - typeMsg="i", - ) + print(f"\t\t- For Qi, summing contributions from ions {self.ions_included} (#0 is e-)",typeMsg="i",) self.Gi = data[0, self.ions_included].sum() self.Qi = data[1, self.ions_included].sum() + signMt = - self.inputclass.plasma['SIGN_IT'] # Following tgyro_flux.f90 + print(f"\t\t- Sign of Mt given by toroidal current direction (SIGN_IT={-signMt}): {signMt}",typeMsg="i",) + self.Me *= signMt + self.MiAll *= signMt + + print("\t\t- For Mt, summing all species contributions",typeMsg="i",) + self.Mt = self.Me + self.MiAll.sum() + + print("\t\t- For St, summing all ion species contributions",typeMsg="i",) + self.Si = self.SiAll.sum() + if require_all_files: # ------------------------------------------------------------------------ @@ -5012,14 +5043,14 @@ def read(self,require_all_files=True): lines = fi.readlines() self.inputFileTGLF = "".join(lines) - def unnormalize( - self, normalization, rho=None, convolution_fun_fluct=None, factorTot_to_Perp=1.0 - ): + def unnormalize(self, normalization, rho=None, convolution_fun_fluct=None, factorTot_to_Perp=1.0): if normalization is not None: rho_x = normalization["rho"] roa_x = normalization["roa"] q_gb = normalization["q_gb"] g_gb = normalization["g_gb"] + pi_gb = normalization["pi_gb"] + s_gb = normalization["s_gb"] rho_s = normalization["rho_s"] a = normalization["rmin"][-1] @@ -5039,6 +5070,9 @@ def unnormalize( self.Ge_unn = self.Ge * g_gb[ir] self.GiAll_unn = self.GiAll * g_gb[ir] + self.Mt_unn = self.Mt * pi_gb[ir] + self.Se_unn = self.Se * s_gb[ir] + self.AmplitudeSpectrum_Te_level = 
GACODErun.obtainFluctuationLevel( self.ky, self.AmplitudeSpectrum_Te, diff --git a/src/mitim_tools/gacode_tools/TGYROtools.py b/src/mitim_tools/gacode_tools/TGYROtools.py index 139ec674..5b83e895 100644 --- a/src/mitim_tools/gacode_tools/TGYROtools.py +++ b/src/mitim_tools/gacode_tools/TGYROtools.py @@ -12,15 +12,6 @@ from mitim_tools.gacode_tools.utils import GACODEinterpret, GACODEdefaults, GACODErun from mitim_tools.misc_tools.LOGtools import printMsg as print from IPython import embed -import time - -try: - from mitim_tools.gacode_tools.utils import PORTALSinteraction -except: - print( - "- I could not import PORTALSinteraction, likely a consequence of botorch incompatbility", - typeMsg="w", - ) """ Same philosophy as the TGLFtools @@ -1441,14 +1432,14 @@ def readFluxes(self): # Errors - Constructed outside of TGYRO call (e.g. powerstate) # *************************************************************** - if not (self.FolderTGYRO / f"out.tgyro.flux_e_stds").exists(): + if not (self.FolderTGYRO / "out.tgyro.flux_e_stds").exists(): self.tgyro_stds = False else: print("\t- Errors in TGYRO fluxes and targets found, adding to class") self.tgyro_stds = True - file = self.FolderTGYRO / f"out.tgyro.flux_e_stds" + file = self.FolderTGYRO / "out.tgyro.flux_e_stds" ( _, self.GeGB_sim_neo_stds, @@ -2100,9 +2091,7 @@ def derived(self): Note: This is only valid in the converged case??????????????? """ - if (self.profiles_final is not None) and ( - "derived" in self.profiles_final.__dict__ - ): + if (self.profiles_final is not None) and ("derived" in self.profiles_final.__dict__): prof = self.profiles_final elif (self.profiles is not None) and ("derived" in self.profiles.__dict__): prof = self.profiles @@ -2116,9 +2105,7 @@ def derived(self): self.Q_better = self.P_fusT_tgyro / self.P_inT - if (self.profiles_final is not None) and ( - "derived" in self.profiles_final.__dict__ - ): + if (self.profiles_final is not None) and ("derived" in self.profiles_final.__dict__): self.Q_best = self.profiles_final.derived["Q"] """ @@ -2170,10 +2157,7 @@ def useFineGridTargets(self, impurityPosition=1): ) # Profiles do not include ion fluxes for j in range(self.Gi_tar.shape[0]): - self.Gi_tar[j, i, :], self.Ci_tar[j, i, :] = ( - self.Ce_tar[i, :] * 0.0, - self.Ce_tar[i, :] * 0.0, - ) + self.Gi_tar[j, i, :], self.Ci_tar[j, i, :] = self.Ce_tar[i, :] * 0.0, self.Ce_tar[i, :] * 0.0 self.Mt_tar[i, :] = np.interp( rho_coarse, rho_fine, self.profiles_final.derived["mt_Jm2"] @@ -2185,9 +2169,6 @@ def useFineGridTargets(self, impurityPosition=1): self.Ge_tarMW = self.Ge_tar * self.dvoldr self.Ce_tarMW = self.Ce_tar * self.dvoldr - def TGYROmodeledVariables(self, *args, **kwargs): - return PORTALSinteraction.TGYROmodeledVariables(self, *args, **kwargs) - def plot(self, fn=None, label="", prelabel="", fn_color=None): if fn is None: from mitim_tools.misc_tools.GUItools import FigureNotebook diff --git a/src/mitim_tools/gacode_tools/scripts/read_gacode.py b/src/mitim_tools/gacode_tools/scripts/read_gacode.py index 9c994607..14b10374 100644 --- a/src/mitim_tools/gacode_tools/scripts/read_gacode.py +++ b/src/mitim_tools/gacode_tools/scripts/read_gacode.py @@ -1,4 +1,5 @@ import argparse +from turtle import st from mitim_tools.gacode_tools import PROFILEStools """ @@ -11,13 +12,13 @@ def main(): parser = argparse.ArgumentParser() parser.add_argument("files", type=str, nargs="*") - parser.add_argument( - "--rho", type=float, required=False, default=0.89 - ) # Last rho for gradients plot + parser.add_argument("--rho", type=float, 
required=False, default=0.89) # Last rho for gradients plot + parser.add_argument("--print", required=False, default=False, action="store_true") # Last rho for gradients plot args = parser.parse_args() files = args.files rho = args.rho + print_only = args.print # Read profs = [] @@ -29,9 +30,11 @@ def main(): # Plot - fn = PROFILEStools.plotAll(profs, lastRhoGradients=rho) + if not print_only: - fn.show() + fn = PROFILEStools.plotAll(profs, lastRhoGradients=rho) + + fn.show() # Import IPython and embed an interactive session from IPython import embed diff --git a/src/mitim_tools/gacode_tools/utils/GACODErun.py b/src/mitim_tools/gacode_tools/utils/GACODErun.py index 74e7f0d0..8d1d7e16 100644 --- a/src/mitim_tools/gacode_tools/utils/GACODErun.py +++ b/src/mitim_tools/gacode_tools/utils/GACODErun.py @@ -99,6 +99,7 @@ def modifyInputs( Settings=None, extraOptions={}, multipliers={}, + minimum_delta_abs={}, position_change=0, addControlFunction=None, **kwargs_to_function, @@ -160,18 +161,12 @@ def modifyInputs( input_class.plasma[ikey] = var_new else: # If the variable in extraOptions wasn't in there, consider it a control param - print( - "\t\t- Variable to change did not exist previously, creating now", - typeMsg="i", - ) + print("\t\t- Variable to change did not exist previously, creating now",typeMsg="i") var_orig = None var_new = value_to_change_to input_class.controls[ikey] = var_new - print( - f"\t\t- Changing {ikey} from {var_orig} to {var_new}", - typeMsg="i", - ) + print(f"\t\t- Changing {ikey} from {var_orig} to {var_new}",typeMsg="i",) # ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ # Change with multipliers -> Input directly, not as multiplier @@ -184,31 +179,41 @@ def modifyInputs( specie = int(ikey.split("_")[-1]) varK = "_".join(ikey.split("_")[:-1]) var_orig = input_class.species[specie][varK] - var_new = var_orig * multipliers[ikey] + var_new = multiplier_tglf_input(var_orig, multipliers[ikey], minimum_delta_abs = minimum_delta_abs.get(ikey,None)) input_class.species[specie][varK] = var_new else: if ikey in input_class.controls: var_orig = input_class.controls[ikey] - var_new = var_orig * multipliers[ikey] + var_new = multiplier_tglf_input(var_orig, multipliers[ikey], minimum_delta_abs = minimum_delta_abs.get(ikey,None)) input_class.controls[ikey] = var_new + elif ikey in input_class.geom: var_orig = input_class.geom[ikey] - var_new = var_orig * multipliers[ikey] + var_new = multiplier_tglf_input(var_orig, multipliers[ikey], minimum_delta_abs = minimum_delta_abs.get(ikey,None)) input_class.geom[ikey] = var_new + elif ikey in input_class.plasma: var_orig = input_class.plasma[ikey] - var_new = var_orig * multipliers[ikey] + var_new = multiplier_tglf_input(var_orig, multipliers[ikey], minimum_delta_abs = minimum_delta_abs.get(ikey,None)) input_class.plasma[ikey] = var_new + else: - print( - "\t- Variable to scan did not exist in original file, add it as extraOptions first", - typeMsg="w", - ) + print("\t- Variable to scan did not exist in original file, add it as extraOptions first",typeMsg="w",) print(f"\t\t\t- Changing {ikey} from {var_orig} to {var_new} (x{multipliers[ikey]})") return input_class +def multiplier_tglf_input(var_orig, multiplier, minimum_delta_abs = None): + + delta = var_orig * (multiplier - 1.0) + + if minimum_delta_abs is not None: + if (multiplier != 1.0) and abs(delta) < minimum_delta_abs: + print(f"\t\t\t- delta = {delta} is smaller than minimum_delta_abs = {minimum_delta_abs}, enforcing",typeMsg="i") + delta = np.sign(delta) * minimum_delta_abs + + 
return var_orig + delta def findNamelist(LocationCDF, folderWork=None, nameRunid="10000", ForceFirst=True): # ----------------------------------------------------------- diff --git a/src/mitim_modules/powertorch/physics/GEOMETRYtools.py b/src/mitim_tools/gacode_tools/utils/GEOMETRYtools.py similarity index 100% rename from src/mitim_modules/powertorch/physics/GEOMETRYtools.py rename to src/mitim_tools/gacode_tools/utils/GEOMETRYtools.py diff --git a/src/mitim_tools/gacode_tools/utils/NORMtools.py b/src/mitim_tools/gacode_tools/utils/NORMtools.py index 86ea444e..42bbae99 100644 --- a/src/mitim_tools/gacode_tools/utils/NORMtools.py +++ b/src/mitim_tools/gacode_tools/utils/NORMtools.py @@ -87,6 +87,8 @@ def normalizations_tgyro(tgyro, rho, roa): "rho": rho, "q_gb": np.interp(rho, x_tgyro, tgyro.Q_GB[iteration]), "g_gb": np.interp(rho, x_tgyro, tgyro.Gamma_GB[iteration]), + "pi_gb": np.interp(rho, x_tgyro, tgyro.Pi_GB[iteration]), + "s_gb": np.interp(rho, x_tgyro, tgyro.S_GB[iteration]), "c_s": np.interp(rho, x_tgyro, tgyro.c_s[iteration]), } @@ -94,6 +96,7 @@ def normalizations_tgyro(tgyro, rho, roa): def normalizations_profiles(profiles): + if profiles is not None: Set_norm = { "rho": profiles.profiles["rho(-)"], @@ -101,26 +104,18 @@ def normalizations_profiles(profiles): "rmin": np.abs(profiles.profiles["rmin(m)"]), "q_gb": np.abs(profiles.derived["q_gb"]), "g_gb": np.abs(profiles.derived["g_gb"]), - "exp_Qe": np.abs(profiles.derived["qe"]), - "exp_Qi": np.abs(profiles.derived["qi"]), - "exp_Ge": np.abs(profiles.derived["ge"]), + "pi_gb": np.abs(profiles.derived["pi_gb"]), + "s_gb": np.abs(profiles.derived["s_gb"]), "B_unit": np.abs(profiles.derived["B_unit"]), "rho_s": np.abs(profiles.derived["rho_s"]), "c_s": np.abs(profiles.derived["c_s"]), - "Te_keV": np.abs( - profiles.profiles[ - "te(keV)" if "te(keV)" in profiles.profiles else "Te(keV)" - ] - ), + "Te_keV": np.abs(profiles.profiles["te(keV)"]), "ne_20": np.abs(profiles.profiles["ne(10^19/m^3)"]) * 1e-1, "Ti_keV": np.abs(profiles.profiles["ti(keV)"][:, 0]), "ni_20": np.abs(profiles.derived["ni_thrAll"]) * 1e-1, - "exp_Qe": profiles.derived["qe_MWmiller"] - / profiles.derived["surfGACODE_miller"], # This is the same as qe_MWm2 - "exp_Qi": profiles.derived["qi_MWmiller"] - / profiles.derived["surfGACODE_miller"], - "exp_Ge": profiles.derived["ge_10E20miller"] - / profiles.derived["surfGACODE_miller"], + "exp_Qe": profiles.derived["qe_MWmiller"] / profiles.derived["surfGACODE_miller"], # This is the same as qe_MWm2 + "exp_Qi": profiles.derived["qi_MWmiller"] / profiles.derived["surfGACODE_miller"], + "exp_Ge": profiles.derived["ge_10E20miller"] / profiles.derived["surfGACODE_miller"], "mi_ref": profiles.derived["mi_ref"], } @@ -252,6 +247,7 @@ def plotNormalizations( colors=["b", "r", "g"], legYN=True, extralab="", + fn = None, ): if NormalizationSets is not None: if axs is None: diff --git a/src/mitim_tools/gacode_tools/utils/PORTALSinteraction.py b/src/mitim_tools/gacode_tools/utils/PORTALSinteraction.py deleted file mode 100644 index b4e7d657..00000000 --- a/src/mitim_tools/gacode_tools/utils/PORTALSinteraction.py +++ /dev/null @@ -1,559 +0,0 @@ -import torch -import numpy as np -from mitim_tools.misc_tools import PLASMAtools -from mitim_modules.portals import PORTALStools -from mitim_tools.misc_tools.LOGtools import printMsg as print -from IPython import embed - -def parabolizePlasma(self): - _, T = PLASMAtools.parabolicProfile( - Tbar=self.derived["Te_vol"], - nu=self.derived["Te_peaking"], - rho=self.profiles["rho(-)"], - 
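Reading directly from multiplier_tglf_input above: a multiplier of 1.0 is never touched, and any other multiplier whose absolute change falls below minimum_delta_abs is promoted to that absolute step, in the direction of the requested change. Worked values:

    multiplier_tglf_input(2.0, 1.5)                           # -> 3.0  (plain multiplier, no minimum)
    multiplier_tglf_input(1.0, 1.02, minimum_delta_abs=0.1)   # -> 1.1  (|delta| = 0.02 < 0.1, pushed up to +0.1)
    multiplier_tglf_input(1.0, 0.98, minimum_delta_abs=0.1)   # -> 0.9  (pushed down to -0.1)
    multiplier_tglf_input(1.0, 1.0,  minimum_delta_abs=0.1)   # -> 1.0  (baseline point left unchanged)
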
Tedge=self.profiles["te(keV)"][-1], - ) - _, Ti = PLASMAtools.parabolicProfile( - Tbar=self.derived["Ti_vol"], - nu=self.derived["Ti_peaking"], - rho=self.profiles["rho(-)"], - Tedge=self.profiles["ti(keV)"][-1, 0], - ) - _, n = PLASMAtools.parabolicProfile( - Tbar=self.derived["ne_vol20"] * 1e1, - nu=self.derived["ne_peaking"], - rho=self.profiles["rho(-)"], - Tedge=self.profiles["ne(10^19/m^3)"][-1], - ) - - self.profiles["te(keV)"] = T - - self.profiles["ti(keV)"][:, 0] = Ti - self.makeAllThermalIonsHaveSameTemp(refIon=0) - - factor_n = n / self.profiles["ne(10^19/m^3)"] - self.profiles["ne(10^19/m^3)"] = n - self.scaleAllThermalDensities(scaleFactor=factor_n) - - self.deriveQuantities() - - -def changeRFpower(self, PrfMW=25.0): - """ - keeps same partition - """ - print(f"- Changing the RF power from {self.derived['qRF_MWmiller'][-1]:.1f} MW to {PrfMW:.1f} MW",typeMsg="i",) - - if self.derived["qRF_MWmiller"][-1] == 0.0: - raise Exception("No RF power in the input.gacode, cannot modify the RF power") - - for i in ["qrfe(MW/m^3)", "qrfi(MW/m^3)"]: - self.profiles[i] = self.profiles[i] * PrfMW / self.derived["qRF_MWmiller"][-1] - -def imposeBCtemps(self, TkeV=0.5, rho=0.9, typeEdge="linear", Tesep=0.1, Tisep=0.2): - ix = np.argmin(np.abs(rho - self.profiles["rho(-)"])) - - self.profiles["te(keV)"] = ( - self.profiles["te(keV)"] * TkeV / self.profiles["te(keV)"][ix] - ) - - print( - f"- Producing {typeEdge} boundary condition @ rho = {rho}, T = {TkeV} keV", - typeMsg="i", - ) - - for sp in range(len(self.Species)): - if self.Species[sp]["S"] == "therm": - self.profiles["ti(keV)"][:, sp] = ( - self.profiles["ti(keV)"][:, sp] - * TkeV - / self.profiles["ti(keV)"][ix, sp] - ) - - if typeEdge == "linear": - self.profiles["te(keV)"][ix:] = np.linspace( - TkeV, Tesep, len(self.profiles["rho(-)"][ix:]) - ) - - for sp in range(len(self.Species)): - if self.Species[sp]["S"] == "therm": - self.profiles["ti(keV)"][ix:, sp] = np.linspace( - TkeV, Tisep, len(self.profiles["rho(-)"][ix:]) - ) - - elif typeEdge == "same": - pass - else: - raise Exception("no edge") - - -def imposeBCdens(self, n20=2.0, rho=0.9, typeEdge="linear", nedge20=0.5): - ix = np.argmin(np.abs(rho - self.profiles["rho(-)"])) - - print( - f"- Changing the initial average density from {self.derived['ne_vol20']:.1f} 1E20/m3 to {n20:.1f} 1E20/m3", - typeMsg="i", - ) - - factor = n20 / self.derived["ne_vol20"] - - for i in ["ne(10^19/m^3)", "ni(10^19/m^3)"]: - self.profiles[i] = self.profiles[i] * factor - - if typeEdge == "linear": - factor_x = ( - np.linspace( - self.profiles["ne(10^19/m^3)"][ix], - nedge20 * 1e1, - len(self.profiles["rho(-)"][ix:]), - ) - / self.profiles["ne(10^19/m^3)"][ix:] - ) - - self.profiles["ne(10^19/m^3)"][ix:] = ( - self.profiles["ne(10^19/m^3)"][ix:] * factor_x - ) - for i in range(self.profiles["ni(10^19/m^3)"].shape[1]): - self.profiles["ni(10^19/m^3)"][ix:, i] = ( - self.profiles["ni(10^19/m^3)"][ix:, i] * factor_x - ) - elif typeEdge == "same": - pass - else: - raise Exception("no edge") - - -# ------------------------------------------------------------------------------------------------------------------------------------------------------ -# This is where the definitions for the summation variables happen for mitim and PORTALSplot -# ------------------------------------------------------------------------------------------------------------------------------------------------------ - -def TGYROmodeledVariables(TGYROresults, - powerstate, - useConvectiveFluxes=False, - forceZeroParticleFlux=False, - 
includeFast=False, - impurityPosition=1, - UseFineGridTargets=False, - OriginalFimp=1.0, - provideTurbulentExchange=False, - provideTargets=False - ): - """ - This function is used to extract the TGYRO results and store them in the powerstate object, from numpy arrays to torch tensors. - """ - - if "tgyro_stds" not in TGYROresults.__dict__: - TGYROresults.tgyro_stds = False - - if UseFineGridTargets: - TGYROresults.useFineGridTargets(impurityPosition=impurityPosition) - - - nr = powerstate.plasma['rho'].shape[-1] - if powerstate.plasma['rho'].shape[-1] != TGYROresults.rho.shape[-1]: - print('\t- TGYRO was run with an extra point in the grid, treating it carefully now') - - - # ********************************** - # *********** Electron Energy Fluxes - # ********************************** - - powerstate.plasma["Pe_tr_turb"] = torch.Tensor(TGYROresults.Qe_sim_turb[:, :nr]).to(powerstate.dfT) - powerstate.plasma["Pe_tr_neo"] = torch.Tensor(TGYROresults.Qe_sim_neo[:, :nr]).to(powerstate.dfT) - - powerstate.plasma["Pe_tr_turb_stds"] = torch.Tensor(TGYROresults.Qe_sim_turb_stds[:, :nr]).to(powerstate.dfT) if TGYROresults.tgyro_stds else None - powerstate.plasma["Pe_tr_neo_stds"] = torch.Tensor(TGYROresults.Qe_sim_neo_stds[:, :nr]).to(powerstate.dfT) if TGYROresults.tgyro_stds else None - - if provideTargets: - powerstate.plasma["Pe"] = torch.Tensor(TGYROresults.Qe_tar[:, :nr]).to(powerstate.dfT) - powerstate.plasma["Pe_stds"] = torch.Tensor(TGYROresults.Qe_tar_stds[:, :nr]).to(powerstate.dfT) if TGYROresults.tgyro_stds else None - - # ********************************** - # *********** Ion Energy Fluxes - # ********************************** - - if includeFast: - - powerstate.plasma["Pi_tr_turb"] = torch.Tensor(TGYROresults.QiIons_sim_turb[:, :nr]).to(powerstate.dfT) - powerstate.plasma["Pi_tr_neo"] = torch.Tensor(TGYROresults.QiIons_sim_neo[:, :nr]).to(powerstate.dfT) - - powerstate.plasma["Pi_tr_turb_stds"] = torch.Tensor(TGYROresults.QiIons_sim_turb_stds[:, :nr]).to(powerstate.dfT) if TGYROresults.tgyro_stds else None - powerstate.plasma["Pi_tr_neo_stds"] = torch.Tensor(TGYROresults.QiIons_sim_neo_stds[:, :nr]).to(powerstate.dfT) if TGYROresults.tgyro_stds else None - - else: - - powerstate.plasma["Pi_tr_turb"] = torch.Tensor(TGYROresults.QiIons_sim_turb_thr[:, :nr]).to(powerstate.dfT) - powerstate.plasma["Pi_tr_neo"] = torch.Tensor(TGYROresults.QiIons_sim_neo_thr[:, :nr]).to(powerstate.dfT) - - powerstate.plasma["Pi_tr_turb_stds"] = torch.Tensor(TGYROresults.QiIons_sim_turb_thr_stds[:, :nr]).to(powerstate.dfT) if TGYROresults.tgyro_stds else None - powerstate.plasma["Pi_tr_neo_stds"] = torch.Tensor(TGYROresults.QiIons_sim_neo_thr_stds[:, :nr]).to(powerstate.dfT) if TGYROresults.tgyro_stds else None - - if provideTargets: - powerstate.plasma["Pi"] = torch.Tensor(TGYROresults.Qi_tar[:, :nr]).to(powerstate.dfT) - powerstate.plasma["Pi_stds"] = torch.Tensor(TGYROresults.Qi_tar_stds[:, :nr]).to(powerstate.dfT) if TGYROresults.tgyro_stds else None - - # ********************************** - # *********** Momentum Fluxes - # ********************************** - - powerstate.plasma["Mt_tr_turb"] = torch.Tensor(TGYROresults.Mt_sim_turb[:, :nr]).to(powerstate.dfT) # So far, let's include fast in momentum - powerstate.plasma["Mt_tr_neo"] = torch.Tensor(TGYROresults.Mt_sim_neo[:, :nr]).to(powerstate.dfT) - - powerstate.plasma["Mt_tr_turb_stds"] = torch.Tensor(TGYROresults.Mt_sim_turb_stds[:, :nr]).to(powerstate.dfT) if TGYROresults.tgyro_stds else None - powerstate.plasma["Mt_tr_neo_stds"] = 
torch.Tensor(TGYROresults.Mt_sim_neo_stds[:, :nr]).to(powerstate.dfT) if TGYROresults.tgyro_stds else None - - if provideTargets: - powerstate.plasma["Mt"] = torch.Tensor(TGYROresults.Mt_tar[:, :nr]).to(powerstate.dfT) - powerstate.plasma["Mt_stds"] = torch.Tensor(TGYROresults.Mt_tar_stds[:, :nr]).to(powerstate.dfT) if TGYROresults.tgyro_stds else None - - # ********************************** - # *********** Particle Fluxes - # ********************************** - - # Store raw fluxes for better plotting later - powerstate.plasma["Ce_raw_tr_turb"] = torch.Tensor(TGYROresults.Ge_sim_turb[:, :nr]).to(powerstate.dfT) - powerstate.plasma["Ce_raw_tr_neo"] = torch.Tensor(TGYROresults.Ge_sim_neo[:, :nr]).to(powerstate.dfT) - - powerstate.plasma["Ce_raw_tr_turb_stds"] = torch.Tensor(TGYROresults.Ge_sim_turb_stds[:, :nr]).to(powerstate.dfT) if TGYROresults.tgyro_stds else None - powerstate.plasma["Ce_raw_tr_neo_stds"] = torch.Tensor(TGYROresults.Ge_sim_neo_stds[:, :nr]).to(powerstate.dfT) if TGYROresults.tgyro_stds else None - - if provideTargets: - powerstate.plasma["Ce_raw"] = torch.Tensor(TGYROresults.Ge_tar[:, :nr]).to(powerstate.dfT) - powerstate.plasma["Ce_raw_stds"] = torch.Tensor(TGYROresults.Ge_tar_stds[:, :nr]).to(powerstate.dfT) if TGYROresults.tgyro_stds else None - - if not useConvectiveFluxes: - - powerstate.plasma["Ce_tr_turb"] = powerstate.plasma["Ce_raw_tr_turb"] - powerstate.plasma["Ce_tr_neo"] = powerstate.plasma["Ce_raw_tr_neo"] - - powerstate.plasma["Ce_tr_turb_stds"] = powerstate.plasma["Ce_raw_tr_turb_stds"] - powerstate.plasma["Ce_tr_neo_stds"] = powerstate.plasma["Ce_raw_tr_neo_stds"] - - if provideTargets: - powerstate.plasma["Ce"] = powerstate.plasma["Ce_raw"] - powerstate.plasma["Ce_stds"] = powerstate.plasma["Ce_raw_stds"] - - else: - - powerstate.plasma["Ce_tr_turb"] = torch.Tensor(TGYROresults.Ce_sim_turb[:, :nr]).to(powerstate.dfT) - powerstate.plasma["Ce_tr_neo"] = torch.Tensor(TGYROresults.Ce_sim_neo[:, :nr]).to(powerstate.dfT) - - powerstate.plasma["Ce_tr_turb_stds"] = torch.Tensor(TGYROresults.Ce_sim_turb_stds[:, :nr]).to(powerstate.dfT) if TGYROresults.tgyro_stds else None - powerstate.plasma["Ce_tr_neo_stds"] = torch.Tensor(TGYROresults.Ce_sim_neo_stds[:, :nr]).to(powerstate.dfT) if TGYROresults.tgyro_stds else None - - if provideTargets: - powerstate.plasma["Ce"] = torch.Tensor(TGYROresults.Ce_tar[:, :nr]).to(powerstate.dfT) - powerstate.plasma["Ce_stds"] = torch.Tensor(TGYROresults.Ce_tar_stds[:, :nr]).to(powerstate.dfT) if TGYROresults.tgyro_stds else None - - # ********************************** - # *********** Impurity Fluxes - # ********************************** - - # Store raw fluxes for better plotting later - powerstate.plasma["CZ_raw_tr_turb"] = torch.Tensor(TGYROresults.Gi_sim_turb[impurityPosition, :, :nr]).to(powerstate.dfT) - powerstate.plasma["CZ_raw_tr_neo"] = torch.Tensor(TGYROresults.Gi_sim_neo[impurityPosition, :, :nr]).to(powerstate.dfT) - - powerstate.plasma["CZ_raw_tr_turb_stds"] = torch.Tensor(TGYROresults.Gi_sim_turb_stds[impurityPosition, :, :nr]).to(powerstate.dfT) if TGYROresults.tgyro_stds else None - powerstate.plasma["CZ_raw_tr_neo_stds"] = torch.Tensor(TGYROresults.Gi_sim_neo_stds[impurityPosition, :, :nr]).to(powerstate.dfT) if TGYROresults.tgyro_stds else None - - if provideTargets: - powerstate.plasma["CZ_raw"] = torch.Tensor(TGYROresults.Gi_tar[impurityPosition, :, :nr]).to(powerstate.dfT) - powerstate.plasma["CZ_raw_stds"] = torch.Tensor(TGYROresults.Gi_tar_stds[impurityPosition, :, :nr]).to(powerstate.dfT) if 
TGYROresults.tgyro_stds else None - - if not useConvectiveFluxes: - - powerstate.plasma["CZ_tr_turb"] = powerstate.plasma["CZ_raw_tr_turb"] / OriginalFimp - powerstate.plasma["CZ_tr_neo"] = powerstate.plasma["CZ_raw_tr_neo"] / OriginalFimp - - powerstate.plasma["CZ_tr_turb_stds"] = powerstate.plasma["CZ_raw_tr_turb_stds"] / OriginalFimp if TGYROresults.tgyro_stds else None - powerstate.plasma["CZ_tr_neo_stds"] = powerstate.plasma["CZ_raw_tr_neo_stds"] / OriginalFimp if TGYROresults.tgyro_stds else None - - if provideTargets: - powerstate.plasma["CZ"] = powerstate.plasma["CZ_raw"] / OriginalFimp - powerstate.plasma["CZ_stds"] = powerstate.plasma["CZ_raw_stds"] / OriginalFimp if TGYROresults.tgyro_stds else None - - else: - - powerstate.plasma["CZ_tr_turb"] = torch.Tensor(TGYROresults.Ci_sim_turb[impurityPosition, :, :nr]).to(powerstate.dfT) / OriginalFimp - powerstate.plasma["CZ_tr_neo"] = torch.Tensor(TGYROresults.Ci_sim_neo[impurityPosition, :, :nr]).to(powerstate.dfT) / OriginalFimp - - powerstate.plasma["CZ_tr_turb_stds"] = torch.Tensor(TGYROresults.Ci_sim_turb_stds[impurityPosition, :, :nr]).to(powerstate.dfT) / OriginalFimp if TGYROresults.tgyro_stds else None - powerstate.plasma["CZ_tr_neo_stds"] = torch.Tensor(TGYROresults.Ci_sim_neo_stds[impurityPosition, :, :nr]).to(powerstate.dfT) / OriginalFimp if TGYROresults.tgyro_stds else None - - if provideTargets: - powerstate.plasma["CZ"] = torch.Tensor(TGYROresults.Ci_tar[impurityPosition, :, :nr]).to(powerstate.dfT) / OriginalFimp - powerstate.plasma["CZ_stds"] = torch.Tensor(TGYROresults.Ci_tar_stds[impurityPosition, :, :nr]).to(powerstate.dfT) / OriginalFimp if TGYROresults.tgyro_stds else None - - # ********************************** - # *********** Energy Exchange - # ********************************** - - if provideTurbulentExchange: - powerstate.plasma["PexchTurb"] = torch.Tensor(TGYROresults.EXe_sim_turb[:, :nr]).to(powerstate.dfT) - powerstate.plasma["PexchTurb_stds"] = torch.Tensor(TGYROresults.EXe_sim_turb_stds[:, :nr]).to(powerstate.dfT) if TGYROresults.tgyro_stds else None - else: - powerstate.plasma["PexchTurb"] = powerstate.plasma["Pe_tr_turb"] * 0.0 - powerstate.plasma["PexchTurb_stds"] = powerstate.plasma["Pe_tr_turb"] * 0.0 - - # ********************************** - # *********** Traget extra - # ********************************** - - if forceZeroParticleFlux and provideTargets: - powerstate.plasma["Ce"] = powerstate.plasma["Ce"] * 0.0 - powerstate.plasma["Ce_stds"] = powerstate.plasma["Ce_stds"] * 0.0 - - # ------------------------------------------------------------------------------------------------------------------------ - # Sum here turbulence and neoclassical, after modifications - # ------------------------------------------------------------------------------------------------------------------------ - - quantities = ['Pe', 'Pi', 'Ce', 'CZ', 'Mt', 'Ce_raw', 'CZ_raw'] - for ikey in quantities: - powerstate.plasma[ikey+"_tr"] = powerstate.plasma[ikey+"_tr_turb"] + powerstate.plasma[ikey+"_tr_neo"] - - return powerstate - - -def calculate_residuals(powerstate, PORTALSparameters, specific_vars=None): - """ - Notes - ----- - - Works with tensors - - It should be independent on how many dimensions it has, except that the last dimension is the multi-ofs - """ - - # Case where I have already constructed the dictionary (i.e. 
in scalarized objective) - if specific_vars is not None: - var_dict = specific_vars - # Prepare dictionary from powerstate (for use in Analysis) - else: - var_dict = {} - - mapper = { - "QeTurb": "Pe_tr_turb", - "QiTurb": "Pi_tr_turb", - "GeTurb": "Ce_tr_turb", - "GZTurb": "CZ_tr_turb", - "MtTurb": "Mt_tr_turb", - "QeNeo": "Pe_tr_neo", - "QiNeo": "Pi_tr_neo", - "GeNeo": "Ce_tr_neo", - "GZNeo": "CZ_tr_neo", - "MtNeo": "Mt_tr_neo", - "QeTar": "Pe", - "QiTar": "Pi", - "GeTar": "Ce", - "GZTar": "CZ", - "MtTar": "Mt", - "PexchTurb": "PexchTurb" - } - - for ikey in mapper: - var_dict[ikey] = powerstate.plasma[mapper[ikey]][..., 1:] - if mapper[ikey] + "_stds" in powerstate.plasma: - var_dict[ikey + "_stds"] = powerstate.plasma[mapper[ikey] + "_stds"][..., 1:] - else: - var_dict[ikey + "_stds"] = None - - dfT = list(var_dict.values())[0] # as a reference for sizes - - # ------------------------------------------------------------------------- - # Volume integrate energy exchange from MW/m^3 to a flux MW/m^2 to be added - # ------------------------------------------------------------------------- - - if PORTALSparameters["surrogateForTurbExch"]: - PexchTurb_integrated = PORTALStools.computeTurbExchangeIndividual( - var_dict["PexchTurb"], powerstate - ) - else: - PexchTurb_integrated = torch.zeros(dfT.shape).to(dfT) - - # ------------------------------------------------------------------------ - # Go through each profile that needs to be predicted, calculate components - # ------------------------------------------------------------------------ - - of, cal, res = ( - torch.Tensor().to(dfT), - torch.Tensor().to(dfT), - torch.Tensor().to(dfT), - ) - for prof in powerstate.ProfilesPredicted: - if prof == "te": - var = "Qe" - elif prof == "ti": - var = "Qi" - elif prof == "ne": - var = "Ge" - elif prof == "nZ": - var = "GZ" - elif prof == "w0": - var = "Mt" - - """ - ----------------------------------------------------------------------------------- - Transport (Turb+Neo) - ----------------------------------------------------------------------------------- - """ - of0 = var_dict[f"{var}Turb"] + var_dict[f"{var}Neo"] - - """ - ----------------------------------------------------------------------------------- - Target (Sum here the turbulent exchange power) - ----------------------------------------------------------------------------------- - """ - if var == "Qe": - cal0 = var_dict[f"{var}Tar"] + PexchTurb_integrated - elif var == "Qi": - cal0 = var_dict[f"{var}Tar"] - PexchTurb_integrated - else: - cal0 = var_dict[f"{var}Tar"] - - """ - ----------------------------------------------------------------------------------- - Ad-hoc modifications for different weighting - ----------------------------------------------------------------------------------- - """ - - if var == "Qe": - of0, cal0 = ( - of0 * PORTALSparameters["Pseudo_multipliers"][0], - cal0 * PORTALSparameters["Pseudo_multipliers"][0], - ) - elif var == "Qi": - of0, cal0 = ( - of0 * PORTALSparameters["Pseudo_multipliers"][1], - cal0 * PORTALSparameters["Pseudo_multipliers"][1], - ) - elif var == "Ge": - of0, cal0 = ( - of0 * PORTALSparameters["Pseudo_multipliers"][2], - cal0 * PORTALSparameters["Pseudo_multipliers"][2], - ) - elif var == "GZ": - of0, cal0 = ( - of0 * PORTALSparameters["Pseudo_multipliers"][3], - cal0 * PORTALSparameters["Pseudo_multipliers"][3], - ) - elif var == "Mt": - of0, cal0 = ( - of0 * PORTALSparameters["Pseudo_multipliers"][4], - cal0 * PORTALSparameters["Pseudo_multipliers"][4], - ) - - of, cal = torch.cat((of, of0), 
dim=-1), torch.cat((cal, cal0), dim=-1) - - # ----------- - # Composition - # ----------- - - # Source term is (TARGET - TRANSPORT) - source = cal - of - - # Residual is defined as the negative (bc it's maximization) normalized (1/N) norm of radial & channel residuals -> L2 - res = -1 / source.shape[-1] * torch.norm(source, p=2, dim=-1) - - return of, cal, source, res - - -def calculate_residuals_distributions(powerstate, PORTALSparameters): - """ - - Works with tensors - - It should be independent on how many dimensions it has, except that the last dimension is the multi-ofs - """ - - # Prepare dictionary from powerstate (for use in Analysis) - - mapper = { - "QeTurb": "Pe_tr_turb", - "QiTurb": "Pi_tr_turb", - "GeTurb": "Ce_tr_turb", - "GZTurb": "CZ_tr_turb", - "MtTurb": "Mt_tr_turb", - "QeNeo": "Pe_tr_neo", - "QiNeo": "Pi_tr_neo", - "GeNeo": "Ce_tr_neo", - "GZNeo": "CZ_tr_neo", - "MtNeo": "Mt_tr_neo", - "QeTar": "Pe", - "QiTar": "Pi", - "GeTar": "Ce", - "GZTar": "CZ", - "MtTar": "Mt", - "PexchTurb": "PexchTurb" - } - - var_dict = {} - for ikey in mapper: - var_dict[ikey] = powerstate.plasma[mapper[ikey]][:, 1:] - if mapper[ikey] + "_stds" in powerstate.plasma: - var_dict[ikey + "_stds"] = powerstate.plasma[mapper[ikey] + "_stds"][:, 1:] - else: - var_dict[ikey + "_stds"] = None - - dfT = var_dict["QeTurb"] # as a reference for sizes - - # ------------------------------------------------------------------------- - # Volume integrate energy exchange from MW/m^3 to a flux MW/m^2 to be added - # ------------------------------------------------------------------------- - - if PORTALSparameters["surrogateForTurbExch"]: - PexchTurb_integrated = PORTALStools.computeTurbExchangeIndividual( - var_dict["PexchTurb"], powerstate - ) - PexchTurb_integrated_stds = PORTALStools.computeTurbExchangeIndividual( - var_dict["PexchTurb_stds"], powerstate - ) - else: - PexchTurb_integrated = torch.zeros(dfT.shape).to(dfT) - PexchTurb_integrated_stds = torch.zeros(dfT.shape).to(dfT) - - # ------------------------------------------------------------------------ - # Go through each profile that needs to be predicted, calculate components - # ------------------------------------------------------------------------ - - of, cal = torch.Tensor().to(dfT), torch.Tensor().to(dfT) - ofE, calE = torch.Tensor().to(dfT), torch.Tensor().to(dfT) - for prof in powerstate.ProfilesPredicted: - if prof == "te": - var = "Qe" - elif prof == "ti": - var = "Qi" - elif prof == "ne": - var = "Ge" - elif prof == "nZ": - var = "GZ" - elif prof == "w0": - var = "Mt" - - """ - ----------------------------------------------------------------------------------- - Transport (Turb+Neo) - ----------------------------------------------------------------------------------- - """ - of0 = var_dict[f"{var}Turb"] + var_dict[f"{var}Neo"] - of0E = ( - var_dict[f"{var}Turb_stds"] ** 2 + var_dict[f"{var}Neo_stds"] ** 2 - ) ** 0.5 - - """ - ----------------------------------------------------------------------------------- - Target (Sum here the turbulent exchange power) - ----------------------------------------------------------------------------------- - """ - if var == "Qe": - cal0 = var_dict[f"{var}Tar"] + PexchTurb_integrated - cal0E = ( - var_dict[f"{var}Tar_stds"] ** 2 + PexchTurb_integrated_stds**2 - ) ** 0.5 - elif var == "Qi": - cal0 = var_dict[f"{var}Tar"] - PexchTurb_integrated - cal0E = ( - var_dict[f"{var}Tar_stds"] ** 2 + PexchTurb_integrated_stds**2 - ) ** 0.5 - else: - cal0 = var_dict[f"{var}Tar"] - cal0E = var_dict[f"{var}Tar_stds"] - - 
of, cal = torch.cat((of, of0), dim=-1), torch.cat((cal, cal0), dim=-1) - ofE, calE = torch.cat((ofE, of0E), dim=-1), torch.cat((calE, cal0E), dim=-1) - - return of, cal, ofE, calE diff --git a/src/mitim_tools/misc_tools/GUItools.py b/src/mitim_tools/misc_tools/GUItools.py index 152e6cba..bc43a177 100644 --- a/src/mitim_tools/misc_tools/GUItools.py +++ b/src/mitim_tools/misc_tools/GUItools.py @@ -151,7 +151,7 @@ def __init__(self, parent=None, vertical=False, xextend=1600): self.setStyleSheet( """ QTabBar::tab { - font-size: 9pt; + font-size: 16pt; } QTabBar::tab:selected { background: #00FF00; diff --git a/src/mitim_tools/misc_tools/IOtools.py b/src/mitim_tools/misc_tools/IOtools.py index ae1b5a6c..ac74c0ac 100644 --- a/src/mitim_tools/misc_tools/IOtools.py +++ b/src/mitim_tools/misc_tools/IOtools.py @@ -750,7 +750,7 @@ def findFileByExtension( f"\t\t\t~ Folder ...{fstr} does not exist, returning None", ) - # TODO: We really should not change return type + #TODO: We really should not change return type #retval = None #if retpath is not None: # if not provide_full_path: diff --git a/src/mitim_tools/misc_tools/PLASMAtools.py b/src/mitim_tools/misc_tools/PLASMAtools.py index 55c64ed9..e044737a 100644 --- a/src/mitim_tools/misc_tools/PLASMAtools.py +++ b/src/mitim_tools/misc_tools/PLASMAtools.py @@ -8,7 +8,7 @@ import matplotlib.pyplot as plt from IPython import embed from mitim_tools.misc_tools import MATHtools -from mitim_modules.powertorch.physics import CALCtools +from mitim_modules.powertorch.utils import CALCtools from mitim_tools.popcon_tools import FunctionalForms from mitim_tools.misc_tools.LOGtools import printMsg as print diff --git a/src/mitim_tools/opt_tools/BOTORCHtools.py b/src/mitim_tools/opt_tools/BOTORCHtools.py index d832cb95..f6b07cb6 100644 --- a/src/mitim_tools/opt_tools/BOTORCHtools.py +++ b/src/mitim_tools/opt_tools/BOTORCHtools.py @@ -704,15 +704,8 @@ def __init__( self.indeces_grad = tuple(grad_vector) # ---------------------------------------------------------------- - self.register_parameter( - name="weights_lin", - parameter=torch.nn.Parameter( - torch.randn(*batch_shape, len(self.indeces_grad), 1) - ), - ) - self.register_parameter( - name="bias", parameter=torch.nn.Parameter(torch.randn(*batch_shape, 1)) - ) + self.register_parameter(name="weights_lin",parameter=torch.nn.Parameter(torch.randn(*batch_shape, len(self.indeces_grad), 1)),) + self.register_parameter(name="bias", parameter=torch.nn.Parameter(torch.randn(*batch_shape, 1))) # set the parameter constraint to be [0,1], when nothing is specified diffusion_constraint = gpytorch.constraints.constraints.Positive() diff --git a/src/mitim_tools/opt_tools/STEPtools.py b/src/mitim_tools/opt_tools/STEPtools.py index e20f4e24..c97ec116 100644 --- a/src/mitim_tools/opt_tools/STEPtools.py +++ b/src/mitim_tools/opt_tools/STEPtools.py @@ -184,13 +184,8 @@ def fit_step(self, avoidPoints=None, fitWithTrainingDataIfContains=None): surrogate_options = copy.deepcopy(self.surrogate_options) # Then, depending on application (e.g. 
targets in mitim are fitted differently) - if ( - "selectSurrogate" in surrogate_options - and surrogate_options["selectSurrogate"] is not None - ): - surrogate_options = surrogate_options["selectSurrogate"]( - outi, surrogate_options - ) + if "surrogate_selection" in surrogate_options and surrogate_options["surrogate_selection"] is not None: + surrogate_options = surrogate_options["surrogate_selection"](outi, surrogate_options) # --------------------------------------------------------------------------------------------------- # To avoid problems with fixed values (e.g. calibration terms that are fixed) diff --git a/src/mitim_tools/opt_tools/STRATEGYtools.py b/src/mitim_tools/opt_tools/STRATEGYtools.py index dc7e1fda..49c0d40b 100644 --- a/src/mitim_tools/opt_tools/STRATEGYtools.py +++ b/src/mitim_tools/opt_tools/STRATEGYtools.py @@ -362,13 +362,6 @@ def __init__( self.seed = seed self.avoidPoints = [] - if (not self.cold_start) and askQuestions: - if not print( - f"\t* Because {cold_start = }, MITIM will try to read existing results from folder", - typeMsg="q", - ): - raise Exception("[MITIM] - User requested to stop") - if self.optimization_object.name_objectives is None: self.optimization_object.name_objectives = "y" @@ -381,6 +374,13 @@ def __init__( self.folderOutputs = self.folderExecution / "Outputs" + if (not self.cold_start) and askQuestions: + + # Check if Outputs folder is empty (if it's empty, do not ask the user, just continue) + if self.folderOutputs.exists() and (len(list(self.folderOutputs.iterdir())) > 0): + if not print(f"\t* Because {cold_start = }, MITIM will try to read existing results from folder",typeMsg="q"): + raise Exception("[MITIM] - User requested to stop") + if optimization_object.optimization_options is not None: if not self.folderOutputs.exists(): IOtools.askNewFolder(self.folderOutputs, force=True) @@ -690,22 +690,14 @@ def run(self): current_step = self.read() if current_step is None: - print( - "\t* Because reading pkl step had problems, disabling cold_starting-from-previous from this point on", - typeMsg="w", - ) - print( - "\t* Are you aware of the consequences of continuing?", - typeMsg="q", - ) + print("\t* Because reading pkl step had problems, disabling cold_starting-from-previous from this point on",typeMsg="w") + print("\t* Are you aware of the consequences of continuing?",typeMsg="q") self.cold_start = True if not self.cold_start: # Read next from Tabular - self.x_next, _, _ = self.optimization_data.extract_points( - points=np.arange(len(self.train_X), len(self.train_X) + self.best_points) - ) + self.x_next, _, _ = self.optimization_data.extract_points(points=np.arange(len(self.train_X), len(self.train_X) + self.best_points)) self.x_next = torch.from_numpy(self.x_next).to(self.dfT) # Re-write x_next from the pkl... 
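Context for the STEPtools.py hunk above: the fitting step now looks up surrogate_options["surrogate_selection"] and, if it is set, calls it with the output being fitted and the current options, using whatever it returns. Below is a minimal sketch of what such a callable could look like; the function name, the copy strategy, and the "example_flag" key are illustrative assumptions, not the repository's actual implementation.

```python
import copy

def surrogate_selection_example(output_name, surrogate_options):
    # Hypothetical selector: receives the name of one fitted output and the
    # current surrogate options, returns a (possibly modified) copy of them.
    options = copy.deepcopy(surrogate_options)
    if output_name.startswith("Qi"):
        # Illustrative only: mark ion-heat-flux channels for special treatment
        options["example_flag"] = True
    return options

# It would then be registered (hypothetically) as:
# optimization_options["surrogate_options"]["surrogate_selection"] = surrogate_selection_example
```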
reason for this is that if optimization is heuristic, I may prefer what was in Tabular @@ -1010,12 +1002,8 @@ def updateSet( """ print("\n~~~~~~~~~~~~~~~ Entering bounds upgrade module ~~~~~~~~~~~~~~~~~~~") print("(if extrapolations were allowed during optimization)") - self.bounds = SBOcorrections.upgradeBounds( - self.bounds, self.train_X, self.avoidPoints_outside - ) - print( - "~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~\n" - ) + self.bounds = SBOcorrections.upgradeBounds(self.bounds, self.train_X, self.avoidPoints_outside) + print("~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~\n") # ~~~~~~~~~~~~~~~~~~ # Possible corrections to modeled & optimization region @@ -1074,13 +1062,9 @@ def initializeOptimization(self): # Print Optimization Settings - print( - "\n==============================================================================" - ) + print("\n==============================================================================") print(f" {IOtools.getStringFromTime()}, Starting MITIM Optimization") - print( - "==============================================================================" - ) + print("==============================================================================") print(f"* Folder: {self.folderExecution}") print("* Optimization Settings:") @@ -1114,7 +1098,7 @@ def initializeOptimization(self): if (not self.cold_start) and (self.optimization_data is not None): self.type_initialization = 3 print( - "--> Since cold_start from a previous MITIM has been requested, forcing initialization type to 3 (read from optimization_data)", + "--> Since restart from a previous MITIM has been requested, forcing initialization type to 3 (read from optimization_data)", typeMsg="i", ) @@ -1123,19 +1107,14 @@ def initializeOptimization(self): try: tabExists = len(self.optimization_data.data) >= self.initial_training - print( - f"\t- optimization_data file has {len(self.optimization_data.data)} elements, and initial_training were {self.initial_training}" - ) + print(f"\t- optimization_data file has {len(self.optimization_data.data)} elements, and initial_training were {self.initial_training}") except: tabExists = False print("\n\nCould not read Tabular, because:", typeMsg="w") print(traceback.format_exc()) if not tabExists: - print( - "--> type_initialization 3 requires optimization_data but something failed. Assigning type_initialization=1 and cold_starting from scratch", - typeMsg="i", - ) + print("--> type_initialization 3 requires optimization_data but something failed. Assigning type_initialization=1 and cold_starting from scratch",typeMsg="i",) if self.askQuestions: flagger = print("Are you sure?", typeMsg="q") if not flagger: diff --git a/src/mitim_tools/opt_tools/scripts/evaluate_model.py b/src/mitim_tools/opt_tools/scripts/evaluate_model.py index 4b5ac2fa..da2b417d 100644 --- a/src/mitim_tools/opt_tools/scripts/evaluate_model.py +++ b/src/mitim_tools/opt_tools/scripts/evaluate_model.py @@ -10,8 +10,8 @@ This way, you can try plot, re-ft, find best parameters, etc. It calculates speed, and generates profile file to look at bottlenecks e.g. 
- evaluate_model.py --folder run1/ --output QiTurb_5 --input aLti_5 --around -3 - evaluate_model.py --folder run1/ --step -1 --output QiTurb_5 --file figure.eps + evaluate_model.py --folder run1/ --output Qi_tr_turb_5 --input aLti_5 --around -3 + evaluate_model.py --folder run1/ --step -1 --output Qi_tr_turb_5 --file figure.eps """ # ***************** Inputs @@ -19,7 +19,7 @@ parser = argparse.ArgumentParser() parser.add_argument("--folder", required=True, type=str) parser.add_argument("--step", type=int, required=False, default=-1) -parser.add_argument("--output", required=False, type=str, default="QiTurb_1") +parser.add_argument("--output", required=False, type=str, default="Qi_tr_turb_1") parser.add_argument("--input", required=False, type=str, default="aLti_1") parser.add_argument("--around", type=int, required=False, default=-1) parser.add_argument("--xrange", type=float, required=False, default=0.5) diff --git a/src/mitim_tools/opt_tools/scripts/slurm.py b/src/mitim_tools/opt_tools/scripts/slurm.py index 4eacb7a7..392ff0bd 100644 --- a/src/mitim_tools/opt_tools/scripts/slurm.py +++ b/src/mitim_tools/opt_tools/scripts/slurm.py @@ -39,7 +39,7 @@ def run_slurm( command = [venv,script + (f" --seed {seed}" if seed is not None else "")] - nameJob = f"mitim_opt_{folder.name}{extra_name}" + nameJob = f"mitim_{folder.name}{extra_name}" _, fileSBATCH, _ = FARMINGtools.create_slurm_execution_files( command, diff --git a/src/mitim_tools/popcon_tools/FunctionalForms.py b/src/mitim_tools/popcon_tools/FunctionalForms.py index 45b60748..f2d9cf7f 100644 --- a/src/mitim_tools/popcon_tools/FunctionalForms.py +++ b/src/mitim_tools/popcon_tools/FunctionalForms.py @@ -4,7 +4,7 @@ from mitim_tools.popcon_tools.utils import PRFfunctionals, FUNCTIONALScalc from mitim_tools.misc_tools import MATHtools, GRAPHICStools from mitim_tools.misc_tools.LOGtools import printMsg as print -from mitim_modules.powertorch.physics import CALCtools +from mitim_modules.powertorch.utils import CALCtools from IPython import embed diff --git a/src/mitim_tools/popcon_tools/RAPIDStools.py b/src/mitim_tools/popcon_tools/RAPIDStools.py index 3d6ad27e..77f0a6e1 100644 --- a/src/mitim_tools/popcon_tools/RAPIDStools.py +++ b/src/mitim_tools/popcon_tools/RAPIDStools.py @@ -138,7 +138,7 @@ def pedestal(p): power = STATEtools.powerstate(p,EvolutionOptions={"rhoPredicted": np.linspace(0.0, 0.9, 50)[1:]}) power.calculate(None, folder='~/scratch/power/') - profiles_new = power.to_gacode(insert_highres_powers=True) + profiles_new = power.from_powerstate(insert_highres_powers=True) return ptop_kPa,profiles_new, eped_evaluation diff --git a/src/mitim_tools/popcon_tools/scripts/test_functionals.py b/src/mitim_tools/popcon_tools/scripts/test_functionals.py index 949af0b4..7e8ceb21 100644 --- a/src/mitim_tools/popcon_tools/scripts/test_functionals.py +++ b/src/mitim_tools/popcon_tools/scripts/test_functionals.py @@ -1,6 +1,6 @@ import torch, datetime import matplotlib.pyplot as plt -from mitim_modules.powertorch.physics import CALCtools +from mitim_modules.powertorch.utils import CALCtools from mitim_tools.misc_tools import GRAPHICStools, IOtools, PLASMAtools from mitim_tools.popcon_tools.FunctionalForms import ( diff --git a/src/mitim_tools/popcon_tools/utils/FUNCTIONALScalc.py b/src/mitim_tools/popcon_tools/utils/FUNCTIONALScalc.py index 4758f47a..db8ec4f8 100644 --- a/src/mitim_tools/popcon_tools/utils/FUNCTIONALScalc.py +++ b/src/mitim_tools/popcon_tools/utils/FUNCTIONALScalc.py @@ -1,7 +1,7 @@ import torch import numpy as np from IPython 
import embed -from mitim_modules.powertorch.physics import CALCtools +from mitim_modules.powertorch.utils import CALCtools from mitim_tools.misc_tools.LOGtools import printMsg as print diff --git a/src/mitim_tools/transp_tools/UFILEStools.py b/src/mitim_tools/transp_tools/UFILEStools.py index c1527b5a..a5e466b9 100644 --- a/src/mitim_tools/transp_tools/UFILEStools.py +++ b/src/mitim_tools/transp_tools/UFILEStools.py @@ -209,9 +209,14 @@ def writeUFILE(self, filename, orderZvariable="C"): f.write("".join(self.STR_header)) # Variable labels - f.write(self.STR_labelX) + if self.dim==1: + self.Variables['Y']=self.Variables['X'] + self.STR_labelY=self.STR_labelX + self.numY=self.numX + self.labelY=self.labelX + f.write(self.STR_labelY) if self.dim > 1: - f.write(self.STR_labelY) + f.write(self.STR_labelX) if self.dim == 3: f.write(self.STR_labelQ) f.write(self.STR_labelZ) @@ -220,13 +225,13 @@ def writeUFILE(self, filename, orderZvariable="C"): # Labels with number of points f.write( "{0} ;-# OF {1} PTS-\n".format( - str(self.numX).rjust(11), self.labelX + str(self.numY).rjust(11), self.labelY ) ) if self.dim > 1: f.write( "{0} ;-# OF {1} PTS-\n".format( - str(self.numY).rjust(11), self.labelY + str(self.numX).rjust(11), self.labelX ) ) if self.dim == 3: @@ -240,11 +245,11 @@ def writeUFILE(self, filename, orderZvariable="C"): # Variables # ------------------ - # ~~~~~~ Write X variable - self.writeVar(f, self.Variables["X"]) + # ~~~~~~ Write Y variable + self.writeVar(f, self.Variables["Y"]) if self.dim > 1: - # ~~~~~~ Write Y variable - self.writeVar(f, self.Variables["Y"]) + # ~~~~~~ Write X variable + self.writeVar(f, self.Variables["X"]) # ~~~~~~ Prepare Z variable if self.dim == 3: self.writeVar(f, self.Variables["Q"]) @@ -254,7 +259,6 @@ def writeUFILE(self, filename, orderZvariable="C"): else: Zall = ( self.Variables["Z"] - .transpose() .reshape(self.numX * self.numY, order=orderZvariable) ) timepoints = len(self.Variables["Y"]) @@ -283,7 +287,7 @@ def writeVar(self, f, var, ncols=6): j = 0 vecS.append(f"{j:.6e}".rjust(13)) lineToWrite = "".join(vecS) + "\n" - + lineToWrite = " "+lineToWrite cont += ncols f.write(lineToWrite) diff --git a/templates/ASTRA8_REPO_MIT.tar b/templates/ASTRA8_REPO_MIT.tar new file mode 100644 index 00000000..9c61ba1b Binary files /dev/null and b/templates/ASTRA8_REPO_MIT.tar differ diff --git a/templates/maestro_namelist.json b/templates/maestro_namelist.json index cd358fbc..27ff90b2 100644 --- a/templates/maestro_namelist.json +++ b/templates/maestro_namelist.json @@ -69,7 +69,6 @@ "RoaLocations": [0.35,0.45,0.55,0.65,0.75,0.875,0.9], "Physics_options": {"TypeTarget": 3}, "transport_model": { - "turbulence": "TGLF", "TGLFsettings": 100, "extraOptionsTGLF": {"USE_BPER": true} } diff --git a/tests/PORTALS_workflow.py b/tests/PORTALS_workflow.py index 427a935d..421d3251 100644 --- a/tests/PORTALS_workflow.py +++ b/tests/PORTALS_workflow.py @@ -26,8 +26,8 @@ # Initialize class portals_fun = PORTALSmain.portals(folderWork) -portals_fun.optimization_options["convergence_options"]["maximum_iterations"] = 2 -portals_fun.optimization_options["initialization_options"]["initial_training"] = 3 +portals_fun.optimization_options["convergence_options"]["maximum_iterations"] = 1 +portals_fun.optimization_options["initialization_options"]["initial_training"] = 2 portals_fun.MODELparameters["RhoLocations"] = [0.25, 0.45, 0.65, 0.85] portals_fun.MODELparameters['ProfilesPredicted'] = ["te", "ti", "ne", "nZ", 'w0'] portals_fun.PORTALSparameters['ImpurityOfInterest'] = 'N' @@ 
-52,12 +52,12 @@ portals_fun.plot_optimization_results(analysis_level=4)
 # For fun and to show capabilities, let's do a flux match of the current surrogates and plot in the same notebook
-# PORTALSoptimization.flux_match_surrogate(
-#     mitim_bo.steps[-1],PROFILEStools.PROFILES_GACODE(inputgacode),
-#     fn = portals_fun.fn,
-#     plot_results = True,
-#     keep_within_bounds = False
-#     )
+PORTALSoptimization.flux_match_surrogate(
+    mitim_bo.steps[-1],PROFILEStools.PROFILES_GACODE(inputgacode),
+    fn = portals_fun.fn,
+    plot_results = True,
+    keep_within_bounds = False
+    )
 # Required if running in non-interactive mode
 portals_fun.fn.show()
diff --git a/tutorials/PORTALS_tutorial.py b/tutorials/PORTALS_tutorial.py
index ea36678c..9d4e0f82 100644
--- a/tutorials/PORTALS_tutorial.py
+++ b/tutorials/PORTALS_tutorial.py
@@ -8,7 +8,7 @@
 # Starting input.gacode file
 inputgacode = __mitimroot__ / "tests" / "data" / "input.gacode"
-folder = __mitimroot__ / "tests" / "scratch" / "portals_tut"
+folder = __mitimroot__ / "tests" / "scratch" / "portals_tutorial"
 # Initialize PORTALS class
 portals_fun = PORTALSmain.portals(folder)
@@ -20,12 +20,11 @@ portals_fun.MODELparameters["ProfilesPredicted"] = ["te", "ti", "ne"]
 # Codes to use
-from mitim_modules.powertorch.physics import TRANSPORTtools
-portals_fun.PORTALSparameters["transport_evaluator"] = TRANSPORTtools.tgyro_model
+from mitim_modules.powertorch.physics_models.transport_tgyro import tgyro_model
+portals_fun.PORTALSparameters["transport_evaluator"] = tgyro_model
 # TGLF specifications
 portals_fun.MODELparameters["transport_model"] = {
-    "turbulence":'TGLF',
     "TGLFsettings": 6,    # Check out templates/input.tglf.models.json for more options
     "extraOptionsTGLF": {"USE_BPER": False}  # Turn off BPER
 }
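Taken together, the tutorial hunk directly above implies the following user-side setup after this change. This is an illustrative sketch assembled only from lines appearing in this diff; the import of PORTALSmain, the folder string, and the option values are placeholders rather than repository defaults.

```python
from mitim_modules.portals import PORTALSmain  # import path assumed from the module layout in this diff
from mitim_modules.powertorch.physics_models.transport_tgyro import tgyro_model

# Placeholder working folder for this sketch
portals_fun = PORTALSmain.portals("scratch/portals_example")

# Transport evaluator is now imported from powertorch.physics_models
portals_fun.PORTALSparameters["transport_evaluator"] = tgyro_model

# The "turbulence" key is no longer part of transport_model; TGLF settings remain
portals_fun.MODELparameters["transport_model"] = {
    "TGLFsettings": 6,
    "extraOptionsTGLF": {"USE_BPER": False},
}
```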